title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
ENH/BUG: add count to grouper / ensure that grouper keys are not included in the returned
diff --git a/doc/source/release.rst b/doc/source/release.rst index 40e99b879dc29..0e96491fb3aa1 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -179,6 +179,8 @@ API Changes validation warnings in :func:`read_csv`/:func:`read_table` (:issue:`6607`) - Raise a ``TypeError`` when ``DataFrame`` is passed an iterator as the ``data`` argument (:issue:`5357`) +- groupby will now not return the grouped column for non-cython functions (:issue:`5610`), + as its already the index Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ccbde36b9a09f..f89f56e7a1aa2 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -110,12 +110,29 @@ API changes .. ipython:: python - DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) g = df.groupby('A') g.nth(0) # can also use negative ints g.nth(0, dropna='any') # similar to old behaviour + groupby will now not return the grouped column for non-cython functions (:issue:`5610`), + as its already the index + + .. ipython:: python + + df = DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) + g = df.groupby('A') + g.count() + g.describe() + + passing ``as_index`` will leave the grouped column in-place (this is not change in 0.14.0) + + df = DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) + g = df.groupby('A',as_index=False) + g.count() + g.describe() + - Allow specification of a more complex groupby via ``pd.Grouper``, such as grouping by a Time and a string field simultaneously. See :ref:`the docs <groupby.specify>`. (:issue:`3794`) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 23fccc3719278..b255831e51ae0 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -226,11 +226,13 @@ def describe(self): """ # Hack? 
from pandas.core.frame import DataFrame - grouped = DataFrame(self.labels).groupby(0) - counts = grouped.count().values.squeeze() + counts = DataFrame({ + 'labels' : self.labels, + 'values' : self.labels } + ).groupby('labels').count().squeeze().values freqs = counts / float(counts.sum()) - return DataFrame.from_dict({ + return DataFrame({ 'counts': counts, 'freqs': freqs, 'levels': self.levels - }).set_index('levels') + }).set_index('levels') diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2c2f133dd52c1..01af7534d458d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -611,11 +611,19 @@ def __neg__(self): arr = operator.inv(values) else: arr = operator.neg(values) - return self._wrap_array(arr, self.axes, copy=False) + return self.__array_wrap__(arr) def __invert__(self): - arr = operator.inv(_values_from_object(self)) - return self._wrap_array(arr, self.axes, copy=False) + try: + arr = operator.inv(_values_from_object(self)) + return self.__array_wrap__(arr) + except: + + # inv fails with 0 len + if not np.prod(self.shape): + return self + + raise def equals(self, other): """ @@ -707,15 +715,11 @@ def __abs__(self): #---------------------------------------------------------------------- # Array Interface - def _wrap_array(self, arr, axes, copy=False): - d = self._construct_axes_dict_from(self, axes, copy=copy) - return self._constructor(arr, **d).__finalize__(self) - def __array__(self, dtype=None): return _values_from_object(self) - def __array_wrap__(self, result): - d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) + def __array_wrap__(self, result, copy=False): + d = self._construct_axes_dict(self._AXIS_ORDERS, copy=copy) return self._constructor(result, **d).__finalize__(self) # ideally we would define this to avoid the getattr checks, but diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 27001bb69cd05..2a36ea65667d6 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -445,6 
+445,23 @@ def _selection_list(self): return [self._selection] return self._selection + @cache_readonly + def _selected_obj(self): + + if self._selection is None or isinstance(self.obj, Series): + return self.obj + else: + return self.obj[self._selection] + + def _set_selection_from_grouper(self): + """ we may need create a selection if we have non-level groupers """ + grp = self.grouper + if self._selection is None and self.as_index and getattr(grp,'groupings',None) is not None: + ax = self.obj._info_axis + groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ] + if len(groupers): + self._selection = (ax-Index(groupers)).tolist() + def _local_dir(self): return sorted(set(self.obj._local_dir() + list(self._apply_whitelist))) @@ -453,7 +470,6 @@ def __getattr__(self, attr): return object.__getattribute__(self, attr) if attr in self.obj: return self[attr] - if hasattr(self.obj, attr): return self._make_wrapper(attr) @@ -472,6 +488,10 @@ def _make_wrapper(self, name): type(self).__name__)) raise AttributeError(msg) + # need to setup the selection + # as are not passed directly but in the grouper + self._set_selection_from_grouper() + f = getattr(self._selected_obj, name) if not isinstance(f, types.MethodType): return self.apply(lambda self: getattr(self, name)) @@ -503,7 +523,19 @@ def curried(x): try: return self.apply(curried_with_axis) except Exception: - return self.apply(curried) + try: + return self.apply(curried) + except Exception: + + # related to : GH3688 + # try item-by-item + # this can be called recursively, so need to raise ValueError if + # we don't have this method to indicated to aggregate to + # mark this column as an error + try: + return self._aggregate_item_by_item(name, *args, **kwargs) + except (AttributeError): + raise ValueError return wrapper @@ -624,6 +656,7 @@ def mean(self): except GroupByError: raise except Exception: # pragma: no cover + self._set_selection_from_grouper() f = lambda x: 
x.mean(axis=self.axis) return self._python_agg_general(f) @@ -639,6 +672,7 @@ def median(self): raise except Exception: # pragma: no cover + self._set_selection_from_grouper() def f(x): if isinstance(x, np.ndarray): x = Series(x) @@ -655,6 +689,7 @@ def std(self, ddof=1): if ddof == 1: return self._cython_agg_general('std') else: + self._set_selection_from_grouper() f = lambda x: x.std(ddof=ddof) return self._python_agg_general(f) @@ -667,15 +702,26 @@ def var(self, ddof=1): if ddof == 1: return self._cython_agg_general('var') else: + self._set_selection_from_grouper() f = lambda x: x.var(ddof=ddof) return self._python_agg_general(f) def size(self): """ Compute group sizes + """ return self.grouper.size() + def count(self, axis=0): + """ + Number of non-null items in each group. + axis : axis number, default 0 + the grouping axis + """ + self._set_selection_from_grouper() + return self._python_agg_general(lambda x: notnull(x).sum(axis=axis)).astype('int64') + sum = _groupby_function('sum', 'add', np.sum) prod = _groupby_function('prod', 'prod', np.prod) min = _groupby_function('min', 'min', np.min, numeric_only=False) @@ -685,14 +731,14 @@ def size(self): last = _groupby_function('last', 'last', _last_compat, numeric_only=False, _convert=True) + def ohlc(self): """ Compute sum of values, excluding missing values - For multiple groupings, the result index will be a MultiIndex - """ - return self._cython_agg_general('ohlc') + return self._apply_to_column_groupbys( + lambda x: x._cython_agg_general('ohlc')) def nth(self, n, dropna=None): """ @@ -888,13 +934,6 @@ def _cumcount_array(self, arr=None, **kwargs): cumcounts[v] = arr[len(v)-1::-1] return cumcounts - @cache_readonly - def _selected_obj(self): - if self._selection is None or isinstance(self.obj, Series): - return self.obj - else: - return self.obj[self._selection] - def _index_with_as_index(self, b): """ Take boolean mask of index to be returned from apply, if as_index=True @@ -990,12 +1029,23 @@ def 
_concat_objects(self, keys, values, not_indexed_same=False): result = result.reindex(ax) else: result = result.reindex_axis(ax, axis=self.axis) - elif self.group_keys and self.as_index: - group_keys = keys - group_levels = self.grouper.levels - group_names = self.grouper.names - result = concat(values, axis=self.axis, keys=group_keys, - levels=group_levels, names=group_names) + + elif self.group_keys: + + if self.as_index: + + # possible MI return case + group_keys = keys + group_levels = self.grouper.levels + group_names = self.grouper.names + result = concat(values, axis=self.axis, keys=group_keys, + levels=group_levels, names=group_names) + else: + + # GH5610, returns a MI, with the first level being a + # range index + keys = list(range(len(values))) + result = concat(values, axis=self.axis, keys=keys) else: result = concat(values, axis=self.axis) @@ -2187,6 +2237,9 @@ def true_and_notnull(x, *args, **kwargs): filtered = self._apply_filter(indices, dropna) return filtered + def _apply_to_column_groupbys(self, func): + """ return a pass thru """ + return func(self) class NDFrameGroupBy(GroupBy): @@ -2486,6 +2539,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): elif hasattr(self.grouper, 'groupings'): if len(self.grouper.groupings) > 1: key_index = MultiIndex.from_tuples(keys, names=key_names) + else: ping = self.grouper.groupings[0] if len(keys) == ping.ngroups: @@ -2498,8 +2552,13 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # reorder the values values = [values[i] for i in indexer] else: + key_index = Index(keys, name=key_names[0]) + # don't use the key indexer + if not self.as_index: + key_index = None + # make Nones an empty object if com._count_not_none(*values) != len(values): v = next(v for v in values if v is not None) @@ -2569,7 +2628,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # normally use vstack as its faster than concat # and if we have mi-columns - if not 
_np_version_under1p7 or isinstance(v.index,MultiIndex): + if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None: stacked_values = np.vstack([np.asarray(x) for x in values]) result = DataFrame(stacked_values,index=key_index,columns=index) else: @@ -2889,16 +2948,6 @@ def _apply_to_column_groupbys(self, func): in self._iterate_column_groupbys()), keys=self._selected_obj.columns, axis=1) - def ohlc(self): - """ - Compute sum of values, excluding missing values - - For multiple groupings, the result index will be a MultiIndex - """ - return self._apply_to_column_groupbys( - lambda x: x._cython_agg_general('ohlc')) - - from pandas.tools.plotting import boxplot_frame_groupby DataFrameGroupBy.boxplot = boxplot_frame_groupby diff --git a/pandas/core/series.py b/pandas/core/series.py index c94d7dc9acefd..9c642280169f0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -370,12 +370,12 @@ def __array__(self, result=None): """ the array interface, return my values """ return self.values - def __array_wrap__(self, result): + def __array_wrap__(self, result, copy=False): """ Gets called prior to a ufunc (and after) """ return self._constructor(result, index=self.index, - copy=False).__finalize__(self) + copy=copy).__finalize__(self) def __contains__(self, key): return key in self.index @@ -959,19 +959,6 @@ def iteritems(self): if compat.PY3: # pragma: no cover items = iteritems - # inversion - def __neg__(self): - values = self.values - if values.dtype == np.bool_: - arr = operator.inv(values) - else: - arr = operator.neg(values) - return self._constructor(arr, self.index).__finalize__(self) - - def __invert__(self): - arr = operator.inv(self.values) - return self._constructor(arr, self.index).__finalize__(self) - #---------------------------------------------------------------------- # unbox reductions diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index fde9156017c4e..fcc4eb83b0af9 100644 --- 
a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1378,7 +1378,8 @@ def test_groupby_as_index_apply(self): res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index # apply doesn't maintain the original ordering - exp_not_as_apply = Index([0, 2, 1, 4]) + # changed in GH5610 as the as_index=False returns a MI here + exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)]) exp_as_apply = MultiIndex.from_tuples([(1, 0), (1, 2), (2, 1), (3, 4)]) assert_index_equal(res_as_apply, exp_as_apply) @@ -1970,6 +1971,64 @@ def test_size(self): for key, group in grouped: self.assertEquals(result[key], len(group)) + def test_count(self): + + # GH5610 + # count counts non-nulls + df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]], columns=['A', 'B', 'C']) + + count_as = df.groupby('A').count() + count_not_as = df.groupby('A', as_index=False).count() + + expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'], index=[1,3]) + expected.index.name='A' + assert_frame_equal(count_not_as, expected.reset_index()) + assert_frame_equal(count_as, expected) + + count_B = df.groupby('A')['B'].count() + assert_series_equal(count_B, expected['B']) + + def test_non_cython_api(self): + + # GH5610 + # non-cython calls should not include the grouper + + df = DataFrame([[1, 2, 'foo'], [1, nan, 'bar',], [3, nan, 'baz']], columns=['A', 'B','C']) + g = df.groupby('A') + gni = df.groupby('A',as_index=False) + + # mad + expected = DataFrame([[0],[nan]],columns=['B'],index=[1,3]) + expected.index.name = 'A' + result = g.mad() + assert_frame_equal(result,expected) + + expected = DataFrame([[0.,0.],[0,nan]],columns=['A','B'],index=[0,1]) + result = gni.mad() + assert_frame_equal(result,expected) + + # describe + expected = DataFrame(dict(B = concat([df.loc[[0,1],'B'].describe(),df.loc[[2],'B'].describe()],keys=[1,3]))) + expected.index.names = ['A',None] + result = g.describe() + assert_frame_equal(result,expected) + + expected = 
concat([df.loc[[0,1],['A','B']].describe(),df.loc[[2],['A','B']].describe()],keys=[0,1]) + result = gni.describe() + assert_frame_equal(result,expected) + + # any + expected = DataFrame([[True, True],[False, True]],columns=['B','C'],index=[1,3]) + expected.index.name = 'A' + result = g.any() + assert_frame_equal(result,expected) + + # idxmax + expected = DataFrame([[0],[nan]],columns=['B'],index=[1,3]) + expected.index.name = 'A' + result = g.idxmax() + assert_frame_equal(result,expected) + def test_grouping_ndarray(self): grouped = self.df.groupby(self.df['A'].values) @@ -2925,7 +2984,7 @@ def test_groupby_with_timegrouper(self): DT.datetime(2013,12,2,12,0), DT.datetime(2013,9,2,14,0), ]}) - + # GH 6908 change target column's order df_reordered = df_original.sort(columns='Quantity') @@ -3937,8 +3996,14 @@ def test_frame_groupby_plot_boxplot(self): self.assertEqual(len(res), 2) tm.close() + # now works with GH 5610 as gender is excluded + res = df.groupby('gender').hist() + tm.close() + + df2 = df.copy() + df2['gender2'] = df['gender'] with tm.assertRaisesRegexp(TypeError, '.*str.+float'): - gb.hist() + df2.groupby('gender').hist() @slow def test_frame_groupby_hist(self): diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 5f975105cd80e..7fe8ab8ca642e 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -1126,9 +1126,9 @@ def test_evenly_divisible_with_no_extra_bins(self): expected = DataFrame( [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14, 'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4, - index=index).unstack().swaplevel(1,0).sortlevel() + index=index) result = df.resample('7D', how='count') - assert_series_equal(result,expected) + assert_frame_equal(result,expected) expected = DataFrame( [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
closes #5610
https://api.github.com/repos/pandas-dev/pandas/pulls/7000
2014-04-29T15:30:07Z
2014-04-29T20:12:46Z
2014-04-29T20:12:46Z
2014-06-15T01:48:09Z
ENH: Implement core/strings/wrap method
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index fe3fc42992468..110dd8f3872e9 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1223,6 +1223,7 @@ Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take ``repeat``,Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) ``pad``,"Add whitespace to left, right, or both sides of strings" ``center``,Equivalent to ``pad(side='both')`` + ``wrap``,Split long strings into lines with length less than a given width ``slice``,Slice each string in the Series ``slice_replace``,Replace slice in each string with passed value ``count``,Count occurrences of pattern diff --git a/doc/source/release.rst b/doc/source/release.rst index df0f472c390c7..f864fd2caf35d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -185,6 +185,7 @@ Improvements to existing features - Performance improvement when converting ``DatetimeIndex`` to floating ordinals using ``DatetimeConverter`` (:issue:`6636`) - Performance improvement for ``DataFrame.shift`` (:issue: `5609`) +- Arrays of strings can be wrapped to a specified width (``str.wrap``) (:issue:`6999`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index a2990644fa124..c87d1e96c75d0 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -362,6 +362,7 @@ Enhancements file. (:issue:`6545`) - ``pandas.io.gbq`` now handles reading unicode strings properly. 
(:issue:`5940`) - Improve performance of ``CustomBusinessDay`` (:issue:`6584`) +- str.wrap implemented (:issue:`6999`) Performance ~~~~~~~~~~~ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 6add1767a05d6..f84d7e4601190 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -8,6 +8,7 @@ import re import pandas.lib as lib import warnings +import textwrap def _get_array_list(arr, others): @@ -710,20 +711,63 @@ def str_rstrip(arr, to_strip=None): return _na_map(lambda x: x.rstrip(to_strip), arr) -def str_wrap(arr, width=80): +def str_wrap(arr, width, **kwargs): """ Wrap long strings to be formatted in paragraphs Parameters ---------- + Same keyword parameters and defaults as :class:`textwrap.TextWrapper` width : int Maximum line-width + expand_tabs : bool, optional + If true, tab characters will be expanded to spaces (default: True) + replace_whitespace : bool, optional + If true, each whitespace character (as defined by string.whitespace) remaining + after tab expansion will be replaced by a single space (default: True) + drop_whitespace : bool, optional + If true, whitespace that, after wrapping, happens to end up at the beginning + or end of a line is dropped (default: True) + break_long_words : bool, optional + If true, then words longer than width will be broken in order to ensure that + no lines are longer than width. If it is false, long words will not be broken, + and some lines may be longer than width. (default: True) + break_on_hyphens : bool, optional + If true, wrapping will occur preferably on whitespace and right after hyphens + in compound words, as it is customary in English. If false, only whitespaces + will be considered as potentially good places for line breaks, but you need + to set break_long_words to false if you want truly insecable words. + (default: True) Returns ------- wrapped : array + + Notes + ----- + Internally, this method uses a :class:`textwrap.TextWrapper` instance with default + settings. 
To achieve behavior matching R's stringr library str_wrap function, use + the arguments: + + expand_tabs = False + replace_whitespace = True + drop_whitespace = True + break_long_words = False + break_on_hyphens = False + + Examples + -------- + + >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped']) + >>> s.str.wrap(12) + 0 line to be\nwrapped + 1 another line\nto be\nwrapped """ - raise NotImplementedError + kwargs['width'] = width + + tw = textwrap.TextWrapper(**kwargs) + + return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr) def str_get(arr, i): @@ -948,6 +992,11 @@ def rstrip(self, to_strip=None): result = str_rstrip(self.series, to_strip) return self._wrap_result(result) + @copy(str_wrap) + def wrap(self, width, **kwargs): + result = str_wrap(self.series, width, **kwargs) + return self._wrap_result(result) + @copy(str_get_dummies) def get_dummies(self, sep='|'): result = str_get_dummies(self.series, sep) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 2721edcc89e59..4c0d78a575f92 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -942,7 +942,31 @@ def test_strip_lstrip_rstrip_args_unicode(self): assert_series_equal(rs, xp) def test_wrap(self): - pass + # test values are: two words less than width, two words equal to width, + # two words greater than width, one word less than width, one word + # equal to width, one word greater than width, multiple tokens with trailing + # whitespace equal to width + values = Series([u('hello world'), u('hello world!'), + u('hello world!!'), u('abcdefabcde'), + u('abcdefabcdef'), u('abcdefabcdefa'), + u('ab ab ab ab '), u('ab ab ab ab a'), + u('\t')]) + + # expected values + xp = Series([u('hello world'), u('hello world!'), + u('hello\nworld!!'), u('abcdefabcde'), + u('abcdefabcdef'), u('abcdefabcdef\na'), + u('ab ab ab ab'), u('ab ab ab ab\na'), + u('')]) + + rs = values.str.wrap(12, break_long_words=True) + assert_series_equal(rs, xp) + + # test 
with pre and post whitespace (non-unicode), NaN, and non-ascii Unicode + values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')]) + xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')]) + rs = values.str.wrap(6) + assert_series_equal(rs, xp) def test_get(self): values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
This patch implements the str.wrap function within core/strings. Example: ``` >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped']) >>> s.str.wrap(12) 0 line to be\nwrapped 1 another line\nto be\nwrapped ``` This is a cleaned branch; the original is at #6705.
https://api.github.com/repos/pandas-dev/pandas/pulls/6999
2014-04-29T14:52:17Z
2014-04-29T21:04:04Z
null
2014-06-25T22:25:38Z
make grouping column an agg
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 27001bb69cd05..9159b43536550 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -673,9 +673,17 @@ def var(self, ddof=1): def size(self): """ Compute group sizes + """ return self.grouper.size() + def count(self): + """ + Number of non-null items in each group. + + """ + return self._python_agg_general(lambda x: notnull(x).sum()) + sum = _groupby_function('sum', 'add', np.sum) prod = _groupby_function('prod', 'prod', np.prod) min = _groupby_function('min', 'min', np.min, numeric_only=False) @@ -687,12 +695,10 @@ def size(self): def ohlc(self): """ - Compute sum of values, excluding missing values - - For multiple groupings, the result index will be a MultiIndex + Deprecated, use .resample(how="ohlc") instead. """ - return self._cython_agg_general('ohlc') + raise AttributeError('ohlc is deprecated, use resample(how="ohlc").') def nth(self, n, dropna=None): """ @@ -939,6 +945,7 @@ def _cython_agg_general(self, how, numeric_only=True): result, names = self.grouper.aggregate(obj.values, how) except AssertionError as e: raise GroupByError(str(e)) + # infer old dytpe output[name] = self._try_cast(result, obj) if len(output) == 0: @@ -947,6 +954,8 @@ def _cython_agg_general(self, how, numeric_only=True): return self._wrap_aggregated_output(output, names) def _python_agg_general(self, func, *args, **kwargs): + _dtype = kwargs.pop("_dtype", None) + func = _intercept_function(func) f = lambda x: func(x, *args, **kwargs) @@ -955,7 +964,14 @@ def _python_agg_general(self, func, *args, **kwargs): for name, obj in self._iterate_slices(): try: result, counts = self.grouper.agg_series(obj, f) - output[name] = self._try_cast(result, obj) + + if _dtype is None: # infer old dytpe + output[name] = self._try_cast(result, obj) + elif _dtype is False: + output[name] = result + else: + output[name] = _possibly_downcast_to_dtype(result, _dtype) + except TypeError: continue @@ -2889,16 +2905,6 @@ def 
_apply_to_column_groupbys(self, func): in self._iterate_column_groupbys()), keys=self._selected_obj.columns, axis=1) - def ohlc(self): - """ - Compute sum of values, excluding missing values - - For multiple groupings, the result index will be a MultiIndex - """ - return self._apply_to_column_groupbys( - lambda x: x._cython_agg_general('ohlc')) - - from pandas.tools.plotting import boxplot_frame_groupby DataFrameGroupBy.boxplot = boxplot_frame_groupby diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index fde9156017c4e..f460214954bfd 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1970,6 +1970,18 @@ def test_size(self): for key, group in grouped: self.assertEquals(result[key], len(group)) + def test_count(self): + df = pd.DataFrame([[1, 2], [1, nan], [3, nan]], columns=['A', 'B']) + count_as = df.groupby('A').count() + count_not_as = df.groupby('A', as_index=False).count() + + res = pd.DataFrame([[1, 1], [3, 0]], columns=['A', 'B']) + assert_frame_equal(count_not_as, res) + assert_frame_equal(count_as, res.set_index('A')) + + count_B = df.groupby('A')['B'].count() + assert_series_equal(count_B, res['B']) + def test_grouping_ndarray(self): grouped = self.df.groupby(self.df['A'].values)
fixes #5610
https://api.github.com/repos/pandas-dev/pandas/pulls/6997
2014-04-29T00:19:34Z
2014-04-29T15:32:00Z
null
2014-07-17T17:02:27Z
BUG: GroupBy.get_group raises ValueError when group key contains NaT
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 7ad2641dec52a..c9e18b585c764 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -784,11 +784,11 @@ will be (silently) dropped. Thus, this does not pose any problems: df.groupby('A').std() -NA group handling -~~~~~~~~~~~~~~~~~ +NA and NaT group handling +~~~~~~~~~~~~~~~~~~~~~~~~~ -If there are any NaN values in the grouping key, these will be automatically -excluded. So there will never be an "NA group". This was not the case in older +If there are any NaN or NaT values in the grouping key, these will be automatically +excluded. So there will never be an "NA group" or "NaT group". This was not the case in older versions of pandas, but users were generally discarding the NA group anyway (and supporting it was an implementation headache). diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index a7917e81f7057..5d4d149798d21 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -69,13 +69,19 @@ Bug Fixes - Bug in ``Timestamp``'s' ``microsecond``, ``quarter``, ``dayofyear``, ``week`` and ``daysinmonth`` properties return ``np.int`` type, not built-in ``int``. (:issue:`10050`) - Bug in ``NaT`` raises ``AttributeError`` when accessing to ``daysinmonth``, ``dayofweek`` properties. (:issue:`10096`) + - Bug in getting timezone data with ``dateutil`` on various platforms ( :issue:`9059`, :issue:`8639`, :issue:`9663`, :issue:`10121`) - Bug in display datetimes with mixed frequencies uniformly; display 'ms' datetimes to the proper precision. 
(:issue:`10170`) + - Bug in ``DatetimeIndex`` and ``TimedeltaIndex`` names are lost after timedelta arithmetics ( :issue:`9926`) + - Bug in `Series.plot(label="LABEL")` not correctly setting the label (:issue:`10119`) - Bug in `plot` not defaulting to matplotlib `axes.grid` setting (:issue:`9792`) +- Bug in GroupBy.get_group raises ValueError when group key contains NaT (:issue:`6992`) + + diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index ffc3e6a08221c..51674bad60f5b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -426,7 +426,11 @@ def convert(key, s): return Timestamp(key).asm8 return key - sample = next(iter(self.indices)) + if len(self.indices) > 0: + sample = next(iter(self.indices)) + else: + sample = None # Dummy sample + if isinstance(sample, tuple): if not isinstance(name, tuple): msg = ("must supply a tuple to get_group with multiple" diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index a0cdc0ff5e841..598cdff30e4f7 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -37,6 +37,8 @@ cimport util from util cimport is_array, _checknull, _checknan, get_nat +cimport lib +from lib cimport is_null_datetimelike cdef int64_t iNaT = get_nat() @@ -673,7 +675,7 @@ def groupby_%(name)s(ndarray[%(c_type)s] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index 79722a26ebedc..428decd4dca10 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -28,6 +28,8 @@ ctypedef unsigned char UChar cimport util from util cimport is_array, _checknull, _checknan, get_nat +cimport lib +from lib cimport is_null_datetimelike cdef int64_t iNaT = get_nat() @@ -2096,7 +2098,7 @@ def groupby_float64(ndarray[float64_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if 
_checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2124,7 +2126,7 @@ def groupby_float32(ndarray[float32_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2152,7 +2154,7 @@ def groupby_object(ndarray[object] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2180,7 +2182,7 @@ def groupby_int32(ndarray[int32_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2208,7 +2210,7 @@ def groupby_int64(ndarray[int64_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2236,7 +2238,7 @@ def groupby_bool(ndarray[uint8_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index c308308603167..0789e20df3945 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -699,7 +699,6 @@ def test_get_group(self): expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1]) assert_panel_equal(gp, expected) - # GH 5267 # be datelike friendly df = DataFrame({'DATE' : pd.to_datetime(['10-Oct-2013', '10-Oct-2013', '10-Oct-2013', @@ -2837,6 +2836,49 @@ def test_groupby_list_infer_array_like(self): result = df.groupby(['foo', 'bar']).mean() expected = df.groupby([df['foo'], df['bar']]).mean()[['val']] + def test_groupby_nat_exclude(self): + # GH 6992 + df = pd.DataFrame({'values': np.random.randn(8), + 'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp('2013-02-01'), + np.nan, 
pd.Timestamp('2013-02-01'), np.nan, pd.Timestamp('2013-01-01')], + 'str': [np.nan, 'a', np.nan, 'a', + np.nan, 'a', np.nan, 'b']}) + grouped = df.groupby('dt') + + expected = [[1, 7], [3, 5]] + keys = sorted(grouped.groups.keys()) + self.assertEqual(len(keys), 2) + for k, e in zip(keys, expected): + # grouped.groups keys are np.datetime64 with system tz + # not to be affected by tz, only compare values + self.assertEqual(grouped.groups[k], e) + + # confirm obj is not filtered + tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df) + self.assertEqual(grouped.ngroups, 2) + expected = {Timestamp('2013-01-01 00:00:00'): np.array([1, 7]), + Timestamp('2013-02-01 00:00:00'): np.array([3, 5])} + for k in grouped.indices: + self.assert_numpy_array_equal(grouped.indices[k], expected[k]) + + tm.assert_frame_equal(grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]]) + tm.assert_frame_equal(grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]]) + + self.assertRaises(KeyError, grouped.get_group, pd.NaT) + + nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan], + 'nat': [pd.NaT, pd.NaT, pd.NaT]}) + self.assertEqual(nan_df['nan'].dtype, 'float64') + self.assertEqual(nan_df['nat'].dtype, 'datetime64[ns]') + + for key in ['nan', 'nat']: + grouped = nan_df.groupby(key) + self.assertEqual(grouped.groups, {}) + self.assertEqual(grouped.ngroups, 0) + self.assertEqual(grouped.indices, {}) + self.assertRaises(KeyError, grouped.get_group, np.nan) + self.assertRaises(KeyError, grouped.get_group, pd.NaT) + def test_dictify(self): dict(iter(self.df.groupby('A'))) dict(iter(self.df.groupby(['A', 'B']))) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 444aa2a0bab1e..93299292cf353 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1858,6 +1858,25 @@ def test_ufunc_compat(self): expected = Float64Index(np.sin(np.arange(5,dtype='int64'))) tm.assert_index_equal(result, expected) + def test_index_groupby(self): + int_idx = 
Index(range(6)) + float_idx = Index(np.arange(0, 0.6, 0.1)) + obj_idx = Index('A B C D E F'.split()) + dt_idx = pd.date_range('2013-01-01', freq='M', periods=6) + + for idx in [int_idx, float_idx, obj_idx, dt_idx]: + to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1]) + self.assertEqual(idx.groupby(to_groupby), + {1.0: [idx[0], idx[5]], 2.0: [idx[1], idx[4]]}) + + to_groupby = Index([datetime(2011, 11, 1), datetime(2011, 12, 1), + pd.NaT, pd.NaT, + datetime(2011, 12, 1), datetime(2011, 11, 1)], tz='UTC').values + + ex_keys = pd.tslib.datetime_to_datetime64(np.array([Timestamp('2011-11-01'), Timestamp('2011-12-01')])) + expected = {ex_keys[0][0]: [idx[0], idx[5]], ex_keys[0][1]: [idx[1], idx[4]]} + self.assertEqual(idx.groupby(to_groupby), expected) + class TestFloat64Index(Numeric, tm.TestCase): _holder = Float64Index
Closes #6992. Made `GroupBy.get_group` works even if the key contains `NaT`. NOTE: One issue is that `GroupBy.groups` returns incorrect key in numpy 1.6. This seems to be caused by `_convert_grouper` uses `grouper.reindex(axis).values` to return value. This looks doesn't affect to main functionalities, but is there any expected result? ``` import pandas as pd import numpy as np >>> np.__version__ 1.6.2 >>> df = pd.DataFrame({'values': np.random.randn(8), 'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan, pd.Timestamp('2013-01-01')]}) >>> grouped = df.groupby('dt') >>> grouped.groups {1970-01-16 48:00:00: [1, 7], 1970-01-16 24:00:00: [3, 5]} ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6996
2014-04-28T23:44:28Z
2015-05-30T13:05:57Z
2015-05-30T13:05:57Z
2015-06-02T19:26:59Z
BUG: df.boxplot fails to use existing axis/subplot (#3578)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b5a11091779ec..ab89cbfe2929b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -475,6 +475,7 @@ Bug Fixes caused possible color/class mismatch (:issue:`6956`) - Bug in ``radviz`` and ``andrews_curves`` where multiple values of 'color' were being passed to plotting method (:issue:`6956`) +- Bug in ``DataFrame.boxplot`` where it failed to use the axis passed as the ``ax`` argument (:issue:`3578`) pandas 0.13.1 ------------- diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index e3f49e14400d1..ca17e74d5eb07 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1083,6 +1083,25 @@ def test_boxplot(self): df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B']) _check_plot_works(df.boxplot, by='X') + # When ax is supplied, existing axes should be used: + import matplotlib.pyplot as plt + fig, ax = plt.subplots() + axes = df.boxplot('Col1', by='X', ax=ax) + self.assertIs(ax.get_axes(), axes) + + # Multiple columns with an ax argument is not supported + fig, ax = plt.subplots() + self.assertRaisesRegexp( + ValueError, 'existing axis', df.boxplot, + column=['Col1', 'Col2'], by='X', ax=ax + ) + + # When by is None, check that all relevant lines are present in the dict + fig, ax = plt.subplots() + d = df.boxplot(ax=ax) + lines = list(itertools.chain.from_iterable(d.values())) + self.assertEqual(len(ax.get_lines()), len(lines)) + @slow def test_kde(self): _skip_if_no_scipy() diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index b11d71f48baf2..33f6d4464bc43 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2760,10 +2760,16 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, columns = data._get_numeric_data().columns - by ngroups = len(columns) - nrows, ncols = _get_layout(ngroups) - fig, axes = _subplots(nrows=nrows, ncols=ncols, - sharex=True, sharey=True, - 
figsize=figsize, ax=ax) + if ax is None: + nrows, ncols = _get_layout(ngroups) + fig, axes = _subplots(nrows=nrows, ncols=ncols, + sharex=True, sharey=True, + figsize=figsize, ax=ax) + else: + if ngroups > 1: + raise ValueError("Using an existing axis is not supported when plotting multiple columns.") + fig = ax.get_figure() + axes = ax.get_axes() if isinstance(axes, plt.Axes): ravel_axes = [axes]
Alright, I think I have a fix for issue #3578. In `plotting._grouped_plot_by_column`, there was no check being done for whether `ax` was `None`, which was the main source of the issue. When the boxplot is of multiple columns, I don't think there's anything sensible that can be done with the `ax` argument, so that now raises a `ValueError`. I've written some tests, but I'm not sure if they really get to the heart of the problem, so any insight on how to improve them would be appreciated. Testing code adapted from the original bug report to demonstrate the new behaviour: ``` python import matplotlib.pyplot as plt import pandas from pandas import DataFrame, Series data = {'day': Series([1, 1, 1, 2, 2, 2, 3, 3, 3]), 'group2': Series([2, 2, 2, 2, 2, 1, 1, 1, 1]), 'val': Series([3, 4, 5, 6, 7, 8, 9, 10, 11]), 'val2': Series([8, 9, 10, 11, 12, 13, 14, 15])} df = pandas.DataFrame(data) # Single-column using existing axis: should create a single plot plt.figure() plt.subplot(2, 2, 1) plt.plot([1, 2, 3]) plt.subplot(2, 2, 4) ax = plt.gca() fig = ax.get_figure() axes = df.boxplot('val', 'day', ax=ax) print("Testing identity of returned axes: (should be True)") print(id(axes) == id(ax.get_axes())) plt.show() # Multiple column, not using existing axis: should create two plots plt.figure() plt.subplot(2, 2, 1) plt.plot([1, 2, 3]) plt.subplot(2, 2, 4) ax = plt.gca() axes = df.boxplot(['val', 'val2'], 'day') print("Testing identity of returned axes: (should be False)") print(id(axes) == id(ax.get_axes())) plt.show() # Multiple column using existing axis: should raise an exception, # since it's hard to know what to do: we need an axis for each # column and ax is just a single axis object plt.figure() plt.subplot(2, 2, 1) plt.plot([1, 2, 3]) plt.subplot(2, 2, 4) ax = plt.gca() try: df.boxplot(['val', 'val2'], 'day', ax=ax) except ValueError: print("Raising exception as expected") plt.show() ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6991
2014-04-28T12:52:51Z
2014-05-08T13:21:04Z
null
2014-06-25T17:29:12Z
TST: fix checking for less_precise in floats
diff --git a/.travis.yml b/.travis.yml index ef49d9f7b6fea..cb8376a9f5761 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ language: python env: global: # scatterci API key - - secure: "Bx5umgo6WjuGY+5XFa004xjCiX/vq0CyMZ/ETzcs7EIBI1BE/0fIDXOoWhoxbY9HPfdPGlDnDgB9nGqr5wArO2s+BavyKBWg6osZ3dmkfuJPMOWeyCa92EeP+sfKw8e5HSU5MizW9e319wHWOF/xkzdHR7T67Qd5erhv91x4DnQ=" + #- secure: "Bx5umgo6WjuGY+5XFa004xjCiX/vq0CyMZ/ETzcs7EIBI1BE/0fIDXOoWhoxbY9HPfdPGlDnDgB9nGqr5wArO2s+BavyKBWg6osZ3dmkfuJPMOWeyCa92EeP+sfKw8e5HSU5MizW9e319wHWOF/xkzdHR7T67Qd5erhv91x4DnQ=" # ironcache API key - secure: "e4eEFn9nDQc3Xa5BWYkzfX37jaWVq89XidVX+rcCNEr5OlOImvveeXnF1IzbRXznH4Sv0YsLwUd8RGUWOmyCvkONq/VJeqCHWtTMyfaCIdqSyhIP9Odz8r9ahch+Y0XFepBey92AJHmlnTh+2GjCDgIiqq4fzglojnp56Vg1ojA=" - secure: "CjmYmY5qEu3KrvMtel6zWFEtMq8ORBeS1S1odJHnjQpbwT1KY2YFZRVlLphfyDQXSz6svKUdeRrCNp65baBzs3DQNA8lIuXGIBYFeJxqVGtYAZZs6+TzBPfJJK798sGOj5RshrOJkFG2rdlWNuTq/XphI0JOrN3nPUkRrdQRpAw=" @@ -51,6 +51,7 @@ matrix: - JOB_NAME: "27_numpy_master" - JOB_TAG=_NUMPY_DEV_master - NUMPY_BUILD=master + - PANDAS_TESTING_MODE="numpy_deprecate" allow_failures: - python: 2.7 env: @@ -58,6 +59,7 @@ matrix: - JOB_NAME: "27_numpy_master" - JOB_TAG=_NUMPY_DEV_master - NUMPY_BUILD=master + - PANDAS_TESTING_MODE="numpy_deprecate" # allow importing from site-packages, # so apt-get python-x works for system pythons diff --git a/ci/after_script.sh b/ci/after_script.sh index 1c145062b69ec..b17d69daa5b8d 100755 --- a/ci/after_script.sh +++ b/ci/after_script.sh @@ -1,7 +1,7 @@ #!/bin/bash -wget https://raw.github.com/y-p/ScatterCI-CLI/master/scatter_cli.py -chmod u+x scatter_cli.py +#wget https://raw.github.com/y-p/ScatterCI-CLI/master/scatter_cli.py +#chmod u+x scatter_cli.py pip install -I requests==2.1.0 echo "${TRAVIS_PYTHON_VERSION:0:4}" @@ -12,7 +12,6 @@ fi # ScatterCI accepts a build log, but currently does nothing with it. 
echo '' > /tmp/build.log -# These should be in the environment, but not in source control # nore exposed in the build logs #export SCATTERCI_ACCESS_KEY= #export SCATTERCI_HOST= @@ -22,6 +21,6 @@ ci/print_versions.py -j /tmp/env.json # nose ran using "--with-xunit --xunit-file nosetest.xml" and generated /tmp/nosetest.xml # Will timeout if server not available, and should not fail the build -python scatter_cli.py --xunit-file /tmp/nosetests.xml --log-file /tmp/build.log --env-file /tmp/env.json --build-name "$JOB_NAME" --succeed +#python scatter_cli.py --xunit-file /tmp/nosetests.xml --log-file /tmp/build.log --env-file /tmp/env.json --build-name "$JOB_NAME" --succeed true # never fail because bad things happened here diff --git a/ci/script.sh b/ci/script.sh index e76789b689c94..152a2f1ebdcf9 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -16,13 +16,6 @@ fi "$TRAVIS_BUILD_DIR"/ci/build_docs.sh 2>&1 > /tmp/doc.log & # doc build log will be shown after tests -# export the testing mode -if [ -n "$NUMPY_BUILD" ]; then - - export PANDAS_TESTING_MODE="numpy_deprecate" - -fi - echo nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx index 290226e1daf6a..bff070421c841 100644 --- a/pandas/src/testing.pyx +++ b/pandas/src/testing.pyx @@ -118,10 +118,7 @@ cpdef assert_almost_equal(a, b, bint check_less_precise=False): # deal with differing dtypes if check_less_precise: - dtype_a = np.dtype(type(a)) - dtype_b = np.dtype(type(b)) - if dtype_a.kind == 'f' and dtype_b == 'f': - decimal = 3 + decimal = 3 if np.isinf(a): assert np.isinf(b), "First object is inf, second isn't" @@ -132,11 +129,11 @@ cpdef assert_almost_equal(a, b, bint check_less_precise=False): if abs(fa) < 1e-5: if not decimal_almost_equal(fa, fb, decimal): assert False, ( - '(very low values) expected %.5f but got %.5f' % 
(b, a) + '(very low values) expected %.5f but got %.5f, with decimal %d' % (fb, fa, decimal) ) else: if not decimal_almost_equal(1, fb / fa, decimal): - assert False, 'expected %.5f but got %.5f' % (b, a) + assert False, 'expected %.5f but got %.5f, with decimal %d' % (fb, fa, decimal) else: assert a == b, "%r != %r" % (a, b) diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py index 986e44ced83a2..298fa73c69064 100644 --- a/pandas/tests/test_testing.py +++ b/pandas/tests/test_testing.py @@ -154,6 +154,25 @@ def test_not_equal(self): # ATM meta data is not checked in assert_series_equal # self._assert_not_equal(Series(range(3)),Series(range(3),name='foo'),check_names=True) + def test_less_precise(self): + s1 = Series([0.12345],dtype='float64') + s2 = Series([0.12346],dtype='float64') + + self.assertRaises(AssertionError, assert_series_equal, s1, s2) + self._assert_equal(s1,s2,check_less_precise=True) + + s1 = Series([0.12345],dtype='float32') + s2 = Series([0.12346],dtype='float32') + + self.assertRaises(AssertionError, assert_series_equal, s1, s2) + self._assert_equal(s1,s2,check_less_precise=True) + + # even less than less precise + s1 = Series([0.1235],dtype='float32') + s2 = Series([0.1236],dtype='float32') + + self.assertRaises(AssertionError, assert_series_equal, s1, s2) + self.assertRaises(AssertionError, assert_series_equal, s1, s2, True) class TestRNGContext(unittest.TestCase):
really close #6982
https://api.github.com/repos/pandas-dev/pandas/pulls/6990
2014-04-28T11:59:07Z
2014-04-28T12:44:00Z
2014-04-28T12:44:00Z
2014-07-16T09:03:47Z
DOC: fix some doc build warnings
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 79d85ae9586ed..7b064c69c721c 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -242,8 +242,8 @@ accept the following arguments: instead of ``freq`` that referred to the legacy time rule constants - ``how``: optionally specify method for down or re-sampling. Default is is min for ``rolling_min``, max for ``rolling_max``, median for - ``rolling_median``, and mean for all other rolling functions. See - :meth:`DataFrame.resample`'s how argument for more information. + ``rolling_median``, and mean for all other rolling functions. See + :meth:`DataFrame.resample`'s how argument for more information. These functions can be applied to ndarrays or Series objects: diff --git a/doc/source/conf.py b/doc/source/conf.py index dd6635b8d70df..117aa1724c4f2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -36,6 +36,7 @@ # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sphinxext. extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.extlinks', 'sphinx.ext.todo', @@ -63,7 +64,6 @@ autosummary_generate = False if any([re.match("\s*api\s*",l) for l in lines]): - extensions.append('sphinx.ext.autosummary') autosummary_generate = True ds = [] diff --git a/doc/source/release.rst b/doc/source/release.rst index 38e95eaba0b0f..c73331a49641f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -164,7 +164,7 @@ API Changes - ``False``: Do nothing (default). - ``True``: Draw a table using the ``DataFrame`` or ``Series`` called ``plot`` method. Data will be transposed to meet matplotlib's default layout. - ``DataFrame`` or ``Series``: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). 
- Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. + Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. - drop unused order argument from ``Series.sort``; args now in the same orders as ``Series.order``; add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) @@ -429,7 +429,7 @@ Bug Fixes - Bug in C parser with leading whitespace (:issue:`3374`) - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) -- Bug in ``DataFrame.apply`` with functions that used *args or **kwargs and returned +- Bug in ``DataFrame.apply`` with functions that used \*args`` or \*\*kwargs and returned an empty result (:issue:`6952`) - Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) diff --git a/doc/source/v0.11.0.txt b/doc/source/v0.11.0.txt index 84d0806e457bf..3a56794151b1e 100644 --- a/doc/source/v0.11.0.txt +++ b/doc/source/v0.11.0.txt @@ -118,6 +118,7 @@ Forcing Date coercion (and setting ``NaT`` when not datelike) .. ipython:: python + from datetime import datetime s = Series([datetime(2001,1,1,0,0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'],dtype='O') s.convert_objects(convert_dates='coerce') diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index 8d9c266a864b3..91226fe180548 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -370,6 +370,7 @@ Experimental Features .. ipython:: python from pandas.tseries.offsets import CustomBusinessDay + from datetime import datetime # As an interesting example, let's look at Egypt where # a Friday-Saturday weekend is observed. weekmask_egypt = 'Sun Mon Tue Wed Thu'
https://api.github.com/repos/pandas-dev/pandas/pulls/6989
2014-04-28T11:24:07Z
2014-04-28T11:58:53Z
2014-04-28T11:58:53Z
2014-07-16T09:03:46Z
CLN: drop internals._invert_reordering in favour of lib.get_reverse_indexer
diff --git a/doc/source/release.rst b/doc/source/release.rst index 38e95eaba0b0f..91e523cfc882c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -295,6 +295,8 @@ Improvements to existing features - ``read_excel`` can now read milliseconds in Excel dates and times with xlrd >= 0.9.3. (:issue:`5945`) - ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) - Improved performance of compatible pickles (:issue:`6899`) +- Refactor Block classes removing `Block.items` attributes to avoid duplication + in item handling (:issue:`6745`, :issue:`6988`). .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 43096b133f26e..2c29804cbafa0 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -27,6 +27,12 @@ users upgrade to this version. - :ref:`Bug Fixes <release.bug_fixes-0.14.0>` +.. warning:: + + In 0.14.0 all ``NDFrame`` based containers have underwent significant internal refactoring. Before that each block of + homogeneous data had its own labels and extra care was necessary to keep those in sync with parent container's labels. + As stated, the refactoring is internal and no publicly visible changes should happen. + .. 
_whatsnew_0140.api: API changes diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 887f7562421d7..48d047baaa6c0 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2316,7 +2316,7 @@ def combine(self, blocks, copy=True): # FIXME: optimization potential indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) - inv_indexer = _invert_reordering(indexer) + inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) new_items = self.items.take(indexer) new_blocks = [] @@ -3506,71 +3506,10 @@ def _possibly_compare(a, b, op): return res - - def _concat_indexes(indexes): return indexes[0].append(indexes[1:]) -def _invert_reordering(reordering, minlength=None): - """ - Invert reordering operation. - - Given array `reordering`, make `reordering_inv` of it, such that:: - - reordering_inv[reordering[x]] = x - - There are two types of indexers: - - source - is when element *s* at position *i* means that values to fill *i-th* - item of reindex operation should be taken from *s-th* item of the - original (this is what is returned by `pandas.Index.reindex`). - destination - is when element *d* at position *i* means that values from *i-th* item - of source should be used to fill *d-th* item of reindexing operation. - - This function will convert from *source* to *destination* and vice-versa. - - .. note:: trailing ``-1`` may be lost upon conversion (this is what - `minlength` is there for). - - .. note:: if *source* indexer is not unique, corresponding *destination* - indexer will have ``dtype=object`` and will contain lists. 
- - Examples: - - >>> _invert_reordering([3, -1, 2, 4, -1]) - array([-1, -1, 2, 0, 3]) - >>> _invert_reordering([-1, -1, 0, 2, 3]) - array([3, -1, 2, 4]) - >>> _invert_reordering([1,3,5]) - array([-1, 0, -1, 1, -1, 2]) - - """ - reordering = np.asanyarray(reordering, dtype=np.int64) - if not com.is_integer_dtype(reordering): - raise ValueError("Only integer indexers are supported") - - nonneg_indices = reordering[reordering >= 0].astype(np.int_) - counts = np.bincount(nonneg_indices, minlength=minlength) - has_non_unique = (counts > 1).any() - - dtype = np.dtype(np.object_) if has_non_unique else np.dtype(np.int64) - inverted = np.empty_like(counts, dtype=dtype) - inverted.fill(-1) - - nonneg_positions = np.arange(len(reordering), dtype=np.int64)[reordering >= 0] - np.put(inverted, nonneg_indices, nonneg_positions) - - if has_non_unique: - nonunique_elements = np.arange(len(counts))[counts > 1] - for elt in nonunique_elements: - inverted[elt] = nonneg_positions[nonneg_indices == elt].tolist() - - return inverted - - def _get_blkno_placements(blknos, blk_count, group=True): """ diff --git a/pandas/lib.pyx b/pandas/lib.pyx index c7494c2f4344f..53c4e0a44e8e9 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -495,6 +495,17 @@ def fast_zip(list ndarrays): return result def get_reverse_indexer(ndarray[int64_t] indexer, Py_ssize_t length): + """ + Reverse indexing operation. + + Given `indexer`, make `indexer_inv` of it, such that:: + + indexer_inv[indexer[x]] = x + + .. note:: If indexer is not unique, only first occurrence is accounted. + + """ + cdef: Py_ssize_t i, n = len(indexer) ndarray[int64_t] rev_indexer
I've implemented the former while refactoring BlockManager (#6745) because I didn't find the latter in lib module. This PR will remove the duplication (and also add missing changelog entry).
https://api.github.com/repos/pandas-dev/pandas/pulls/6988
2014-04-28T09:24:28Z
2014-04-28T11:20:38Z
2014-04-28T11:20:38Z
2014-06-25T22:25:34Z
SQL: bunch of fixes based on the old tests
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 158ef7b7ed791..c18a4aef5355b 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -6,6 +6,7 @@ from datetime import datetime, date, timedelta import warnings +import traceback import itertools import re import numpy as np @@ -97,80 +98,130 @@ def execute(sql, con, cur=None, params=None, flavor='sqlite'): ------- Results Iterable """ - pandas_sql = pandasSQL_builder(con, flavor=flavor) + if cur is None: + pandas_sql = pandasSQL_builder(con, flavor=flavor) + else: + pandas_sql = pandasSQL_builder(cur, flavor=flavor, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) -def tquery(sql, con, cur=None, params=None, flavor='sqlite'): +#------------------------------------------------------------------------------ +#--- Deprecated tquery and uquery + +def _safe_fetch(cur): + try: + result = cur.fetchall() + if not isinstance(result, list): + result = list(result) + return result + except Exception as e: # pragma: no cover + excName = e.__class__.__name__ + if excName == 'OperationalError': + return [] + +def tquery(sql, con=None, cur=None, retry=True): """ - Returns list of tuples corresponding to each row in given sql + DEPRECATED. Returns list of tuples corresponding to each row in given sql query. If only one column selected, then plain list is returned. + To obtain the same result in the future, you can use the following: + + >>> execute(sql, con, params).fetchall() + Parameters ---------- sql: string SQL query to be executed - con: SQLAlchemy engine or DBAPI2 connection (legacy mode) - Using SQLAlchemy makes it possible to use any DB supported by that - library. - If a DBAPI2 object is given, a supported SQL flavor must also be provided + con: DBAPI2 connection cur: depreciated, cursor is obtained from connection - params: list or tuple, optional - List of parameters to pass to execute method. - flavor : string "sqlite", "mysql" - Specifies the flavor of SQL to use. 
- Ignored when using SQLAlchemy engine. Required when using DBAPI2 - connection. + Returns ------- Results Iterable + """ warnings.warn( - "tquery is depreciated, and will be removed in future versions", - DeprecationWarning) + "tquery is depreciated, and will be removed in future versions. " + "You can use ``execute(...).fetchall()`` instead.", + FutureWarning) - pandas_sql = pandasSQL_builder(con, flavor=flavor) - args = _convert_params(sql, params) - return pandas_sql.tquery(*args) + cur = execute(sql, con, cur=cur) + result = _safe_fetch(cur) + if con is not None: + try: + cur.close() + con.commit() + except Exception as e: + excName = e.__class__.__name__ + if excName == 'OperationalError': # pragma: no cover + print('Failed to commit, may need to restart interpreter') + else: + raise -def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'): + traceback.print_exc() + if retry: + return tquery(sql, con=con, retry=False) + + if result and len(result[0]) == 1: + # python 3 compat + result = list(lzip(*result)[0]) + elif result is None: # pragma: no cover + result = [] + + return result + + +def uquery(sql, con=None, cur=None, retry=True, params=None): """ - Does the same thing as tquery, but instead of returning results, it + DEPRECATED. Does the same thing as tquery, but instead of returning results, it returns the number of rows affected. Good for update queries. + To obtain the same result in the future, you can use the following: + + >>> execute(sql, con).rowcount + Parameters ---------- sql: string SQL query to be executed - con: SQLAlchemy engine or DBAPI2 connection (legacy mode) - Using SQLAlchemy makes it possible to use any DB supported by that - library. - If a DBAPI2 object is given, a supported SQL flavor must also be provided + con: DBAPI2 connection cur: depreciated, cursor is obtained from connection params: list or tuple, optional List of parameters to pass to execute method. 
- flavor : string "sqlite", "mysql" - Specifies the flavor of SQL to use. - Ignored when using SQLAlchemy engine. Required when using DBAPI2 - connection. + Returns ------- Number of affected rows + """ warnings.warn( - "uquery is depreciated, and will be removed in future versions", - DeprecationWarning) - pandas_sql = pandasSQL_builder(con, flavor=flavor) - args = _convert_params(sql, params) - return pandas_sql.uquery(*args) + "uquery is depreciated, and will be removed in future versions. " + "You can use ``execute(...).rowcount`` instead.", + FutureWarning) + + cur = execute(sql, con, cur=cur, params=params) + + result = cur.rowcount + try: + con.commit() + except Exception as e: + excName = e.__class__.__name__ + if excName != 'OperationalError': + raise + + traceback.print_exc() + if retry: + print('Looks like your connection failed, reconnecting...') + return uquery(sql, con, retry=False) + return result #------------------------------------------------------------------------------ -# Read and write to DataFrames +#--- Read and write to DataFrames def read_sql_table(table_name, con, meta=None, index_col=None, coerce_float=True, parse_dates=None, columns=None): @@ -212,7 +263,7 @@ def read_sql_table(table_name, con, meta=None, index_col=None, -------- read_sql_query : Read SQL query into a DataFrame. read_sql - + """ pandas_sql = PandasSQLAlchemy(con, meta=meta) @@ -322,8 +373,8 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, Notes ----- This function is a convenience wrapper around ``read_sql_table`` and - ``read_sql_query`` (and for backward compatibility) and will delegate - to the specific function depending on the provided input (database + ``read_sql_query`` (and for backward compatibility) and will delegate + to the specific function depending on the provided input (database table name or sql query). 
See also @@ -334,7 +385,19 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, """ pandas_sql = pandasSQL_builder(con, flavor=flavor) - if pandas_sql.has_table(sql): + if 'select' in sql.lower(): + try: + if pandas_sql.has_table(sql): + return pandas_sql.read_table( + sql, index_col=index_col, coerce_float=coerce_float, + parse_dates=parse_dates, columns=columns) + except: + pass + + return pandas_sql.read_sql( + sql, index_col=index_col, params=params, + coerce_float=coerce_float, parse_dates=parse_dates) + else: if isinstance(pandas_sql, PandasSQLLegacy): raise ValueError("Reading a table with read_sql is not supported " "for a DBAPI2 connection. Use an SQLAlchemy " @@ -342,10 +405,6 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) - else: - return pandas_sql.read_sql( - sql, index_col=index_col, params=params, coerce_float=coerce_float, - parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, @@ -377,6 +436,9 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, A sequence should be given if the DataFrame uses MultiIndex. """ + if if_exists not in ('fail', 'replace', 'append'): + raise ValueError("'{0}' is not valid for if_exists".format(if_exists)) + pandas_sql = pandasSQL_builder(con, flavor=flavor) if isinstance(frame, Series): @@ -388,7 +450,7 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=index_label) -def has_table(table_name, con, meta=None, flavor='sqlite'): +def has_table(table_name, con, flavor='sqlite'): """ Check if DataBase has named table. 
@@ -411,34 +473,37 @@ def has_table(table_name, con, meta=None, flavor='sqlite'): pandas_sql = pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) +table_exists = has_table -def pandasSQL_builder(con, flavor=None, meta=None): + +def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters """ + # When support for DBAPI connections is removed, + # is_cursor should not be necessary. try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: - warnings.warn( - """Not an SQLAlchemy engine, - attempting to use as legacy DBAPI connection""") + warnings.warn("Not an SQLAlchemy engine, " + "attempting to use as legacy DBAPI connection") if flavor is None: raise ValueError( - """PandasSQL must be created with an SQLAlchemy engine - or a DBAPI2 connection and SQL flavour""") + "PandasSQL must be created with an SQLAlchemy engine " + "or a DBAPI2 connection and SQL flavor") else: - return PandasSQLLegacy(con, flavor) + return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: warnings.warn("SQLAlchemy not installed, using legacy mode") if flavor is None: raise SQLAlchemyRequired else: - return PandasSQLLegacy(con, flavor) + return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): @@ -471,6 +536,9 @@ def __init__(self, name, pandas_sql_engine, frame=None, index=True, self.table = self.pd_sql.get_table(self.name) if self.table is None: self.table = self._create_table_statement() + else: + raise ValueError( + "'{0}' is not valid for if_exists".format(if_exists)) else: self.table = self._create_table_statement() self.create() @@ -485,7 +553,8 @@ def exists(self): return self.pd_sql.has_table(self.name) def sql_schema(self): - return str(self.table.compile()) + from sqlalchemy.schema import CreateTable + return str(CreateTable(self.table)) 
def create(self): self.table.create() @@ -722,14 +791,6 @@ def execute(self, *args, **kwargs): """Simple passthrough to SQLAlchemy engine""" return self.engine.execute(*args, **kwargs) - def tquery(self, *args, **kwargs): - result = self.execute(*args, **kwargs) - return result.fetchall() - - def uquery(self, *args, **kwargs): - result = self.execute(*args, **kwargs) - return result.rowcount - def read_table(self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): @@ -783,7 +844,7 @@ def drop_table(self, table_name): def _create_sql_schema(self, frame, table_name): table = PandasSQLTable(table_name, self, frame=frame) - return str(table.compile()) + return str(table.sql_schema()) # ---- SQL without SQLAlchemy --- @@ -927,7 +988,8 @@ def _sql_type_name(self, dtype): class PandasSQLLegacy(PandasSQL): - def __init__(self, con, flavor): + def __init__(self, con, flavor, is_cursor=False): + self.is_cursor = is_cursor self.con = con if flavor not in ['sqlite', 'mysql']: raise NotImplementedError @@ -935,8 +997,11 @@ def __init__(self, con, flavor): self.flavor = flavor def execute(self, *args, **kwargs): - try: + if self.is_cursor: + cur = self.con + else: cur = self.con.cursor() + try: if kwargs: cur.execute(*args, **kwargs) else: @@ -953,22 +1018,6 @@ def execute(self, *args, **kwargs): ex = DatabaseError("Execution failed on sql: %s" % args[0]) raise_with_traceback(ex) - def tquery(self, *args): - cur = self.execute(*args) - result = self._fetchall_as_list(cur) - - # This makes into tuples - if result and len(result[0]) == 1: - # python 3 compat - result = list(lzip(*result)[0]) - elif result is None: # pragma: no cover - result = [] - return result - - def uquery(self, *args): - cur = self.execute(*args) - return cur.rowcount - def read_sql(self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None): args = _convert_params(sql, params) @@ -1006,7 +1055,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True, fail: 
If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if does not exist. - + """ table = PandasSQLTableLegacy( name, self, frame=frame, index=index, if_exists=if_exists, @@ -1020,7 +1069,7 @@ def has_table(self, name): 'mysql': "SHOW TABLES LIKE '%s'" % name} query = flavor_map.get(self.flavor) - return len(self.tquery(query)) > 0 + return len(self.execute(query).fetchall()) > 0 def get_table(self, table_name): return None # not supported in Legacy mode @@ -1029,32 +1078,90 @@ def drop_table(self, name): drop_sql = "DROP TABLE %s" % name self.execute(drop_sql) + def _create_sql_schema(self, frame, table_name): + table = PandasSQLTableLegacy(table_name, self, frame=frame) + return str(table.sql_schema()) -# legacy names, with depreciation warnings and copied docs -def get_schema(frame, name, con, flavor='sqlite'): + +def get_schema(frame, name, flavor='sqlite', keys=None, con=None): """ Get the SQL db table schema for the given frame Parameters ---------- - frame: DataFrame - name: name of SQL table - con: an open SQL database connection object - engine: an SQLAlchemy engine - replaces connection and flavor - flavor: {'sqlite', 'mysql', 'postgres'}, default 'sqlite' + frame : DataFrame + name : name of SQL table + flavor : {'sqlite', 'mysql'}, default 'sqlite' + keys : columns to use a primary key + con: an open SQL database connection object or an SQLAlchemy engine """ - warnings.warn( - "get_schema is depreciated", DeprecationWarning) + + if con is None: + return _get_schema_legacy(frame, name, flavor, keys) + pandas_sql = pandasSQL_builder(con=con, flavor=flavor) return pandas_sql._create_sql_schema(frame, name) +def _get_schema_legacy(frame, name, flavor, keys=None): + """Old function from 0.13.1. To keep backwards compatibility. 
+ When mysql legacy support is dropped, it should be possible to + remove this code + """ + + def get_sqltype(dtype, flavor): + pytype = dtype.type + pytype_name = "text" + if issubclass(pytype, np.floating): + pytype_name = "float" + elif issubclass(pytype, np.integer): + pytype_name = "int" + elif issubclass(pytype, np.datetime64) or pytype is datetime: + # Caution: np.datetime64 is also a subclass of np.number. + pytype_name = "datetime" + elif pytype is datetime.date: + pytype_name = "date" + elif issubclass(pytype, np.bool_): + pytype_name = "bool" + + return _SQL_TYPES[pytype_name][flavor] + + lookup_type = lambda dtype: get_sqltype(dtype, flavor) + + column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) + if flavor == 'sqlite': + columns = ',\n '.join('[%s] %s' % x for x in column_types) + else: + columns = ',\n '.join('`%s` %s' % x for x in column_types) + + keystr = '' + if keys is not None: + if isinstance(keys, string_types): + keys = (keys,) + keystr = ', PRIMARY KEY (%s)' % ','.join(keys) + template = """CREATE TABLE %(name)s ( + %(columns)s + %(keystr)s + );""" + create_statement = template % {'name': name, 'columns': columns, + 'keystr': keystr} + return create_statement + + +# legacy names, with depreciation warnings and copied docs + def read_frame(*args, **kwargs): """DEPRECIATED - use read_sql """ - warnings.warn( - "read_frame is depreciated, use read_sql", DeprecationWarning) + warnings.warn("read_frame is depreciated, use read_sql", FutureWarning) + return read_sql(*args, **kwargs) + + +def frame_query(*args, **kwargs): + """DEPRECIATED - use read_sql + """ + warnings.warn("frame_query is depreciated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) @@ -1092,7 +1199,7 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): pandas.DataFrame.to_sql """ - warnings.warn("write_frame is depreciated, use to_sql", DeprecationWarning) + warnings.warn("write_frame is depreciated, use to_sql", 
FutureWarning) # for backwards compatibility, set index=False when not specified index = kwargs.pop('index', False) @@ -1102,3 +1209,4 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): # Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ +frame_query.__doc__ += read_sql.__doc__ diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index ad3fa57ab48a7..9a34e84c153a0 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -20,13 +20,18 @@ import sqlite3 import csv import os +import sys import nose +import warnings import numpy as np -from pandas import DataFrame, Series, MultiIndex -from pandas.compat import range -#from pandas.core.datetools import format as date_format +from datetime import datetime + +from pandas import DataFrame, Series, Index, MultiIndex, isnull +import pandas.compat as compat +from pandas.compat import StringIO, range, lrange +from pandas.core.datetools import format as date_format import pandas.io.sql as sql import pandas.util.testing as tm @@ -296,11 +301,6 @@ def _execute_sql(self): row = iris_results.fetchone() tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) - def _tquery(self): - iris_results = self.pandasSQL.tquery("SELECT * FROM iris") - row = iris_results[0] - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) - #------------------------------------------------------------------------------ #--- Testing the public API @@ -336,8 +336,9 @@ def test_read_sql_iris(self): self._check_iris_loaded_frame(iris_frame) def test_legacy_read_frame(self): - iris_frame = sql.read_frame( - "SELECT * FROM iris", self.conn, flavor='sqlite') + with tm.assert_produces_warning(FutureWarning): + iris_frame = sql.read_frame( + "SELECT * FROM iris", self.conn, flavor='sqlite') self._check_iris_loaded_frame(iris_frame) def test_to_sql(self): @@ -402,8 +403,10 @@ def test_to_sql_panel(self): def test_legacy_write_frame(self): # Assume that 
functionality is already tested above so just do # quick check that it basically works - sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn, - flavor='sqlite') + with tm.assert_produces_warning(FutureWarning): + sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn, + flavor='sqlite') + self.assertTrue( sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB') @@ -430,12 +433,6 @@ def test_execute_sql(self): row = iris_results.fetchone() tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) - def test_tquery(self): - iris_results = sql.tquery( - "SELECT * FROM iris", con=self.conn, flavor='sqlite') - row = iris_results[0] - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) - def test_date_parsing(self): # Test date parsing in read_sq # No Parsing @@ -555,6 +552,11 @@ def test_integer_col_names(self): sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists='replace') + def test_get_schema(self): + create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite', + con=self.conn) + self.assert_('CREATE' in create_sql) + class TestSQLApi(_TestSQLApi): """ @@ -674,6 +676,22 @@ def test_safe_names_warning(self): sql.to_sql(df, "test_frame3_legacy", self.conn, flavor="sqlite", index=False) + def test_get_schema2(self): + # without providing a connection object (available for backwards comp) + create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite') + self.assert_('CREATE' in create_sql) + + def test_tquery(self): + with tm.assert_produces_warning(FutureWarning): + iris_results = sql.tquery("SELECT * FROM iris", con=self.conn) + row = iris_results[0] + tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) + + def test_uquery(self): + with tm.assert_produces_warning(FutureWarning): + rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn) + self.assertEqual(rows, -1) + #------------------------------------------------------------------------------ #--- Database flavor 
specific tests @@ -1043,9 +1061,6 @@ def test_roundtrip(self): def test_execute_sql(self): self._execute_sql() - def test_tquery(self): - self._tquery() - class TestMySQLLegacy(TestSQLiteLegacy): """ @@ -1095,6 +1110,598 @@ def tearDown(self): self.conn.close() +#------------------------------------------------------------------------------ +#--- Old tests from 0.13.1 (before refactor using sqlalchemy) + + +_formatters = { + datetime: lambda dt: "'%s'" % date_format(dt), + str: lambda x: "'%s'" % x, + np.str_: lambda x: "'%s'" % x, + compat.text_type: lambda x: "'%s'" % x, + compat.binary_type: lambda x: "'%s'" % x, + float: lambda x: "%.8f" % x, + int: lambda x: "%s" % x, + type(None): lambda x: "NULL", + np.float64: lambda x: "%.10f" % x, + bool: lambda x: "'%s'" % x, +} + +def format_query(sql, *args): + """ + + """ + processed_args = [] + for arg in args: + if isinstance(arg, float) and isnull(arg): + arg = None + + formatter = _formatters[type(arg)] + processed_args.append(formatter(arg)) + + return sql % tuple(processed_args) + +def _skip_if_no_pymysql(): + try: + import pymysql + except ImportError: + raise nose.SkipTest('pymysql not installed, skipping') + + +class TestXSQLite(tm.TestCase): + + def setUp(self): + self.db = sqlite3.connect(':memory:') + + def test_basic(self): + frame = tm.makeTimeDataFrame() + self._check_roundtrip(frame) + + def test_write_row_by_row(self): + + frame = tm.makeTimeDataFrame() + frame.ix[0, 0] = np.nan + create_sql = sql.get_schema(frame, 'test', 'sqlite') + cur = self.db.cursor() + cur.execute(create_sql) + + cur = self.db.cursor() + + ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" + for idx, row in frame.iterrows(): + fmt_sql = format_query(ins, *row) + sql.tquery(fmt_sql, cur=cur) + + self.db.commit() + + result = sql.read_frame("select * from test", con=self.db) + result.index = frame.index + tm.assert_frame_equal(result, frame) + + def test_execute(self): + frame = tm.makeTimeDataFrame() + create_sql = 
sql.get_schema(frame, 'test', 'sqlite') + cur = self.db.cursor() + cur.execute(create_sql) + ins = "INSERT INTO test VALUES (?, ?, ?, ?)" + + row = frame.ix[0] + sql.execute(ins, self.db, params=tuple(row)) + self.db.commit() + + result = sql.read_frame("select * from test", self.db) + result.index = frame.index[:1] + tm.assert_frame_equal(result, frame[:1]) + + def test_schema(self): + frame = tm.makeTimeDataFrame() + create_sql = sql.get_schema(frame, 'test', 'sqlite') + lines = create_sql.splitlines() + for l in lines: + tokens = l.split(' ') + if len(tokens) == 2 and tokens[0] == 'A': + self.assert_(tokens[1] == 'DATETIME') + + frame = tm.makeTimeDataFrame() + create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],) + lines = create_sql.splitlines() + self.assert_('PRIMARY KEY (A,B)' in create_sql) + cur = self.db.cursor() + cur.execute(create_sql) + + def test_execute_fail(self): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + cur = self.db.cursor() + cur.execute(create_sql) + + sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db) + sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db) + + try: + sys.stdout = StringIO() + self.assertRaises(Exception, sql.execute, + 'INSERT INTO test VALUES("foo", "bar", 7)', + self.db) + finally: + sys.stdout = sys.__stdout__ + + def test_execute_closed_connection(self): + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a, b) + ); + """ + cur = self.db.cursor() + cur.execute(create_sql) + + sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db) + self.db.close() + try: + sys.stdout = StringIO() + self.assertRaises(Exception, sql.tquery, "select * from test", + con=self.db) + finally: + sys.stdout = sys.__stdout__ + + def test_na_roundtrip(self): + pass + + def _check_roundtrip(self, frame): + sql.write_frame(frame, name='test_table', con=self.db) + result = 
sql.read_frame("select * from test_table", self.db) + + # HACK! Change this once indexes are handled properly. + result.index = frame.index + + expected = frame + tm.assert_frame_equal(result, expected) + + frame['txt'] = ['a'] * len(frame) + frame2 = frame.copy() + frame2['Idx'] = Index(lrange(len(frame2))) + 10 + sql.write_frame(frame2, name='test_table2', con=self.db) + result = sql.read_frame("select * from test_table2", self.db, + index_col='Idx') + expected = frame.copy() + expected.index = Index(lrange(len(frame2))) + 10 + expected.index.name = 'Idx' + print(expected.index.names) + print(result.index.names) + tm.assert_frame_equal(expected, result) + + def test_tquery(self): + frame = tm.makeTimeDataFrame() + sql.write_frame(frame, name='test_table', con=self.db) + result = sql.tquery("select A from test_table", self.db) + expected = frame.A + result = Series(result, frame.index) + tm.assert_series_equal(result, expected) + + try: + sys.stdout = StringIO() + self.assertRaises(sql.DatabaseError, sql.tquery, + 'select * from blah', con=self.db) + + self.assertRaises(sql.DatabaseError, sql.tquery, + 'select * from blah', con=self.db, retry=True) + finally: + sys.stdout = sys.__stdout__ + + def test_uquery(self): + frame = tm.makeTimeDataFrame() + sql.write_frame(frame, name='test_table', con=self.db) + stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)' + self.assertEqual(sql.uquery(stmt, con=self.db), 1) + + try: + sys.stdout = StringIO() + + self.assertRaises(sql.DatabaseError, sql.tquery, + 'insert into blah values (1)', con=self.db) + + self.assertRaises(sql.DatabaseError, sql.tquery, + 'insert into blah values (1)', con=self.db, + retry=True) + finally: + sys.stdout = sys.__stdout__ + + def test_keyword_as_column_names(self): + ''' + ''' + df = DataFrame({'From':np.ones(5)}) + sql.write_frame(df, con = self.db, name = 'testkeywords') + + def test_onecolumn_of_integer(self): + # GH 3628 + # a column_of_integers dataframe should transfer well 
to sql + + mono_df=DataFrame([1 , 2], columns=['c0']) + sql.write_frame(mono_df, con = self.db, name = 'mono_df') + # computing the sum via sql + con_x=self.db + the_sum=sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")]) + # it should not fail, and gives 3 ( Issue #3628 ) + self.assertEqual(the_sum , 3) + + result = sql.read_frame("select * from mono_df",con_x) + tm.assert_frame_equal(result,mono_df) + + def test_if_exists(self): + df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']}) + df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']}) + table_name = 'table_if_exists' + sql_select = "SELECT * FROM %s" % table_name + + def clean_up(test_table_to_drop): + """ + Drops tables created from individual tests + so no dependencies arise from sequential tests + """ + if sql.table_exists(test_table_to_drop, self.db, flavor='sqlite'): + cur = self.db.cursor() + cur.execute("DROP TABLE %s" % test_table_to_drop) + cur.close() + + # test if invalid value for if_exists raises appropriate error + self.assertRaises(ValueError, + sql.write_frame, + frame=df_if_exists_1, + con=self.db, + name=table_name, + flavor='sqlite', + if_exists='notvalidvalue') + clean_up(table_name) + + # test if_exists='fail' + sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name, + flavor='sqlite', if_exists='fail') + self.assertRaises(ValueError, + sql.write_frame, + frame=df_if_exists_1, + con=self.db, + name=table_name, + flavor='sqlite', + if_exists='fail') + + # test if_exists='replace' + sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name, + flavor='sqlite', if_exists='replace') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(1, 'A'), (2, 'B')]) + sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name, + flavor='sqlite', if_exists='replace') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(3, 'C'), (4, 'D'), (5, 'E')]) + clean_up(table_name) + + # test if_exists='append' + 
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name, + flavor='sqlite', if_exists='fail') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(1, 'A'), (2, 'B')]) + sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name, + flavor='sqlite', if_exists='append') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')]) + clean_up(table_name) + + +class TestXMySQL(tm.TestCase): + + def setUp(self): + _skip_if_no_pymysql() + import pymysql + try: + # Try Travis defaults. + # No real user should allow root access with a blank password. + self.db = pymysql.connect(host='localhost', user='root', passwd='', + db='pandas_nosetest') + except: + pass + else: + return + try: + self.db = pymysql.connect(read_default_group='pandas') + except pymysql.ProgrammingError as e: + raise nose.SkipTest( + "Create a group of connection parameters under the heading " + "[pandas] in your system's mysql default file, " + "typically located at ~/.my.cnf or /etc/.my.cnf. ") + except pymysql.Error as e: + raise nose.SkipTest( + "Cannot connect to database. " + "Create a group of connection parameters under the heading " + "[pandas] in your system's mysql default file, " + "typically located at ~/.my.cnf or /etc/.my.cnf. 
") + + def test_basic(self): + _skip_if_no_pymysql() + frame = tm.makeTimeDataFrame() + self._check_roundtrip(frame) + + def test_write_row_by_row(self): + + _skip_if_no_pymysql() + frame = tm.makeTimeDataFrame() + frame.ix[0, 0] = np.nan + drop_sql = "DROP TABLE IF EXISTS test" + create_sql = sql.get_schema(frame, 'test', 'mysql') + cur = self.db.cursor() + cur.execute(drop_sql) + cur.execute(create_sql) + ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" + for idx, row in frame.iterrows(): + fmt_sql = format_query(ins, *row) + sql.tquery(fmt_sql, cur=cur) + + self.db.commit() + + result = sql.read_frame("select * from test", con=self.db) + result.index = frame.index + tm.assert_frame_equal(result, frame) + + def test_execute(self): + _skip_if_no_pymysql() + frame = tm.makeTimeDataFrame() + drop_sql = "DROP TABLE IF EXISTS test" + create_sql = sql.get_schema(frame, 'test', 'mysql') + cur = self.db.cursor() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Unknown table.*") + cur.execute(drop_sql) + cur.execute(create_sql) + ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" + + row = frame.ix[0].values.tolist() + sql.execute(ins, self.db, params=tuple(row)) + self.db.commit() + + result = sql.read_frame("select * from test", self.db) + result.index = frame.index[:1] + tm.assert_frame_equal(result, frame[:1]) + + def test_schema(self): + _skip_if_no_pymysql() + frame = tm.makeTimeDataFrame() + create_sql = sql.get_schema(frame, 'test', 'mysql') + lines = create_sql.splitlines() + for l in lines: + tokens = l.split(' ') + if len(tokens) == 2 and tokens[0] == 'A': + self.assert_(tokens[1] == 'DATETIME') + + frame = tm.makeTimeDataFrame() + drop_sql = "DROP TABLE IF EXISTS test" + create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],) + lines = create_sql.splitlines() + self.assert_('PRIMARY KEY (A,B)' in create_sql) + cur = self.db.cursor() + cur.execute(drop_sql) + cur.execute(create_sql) + + def test_execute_fail(self): + 
_skip_if_no_pymysql() + drop_sql = "DROP TABLE IF EXISTS test" + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a(5), b(5)) + ); + """ + cur = self.db.cursor() + cur.execute(drop_sql) + cur.execute(create_sql) + + sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db) + sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db) + + try: + sys.stdout = StringIO() + self.assertRaises(Exception, sql.execute, + 'INSERT INTO test VALUES("foo", "bar", 7)', + self.db) + finally: + sys.stdout = sys.__stdout__ + + def test_execute_closed_connection(self): + _skip_if_no_pymysql() + drop_sql = "DROP TABLE IF EXISTS test" + create_sql = """ + CREATE TABLE test + ( + a TEXT, + b TEXT, + c REAL, + PRIMARY KEY (a(5), b(5)) + ); + """ + cur = self.db.cursor() + cur.execute(drop_sql) + cur.execute(create_sql) + + sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db) + self.db.close() + try: + sys.stdout = StringIO() + self.assertRaises(Exception, sql.tquery, "select * from test", + con=self.db) + finally: + sys.stdout = sys.__stdout__ + + def test_na_roundtrip(self): + _skip_if_no_pymysql() + pass + + def _check_roundtrip(self, frame): + _skip_if_no_pymysql() + drop_sql = "DROP TABLE IF EXISTS test_table" + cur = self.db.cursor() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Unknown table.*") + cur.execute(drop_sql) + sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql') + result = sql.read_frame("select * from test_table", self.db) + + # HACK! Change this once indexes are handled properly. 
+ result.index = frame.index + result.index.name = frame.index.name + + expected = frame + tm.assert_frame_equal(result, expected) + + frame['txt'] = ['a'] * len(frame) + frame2 = frame.copy() + index = Index(lrange(len(frame2))) + 10 + frame2['Idx'] = index + drop_sql = "DROP TABLE IF EXISTS test_table2" + cur = self.db.cursor() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Unknown table.*") + cur.execute(drop_sql) + sql.write_frame(frame2, name='test_table2', con=self.db, flavor='mysql') + result = sql.read_frame("select * from test_table2", self.db, + index_col='Idx') + expected = frame.copy() + + # HACK! Change this once indexes are handled properly. + expected.index = index + expected.index.names = result.index.names + tm.assert_frame_equal(expected, result) + + def test_tquery(self): + try: + import pymysql + except ImportError: + raise nose.SkipTest("no pymysql") + frame = tm.makeTimeDataFrame() + drop_sql = "DROP TABLE IF EXISTS test_table" + cur = self.db.cursor() + cur.execute(drop_sql) + sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql') + result = sql.tquery("select A from test_table", self.db) + expected = frame.A + result = Series(result, frame.index) + tm.assert_series_equal(result, expected) + + try: + sys.stdout = StringIO() + self.assertRaises(sql.DatabaseError, sql.tquery, + 'select * from blah', con=self.db) + + self.assertRaises(sql.DatabaseError, sql.tquery, + 'select * from blah', con=self.db, retry=True) + finally: + sys.stdout = sys.__stdout__ + + def test_uquery(self): + try: + import pymysql + except ImportError: + raise nose.SkipTest("no pymysql") + frame = tm.makeTimeDataFrame() + drop_sql = "DROP TABLE IF EXISTS test_table" + cur = self.db.cursor() + cur.execute(drop_sql) + sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql') + stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)' + self.assertEqual(sql.uquery(stmt, con=self.db), 1) + + try: + sys.stdout = 
StringIO() + + self.assertRaises(sql.DatabaseError, sql.tquery, + 'insert into blah values (1)', con=self.db) + + self.assertRaises(sql.DatabaseError, sql.tquery, + 'insert into blah values (1)', con=self.db, + retry=True) + finally: + sys.stdout = sys.__stdout__ + + def test_keyword_as_column_names(self): + ''' + ''' + _skip_if_no_pymysql() + df = DataFrame({'From':np.ones(5)}) + sql.write_frame(df, con = self.db, name = 'testkeywords', + if_exists='replace', flavor='mysql') + + def test_if_exists(self): + _skip_if_no_pymysql() + df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']}) + df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']}) + table_name = 'table_if_exists' + sql_select = "SELECT * FROM %s" % table_name + + def clean_up(test_table_to_drop): + """ + Drops tables created from individual tests + so no dependencies arise from sequential tests + """ + if sql.table_exists(test_table_to_drop, self.db, flavor='mysql'): + cur = self.db.cursor() + cur.execute("DROP TABLE %s" % test_table_to_drop) + cur.close() + + # test if invalid value for if_exists raises appropriate error + self.assertRaises(ValueError, + sql.write_frame, + frame=df_if_exists_1, + con=self.db, + name=table_name, + flavor='mysql', + if_exists='notvalidvalue') + clean_up(table_name) + + # test if_exists='fail' + sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name, + flavor='mysql', if_exists='fail') + self.assertRaises(ValueError, + sql.write_frame, + frame=df_if_exists_1, + con=self.db, + name=table_name, + flavor='mysql', + if_exists='fail') + + # test if_exists='replace' + sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name, + flavor='mysql', if_exists='replace') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(1, 'A'), (2, 'B')]) + sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name, + flavor='mysql', if_exists='replace') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(3, 'C'), (4, 'D'), (5, 
'E')]) + clean_up(table_name) + + # test if_exists='append' + sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name, + flavor='mysql', if_exists='fail') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(1, 'A'), (2, 'B')]) + sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name, + flavor='mysql', if_exists='append') + self.assertEqual(sql.tquery(sql_select, con=self.db), + [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')]) + clean_up(table_name) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
Some first fixes based on running the old sql tests (from 0.13.1 tag) - fixed `get_schema` (and adapted is signature back to how it was for 0.13.1). For this I added back the old function, as this functionality was not possible with the refactored class based approach (for those you always have to provide a connection object or engine, this was not necessary for `get_schema`) - `table_exists` seems to be renamed to `has_table`? I don't know why (and we should also decide if we expose this as a public function), but added it back for now as an alias. All tests from the old test suite are now running with the new code (for sqlite, not yet tested for mysql), except for the following issues: - `tquery` had a `retry` and `cur` arguments (which were at least used in the tests), but not that simple to put back with the new implementation - OperationalError is changed to DatabaseError @jreback I also changed the DeprecationWarnings to FutureWarnings (I am right this is the canonical way to raise deprecation warnings in pandas?) and catched them in the tests. I don't know if this solves #6984 for the sql warnings?
https://api.github.com/repos/pandas-dev/pandas/pulls/6987
2014-04-27T21:53:08Z
2014-05-08T12:26:22Z
2014-05-08T12:26:22Z
2014-06-12T12:52:48Z
COMPAT: remove deprecated optinos from printing in test suite (GH6984)
diff --git a/doc/source/release.rst b/doc/source/release.rst index f7b47b06ef841..fce5f2f93e68b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -198,7 +198,9 @@ Deprecations will not be supported in a future release (:issue:`6645`) - Indexers will warn ``FutureWarning`` when used with a scalar indexer and - a non-floating point Index (:issue:`4892`) + a non-floating point Index (:issue:`4892`, :issue:`6960`) + +- Numpy 1.9 compat w.r.t. deprecation warnings (:issue:`6960`) - :meth:`Panel.shift` now has a function signature that matches :meth:`DataFrame.shift`. The old positional argument ``lags`` has been changed to a keyword argument diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index dd6a21ccecf55..acacbd1c0b43c 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -392,7 +392,7 @@ Deprecations will not be supported in a future release (:issue:`6645`) - Indexers will warn ``FutureWarning`` when used with a scalar indexer and - a non-floating point Index (:issue:`4892`) + a non-floating point Index (:issue:`4892`, :issue:`6960`) .. 
code-block:: python @@ -401,17 +401,25 @@ Deprecations pandas/core/index.py:469: FutureWarning: scalar indexers for index type Int64Index should be integers and not floating point Out[1]: 1 - In [5]: Series(1,np.arange(5)).iloc[3.0] + In [2]: Series(1,np.arange(5)).iloc[3.0] pandas/core/index.py:469: FutureWarning: scalar indexers for index type Int64Index should be integers and not floating point - Out[5]: 1 + Out[2]: 1 - # these are Float64Indexes, so integer or floating point is acceptable - In [3]: Series(1,np.arange(5.))[3] - Out[3]: 1 + In [3]: Series(1,np.arange(5)).iloc[3.0:4] + pandas/core/index.py:527: FutureWarning: slice indexers when using iloc should be integers and not floating point + Out[3]: + 3 1 + dtype: int64 - In [4]: Series(1,np.arange(5.))[3.0] + # these are Float64Indexes, so integer or floating point is acceptable + In [4]: Series(1,np.arange(5.))[3] Out[4]: 1 + In [5]: Series(1,np.arange(5.))[3.0] + Out[6]: 1 + +- Numpy 1.9 compat w.r.t. deprecation warnings (:issue:`6960`) + - :meth:`Panel.shift` now has a function signature that matches :meth:`DataFrame.shift`. The old positional argument ``lags`` has been changed to a keyword argument ``periods`` with a default value of 1. A ``FutureWarning`` is raised if the @@ -484,13 +492,13 @@ Enhancements - Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) - ``CustomBuisnessMonthBegin`` and ``CustomBusinessMonthEnd`` are now available (:issue:`6866`) -- :meth:`Series.quantile` and :meth:`DataFrame.quantile` now accept an array of +- :meth:`Series.quantile` and :meth:`DataFrame.quantile` now accept an array of quantiles. - ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) .. 
ipython:: python - import datetime + import datetime df = DataFrame({ 'Branch' : 'A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Carl Joe Joe'.split(), diff --git a/pandas/core/config.py b/pandas/core/config.py index f2f932e39759a..ebac3d40221e7 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -140,7 +140,7 @@ def _describe_option(pat='', _print_desc=True): return s -def _reset_option(pat): +def _reset_option(pat, silent=False): keys = _select_options(pat) @@ -154,7 +154,7 @@ def _reset_option(pat): 'value') for k in keys: - _set_option(k, _registered_options[k].defval) + _set_option(k, _registered_options[k].defval, silent=silent) def get_default_val(pat): @@ -361,7 +361,7 @@ def __enter__(self): def __exit__(self, *args): if self.undo: for pat, val in self.undo: - _set_option(pat, val) + _set_option(pat, val, silent=True) def register_option(key, defval, doc='', validator=None, cb=None): @@ -567,6 +567,7 @@ def _warn_if_deprecated(key): d = _get_deprecated_option(key) if d: if d.msg: + print(d.msg) warnings.warn(d.msg, DeprecationWarning) else: msg = "'%s' is deprecated" % key diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index aee1fad6c3253..3404e4ddb7d44 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -104,8 +104,7 @@ def test_eng_float_formatter(self): fmt.set_eng_float_format(accuracy=0) repr(self.frame) - - fmt.reset_option('^display.') + self.reset_display_options() def test_repr_tuples(self): buf = StringIO() @@ -1034,7 +1033,7 @@ def test_to_string_no_index(self): assert(df_s == expected) def test_to_string_float_formatting(self): - fmt.reset_option('^display.') + self.reset_display_options() fmt.set_option('display.precision', 6, 'display.column_space', 12, 'display.notebook_repr_html', False) @@ -1065,7 +1064,7 @@ def test_to_string_float_formatting(self): '1 0.253') assert(df_s == expected) - fmt.reset_option('^display.') + self.reset_display_options() 
self.assertEqual(get_option("display.precision"), 7) df = DataFrame({'x': [1e9, 0.2512]}) @@ -1157,7 +1156,7 @@ def test_to_string_index_formatter(self): self.assertEqual(rs, xp) def test_to_string_left_justify_cols(self): - fmt.reset_option('^display.') + self.reset_display_options() df = DataFrame({'x': [3234, 0.253]}) df_s = df.to_string(justify='left') expected = (' x \n' @@ -1166,7 +1165,7 @@ def test_to_string_left_justify_cols(self): assert(df_s == expected) def test_to_string_format_na(self): - fmt.reset_option('^display.') + self.reset_display_options() df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4], 'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']}) result = df.to_string() @@ -1434,14 +1433,14 @@ def test_repr_html(self): fmt.set_option('display.notebook_repr_html', False) self.frame._repr_html_() - fmt.reset_option('^display.') + self.reset_display_options() df = DataFrame([[1, 2], [3, 4]]) self.assertTrue('2 rows' in df._repr_html_()) fmt.set_option('display.show_dimensions', False) self.assertFalse('2 rows' in df._repr_html_()) - fmt.reset_option('^display.') + self.reset_display_options() def test_repr_html_wide(self): row = lambda l, k: [tm.rands(k) for _ in range(l)] @@ -1580,7 +1579,7 @@ def get_ipython(): repstr = self.frame._repr_html_() self.assertIn('class', repstr) # info fallback - fmt.reset_option('^display.') + self.reset_display_options() def test_to_html_with_classes(self): df = pandas.DataFrame() @@ -2092,7 +2091,7 @@ def test_eng_float_formatter(self): '3 1E+06') self.assertEqual(result, expected) - fmt.reset_option('^display.') + self.reset_display_options() def compare(self, formatter, input, output): formatted_input = formatter(input) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 11c065a52d78e..d96fbb9ec05d7 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4355,7 +4355,7 @@ def test_repr_dimensions(self): fmt.set_option('display.show_dimensions', False) self.assertFalse("2 rows 
x 2 columns" in repr(df)) - fmt.reset_option('^display\.') + self.reset_display_options() @slow def test_repr_big(self): @@ -4391,7 +4391,7 @@ def test_repr_unsortable(self): fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000) repr(self.frame) - fmt.reset_option('^display\.') + self.reset_display_options() warnings.filters = warn_filters diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index ecb09ac395417..c3cea9cfbe0a0 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -2145,7 +2145,7 @@ def test_format_sparse_config(self): result = self.index.format() self.assertEqual(result[1], 'foo two') - pd.reset_option("^display\.") + self.reset_display_options() warnings.filters = warn_filters diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 95fcc7848c433..0d1f2118fbe88 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -69,6 +69,10 @@ def setUpClass(cls): def tearDownClass(cls): pass + def reset_display_options(self): + # reset the display options + pd.reset_option('^display.',silent=True) + def assert_numpy_array_equal(self, np_array, assert_equal): if np.array_equal(np_array, assert_equal): return
closes #6984
https://api.github.com/repos/pandas-dev/pandas/pulls/6986
2014-04-27T20:26:29Z
2014-04-27T21:38:28Z
2014-04-27T21:38:28Z
2014-07-16T09:03:34Z
TST: test_frame/test_sum not comparing correctly on smaller sized dtypes (GH6982)
diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx index c573d8b2afbad..38be1970de7b3 100644 --- a/pandas/src/testing.pyx +++ b/pandas/src/testing.pyx @@ -121,7 +121,7 @@ cpdef assert_almost_equal(a, b, bint check_less_precise=False): dtype_a = np.dtype(type(a)) dtype_b = np.dtype(type(b)) if dtype_a.kind == 'f' and dtype_b == 'f': - if dtype_a.itemsize <= 4 and dtype_b.itemsize <= 4: + if dtype_a.itemsize <= 4 or dtype_b.itemsize <= 4: decimal = 3 if np.isinf(a):
closes #6982
https://api.github.com/repos/pandas-dev/pandas/pulls/6985
2014-04-27T19:02:09Z
2014-04-27T22:16:48Z
2014-04-27T22:16:48Z
2014-07-16T09:03:32Z
PERF: improved performance of compatible pickles (GH6899)
diff --git a/doc/source/release.rst b/doc/source/release.rst index fce5f2f93e68b..d100541ecbf92 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -292,6 +292,7 @@ Improvements to existing features specified (:issue:`6607`) - ``read_excel`` can now read milliseconds in Excel dates and times with xlrd >= 0.9.3. (:issue:`5945`) - ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) +- Improved performance of compatible pickles (:issue:`6899`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index acacbd1c0b43c..43096b133f26e 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -522,6 +522,8 @@ Performance (e.g. MonthEnd,BusinessMonthEnd), (:issue:`6479`) - Improve performance of ``CustomBusinessDay`` (:issue:`6584`) - improve performance of slice indexing on Series with string keys (:issue:`6341`, :issue:`6372`) +- Performance improvements in timedelta conversions for integer dtypes (:issue:`6754`) +- Improved performance of compatible pickles (:issue:`6899`) Experimental ~~~~~~~~~~~~ diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 915c1e9ae1574..e80bfec9c8dba 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -34,16 +34,28 @@ def read_pickle(path): """ def try_read(path, encoding=None): + # try with cPickle # try with current pickle, if we have a Type Error then # try with the compat pickle to handle subclass changes # pass encoding only if its not None as py2 doesn't handle # the param + + # cpickle + # GH 6899 try: with open(path, 'rb') as fh: - return pc.load(fh, encoding=encoding, compat=False) + return pkl.load(fh) except: - with open(path, 'rb') as fh: - return pc.load(fh, encoding=encoding, compat=True) + + # reg/patched pickle + try: + with open(path, 'rb') as fh: + return pc.load(fh, encoding=encoding, compat=False) + + # compat pickle + except: + with open(path, 'rb') as fh: + return pc.load(fh, encoding=encoding, 
compat=True) try: return try_read(path) diff --git a/vb_suite/packers.py b/vb_suite/packers.py index f2eac0e28cd44..ca0193e9b2c10 100644 --- a/vb_suite/packers.py +++ b/vb_suite/packers.py @@ -7,6 +7,7 @@ import os import pandas as pd from pandas.core import common as com +from random import randrange f = '__test__.msg' def remove(f): @@ -15,10 +16,18 @@ def remove(f): except: pass -index = date_range('20000101',periods=50000,freq='H') -df = DataFrame({'float1' : randn(50000), - 'float2' : randn(50000)}, +N=100000 +C=5 +index = date_range('20000101',periods=N,freq='H') +df = DataFrame(dict([ ("float{0}".format(i),randn(N)) for i in range(C) ]), index=index) + +N=100000 +C=5 +index = date_range('20000101',periods=N,freq='H') +df2 = DataFrame(dict([ ("float{0}".format(i),randn(N)) for i in range(C) ]), + index=index) +df2['object'] = ['%08x'%randrange(16**8) for _ in range(N)] remove(f) """ @@ -26,7 +35,7 @@ def remove(f): # msgpack setup = common_setup + """ -df.to_msgpack(f) +df2.to_msgpack(f) """ packers_read_pack = Benchmark("pd.read_msgpack(f)", setup, start_date=start_date) @@ -34,13 +43,13 @@ def remove(f): setup = common_setup + """ """ -packers_write_pack = Benchmark("df.to_msgpack(f)", setup, cleanup="remove(f)", start_date=start_date) +packers_write_pack = Benchmark("df2.to_msgpack(f)", setup, cleanup="remove(f)", start_date=start_date) #---------------------------------------------------------------------- # pickle setup = common_setup + """ -df.to_pickle(f) +df2.to_pickle(f) """ packers_read_pickle = Benchmark("pd.read_pickle(f)", setup, start_date=start_date) @@ -48,7 +57,7 @@ def remove(f): setup = common_setup + """ """ -packers_write_pickle = Benchmark("df.to_pickle(f)", setup, cleanup="remove(f)", start_date=start_date) +packers_write_pickle = Benchmark("df2.to_pickle(f)", setup, cleanup="remove(f)", start_date=start_date) #---------------------------------------------------------------------- # csv @@ -68,7 +77,7 @@ def remove(f): # hdf store setup 
= common_setup + """ -df.to_hdf(f,'df') +df2.to_hdf(f,'df') """ packers_read_hdf_store = Benchmark("pd.read_hdf(f,'df')", setup, start_date=start_date) @@ -76,13 +85,13 @@ def remove(f): setup = common_setup + """ """ -packers_write_hdf_store = Benchmark("df.to_hdf(f,'df')", setup, cleanup="remove(f)", start_date=start_date) +packers_write_hdf_store = Benchmark("df2.to_hdf(f,'df')", setup, cleanup="remove(f)", start_date=start_date) #---------------------------------------------------------------------- # hdf table setup = common_setup + """ -df.to_hdf(f,'df',table=True) +df2.to_hdf(f,'df',table=True) """ packers_read_hdf_table = Benchmark("pd.read_hdf(f,'df')", setup, start_date=start_date) @@ -90,7 +99,7 @@ def remove(f): setup = common_setup + """ """ -packers_write_hdf_table = Benchmark("df.to_hdf(f,'df',table=True)", setup, cleanup="remove(f)", start_date=start_date) +packers_write_hdf_table = Benchmark("df2.to_hdf(f,'df',table=True)", setup, cleanup="remove(f)", start_date=start_date) #---------------------------------------------------------------------- # json
closes #6899 though seems that cPickle can on a small file/dataset be slower so waiting for results on #6899 tests
https://api.github.com/repos/pandas-dev/pandas/pulls/6983
2014-04-27T16:31:13Z
2014-04-27T22:15:44Z
2014-04-27T22:15:44Z
2014-06-30T18:09:34Z
COMPAT: windows dtype fix in for Panel.count
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 9c2df9b5dde9d..aaf0530487061 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -99,7 +99,7 @@ def _bn_ok_dtype(dt, name): # bottleneck does not properly upcast during the sum # so can overflow if name == 'nansum': - if dt != np.bool_ and dt.itemsize < 8: + if dt.itemsize < 8: return False return True
https://api.github.com/repos/pandas-dev/pandas/pulls/6981
2014-04-27T16:09:21Z
2014-04-27T16:09:32Z
2014-04-27T16:09:32Z
2014-07-22T18:40:54Z
ERR: Add check for iterators when creating DataFrame, fixes #5357
diff --git a/doc/source/release.rst b/doc/source/release.rst index fce5f2f93e68b..705d31695bd23 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -178,6 +178,8 @@ API Changes - change ``AssertionError`` to ``TypeError`` for invalid types passed to ``concat`` (:issue:`6583`) - Add :class:`~pandas.io.parsers.ParserWarning` class for fallback and option validation warnings in :func:`read_csv`/:func:`read_table` (:issue:`6607`) +- Raise a ``TypeError`` when ``DataFrame`` is passed an iterator as the + ``data`` argument (:issue:`5357`) Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 10a0c9050af50..ba730c0c0fe41 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -253,6 +253,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) + elif isinstance(data, collections.Iterator): + raise TypeError("data argument can't be an iterator") else: try: arr = np.array(data, dtype=dtype, copy=copy) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index d96fbb9ec05d7..fc68449f75e0f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3135,6 +3135,10 @@ def test_constructor_miscast_na_int_dtype(self): expected = DataFrame([[np.nan, 1], [1, 0]]) assert_frame_equal(df, expected) + def test_constructor_iterator_failure(self): + with assertRaisesRegexp(TypeError, 'iterator'): + df = DataFrame(iter([1, 2, 3])) + def test_constructor_column_duplicates(self): # it works! #2079 df = DataFrame([[8, 5]], columns=['a', 'a'])
This is a fix for #5357, and adds a more helpful error message when trying to use an iterator as the `data` argument for a `DataFrame`. I've added the type check for iterators in the final else clause where all the valid options have already been exhausted, so it shouldn't interfere with any valid cases like passing a list of iterators.
https://api.github.com/repos/pandas-dev/pandas/pulls/6977
2014-04-27T02:18:18Z
2014-04-27T23:39:52Z
2014-04-27T23:39:52Z
2014-07-16T09:03:26Z
ENH: support pie plot in series and dataframe plot
diff --git a/doc/source/conf.py b/doc/source/conf.py index 117aa1724c4f2..4f01fe4f4b278 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -277,6 +277,7 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'statsmodels': ('http://statsmodels.sourceforge.net/devel/', None), + 'matplotlib': ('http://matplotlib.org/', None), 'python': ('http://docs.python.org/', None) } import glob diff --git a/doc/source/release.rst b/doc/source/release.rst index 9bfc3609f5b6d..58754e62610c6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -58,6 +58,7 @@ New features ``DataFrame(dict)`` and ``Series(dict)`` create ``MultiIndex`` columns and index where applicable (:issue:`4187`) - Hexagonal bin plots from ``DataFrame.plot`` with ``kind='hexbin'`` (:issue:`5478`) +- Pie plots from ``Series.plot`` and ``DataFrame.plot`` with ``kind='pie'`` (:issue:`6976`) - Added the ``sym_diff`` method to ``Index`` (:issue:`5543`) - Added ``to_julian_date`` to ``TimeStamp`` and ``DatetimeIndex``. The Julian Date is used primarily in astronomy and represents the number of days from diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index c70e32fd18694..16b63ec374fa2 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -364,7 +364,8 @@ Plotting ~~~~~~~~ - Hexagonal bin plots from ``DataFrame.plot`` with ``kind='hexbin'`` (:issue:`5478`), See :ref:`the docs<visualization.hexbin>`. -- ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`) +- ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`), See :ref:`the docs<visualization.area>` +- Pie plots from ``Series.plot`` and ``DataFrame.plot`` with ``kind='pie'`` (:issue:`6976`), See :ref:`the docs<visualization.pie>`. 
- Plotting with Error Bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`, :issue:`6834`), See :ref:`the docs<visualization.errorbars>`. - ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``, See :ref:`the docs<visualization.table>`. - ``plot(legend='reverse')`` will now reverse the order of legend labels for diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 8906e82eb937b..255acad7f927b 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -588,6 +588,80 @@ given by column ``z``. The bins are aggregated with numpy's ``max`` function. See the `matplotlib hexbin documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`__ for more. +.. _visualization.pie: + +Pie plot +~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 0.14 + +You can create pie plot with ``DataFrame.plot`` or ``Series.plot`` with ``kind='pie'``. +If data includes ``NaN``, it will be automatically filled by 0. +If data contains negative value, ``ValueError`` will be raised. + +.. ipython:: python + :suppress: + + plt.figure() + +.. ipython:: python + + series = Series(3 * rand(4), index=['a', 'b', 'c', 'd'], name='series') + + @savefig series_pie_plot.png + series.plot(kind='pie') + +Note that pie plot with ``DataFrame`` requires either to specify target column by ``y`` +argument or ``subplots=True``. When ``y`` is specified, pie plot of selected column +will be drawn. If ``subplots=True`` is specified, pie plots for each columns are drawn as subplots. +Legend will be drawn in each pie plots by default, specify ``legend=False`` to hide it. + +.. ipython:: python + :suppress: + + plt.figure() + +.. 
ipython:: python + + df = DataFrame(3 * rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y']) + + @savefig df_pie_plot.png + df.plot(kind='pie', subplots=True) + +You can use ``labels`` and ``colors`` keywords to specify labels and colors of each wedges +(Cannot use ``label`` and ``color``, because of matplotlib's specification). +If you want to hide wedge labels, specify ``labels=None``. +If ``fontsize`` is specified, the value will be applied to wedge labels. +Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used. + + +.. ipython:: python + :suppress: + + plt.figure() + +.. ipython:: python + + @savefig series_pie_plot_options.png + series.plot(kind='pie', labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'], + autopct='%.2f', fontsize=20) + +If you pass values which sum total is less than 1.0, matplotlib draws semicircle. + +.. ipython:: python + :suppress: + + plt.figure() + +.. ipython:: python + + series = Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2') + + @savefig series_pie_plot_semi.png + series.plot(kind='pie') + +See the `matplotlib pie documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more. + .. _visualization.andrews_curves: Andrews Curves diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 0186ac4c2b74b..829b2b296155f 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -2,6 +2,7 @@ # coding: utf-8 import nose +import itertools import os import string from distutils.version import LooseVersion @@ -138,6 +139,63 @@ def test_irregular_datetime(self): ax.set_xlim('1/1/1999', '1/1/2001') self.assertEqual(xp, ax.get_xlim()[0]) + @slow + def test_pie_series(self): + # if sum of values is less than 1.0, pie handle them as rate and draw semicircle. 
+ series = Series(np.random.randint(1, 5), + index=['a', 'b', 'c', 'd', 'e'], name='YLABEL') + ax = _check_plot_works(series.plot, kind='pie') + for t, expected in zip(ax.texts, series.index): + self.assertEqual(t.get_text(), expected) + self.assertEqual(ax.get_ylabel(), 'YLABEL') + + # without wedge labels + ax = _check_plot_works(series.plot, kind='pie', labels=None) + for t, expected in zip(ax.texts, [''] * 5): + self.assertEqual(t.get_text(), expected) + + # with less colors than elements + color_args = ['r', 'g', 'b'] + ax = _check_plot_works(series.plot, kind='pie', colors=color_args) + + import matplotlib.colors as colors + conv = colors.colorConverter + color_expected = ['r', 'g', 'b', 'r', 'g'] + for p, expected in zip(ax.patches, color_expected): + self.assertEqual(p.get_facecolor(), conv.to_rgba(expected)) + + # with labels and colors + labels = ['A', 'B', 'C', 'D', 'E'] + color_args = ['r', 'g', 'b', 'c', 'm'] + ax = _check_plot_works(series.plot, kind='pie', labels=labels, colors=color_args) + + for t, expected in zip(ax.texts, labels): + self.assertEqual(t.get_text(), expected) + for p, expected in zip(ax.patches, color_args): + self.assertEqual(p.get_facecolor(), conv.to_rgba(expected)) + + # with autopct and fontsize + ax = _check_plot_works(series.plot, kind='pie', colors=color_args, + autopct='%.2f', fontsize=7) + pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())] + iters = [iter(series.index), iter(pcts)] + expected_texts = list(it.next() for it in itertools.cycle(iters)) + for t, expected in zip(ax.texts, expected_texts): + self.assertEqual(t.get_text(), expected) + self.assertEqual(t.get_fontsize(), 7) + + # includes negative value + with tm.assertRaises(ValueError): + series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e']) + series.plot(kind='pie') + + # includes nan + series = Series([1, 2, np.nan, 4], + index=['a', 'b', 'c', 'd'], name='YLABEL') + ax = _check_plot_works(series.plot, kind='pie') + for 
t, expected in zip(ax.texts, series.index): + self.assertEqual(t.get_text(), expected) + @slow def test_hist(self): _check_plot_works(self.ts.hist) @@ -1511,6 +1569,39 @@ def test_allow_cmap(self): df.plot(kind='hexbin', x='A', y='B', cmap='YlGn', colormap='BuGn') + @slow + def test_pie_df(self): + df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'], + index=['a', 'b', 'c', 'd', 'e']) + with tm.assertRaises(ValueError): + df.plot(kind='pie') + + ax = _check_plot_works(df.plot, kind='pie', y='Y') + for t, expected in zip(ax.texts, df.index): + self.assertEqual(t.get_text(), expected) + + axes = _check_plot_works(df.plot, kind='pie', subplots=True) + self.assertEqual(len(axes), len(df.columns)) + for ax in axes: + for t, expected in zip(ax.texts, df.index): + self.assertEqual(t.get_text(), expected) + for ax, ylabel in zip(axes, df.columns): + self.assertEqual(ax.get_ylabel(), ylabel) + + labels = ['A', 'B', 'C', 'D', 'E'] + color_args = ['r', 'g', 'b', 'c', 'm'] + axes = _check_plot_works(df.plot, kind='pie', subplots=True, + labels=labels, colors=color_args) + self.assertEqual(len(axes), len(df.columns)) + + import matplotlib.colors as colors + conv = colors.colorConverter + for ax in axes: + for t, expected in zip(ax.texts, labels): + self.assertEqual(t.get_text(), expected) + for p, expected in zip(ax.patches, color_args): + self.assertEqual(p.get_facecolor(), conv.to_rgba(expected)) + def test_errorbar_plot(self): d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)} @@ -1918,6 +2009,7 @@ def _check_plot_works(f, *args, **kwargs): plt.savefig(path) finally: tm.close(fig) + return ret diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 55aa01fd2e265..4453b1db359e9 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1251,16 +1251,17 @@ def _get_style(self, i, col_name): return style or None - def _get_colors(self): + def _get_colors(self, num_colors=None, color_kwds='color'): from pandas.core.frame import DataFrame - 
if isinstance(self.data, DataFrame): - num_colors = len(self.data.columns) - else: - num_colors = 1 + if num_colors is None: + if isinstance(self.data, DataFrame): + num_colors = len(self.data.columns) + else: + num_colors = 1 return _get_standard_colors(num_colors=num_colors, colormap=self.colormap, - color=self.kwds.get('color')) + color=self.kwds.get(color_kwds)) def _maybe_add_color(self, colors, kwds, style, i): has_color = 'color' in kwds or self.colormap is not None @@ -1939,6 +1940,63 @@ def _post_plot_logic(self): # self.axes[0].legend(loc='best') +class PiePlot(MPLPlot): + + def __init__(self, data, kind=None, **kwargs): + data = data.fillna(value=0) + if (data < 0).any().any(): + raise ValueError("{0} doesn't allow negative values".format(kind)) + MPLPlot.__init__(self, data, kind=kind, **kwargs) + + def _args_adjust(self): + self.grid = False + self.logy = False + self.logx = False + self.loglog = False + + def _get_layout(self): + from pandas import DataFrame + if isinstance(self.data, DataFrame): + return (1, len(self.data.columns)) + else: + return (1, 1) + + def _validate_color_args(self): + pass + + def _make_plot(self): + self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data), + color_kwds='colors')) + + for i, (label, y) in enumerate(self._iter_data()): + ax = self._get_ax(i) + if label is not None: + label = com.pprint_thing(label) + ax.set_ylabel(label) + + kwds = self.kwds.copy() + + idx = [com.pprint_thing(v) for v in self.data.index] + labels = kwds.pop('labels', idx) + # labels is used for each wedge's labels + results = ax.pie(y, labels=labels, **kwds) + + if kwds.get('autopct', None) is not None: + patches, texts, autotexts = results + else: + patches, texts = results + autotexts = [] + + if self.fontsize is not None: + for t in texts + autotexts: + t.set_fontsize(self.fontsize) + + # leglabels is used for legend labels + leglabels = labels if labels is not None else idx + for p, l in zip(patches, leglabels): + 
self._add_legend_handle(p, l) + + class BoxPlot(MPLPlot): pass @@ -1950,12 +2008,14 @@ class HistPlot(MPLPlot): _common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area'] # kinds supported by dataframe _dataframe_kinds = ['scatter', 'hexbin'] -_all_kinds = _common_kinds + _dataframe_kinds +# kinds supported only by series or dataframe single column +_series_kinds = ['pie'] +_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds _plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot, 'kde': KdePlot, 'scatter': ScatterPlot, 'hexbin': HexBinPlot, - 'area': AreaPlot} + 'area': AreaPlot, 'pie': PiePlot} def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, @@ -2054,7 +2114,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, """ kind = _get_standard_kind(kind.lower().strip()) - if kind in _dataframe_kinds or kind in _common_kinds: + if kind in _all_kinds: klass = _plot_klass[kind] else: raise ValueError('Invalid chart type given %s' % kind) @@ -2068,6 +2128,24 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, figsize=figsize, logx=logx, logy=logy, sort_columns=sort_columns, secondary_y=secondary_y, **kwds) + elif kind in _series_kinds: + if y is None and subplots is False: + msg = "{0} requires either y column or 'subplots=True'" + raise ValueError(msg.format(kind)) + elif y is not None: + if com.is_integer(y) and not frame.columns.holds_integer(): + y = frame.columns[y] + frame = frame[y] # converted to series actually + frame.index.name = y + + plot_obj = klass(frame, kind=kind, subplots=subplots, + rot=rot,legend=legend, ax=ax, style=style, + fontsize=fontsize, use_index=use_index, sharex=sharex, + sharey=sharey, xticks=xticks, yticks=yticks, + xlim=xlim, ylim=ylim, title=title, grid=grid, + figsize=figsize, + sort_columns=sort_columns, + **kwds) else: if x is not None: if com.is_integer(x) and not frame.columns.holds_integer(): @@ -2168,7 +2246,7 @@ def plot_series(series, 
label=None, kind='line', use_index=True, rot=None, """ kind = _get_standard_kind(kind.lower().strip()) - if kind in _common_kinds: + if kind in _common_kinds or kind in _series_kinds: klass = _plot_klass[kind] else: raise ValueError('Invalid chart type given %s' % kind)
Related to #413, added pie plot for `Series.plot` and `DataFrame.plot` kind. If data includes `NaN`, it will be automatically filled by 0. If data contains negative value, `ValueError` will be raised. ``` import pandas as pd import numpy as np series = pd.Series(3 * np.random.rand(4), index=['a', 'b', 'c', 'd'], name='series') series.plot(kind='pie') ``` ![series_pie_plot](https://cloud.githubusercontent.com/assets/1696302/2810158/526e4f82-cda1-11e3-95b3-422d8e5b18ef.png) ### Plotting with DataFrame Pie plot with `DataFrame` requires either to specify target column by `y` argument or `subplots=True`. When `y` is specified, pie plot of selected column will be drawn. If `subplots=True` is specified, pie plots for each columns are drawn as subplots. Legend will be drawn in each pie plots by default, specify `legend=False` to hide it. ``` df = pd.DataFrame(3 * np.random.rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y']) df.plot(kind='pie', subplots=True) ``` ![df_pie_plot](https://cloud.githubusercontent.com/assets/1696302/2810159/5691fc76-cda1-11e3-9532-43a7ebafc3e5.png) ### Plotting with Options You can use `labels` and `colors` keywords to specify labels and colors of each wedges (Cannot use `label` and `color`, because of matplotlib's specification). If you want to hide wedge labels, specify `labels=None`. If `fontsize` is specified, the value will be applied to wedge labels. Also, other keywords supported by `matplotlib.pyplot.pie` can be used. ``` series.plot(kind='pie', labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'], autopct='%.2f', fontsize=20) ``` ![series_pie_plot_options](https://cloud.githubusercontent.com/assets/1696302/2810160/5b5d8f72-cda1-11e3-87b0-de164b887c75.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/6976
2014-04-27T00:22:28Z
2014-05-05T16:15:22Z
2014-05-05T16:15:22Z
2014-06-12T21:10:18Z
Fix for GH 6885 - get_dummies chokes on unicode values
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6d8f915e2ebb8..d4fbd288221f3 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -381,6 +381,7 @@ Bug Fixes - Bug in arithmetic operations affecting to NaT (:issue:`6873`) - Bug in ``Series.str.extract`` where the resulting ``Series`` from a single group match wasn't renamed to the group name +- Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) pandas 0.13.1 ------------- diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 7dc266617c5fd..2f4dbc2598126 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -1017,7 +1017,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False): dummy_mat[cat.labels == -1] = 0 if prefix is not None: - dummy_cols = ['%s%s%s' % (prefix, prefix_sep, str(v)) + dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels] else: dummy_cols = levels diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index b04fb979e6c8e..42427617991af 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -18,7 +18,7 @@ from pandas.core.reshape import (melt, convert_dummies, lreshape, get_dummies, wide_to_long) import pandas.util.testing as tm -from pandas.compat import StringIO, cPickle, range +from pandas.compat import StringIO, cPickle, range, u _multiprocess_can_split_ = True @@ -199,6 +199,16 @@ def test_include_na(self): exp_just_na = DataFrame(Series(1.0,index=[0]),columns=[nan]) assert_array_equal(res_just_na.values, exp_just_na.values) + def test_unicode(self): # See GH 6885 - get_dummies chokes on unicode values + import unicodedata + e = 'e' + eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE') + s = [e, eacute, eacute] + res = get_dummies(s, prefix='letter') + exp = DataFrame({'letter_e': {0: 1.0, 1: 0.0, 2: 0.0}, + u('letter_%s') % eacute: {0: 0.0, 1: 1.0, 2: 1.0}}) + assert_frame_equal(res, exp) + class 
TestConvertDummies(tm.TestCase): def test_convert_dummies(self): df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
closes #6885 Please be gentle - this is the first time I've tried to contribute either to someone else's python project or another project on github, so if my python or git foo is lacking, I apologise in advance!
https://api.github.com/repos/pandas-dev/pandas/pulls/6975
2014-04-26T22:55:12Z
2014-04-30T12:40:10Z
null
2014-06-20T23:25:14Z
Panel shift revert
diff --git a/doc/source/release.rst b/doc/source/release.rst index c975143b0ef67..4eb40818ce4e5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -151,7 +151,6 @@ API Changes to a non-unique item in the ``Index`` (previously raised a ``KeyError``). (:issue:`6738`) - all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) -- ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) - ``to_excel`` now converts ``np.inf`` into a string representation, customizable by the ``inf_rep`` keyword argument (Excel has no native inf representation) (:issue:`6782`) @@ -424,6 +423,7 @@ Bug Fixes - Bug in ``DataFrame.apply`` with functions that used *args or **kwargs and returned an empty result (:issue:`6952`) - Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) +- Moved ``Panel.shift`` to ``NDFrame.slice_shift`` and fixed to respect multiple dtypes. (:issue:`6959`) pandas 0.13.1 ------------- diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 34480668df8c9..6743ee5981e57 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -199,7 +199,6 @@ API changes covs[df.index[-1]] - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) -- ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. 
(:issue:`6734`) - ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3f2ecd8afd2d4..aa45f072a72ac 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3227,6 +3227,42 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): return self._constructor(new_data).__finalize__(self) + def slice_shift(self, periods=1, axis=0, **kwds): + """ + Equivalent to `shift` without copying data. The shifted data will + not include the dropped periods and the shifted axis will be smaller + than the original. + + Parameters + ---------- + periods : int + Number of periods to move, can be positive or negative + + Notes + ----- + While the `slice_shift` is faster than `shift`, you may pay for it + later during alignment. + + Returns + ------- + shifted : same type as caller + """ + if periods == 0: + return self + + if periods > 0: + vslicer = slice(None, -periods) + islicer = slice(periods, None) + else: + vslicer = slice(-periods, None) + islicer = slice(None, periods) + + new_obj = self._slice(vslicer, axis=axis) + shifted_axis = self._get_axis(axis)[islicer] + new_obj.set_axis(axis, shifted_axis) + + return new_obj.__finalize__(self) + def tshift(self, periods=1, freq=None, axis=0, **kwds): """ Shift the time index, using the index's frequency if available diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 5eccf49dd9e42..757bef0d1526d 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1154,7 +1154,8 @@ def count(self, axis='major'): @deprecate_kwarg(old_arg_name='lags', new_arg_name='periods') def shift(self, periods=1, freq=None, axis='major'): """ - Shift major or minor axis by specified number of leads/lags. + Shift major or minor axis by specified number of leads/lags. 
Drops + periods right now compared with DataFrame.shift Parameters ---------- @@ -1171,7 +1172,7 @@ def shift(self, periods=1, freq=None, axis='major'): if axis == 'items': raise ValueError('Invalid axis') - return super(Panel, self).shift(periods, freq=freq, axis=axis) + return super(Panel, self).slice_shift(periods, axis=axis) def tshift(self, periods=1, freq=None, axis='major', **kwds): return super(Panel, self).tshift(periods, freq, axis, **kwds) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 21207a6f97ddd..04749073166d0 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -21,7 +21,8 @@ assert_almost_equal, ensure_clean, assertRaisesRegexp, - makeCustomDataframe as mkdf + makeCustomDataframe as mkdf, + makeMixedDataFrame ) import pandas.core.panel as panelm import pandas.util.testing as tm @@ -1652,10 +1653,17 @@ def test_shift(self): # negative numbers, #2164 result = self.panel.shift(-1) - expected = Panel(dict((i, f.shift(-1)) + expected = Panel(dict((i, f.shift(-1)[:-1]) for i, f in compat.iteritems(self.panel))) assert_panel_equal(result, expected) + # mixed dtypes #6959 + data = [('item '+ch, makeMixedDataFrame()) for ch in list('abcde')] + data = dict(data) + mixed_panel = Panel.from_dict(data, orient='minor') + shifted = mixed_panel.shift(1) + assert_series_equal(mixed_panel.dtypes, shifted.dtypes) + def test_tshift(self): # PeriodIndex ps = tm.makePeriodPanel()
Reverts #6605 closes #6959 #6826 ## vs 13.1 ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- panel_shift | 0.0773 | 0.0990 | 0.7809 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [3d0099a] : BUG: Change Panel.shift to use slice_shift #6605 #6826 Base [d10a658] : RLS: set released to True. v0.13.1 ``` ## vs master Note that the `pct_change is slower due to the deferred alignment. ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- panel_shift | 0.1650 | 522.0180 | 0.0003 | panel_pct_change_major | 7615.9450 | 6213.1660 | 1.2258 | panel_pct_change_minor | 7864.9763 | 5877.1353 | 1.3382 | panel_pct_change_items | 8646.0753 | 5884.1163 | 1.4694 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [3ec426f] : BUG: Change Panel.shift to use slice_shift #6605 #6826 Base [0d2966f] : COMPAT: 32-bit platform compat for Panel.count ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6974
2014-04-26T19:50:48Z
2014-04-28T23:21:23Z
2014-04-28T23:21:23Z
2014-06-18T12:23:39Z
Cookbook additions
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 5fcd8961d4e08..46a5d8c870a9d 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -11,6 +11,7 @@ np.random.seed(123456) from pandas import * options.display.max_rows=15 + options.display.mpl_style='default' import pandas as pd randn = np.random.randn randint = np.random.randint @@ -346,6 +347,24 @@ The :ref:`Plotting <visualization>` docs. `Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter <http://pandas-xlsxwriter-charts.readthedocs.org/en/latest/introduction.html>`__ +`Boxplot for each quartile of a stratifying variable +<http://stackoverflow.com/questions/23232989/boxplot-stratified-by-column-in-python-pandas>`__ + +.. ipython:: python + + df = pd.DataFrame( + {u'stratifying_var': np.random.uniform(0, 100, 20), + u'price': np.random.normal(100, 5, 20)} + ) + df[u'quartiles'] = pd.qcut( + df[u'stratifying_var'], + 4, + labels=[u'0-25%', u'25-50%', u'50-75%', u'75-100%'] + ) + + @savefig quartile_boxplot.png + df.boxplot(column=u'price', by=u'quartiles') + Data In/Out -----------
As suggested by Jeff on this [StackOverflow answer](http://stackoverflow.com/a/23233196/1222578), I'm adding an example of a boxplot that uses quartiles of a second variable as the `by` variable. Jeff also suggested adding the example in-line, rather than just providing a link to StackOverflow, so that's what I've done for now. The resulting plot currently looks like this, I'm happy to tweak it to better get the idea across: ![Boxplot example](http://i.imgur.com/yQZMlGH.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/6972
2014-04-26T13:34:43Z
2014-04-27T13:58:57Z
null
2014-06-13T16:46:14Z
ENH: Support dateutil timezones. GH4688.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 5897b1a43054f..48acacd7ced08 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1244,21 +1244,53 @@ the quarter end: Time Zone Handling ------------------ -Using ``pytz``, pandas provides rich support for working with timestamps in -different time zones. By default, pandas objects are time zone unaware: +Pandas provides rich support for working with timestamps in different time zones using ``pytz`` and ``dateutil`` libraries. +``dateutil`` support is new [in 0.14.1] and currently only supported for fixed offset and tzfile zones. The default library is ``pytz``. +Support for ``dateutil`` is provided for compatibility with other applications e.g. if you use ``dateutil`` in other python packages. + +By default, pandas objects are time zone unaware: .. ipython:: python rng = date_range('3/6/2012 00:00', periods=15, freq='D') - print(rng.tz) + rng.tz is None To supply the time zone, you can use the ``tz`` keyword to ``date_range`` and -other functions: +other functions. Dateutil time zone strings are distinguished from ``pytz`` +time zones by starting with ``dateutil/``. In ``pytz`` you can find a list of +common (and less common) time zones using ``from pytz import common_timezones, all_timezones``. +``dateutil`` uses the OS timezones so there isn't a fixed list available. For +common zones, the names are the same as ``pytz``. .. ipython:: python - + + # pytz rng_utc = date_range('3/6/2012 00:00', periods=10, freq='D', tz='UTC') - print(rng_utc.tz) + rng_utc.tz + + # dateutil + rng_utc_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', + tz='dateutil/UTC') + rng_utc_dateutil.tz + +You can also construct the timezone explicitly first, which gives you more control over which +time zone is used: + +.. 
ipython:: python + + # pytz + import pytz + tz_pytz = pytz.timezone('UTC') + rng_utc = date_range('3/6/2012 00:00', periods=10, freq='D', tz=tz_pytz) + rng_utc.tz + + # dateutil + import dateutil + tz_dateutil = dateutil.tz.gettz('UTC') + rng_utc_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', + tz=tz_dateutil) + rng_utc_dateutil.tz + Timestamps, like Python's ``datetime.datetime`` object can be either time zone naive or time zone aware. Naive time series and DatetimeIndex objects can be @@ -1271,6 +1303,7 @@ naive or time zone aware. Naive time series and DatetimeIndex objects can be ts_utc = ts.tz_localize('UTC') ts_utc +Again, you can explicitly construct the timezone object first. You can use the ``tz_convert`` method to convert pandas objects to convert tz-aware data to another time zone: @@ -1278,6 +1311,11 @@ tz-aware data to another time zone: ts_utc.tz_convert('US/Eastern') +.. warning:: + Be very wary of conversions between libraries as ``pytz`` and ``dateutil`` + may have different definitions of the time zones. This is more of a problem for + unusual timezones than for 'standard' zones like ``US/Eastern``. + Under the hood, all timestamps are stored in UTC. Scalar values from a ``DatetimeIndex`` with a time zone will have their fields (day, hour, minute) localized to the time zone. However, timestamps with the same UTC value are diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 48eac7fb1b761..e4effc1c78798 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -9,6 +9,8 @@ users upgrade to this version. - Highlights include: + - Support for ``dateutil`` timezones. + - :ref:`Other Enhancements <whatsnew_0141.enhancements>` - :ref:`API Changes <whatsnew_0141.api>` @@ -53,6 +55,17 @@ Enhancements ~~~~~~~~~~~~ - Tests for basic reading of public S3 buckets now exist (:issue:`7281`). +- Support for dateutil timezones, which can now be used in the same way as + pytz timezones across pandas. (:issue:`4688`) + + .. 
ipython:: python + + rng_utc_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', + tz='dateutil/UTC') + rng_utc_dateutil.tz + + See :ref:`the docs <timeseries.timezone>`. + .. _whatsnew_0141.performance: Performance diff --git a/pandas/index.pyx b/pandas/index.pyx index 4f8e780ded808..3dcdbf207fb3f 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -605,11 +605,11 @@ cdef inline _to_i8(object val): return get_datetime64_value(val) elif PyDateTime_Check(val): tzinfo = getattr(val, 'tzinfo', None) - val = _pydatetime_to_dts(val, &dts) + ival = _pydatetime_to_dts(val, &dts) # Save the original date value so we can get the utcoffset from it. if tzinfo is not None and not _is_utc(tzinfo): offset = tslib._get_utcoffset(tzinfo, val) - val -= tslib._delta_to_nanoseconds(offset) - + ival -= tslib._delta_to_nanoseconds(offset) + return ival return val cdef inline bint _is_utc(object tz): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 9fabf0ae960fe..cee1867e73179 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1729,11 +1729,23 @@ def set_atom(self, block, block_items, existing_col, min_itemsize, if getattr(rvalues[0], 'tzinfo', None) is not None: # if this block has more than one timezone, raise - if len(set([r.tzinfo for r in rvalues])) != 1: - raise TypeError( - "too many timezones in this block, create separate " - "data columns" - ) + try: + # pytz timezones: compare on zone name (to avoid issues with DST being a different zone to STD). 
+ zones = [r.tzinfo.zone for r in rvalues] + except: + # dateutil timezones: compare on == + zones = [r.tzinfo for r in rvalues] + if any(zones[0] != zone_i for zone_i in zones[1:]): + raise TypeError( + "too many timezones in this block, create separate " + "data columns" + ) + else: + if len(set(zones)) != 1: + raise TypeError( + "too many timezones in this block, create separate " + "data columns" + ) # convert this column to datetime64[ns] utc, and save the tz index = DatetimeIndex(rvalues) diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py index 32057f9ffd35c..fcd5515419537 100644 --- a/pandas/io/tests/test_json/test_ujson.py +++ b/pandas/io/tests/test_json/test_ujson.py @@ -25,6 +25,7 @@ assert_array_almost_equal_nulp, assert_approx_equal) import pytz +import dateutil from pandas import DataFrame, Series, Index, NaT, DatetimeIndex import pandas.util.testing as tm @@ -361,7 +362,9 @@ def test_encodeTimeConversion(self): datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243), - datetime.time(10, 12, 15, 343243, pytz.utc)] + datetime.time(10, 12, 15, 343243, pytz.utc), +# datetime.time(10, 12, 15, 343243, dateutil.tz.gettz('UTC')), # this segfaults! No idea why. + ] for test in tests: output = ujson.encode(test) expected = '"%s"' % test.isoformat() diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 77555ad81a45b..edaae26acb29e 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1991,7 +1991,7 @@ def test_unimplemented_dtypes_table_columns(self): # this fails because we have a date in the object block...... 
self.assertRaises(TypeError, store.append, 'df_unimplemented', df) - def test_append_with_timezones(self): + def test_append_with_timezones_pytz(self): from datetime import timedelta @@ -2020,7 +2020,8 @@ def compare(a,b): compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]]) _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5)) + # ensure we include dates in DST and STD time here. + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130603',tz='US/Eastern')),index=range(5)) store.append('df_tz',df) result = store['df_tz'] compare(result,df) @@ -2057,6 +2058,78 @@ def compare(a,b): result = store.select('df') assert_frame_equal(result,df) + def test_append_with_timezones_dateutil(self): + + from datetime import timedelta + + try: + import dateutil + except ImportError: + raise nose.SkipTest + + def compare(a, b): + tm.assert_frame_equal(a, b) + + # compare the zones on each element + for c in a.columns: + for i in a.index: + a_e = a[c][i] + b_e = b[c][i] + if not (a_e == b_e and a_e.tz == b_e.tz): + raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e, b_e)) + + # as columns + with ensure_clean_store(self.path) as store: + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=dateutil.tz.gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ])) + store.append('df_tz', df, data_columns=['A']) + result = store['df_tz'] + compare(result, df) + assert_frame_equal(result, df) + + # select with tz aware + compare(store.select('df_tz', where=Term('A>=df.A[3]')), df[df.A >= df.A[3]]) + + _maybe_remove(store, 'df_tz') + # ensure we include dates in DST and STD time here. 
+ df = DataFrame(dict(A=Timestamp('20130102', tz=dateutil.tz.gettz('US/Eastern')), B=Timestamp('20130603', tz=dateutil.tz.gettz('US/Eastern'))), index=range(5)) + store.append('df_tz', df) + result = store['df_tz'] + compare(result, df) + assert_frame_equal(result, df) + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A=Timestamp('20130102', tz=dateutil.tz.gettz('US/Eastern')), B=Timestamp('20130102', tz=dateutil.tz.gettz('EET'))), index=range(5)) + self.assertRaises(TypeError, store.append, 'df_tz', df) + + # this is ok + _maybe_remove(store, 'df_tz') + store.append('df_tz', df, data_columns=['A', 'B']) + result = store['df_tz'] + compare(result, df) + assert_frame_equal(result, df) + + # can't append with diff timezone + df = DataFrame(dict(A=Timestamp('20130102', tz=dateutil.tz.gettz('US/Eastern')), B=Timestamp('20130102', tz=dateutil.tz.gettz('CET'))), index=range(5)) + self.assertRaises(ValueError, store.append, 'df_tz', df) + + # as index + with ensure_clean_store(self.path) as store: + + # GH 4098 example + df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=dateutil.tz.gettz('US/Eastern'))))) + + _maybe_remove(store, 'df') + store.put('df', df) + result = store.select('df') + assert_frame_equal(result, df) + + _maybe_remove(store, 'df') + store.append('df', df) + result = store.select('df') + assert_frame_equal(result, df) + def test_store_timezone(self): # GH2852 # issue storing datetime.date with a timezone as it resets when read back in a new timezone diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 8e405dc98f3da..302b8ca9983e0 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -92,6 +92,13 @@ def _skip_if_no_pytz(): except ImportError: raise nose.SkipTest("pytz not installed") +def _skip_if_no_dateutil(): + try: + import dateutil + except ImportError: + raise nose.SkipTest("dateutil not installed") + + class TestDataFrameFormatting(tm.TestCase): 
_multiprocess_can_split_ = True @@ -2922,7 +2929,7 @@ def test_no_tz(self): ts_nanos_micros = Timestamp(1200) self.assertEqual(str(ts_nanos_micros), "1970-01-01 00:00:00.000001200") - def test_tz(self): + def test_tz_pytz(self): _skip_if_no_pytz() import pytz @@ -2936,6 +2943,20 @@ def test_tz(self): dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc) self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us))) + def test_tz_dateutil(self): + _skip_if_no_dateutil() + import dateutil + utc = dateutil.tz.gettz('UTC') + + dt_date = datetime(2013, 1, 2, tzinfo=utc) + self.assertEqual(str(dt_date), str(Timestamp(dt_date))) + + dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc) + self.assertEqual(str(dt_datetime), str(Timestamp(dt_datetime))) + + dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc) + self.assertEqual(str(dt_datetime_us), str(Timestamp(dt_datetime_us))) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index bc12cc5aaaa3b..44587248e6d51 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -47,6 +47,12 @@ def _skip_if_no_pytz(): except ImportError: raise nose.SkipTest("pytz not installed") +def _skip_if_no_dateutil(): + try: + import dateutil + except ImportError: + raise nose.SkipTest("dateutil not installed") + #------------------------------------------------------------------------------ # Series test cases @@ -4573,7 +4579,7 @@ def test_getitem_setitem_datetimeindex(self): result["1990-01-02"] = ts[24:48] assert_series_equal(result, ts) - def test_getitem_setitem_datetime_tz(self): + def test_getitem_setitem_datetime_tz_pytz(self): _skip_if_no_pytz(); from pytz import timezone as tz @@ -4608,6 +4614,39 @@ def test_getitem_setitem_datetime_tz(self): result[date] = ts[4] assert_series_equal(result, ts) + + def 
test_getitem_setitem_datetime_tz_dateutil(self): + _skip_if_no_dateutil(); + from dateutil.tz import gettz as tz + + from pandas import date_range + N = 50 + # testing with timezone, GH #2785 + rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern') + ts = Series(np.random.randn(N), index=rng) + + # also test Timestamp tz handling, GH #2789 + result = ts.copy() + result["1990-01-01 09:00:00+00:00"] = 0 + result["1990-01-01 09:00:00+00:00"] = ts[4] + assert_series_equal(result, ts) + + result = ts.copy() + result["1990-01-01 03:00:00-06:00"] = 0 + result["1990-01-01 03:00:00-06:00"] = ts[4] + assert_series_equal(result, ts) + + # repeat with datetimes + result = ts.copy() + result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0 + result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4] + assert_series_equal(result, ts) + + result = ts.copy() + result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0 + result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4] + assert_series_equal(result, ts) + def test_getitem_setitem_periodindex(self): from pandas import period_range N = 50 diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 3612b9dbeafb3..34b0045b4983b 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1617,7 +1617,7 @@ def _view_like(self, ndarray): def tz_convert(self, tz): """ - Convert DatetimeIndex from one time zone to another (using pytz) + Convert DatetimeIndex from one time zone to another (using pytz/dateutil) Returns ------- @@ -1635,11 +1635,11 @@ def tz_convert(self, tz): def tz_localize(self, tz, infer_dst=False): """ - Localize tz-naive DatetimeIndex to given time zone (using pytz) + Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil) Parameters ---------- - tz : string or pytz.timezone + tz : string or pytz.timezone or dateutil.tz.tzfile Time zone for time. 
Corresponding timestamps would be converted to time zone of the TimeSeries infer_dst : boolean, default False @@ -1666,7 +1666,7 @@ def indexer_at_time(self, time, asof=False): Parameters ---------- time : datetime.time or string - tz : string or pytz.timezone + tz : string or pytz.timezone or dateutil.tz.tzfile Time zone for time. Corresponding timestamps would be converted to time zone of the TimeSeries @@ -1701,7 +1701,7 @@ def indexer_between_time(self, start_time, end_time, include_start=True, end_time : datetime.time or string include_start : boolean, default True include_end : boolean, default True - tz : string or pytz.timezone, default None + tz : string or pytz.timezone or dateutil.tz.tzfile, default None Returns ------- diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 159e3d1603b20..dd84ee27caf0e 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -24,6 +24,12 @@ def _skip_if_no_pytz(): except ImportError: raise nose.SkipTest("pytz not installed") +def _skip_if_no_dateutil(): + try: + import dateutil + except ImportError: + raise nose.SkipTest("dateutil not installed") + def _skip_if_no_cday(): if datetools.cday is None: @@ -291,6 +297,11 @@ def test_summary_pytz(self): import pytz bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() + def test_summary_dateutil(self): + _skip_if_no_dateutil() + import dateutil + bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.gettz('UTC')).summary() + def test_misc(self): end = datetime(2009, 5, 13) dr = bdate_range(end=end, periods=20) @@ -354,7 +365,7 @@ def test_range_bug(self): exp_values = [start + i * offset for i in range(5)] self.assert_numpy_array_equal(result, DatetimeIndex(exp_values)) - def test_range_tz(self): + def test_range_tz_pytz(self): # GH 2906 _skip_if_no_pytz() from pytz import timezone as tz @@ -377,7 +388,30 @@ def test_range_tz(self): self.assertEqual(dr[0], start) self.assertEqual(dr[2], 
end) - def test_month_range_union_tz(self): + def test_range_tz_dateutil(self): + # GH 2906 + _skip_if_no_dateutil() + from dateutil.tz import gettz as tz + + start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern')) + end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern')) + + dr = date_range(start=start, periods=3) + self.assert_(dr.tz == tz('US/Eastern')) + self.assert_(dr[0] == start) + self.assert_(dr[2] == end) + + dr = date_range(end=end, periods=3) + self.assert_(dr.tz == tz('US/Eastern')) + self.assert_(dr[0] == start) + self.assert_(dr[2] == end) + + dr = date_range(start=start, end=end) + self.assert_(dr.tz == tz('US/Eastern')) + self.assert_(dr[0] == start) + self.assert_(dr[2] == end) + + def test_month_range_union_tz_pytz(self): _skip_if_no_pytz() from pytz import timezone tz = timezone('US/Eastern') @@ -393,6 +427,22 @@ def test_month_range_union_tz(self): early_dr.union(late_dr) + def test_month_range_union_tz_dateutil(self): + _skip_if_no_dateutil() + from dateutil.tz import gettz as timezone + tz = timezone('US/Eastern') + + early_start = datetime(2011, 1, 1) + early_end = datetime(2011, 3, 1) + + late_start = datetime(2011, 3, 1) + late_end = datetime(2011, 5, 1) + + early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=datetools.monthEnd) + late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=datetools.monthEnd) + + early_dr.union(late_dr) + def test_range_closed(self): begin = datetime(2011, 1, 1) end = datetime(2014, 1, 1) @@ -580,6 +630,11 @@ def test_summary_pytz(self): import pytz cdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() + def test_summary_dateutil(self): + _skip_if_no_dateutil() + import dateutil + cdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.gettz('UTC')).summary() + def test_misc(self): end = datetime(2009, 5, 13) dr = cdate_range(end=end, periods=20) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index e0aea9a1a29b1..169939c2f288a 100644 --- 
a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -83,6 +83,16 @@ def test_timestamp_tz_arg(self): self.assertEqual(p.tz, pytz.timezone('Europe/Brussels').normalize(p).tzinfo) + def test_timestamp_tz_arg_dateutil(self): + import dateutil + p = Period('1/1/2005', freq='M').to_timestamp(tz=dateutil.tz.gettz('Europe/Brussels')) + self.assertEqual(p.tz, dateutil.tz.gettz('Europe/Brussels')) + + def test_timestamp_tz_arg_dateutil_from_string(self): + import dateutil + p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels') + self.assertEqual(p.tz, dateutil.tz.gettz('Europe/Brussels')) + def test_period_constructor(self): i1 = Period('1/1/2005', freq='M') i2 = Period('Jan 2005') diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 068883423015e..610b5687b9fdf 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -41,6 +41,12 @@ from numpy.testing.decorators import slow +def _skip_if_no_dateutil(): + try: + import dateutil + except ImportError: + raise nose.SkipTest("dateutil not installed") + def _skip_if_no_pytz(): try: import pytz @@ -400,6 +406,28 @@ def test_timestamp_to_datetime(self): self.assertEqual(stamp, dtval) self.assertEqual(stamp.tzinfo, dtval.tzinfo) + def test_timestamp_to_datetime_explicit_pytz(self): + _skip_if_no_pytz() + import pytz + rng = date_range('20090415', '20090519', + tz=pytz.timezone('US/Eastern')) + + stamp = rng[0] + dtval = stamp.to_pydatetime() + self.assertEquals(stamp, dtval) + self.assertEquals(stamp.tzinfo, dtval.tzinfo) + + def test_timestamp_to_datetime_explicit_dateutil(self): + _skip_if_no_dateutil() + import dateutil + rng = date_range('20090415', '20090519', + tz=dateutil.tz.gettz('US/Eastern')) + + stamp = rng[0] + dtval = stamp.to_pydatetime() + self.assertEquals(stamp, dtval) + self.assertEquals(stamp.tzinfo, dtval.tzinfo) + def test_index_convert_to_datetime_array(self): 
_skip_if_no_pytz() @@ -419,6 +447,46 @@ def _check_rng(rng): _check_rng(rng_eastern) _check_rng(rng_utc) + def test_index_convert_to_datetime_array_explicit_pytz(self): + _skip_if_no_pytz() + import pytz + + def _check_rng(rng): + converted = rng.to_pydatetime() + tm.assert_isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + tm.assert_isinstance(x, datetime) + self.assertEquals(x, stamp.to_pydatetime()) + self.assertEquals(x.tzinfo, stamp.tzinfo) + + rng = date_range('20090415', '20090519') + rng_eastern = date_range('20090415', '20090519', tz=pytz.timezone('US/Eastern')) + rng_utc = date_range('20090415', '20090519', tz=pytz.utc) + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + def test_index_convert_to_datetime_array_explicit_dateutil(self): + _skip_if_no_dateutil() + import dateutil + + def _check_rng(rng): + converted = rng.to_pydatetime() + tm.assert_isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + tm.assert_isinstance(x, datetime) + self.assertEquals(x, stamp.to_pydatetime()) + self.assertEquals(x.tzinfo, stamp.tzinfo) + + rng = date_range('20090415', '20090519') + rng_eastern = date_range('20090415', '20090519', tz=dateutil.tz.gettz('US/Eastern')) + rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.gettz('UTC')) + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + def test_ctor_str_intraday(self): rng = DatetimeIndex(['1-1-2000 00:00:01']) self.assertEqual(rng[0].second, 1) @@ -1430,7 +1498,7 @@ def test_to_period_microsecond(self): self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U')) self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U')) - def test_to_period_tz(self): + def test_to_period_tz_pytz(self): _skip_if_no_pytz() from dateutil.tz import tzlocal from pytz import utc as UTC @@ -1461,6 +1529,68 @@ def test_to_period_tz(self): self.assertEqual(result, expected) self.assertTrue(ts.to_period().equals(xp)) + def 
test_to_period_tz_explicit_pytz(self): + _skip_if_no_pytz() + import pytz + from dateutil.tz import tzlocal + + xp = date_range('1/1/2000', '4/1/2000').to_period() + + ts = date_range('1/1/2000', '4/1/2000', tz=pytz.timezone('US/Eastern')) + + result = ts.to_period()[0] + expected = ts[0].to_period() + + self.assert_(result == expected) + self.assert_(ts.to_period().equals(xp)) + + ts = date_range('1/1/2000', '4/1/2000', tz=pytz.utc) + + result = ts.to_period()[0] + expected = ts[0].to_period() + + self.assert_(result == expected) + self.assert_(ts.to_period().equals(xp)) + + ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal()) + + result = ts.to_period()[0] + expected = ts[0].to_period() + + self.assert_(result == expected) + self.assert_(ts.to_period().equals(xp)) + + def test_to_period_tz_explicit_dateutil(self): + _skip_if_no_dateutil() + import dateutil + from dateutil.tz import tzlocal + + xp = date_range('1/1/2000', '4/1/2000').to_period() + + ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.gettz('US/Eastern')) + + result = ts.to_period()[0] + expected = ts[0].to_period() + + self.assert_(result == expected) + self.assert_(ts.to_period().equals(xp)) + + ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.gettz('UTC')) + + result = ts.to_period()[0] + expected = ts[0].to_period() + + self.assert_(result == expected) + self.assert_(ts.to_period().equals(xp)) + + ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal()) + + result = ts.to_period()[0] + expected = ts[0].to_period() + + self.assert_(result == expected) + self.assert_(ts.to_period().equals(xp)) + def test_frame_to_period(self): K = 5 from pandas.tseries.period import period_range @@ -1639,6 +1769,54 @@ def test_append_concat_tz(self): appended = rng.append(rng2) self.assertTrue(appended.equals(rng3)) + def test_append_concat_tz_explicit_pytz(self): + # GH 2938 + _skip_if_no_pytz() + from pytz import timezone as timezone + + rng = date_range('5/8/2012 1:45', periods=10, freq='5T', + 
tz=timezone('US/Eastern')) + rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T', + tz=timezone('US/Eastern')) + rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', + tz=timezone('US/Eastern')) + ts = Series(np.random.randn(len(rng)), rng) + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + ts2 = Series(np.random.randn(len(rng2)), rng2) + df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) + + result = ts.append(ts2) + result_df = df.append(df2) + self.assert_(result.index.equals(rng3)) + self.assert_(result_df.index.equals(rng3)) + + appended = rng.append(rng2) + self.assert_(appended.equals(rng3)) + + def test_append_concat_tz_explicit_dateutil(self): + # GH 2938 + _skip_if_no_dateutil() + from dateutil.tz import gettz as timezone + + rng = date_range('5/8/2012 1:45', periods=10, freq='5T', + tz=timezone('US/Eastern')) + rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T', + tz=timezone('US/Eastern')) + rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', + tz=timezone('US/Eastern')) + ts = Series(np.random.randn(len(rng)), rng) + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + ts2 = Series(np.random.randn(len(rng2)), rng2) + df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) + + result = ts.append(ts2) + result_df = df.append(df2) + self.assert_(result.index.equals(rng3)) + self.assert_(result_df.index.equals(rng3)) + + appended = rng.append(rng2) + self.assert_(appended.equals(rng3)) + def test_set_dataframe_column_ns_dtype(self): x = DataFrame([datetime.now(), datetime.now()]) self.assertEqual(x[0].dtype, np.dtype('M8[ns]')) @@ -1817,7 +1995,7 @@ def test_period_resample(self): result2 = s.resample('T', kind='period') assert_series_equal(result2, expected) - def test_period_resample_with_local_timezone(self): + def test_period_resample_with_local_timezone_pytz(self): # GH5430 _skip_if_no_pytz() import pytz @@ -1838,6 +2016,28 @@ def test_period_resample_with_local_timezone(self): expected = pd.Series(1, 
index=expected_index) assert_series_equal(result, expected) + def test_period_resample_with_local_timezone_dateutil(self): + # GH5430 + _skip_if_no_dateutil() + import dateutil + + local_timezone = dateutil.tz.gettz('America/Los_Angeles') + + start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=dateutil.tz.gettz('UTC')) + # 1 day later + end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=dateutil.tz.gettz('UTC')) + + index = pd.date_range(start, end, freq='H') + + series = pd.Series(1, index=index) + series = series.tz_convert(local_timezone) + result = series.resample('D', kind='period') + # Create the expected series + expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific + expected = pd.Series(1, index=expected_index) + assert_series_equal(result, expected) + + def test_pickle(self): #GH4606 from pandas.compat import cPickle @@ -2727,15 +2927,27 @@ def test_string_index_series_name_converted(self): class TestTimestamp(tm.TestCase): - def test_class_ops(self): + def test_class_ops_pytz(self): _skip_if_no_pytz() - import pytz + from pytz import timezone + + def compare(x, y): + self.assertEqual(int(Timestamp(x).value / 1e9), int(Timestamp(y).value / 1e9)) + + compare(Timestamp.now(), datetime.now()) + compare(Timestamp.now('UTC'), datetime.now(timezone('UTC'))) + compare(Timestamp.utcnow(), datetime.utcnow()) + compare(Timestamp.today(), datetime.today()) + + def test_class_ops_dateutil(self): + _skip_if_no_dateutil() + from dateutil.tz import gettz as timezone def compare(x,y): self.assertEqual(int(np.round(Timestamp(x).value/1e9)), int(np.round(Timestamp(y).value/1e9))) compare(Timestamp.now(),datetime.now()) - compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC'))) + compare(Timestamp.now('UTC'), datetime.now(timezone('UTC'))) compare(Timestamp.utcnow(),datetime.utcnow()) compare(Timestamp.today(),datetime.today()) @@ -2863,6 
+3075,53 @@ def test_cant_compare_tz_naive_w_aware(self): self.assertFalse(a == b.to_pydatetime()) self.assertFalse(a.to_pydatetime() == b) + def test_cant_compare_tz_naive_w_aware_explicit_pytz(self): + _skip_if_no_pytz() + from pytz import utc + # #1404 + a = Timestamp('3/12/2012') + b = Timestamp('3/12/2012', tz=utc) + + self.assertRaises(Exception, a.__eq__, b) + self.assertRaises(Exception, a.__ne__, b) + self.assertRaises(Exception, a.__lt__, b) + self.assertRaises(Exception, a.__gt__, b) + self.assertRaises(Exception, b.__eq__, a) + self.assertRaises(Exception, b.__ne__, a) + self.assertRaises(Exception, b.__lt__, a) + self.assertRaises(Exception, b.__gt__, a) + + if sys.version_info < (3, 3): + self.assertRaises(Exception, a.__eq__, b.to_pydatetime()) + self.assertRaises(Exception, a.to_pydatetime().__eq__, b) + else: + self.assertFalse(a == b.to_pydatetime()) + self.assertFalse(a.to_pydatetime() == b) + + def test_cant_compare_tz_naive_w_aware_dateutil(self): + _skip_if_no_dateutil() + from dateutil.tz import gettz + utc = gettz('UTC') + # #1404 + a = Timestamp('3/12/2012') + b = Timestamp('3/12/2012', tz=utc) + + self.assertRaises(Exception, a.__eq__, b) + self.assertRaises(Exception, a.__ne__, b) + self.assertRaises(Exception, a.__lt__, b) + self.assertRaises(Exception, a.__gt__, b) + self.assertRaises(Exception, b.__eq__, a) + self.assertRaises(Exception, b.__ne__, a) + self.assertRaises(Exception, b.__lt__, a) + self.assertRaises(Exception, b.__gt__, a) + + if sys.version_info < (3, 3): + self.assertRaises(Exception, a.__eq__, b.to_pydatetime()) + self.assertRaises(Exception, a.to_pydatetime().__eq__, b) + else: + self.assertFalse(a == b.to_pydatetime()) + self.assertFalse(a.to_pydatetime() == b) + def test_delta_preserve_nanos(self): val = Timestamp(long(1337299200000000123)) result = val + timedelta(1) diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 9514d5ca6e02c..b3ae02320037c 100644 --- 
a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -2,6 +2,7 @@ from datetime import datetime, time, timedelta, tzinfo, date import sys import os +import unittest import nose import numpy as np @@ -11,6 +12,7 @@ date_range, Timestamp) from pandas import DatetimeIndex, Int64Index, to_datetime, NaT +from pandas import tslib import pandas.core.datetools as datetools import pandas.tseries.offsets as offsets @@ -38,11 +40,22 @@ def _skip_if_no_pytz(): except ImportError: raise nose.SkipTest("pytz not installed") +def _skip_if_no_dateutil(): + try: + import dateutil + except ImportError: + raise nose.SkipTest + try: import pytz except ImportError: pass +try: + import dateutil +except ImportError: + pass + class FixedOffset(tzinfo): """Fixed offset in minutes east from UTC.""" @@ -64,20 +77,44 @@ def dst(self, dt): fixed_off_no_name = FixedOffset(-330, None) -class TestTimeZoneSupport(tm.TestCase): +class TestTimeZoneSupportPytz(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): _skip_if_no_pytz() + def tz(self, tz): + ''' Construct a timezone object from a string. Overridden in subclass to parameterize tests. ''' + return pytz.timezone(tz) + + def tzstr(self, tz): + ''' Construct a timezone string from a string. Overridden in subclass to parameterize tests. ''' + return tz + + def localize(self, tz, x): + return tz.localize(x) + + def cmptz(self, tz1, tz2): + ''' Compare two timezones. Overridden in subclass to parameterize tests. 
''' + return tz1.zone == tz2.zone + def test_utc_to_local_no_modify(self): rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert('US/Eastern') + rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) + + # Values are unmodified + self.assert_(np.array_equal(rng.asi8, rng_eastern.asi8)) + + self.assert_(self.cmptz(rng_eastern.tz, self.tz('US/Eastern'))) + + def test_utc_to_local_no_modify_explicit(self): + rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') + rng_eastern = rng.tz_convert(self.tz('US/Eastern')) # Values are unmodified self.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) - self.assertEqual(rng_eastern.tz, pytz.timezone('US/Eastern')) + self.assertEqual(rng_eastern.tz, self.tz('US/Eastern')) def test_localize_utc_conversion(self): @@ -87,20 +124,43 @@ def test_localize_utc_conversion(self): rng = date_range('3/10/2012', '3/11/2012', freq='30T') - converted = rng.tz_localize('US/Eastern') + converted = rng.tz_localize(self.tzstr('US/Eastern')) expected_naive = rng + offsets.Hour(5) self.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) # DST ambiguity, this should fail rng = date_range('3/11/2012', '3/12/2012', freq='30T') # Is this really how it should fail?? - self.assertRaises(NonExistentTimeError, rng.tz_localize, 'US/Eastern') + self.assertRaises(NonExistentTimeError, rng.tz_localize, self.tzstr('US/Eastern')) + + def test_localize_utc_conversion_explicit(self): + # Localizing to time zone should: + # 1) check for DST ambiguities + # 2) convert to UTC + + rng = date_range('3/10/2012', '3/11/2012', freq='30T') + converted = rng.tz_localize(self.tz('US/Eastern')) + expected_naive = rng + offsets.Hour(5) + self.assert_(np.array_equal(converted.asi8, expected_naive.asi8)) + + # DST ambiguity, this should fail + rng = date_range('3/11/2012', '3/12/2012', freq='30T') + # Is this really how it should fail?? 
+ self.assertRaises(NonExistentTimeError, rng.tz_localize, self.tz('US/Eastern')) def test_timestamp_tz_localize(self): stamp = Timestamp('3/11/2012 04:00') - result = stamp.tz_localize('US/Eastern') - expected = Timestamp('3/11/2012 04:00', tz='US/Eastern') + result = stamp.tz_localize(self.tzstr('US/Eastern')) + expected = Timestamp('3/11/2012 04:00', tz=self.tzstr('US/Eastern')) + self.assertEqual(result.hour, expected.hour) + self.assertEqual(result, expected) + + def test_timestamp_tz_localize_explicit(self): + stamp = Timestamp('3/11/2012 04:00') + + result = stamp.tz_localize(self.tz('US/Eastern')) + expected = Timestamp('3/11/2012 04:00', tz=self.tz('US/Eastern')) self.assertEqual(result.hour, expected.hour) self.assertEqual(result, expected) @@ -108,12 +168,22 @@ def test_timestamp_constructed_by_date_and_tz(self): # Fix Issue 2993, Timestamp cannot be constructed by datetime.date # and tz correctly - result = Timestamp(date(2012, 3, 11), tz='US/Eastern') + result = Timestamp(date(2012, 3, 11), tz=self.tzstr('US/Eastern')) - expected = Timestamp('3/11/2012', tz='US/Eastern') + expected = Timestamp('3/11/2012', tz=self.tzstr('US/Eastern')) self.assertEqual(result.hour, expected.hour) self.assertEqual(result, expected) + def test_timestamp_constructed_by_date_and_tz_explicit(self): + # Fix Issue 2993, Timestamp cannot be constructed by datetime.date + # and tz correctly + + result = Timestamp(date(2012, 3, 11), tz=self.tz('US/Eastern')) + + expected = Timestamp('3/11/2012', tz=self.tz('US/Eastern')) + self.assertEquals(result.hour, expected.hour) + self.assertEquals(result, expected) + def test_timestamp_to_datetime_tzoffset(self): # tzoffset from dateutil.tz import tzoffset @@ -126,12 +196,25 @@ def test_timedelta_push_over_dst_boundary(self): # #1389 # 4 hours before DST transition - stamp = Timestamp('3/10/2012 22:00', tz='US/Eastern') + stamp = Timestamp('3/10/2012 22:00', tz=self.tzstr('US/Eastern')) result = stamp + timedelta(hours=6) # spring forward, 
+ "7" hours - expected = Timestamp('3/11/2012 05:00', tz='US/Eastern') + expected = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern')) + + self.assertEquals(result, expected) + + def test_timedelta_push_over_dst_boundary_explicit(self): + # #1389 + + # 4 hours before DST transition + stamp = Timestamp('3/10/2012 22:00', tz=self.tz('US/Eastern')) + + result = stamp + timedelta(hours=6) + + # spring forward, + "7" hours + expected = Timestamp('3/11/2012 05:00', tz=self.tz('US/Eastern')) self.assertEqual(result, expected) @@ -140,7 +223,7 @@ def test_tz_localize_dti(self): dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256', freq='L') - dti2 = dti.tz_localize('US/Eastern') + dti2 = dti.tz_localize(self.tzstr('US/Eastern')) dti_utc = DatetimeIndex(start='1/1/2005 05:00', end='1/1/2005 5:00:30.256', freq='L', @@ -148,18 +231,18 @@ def test_tz_localize_dti(self): self.assert_numpy_array_equal(dti2.values, dti_utc.values) - dti3 = dti2.tz_convert('US/Pacific') + dti3 = dti2.tz_convert(self.tzstr('US/Pacific')) self.assert_numpy_array_equal(dti3.values, dti_utc.values) dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00', freq='L') self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize, - 'US/Eastern') + self.tzstr('US/Eastern')) dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00', freq='L') self.assertRaises( - pytz.NonExistentTimeError, dti.tz_localize, 'US/Eastern') + pytz.NonExistentTimeError, dti.tz_localize, self.tzstr('US/Eastern')) def test_tz_localize_empty_series(self): # #2248 @@ -169,22 +252,22 @@ def test_tz_localize_empty_series(self): ts2 = ts.tz_localize('utc') self.assertTrue(ts2.index.tz == pytz.utc) - ts2 = ts.tz_localize('US/Eastern') - self.assertTrue(ts2.index.tz == pytz.timezone('US/Eastern')) + ts2 = ts.tz_localize(self.tzstr('US/Eastern')) + self.assertTrue(self.cmptz(ts2.index.tz, self.tz('US/Eastern'))) def test_astimezone(self): utc = Timestamp('3/11/2012 22:00', tz='UTC') - expected = 
utc.tz_convert('US/Eastern') - result = utc.astimezone('US/Eastern') + expected = utc.tz_convert(self.tzstr('US/Eastern')) + result = utc.astimezone(self.tzstr('US/Eastern')) self.assertEqual(expected, result) tm.assert_isinstance(result, Timestamp) def test_create_with_tz(self): - stamp = Timestamp('3/11/2012 05:00', tz='US/Eastern') + stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern')) self.assertEqual(stamp.hour, 5) rng = date_range( - '3/11/2012 04:00', periods=10, freq='H', tz='US/Eastern') + '3/11/2012 04:00', periods=10, freq='H', tz=self.tzstr('US/Eastern')) self.assertEqual(stamp, rng[1]) @@ -257,10 +340,10 @@ def test_date_range_localize(self): def test_utc_box_timestamp_and_localize(self): rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert('US/Eastern') + rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) - tz = pytz.timezone('US/Eastern') - expected = tz.normalize(rng[-1]) + tz = self.tz('US/Eastern') + expected = rng[-1].astimezone(tz) stamp = rng_eastern[-1] self.assertEqual(stamp, expected) @@ -268,15 +351,17 @@ def test_utc_box_timestamp_and_localize(self): # right tzinfo rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc') - rng_eastern = rng.tz_convert('US/Eastern') - self.assertIn('EDT', repr(rng_eastern[0].tzinfo)) + rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) + # test not valid for dateutil timezones. 
+ # self.assertIn('EDT', repr(rng_eastern[0].tzinfo)) + self.assert_('EDT' in repr(rng_eastern[0].tzinfo) or 'tzfile' in repr(rng_eastern[0].tzinfo)) def test_timestamp_tz_convert(self): strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] - idx = DatetimeIndex(strdates, tz='US/Eastern') + idx = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern')) - conv = idx[0].tz_convert('US/Pacific') - expected = idx.tz_convert('US/Pacific')[0] + conv = idx[0].tz_convert(self.tzstr('US/Pacific')) + expected = idx.tz_convert(self.tzstr('US/Pacific'))[0] self.assertEqual(conv, expected) @@ -284,27 +369,27 @@ def test_pass_dates_localize_to_utc(self): strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] idx = DatetimeIndex(strdates) - conv = idx.tz_localize('US/Eastern') + conv = idx.tz_localize(self.tzstr('US/Eastern')) - fromdates = DatetimeIndex(strdates, tz='US/Eastern') + fromdates = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern')) self.assertEqual(conv.tz, fromdates.tz) self.assert_numpy_array_equal(conv.values, fromdates.values) def test_field_access_localize(self): strdates = ['1/1/2012', '3/1/2012', '4/1/2012'] - rng = DatetimeIndex(strdates, tz='US/Eastern') + rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern')) self.assertTrue((rng.hour == 0).all()) # a more unusual time zone, #1946 dr = date_range('2011-10-02 00:00', freq='h', periods=10, - tz='America/Atikokan') + tz=self.tzstr('America/Atikokan')) expected = np.arange(10) self.assert_numpy_array_equal(dr.hour, expected) def test_with_tz(self): - tz = pytz.timezone('US/Central') + tz = self.tz('US/Central') # just want it to work start = datetime(2011, 3, 12, tzinfo=pytz.utc) @@ -317,10 +402,11 @@ def test_with_tz(self): # normalized central = dr.tz_convert(tz) + self.assertIs(central.tz, tz) + self.assertIs(central[0].tz, tz) # compare vs a localized tz - comp = tz.localize(dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo - self.assertIs(central.tz, tz) + comp = self.localize(tz, 
dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo self.assertIs(central[0].tz, comp) # datetimes with tzinfo set @@ -338,9 +424,7 @@ def test_tz_localize(self): self.assert_numpy_array_equal(dr_utc, localized) def test_with_tz_ambiguous_times(self): - tz = pytz.timezone('US/Eastern') - - rng = bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1)) + tz = self.tz('US/Eastern') # March 13, 2011, spring forward, skip from 2 AM to 3 AM dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, @@ -363,7 +447,7 @@ def test_with_tz_ambiguous_times(self): def test_infer_dst(self): # November 6, 2011, fall back, repeat 2 AM hour # With no repeated hours, we cannot infer the transition - tz = pytz.timezone('US/Eastern') + tz = self.tz('US/Eastern') dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=datetools.Hour()) self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, @@ -388,36 +472,36 @@ def test_infer_dst(self): # test utility methods def test_infer_tz(self): - eastern = pytz.timezone('US/Eastern') + eastern = self.tz('US/Eastern') utc = pytz.utc _start = datetime(2001, 1, 1) _end = datetime(2009, 1, 1) - start = eastern.localize(_start) - end = eastern.localize(_end) - assert(tools._infer_tzinfo(start, end) is eastern.localize(_start).tzinfo) - assert(tools._infer_tzinfo(start, None) is eastern.localize(_start).tzinfo) - assert(tools._infer_tzinfo(None, end) is eastern.localize(_end).tzinfo) + start = self.localize(eastern, _start) + end = self.localize(eastern, _end) + assert(tools._infer_tzinfo(start, end) is self.localize(eastern, _start).tzinfo) + assert(tools._infer_tzinfo(start, None) is self.localize(eastern, _start).tzinfo) + assert(tools._infer_tzinfo(None, end) is self.localize(eastern, _end).tzinfo) start = utc.localize(_start) end = utc.localize(_end) assert(tools._infer_tzinfo(start, end) is utc) - end = eastern.localize(_end) + end = self.localize(eastern, _end) self.assertRaises(Exception, tools._infer_tzinfo, start, end) 
self.assertRaises(Exception, tools._infer_tzinfo, end, start) def test_tz_string(self): - result = date_range('1/1/2000', periods=10, tz='US/Eastern') + result = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern')) expected = date_range('1/1/2000', periods=10, - tz=pytz.timezone('US/Eastern')) + tz=self.tz('US/Eastern')) self.assertTrue(result.equals(expected)) def test_take_dont_lose_meta(self): _skip_if_no_pytz() - rng = date_range('1/1/2000', periods=20, tz='US/Eastern') + rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern')) result = rng.take(lrange(5)) self.assertEqual(result.tz, rng.tz) @@ -426,7 +510,7 @@ def test_take_dont_lose_meta(self): def test_index_with_timezone_repr(self): rng = date_range('4/13/2010', '5/6/2010') - rng_eastern = rng.tz_localize('US/Eastern') + rng_eastern = rng.tz_localize(self.tzstr('US/Eastern')) rng_repr = repr(rng_eastern) self.assertIn('2010-04-13 00:00:00', rng_repr) @@ -435,7 +519,7 @@ def test_index_astype_asobject_tzinfos(self): # #1345 # dates around a dst transition - rng = date_range('2/13/2010', '5/6/2010', tz='US/Eastern') + rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern')) objs = rng.asobject for i, x in enumerate(objs): @@ -455,21 +539,21 @@ def test_localized_at_time_between_time(self): rng = date_range('4/16/2012', '5/1/2012', freq='H') ts = Series(np.random.randn(len(rng)), index=rng) - ts_local = ts.tz_localize('US/Eastern') + ts_local = ts.tz_localize(self.tzstr('US/Eastern')) result = ts_local.at_time(time(10, 0)) - expected = ts.at_time(time(10, 0)).tz_localize('US/Eastern') + expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr('US/Eastern')) assert_series_equal(result, expected) - self.assertEqual(result.index.tz.zone, 'US/Eastern') + self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern'))) t1, t2 = time(10, 0), time(11, 0) result = ts_local.between_time(t1, t2) - expected = ts.between_time(t1, t2).tz_localize('US/Eastern') + expected = 
ts.between_time(t1, t2).tz_localize(self.tzstr('US/Eastern')) assert_series_equal(result, expected) - self.assertEqual(result.index.tz.zone, 'US/Eastern') + self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern'))) def test_string_index_alias_tz_aware(self): - rng = date_range('1/1/2000', periods=10, tz='US/Eastern') + rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern')) ts = Series(np.random.randn(len(rng)), index=rng) result = ts['1/3/2000'] @@ -494,14 +578,14 @@ def test_fixedtz_topydatetime(self): def test_convert_tz_aware_datetime_datetime(self): # #1581 - tz = pytz.timezone('US/Eastern') + tz = self.tz('US/Eastern') dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)] - dates_aware = [tz.localize(x) for x in dates] + dates_aware = [self.localize(tz, x) for x in dates] result = to_datetime(dates_aware) - self.assertEqual(result.tz.zone, 'US/Eastern') + self.assertTrue(self.cmptz(result.tz, self.tz('US/Eastern'))) converted = to_datetime(dates_aware, utc=True) ex_vals = [Timestamp(x).value for x in dates_aware] @@ -534,7 +618,7 @@ def test_to_datetime_tzlocal(self): def test_frame_no_datetime64_dtype(self): dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - dr_tz = dr.tz_localize('US/Eastern') + dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr) self.assertEqual(e['B'].dtype, 'M8[ns]') @@ -558,7 +642,7 @@ def test_hongkong_tz_convert(self): def test_tz_convert_unsorted(self): dr = date_range('2012-03-09', freq='H', periods=100, tz='utc') - dr = dr.tz_convert('US/Eastern') + dr = dr.tz_convert(self.tzstr('US/Eastern')) result = dr[::-1].hour exp = dr.hour[::-1] @@ -566,14 +650,14 @@ def test_tz_convert_unsorted(self): def test_shift_localized(self): dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - dr_tz = dr.tz_localize('US/Eastern') + dr_tz = dr.tz_localize(self.tzstr('US/Eastern')) result = dr_tz.shift(1, '10T') self.assertEqual(result.tz, dr_tz.tz) def 
test_tz_aware_asfreq(self): dr = date_range( - '2011-12-01', '2012-07-20', freq='D', tz='US/Eastern') + '2011-12-01', '2012-07-20', freq='D', tz=self.tzstr('US/Eastern')) s = Series(np.random.randn(len(dr)), index=dr) @@ -582,15 +666,15 @@ def test_tz_aware_asfreq(self): def test_static_tzinfo(self): # it works! - index = DatetimeIndex([datetime(2012, 1, 1)], tz='EST') + index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST')) index.hour index[0] def test_tzaware_datetime_to_index(self): - d = [datetime(2012, 8, 19, tzinfo=pytz.timezone('US/Eastern'))] + d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))] index = DatetimeIndex(d) - self.assertEqual(index.tz.zone, 'US/Eastern') + self.assertTrue(self.cmptz(index.tz, self.tz('US/Eastern'))) def test_date_range_span_dst_transition(self): # #1778 @@ -601,11 +685,11 @@ def test_date_range_span_dst_transition(self): self.assertTrue((dr.hour == 0).all()) - dr = date_range('2012-11-02', periods=10, tz='US/Eastern') + dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern')) self.assertTrue((dr.hour == 0).all()) def test_convert_datetime_list(self): - dr = date_range('2012-06-02', periods=10, tz='US/Eastern') + dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern')) dr2 = DatetimeIndex(list(dr), name='foo') self.assertTrue(dr.equals(dr2)) @@ -620,7 +704,7 @@ def test_frame_from_records_utc(self): DataFrame.from_records([rec], index='begin_time') def test_frame_reset_index(self): - dr = date_range('2012-06-02', periods=10, tz='US/Eastern') + dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern')) df = DataFrame(np.random.randn(len(dr)), dr) roundtripped = df.reset_index().set_index('index') xp = df.index.tz @@ -643,10 +727,10 @@ def test_dateutil_tzoffset_support(self): def test_getitem_pydatetime_tz(self): index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00', freq='H', - tz='Europe/Berlin') + tz=self.tzstr('Europe/Berlin')) ts = Series(index=index, 
data=index.hour) - time_pandas = Timestamp('2012-12-24 17:00', tz='Europe/Berlin') - time_datetime = pytz.timezone('Europe/Berlin').localize(datetime(2012, 12, 24, 17, 0)) + time_pandas = Timestamp('2012-12-24 17:00', tz=self.tzstr('Europe/Berlin')) + time_datetime = self.localize(self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0)) self.assertEqual(ts[time_pandas], ts[time_datetime]) def test_index_drop_dont_lose_tz(self): @@ -663,21 +747,43 @@ def test_datetimeindex_tz(self): arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00'] - idx1 = to_datetime(arr).tz_localize('US/Eastern') - idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2, tz='US/Eastern') - idx3 = DatetimeIndex(arr, tz='US/Eastern') - idx4 = DatetimeIndex(np.array(arr), tz='US/Eastern') + idx1 = to_datetime(arr).tz_localize(self.tzstr('US/Eastern')) + idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2, tz=self.tzstr('US/Eastern')) + idx3 = DatetimeIndex(arr, tz=self.tzstr('US/Eastern')) + idx4 = DatetimeIndex(np.array(arr), tz=self.tzstr('US/Eastern')) for other in [idx2, idx3, idx4]: self.assertTrue(idx1.equals(other)) def test_datetimeindex_tz_nat(self): - idx = to_datetime([Timestamp("2013-1-1", tz='US/Eastern'), NaT]) + idx = to_datetime([Timestamp("2013-1-1", tz=self.tzstr('US/Eastern')), NaT]) self.assertTrue(isnull(idx[1])) self.assertTrue(idx[0].tzinfo is not None) +class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz): + _multiprocess_can_split_ = True + + def setUp(self): + _skip_if_no_dateutil() + + def tz(self, tz): + ''' Construct a timezone object from a string. Overridden in subclass to parameterize tests. ''' + return dateutil.tz.gettz(tz) + + def tzstr(self, tz): + ''' Construct a timezone string from a string. Overridden in subclass to parameterize tests. ''' + return 'dateutil/' + tz + + def cmptz(self, tz1, tz2): + ''' Compare two timezones. Overridden in subclass to parameterize tests. 
''' + return tz1 == tz2 + + def localize(self, tz, x): + return x.replace(tzinfo=tz) + + class TestTimeZones(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 4260705eadb03..f8043b23a58af 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -57,9 +57,7 @@ def _infer(a, b): def _maybe_get_tz(tz, date=None): - if isinstance(tz, compat.string_types): - import pytz - tz = pytz.timezone(tz) + tz = tslib.maybe_get_tz(tz) if com.is_integer(tz): import pytz tz = pytz.FixedOffset(tz / 60) @@ -71,6 +69,7 @@ def _maybe_get_tz(tz, date=None): return tz + def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=compat.parse_date, dt_str_split=_DATEUTIL_LEXER_SPLIT): diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index df9c465c33853..e7385400e5962 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -34,8 +34,11 @@ cimport cython from datetime import timedelta, datetime from datetime import time as datetime_time -from dateutil.tz import tzoffset +from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile, + tzutc as _dateutil_tzutc, gettz as _dateutil_gettz) +from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo from pandas.compat import parse_date +from pandas.compat import parse_date, string_types from sys import version_info @@ -105,14 +108,19 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None): else: # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(arr[i]) - 1 - inf = tz._transition_info[pos] + pos = trans.searchsorted(arr[i], side='right') - 1 + if _treat_tz_as_pytz(tz): + # find right representation of dst etc in pytz timezone + new_tz = tz._tzinfos[tz._transition_info[pos]] + else: + # no zone-name change for dateutil tzs - dst etc represented in single object. 
+ new_tz = tz pandas_datetime_to_datetimestruct(arr[i] + deltas[pos], PANDAS_FR_ns, &dts) result[i] = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, - tz._tzinfos[inf]) + new_tz) else: for i in range(n): if arr[i] == iNaT: @@ -124,17 +132,23 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None): return result -from dateutil.tz import tzlocal -def _is_tzlocal(tz): - return isinstance(tz, tzlocal) +cdef inline bint _is_tzlocal(object tz): + return isinstance(tz, _dateutil_tzlocal) -def _is_fixed_offset(tz): - try: - tz._transition_info - return False - except AttributeError: - return True +cdef inline bint _is_fixed_offset(object tz): + if _treat_tz_as_dateutil(tz): + if len(tz._trans_idx) == 0 and len(tz._trans_list) == 0: + return 1 + else: + return 0 + elif _treat_tz_as_pytz(tz): + if len(tz._transition_info) == 0 and len(tz._utc_transition_times) == 0: + return 1 + else: + return 0 + return 1 + _zero_time = datetime_time(0, 0) @@ -157,7 +171,7 @@ class Timestamp(_Timestamp): def now(cls, tz=None): """ compat now with datetime """ if isinstance(tz, basestring): - tz = pytz.timezone(tz) + tz = maybe_get_tz(tz) return cls(datetime.now(tz)) @classmethod @@ -333,7 +347,7 @@ class Timestamp(_Timestamp): Parameters ---------- - tz : pytz.timezone + tz : pytz.timezone or dateutil.tz.tzfile Returns ------- @@ -353,7 +367,7 @@ class Timestamp(_Timestamp): Parameters ---------- - tz : pytz.timezone + tz : pytz.timezone or dateutil.tz.tzfile Returns ------- @@ -866,8 +880,7 @@ cdef convert_to_tsobject(object ts, object tz, object unit): bint utc_convert = 1 if tz is not None: - if isinstance(tz, basestring): - tz = pytz.timezone(tz) + tz = maybe_get_tz(tz) obj = _TSObject() @@ -954,6 +967,9 @@ cdef convert_to_tsobject(object ts, object tz, object unit): return obj cdef inline void _localize_tso(_TSObject obj, object tz): + ''' + Take a TSObject in UTC and localizes to timezone tz. 
+ ''' if _is_utc(tz): obj.tzinfo = tz elif _is_tzlocal(tz): @@ -970,35 +986,75 @@ cdef inline void _localize_tso(_TSObject obj, object tz): deltas = _get_deltas(tz) pos = trans.searchsorted(obj.value, side='right') - 1 - # statictzinfo - if not hasattr(tz, '_transition_info'): - pandas_datetime_to_datetimestruct(obj.value + deltas[0], - PANDAS_FR_ns, &obj.dts) + + # static/pytz/dateutil specific code + if _is_fixed_offset(tz): + # statictzinfo + if len(deltas) > 0: + pandas_datetime_to_datetimestruct(obj.value + deltas[0], + PANDAS_FR_ns, &obj.dts) + else: + pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts) obj.tzinfo = tz - else: + elif _treat_tz_as_pytz(tz): inf = tz._transition_info[pos] pandas_datetime_to_datetimestruct(obj.value + deltas[pos], PANDAS_FR_ns, &obj.dts) obj.tzinfo = tz._tzinfos[inf] + elif _treat_tz_as_dateutil(tz): + pandas_datetime_to_datetimestruct(obj.value + deltas[pos], + PANDAS_FR_ns, &obj.dts) + obj.tzinfo = tz + else: + obj.tzinfo = tz def get_timezone(tz): return _get_zone(tz) cdef inline bint _is_utc(object tz): - return tz is UTC or isinstance(tz, _du_utc) + return tz is UTC or isinstance(tz, _dateutil_tzutc) cdef inline object _get_zone(object tz): + ''' + We need to do several things here: + 1/ Distinguish between pytz and dateutil timezones + 2/ Not be over-specific (e.g. US/Eastern with/without DST is same *zone* but a different tz object) + 3/ Provide something to serialize when we're storing a datetime object in pytables. + + We return a string prefaced with dateutil if it's a dateutil tz, else just the tz name. It needs to be a + string so that we can serialize it with UJSON/pytables. maybe_get_tz (below) is the inverse of this process. + ''' if _is_utc(tz): return 'UTC' else: - try: - zone = tz.zone - if zone is None: + if _treat_tz_as_dateutil(tz): + return 'dateutil/' + tz._filename.split('zoneinfo/')[1] + else: + # tz is a pytz timezone or unknown. 
+ try: + zone = tz.zone + if zone is None: + return tz + return zone + except AttributeError: return tz - return zone - except AttributeError: - return tz + + +cpdef inline object maybe_get_tz(object tz): + ''' + (Maybe) Construct a timezone object from a string. If tz is a string, use it to construct a timezone object. + Otherwise, just return tz. + ''' + if isinstance(tz, string_types): + split_tz = tz.split('/', 1) + if split_tz[0] == 'dateutil': + tz = _dateutil_gettz(split_tz[1]) + else: + tz = pytz.timezone(tz) + return tz + else: + return tz class OutOfBoundsDatetime(ValueError): @@ -1747,7 +1803,6 @@ def i8_to_pydt(int64_t i8, object tzinfo = None): # time zone conversion helpers try: - from dateutil.tz import tzutc as _du_utc import pytz UTC = pytz.utc have_pytz = True @@ -1884,22 +1939,48 @@ def tz_convert_single(int64_t val, object tz1, object tz2): offset = deltas[pos] return utc_date + offset - +# Timezone data caches, key is the pytz string or dateutil file name. trans_cache = {} utc_offset_cache = {} -def _get_transitions(tz): +cdef inline bint _treat_tz_as_pytz(object tz): + return hasattr(tz, '_utc_transition_times') and hasattr(tz, '_transition_info') + +cdef inline bint _treat_tz_as_dateutil(object tz): + return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx') + + +cdef inline object _tz_cache_key(object tz): + """ + Return the key in the cache for the timezone info object or None if unknown. + + The key is currently the tz string for pytz timezones, the filename for dateutil timezones. + + Notes + ===== + This cannot just be the hash of a timezone object. Unfortunately, the hashes of two dateutil tz objects + which represent the same timezone are not equal (even though the tz objects will compare equal and + represent the same tz file). + Also, pytz objects are not always hashable so we use str(tz) instead. 
+ """ + if isinstance(tz, _pytz_BaseTzInfo): + return tz.zone + elif isinstance(tz, _dateutil_tzfile): + return tz._filename + else: + return None + + +cdef object _get_transitions(object tz): """ Get UTC times of DST transitions """ - try: - # tzoffset not hashable in Python 3 - hash(tz) - except TypeError: + cache_key = _tz_cache_key(tz) + if cache_key is None: return np.array([NPY_NAT + 1], dtype=np.int64) - if tz not in trans_cache: - if hasattr(tz, '_utc_transition_times'): + if cache_key not in trans_cache: + if _treat_tz_as_pytz(tz): arr = np.array(tz._utc_transition_times, dtype='M8[ns]') arr = arr.view('i8') try: @@ -1907,31 +1988,68 @@ def _get_transitions(tz): arr[0] = NPY_NAT + 1 except Exception: pass + elif _treat_tz_as_dateutil(tz): + if len(tz._trans_list): + # get utc trans times + trans_list = _get_utc_trans_times_from_dateutil_tz(tz) + arr = np.hstack([np.array([0], dtype='M8[s]'), # place holder for first item + np.array(trans_list, dtype='M8[s]')]).astype('M8[ns]') # all trans listed + arr = arr.view('i8') + # scale transitions correctly in numpy 1.6 + if _np_version_under1p7: + arr *= 1000000000 + arr[0] = NPY_NAT + 1 + elif _is_fixed_offset(tz): + arr = np.array([NPY_NAT + 1], dtype=np.int64) + else: + arr = np.array([], dtype='M8[ns]') else: arr = np.array([NPY_NAT + 1], dtype=np.int64) - trans_cache[tz] = arr - return trans_cache[tz] + trans_cache[cache_key] = arr + return trans_cache[cache_key] + + +cdef object _get_utc_trans_times_from_dateutil_tz(object tz): + ''' + Transition times in dateutil timezones are stored in local non-dst time. This code + converts them to UTC. It's the reverse of the code in dateutil.tz.tzfile.__init__. 
+ ''' + new_trans = list(tz._trans_list) + last_std_offset = 0 + for i, (trans, tti) in enumerate(zip(tz._trans_list, tz._trans_idx)): + if not tti.isdst: + last_std_offset = tti.offset + new_trans[i] = trans - last_std_offset + return new_trans -def _get_deltas(tz): + +cdef object _get_deltas(object tz): """ Get UTC offsets in microseconds corresponding to DST transitions """ - try: - # tzoffset not hashable in Python 3 - hash(tz) - except TypeError: + cache_key = _tz_cache_key(tz) + if cache_key is None: num = int(total_seconds(_get_utcoffset(tz, None))) * 1000000000 return np.array([num], dtype=np.int64) - if tz not in utc_offset_cache: - if hasattr(tz, '_utc_transition_times'): - utc_offset_cache[tz] = _unbox_utcoffsets(tz._transition_info) + if cache_key not in utc_offset_cache: + if _treat_tz_as_pytz(tz): + utc_offset_cache[cache_key] = _unbox_utcoffsets(tz._transition_info) + elif _treat_tz_as_dateutil(tz): + if len(tz._trans_list): + arr = np.array([v.offset for v in (tz._ttinfo_before,) + tz._trans_idx], dtype='i8') # + (tz._ttinfo_std,) + arr *= 1000000000 + utc_offset_cache[cache_key] = arr + elif _is_fixed_offset(tz): + utc_offset_cache[cache_key] = np.array([tz._ttinfo_std.offset], dtype='i8') * 1000000000 + else: + utc_offset_cache[cache_key] = np.array([], dtype='i8') else: # static tzinfo num = int(total_seconds(_get_utcoffset(tz, None))) * 1000000000 - utc_offset_cache[tz] = np.array([num], dtype=np.int64) + utc_offset_cache[cache_key] = np.array([num], dtype=np.int64) - return utc_offset_cache[tz] + return utc_offset_cache[cache_key] cdef double total_seconds(object td): # Python 2.6 compat return ((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) // @@ -2019,7 +2137,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False): # right side idx_shifted = _ensure_int64( np.maximum(0, trans.searchsorted(vals + DAY_NS, side='right') - 1)) - + for i in range(n): v = vals[i] - deltas[idx_shifted[i]] pos = 
bisect_right_i8(tdata, v, ntrans) - 1 @@ -2028,7 +2146,6 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False): if v + deltas[pos] == vals[i]: result_b[i] = v - if infer_dst: dst_hours = np.empty(n, dtype=np.int64) dst_hours.fill(NPY_NAT) @@ -2569,8 +2686,7 @@ def date_normalize(ndarray[int64_t] stamps, tz=None): if tz is not None: tso = _TSObject() - if isinstance(tz, basestring): - tz = pytz.timezone(tz) + tz = maybe_get_tz(tz) result = _normalize_local(stamps, tz) else: for i in range(n): @@ -3173,8 +3289,7 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None): int reso = D_RESO, curr_reso if tz is not None: - if isinstance(tz, basestring): - tz = pytz.timezone(tz) + tz = maybe_get_tz(tz) return _reso_local(stamps, tz) else: for i in range(n):
closes #4688 This PR should provide support for dateutil timezones. It was discussed quite a bit in https://github.com/pydata/pandas/pull/4689. The discussion there ended two months back with "we should incorporate this if it can be made seamless". I think I've managed that now so it would be great to get some feedback on the changes in this PR. Everything that works with pytz timezones should work with dateutil timezones and you shouldn't notice any difference in behaviour when you change between them. There are two exceptions which I'm looking at now: saving data to ujson and pytables. The changes I've made just allow pandas to treat dateutil timezones exactly the same as pytz timezones (from the user perspective) - just extending conversion logic to deal with both where appropriate. This has made a few methods a bit more complicated but relatively few changes were required. All of the significant changes are in tslib.pyx. Almost all of the other changes are adding test cases for the dateutil timezones. The test suites pass locally. Let me know if you have any questions or if there's anything that needs doing before the PR can be accepted.
https://api.github.com/repos/pandas-dev/pandas/pulls/6968
2014-04-25T14:49:32Z
2014-06-04T12:46:42Z
2014-06-04T12:46:42Z
2014-06-12T18:09:13Z
COMPAT: fix numpy 1.9-dev deprecation warnings in test suite
diff --git a/ci/script.sh b/ci/script.sh index 152a2f1ebdcf9..e76789b689c94 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -16,6 +16,13 @@ fi "$TRAVIS_BUILD_DIR"/ci/build_docs.sh 2>&1 > /tmp/doc.log & # doc build log will be shown after tests +# export the testing mode +if [ -n "$NUMPY_BUILD" ]; then + + export PANDAS_TESTING_MODE="numpy_deprecate" + +fi + echo nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml diff --git a/doc/source/release.rst b/doc/source/release.rst index c975143b0ef67..f7b47b06ef841 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -240,6 +240,9 @@ Prior Version Deprecations/Changes - Remove ``time_rule`` from several rolling-moment statistical functions, such as :func:`rolling_sum` (:issue:`1042`) +- Removed neg (-) boolean operations on numpy arrays in favor of inv (~), as this is going to + be deprecated in numpy 1.9 (:issue:`6960`) + Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 34480668df8c9..dd6a21ccecf55 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -368,6 +368,8 @@ There are prior version deprecations that are taking effect as of 0.14.0. ( `commit 3136390 <https://github.com/pydata/pandas/commit/3136390>`__ ) - Remove ``time_rule`` from several rolling-moment statistical functions, such as :func:`rolling_sum` (:issue:`1042`) +- Removed neg (-) boolean operations on numpy arrays in favor of inv (~), as this is going to + be deprecated in numpy 1.9 (:issue:`6960`) .. 
_whatsnew_0140.deprecations: diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 4628853df3953..3b527740505e4 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -510,7 +510,7 @@ def rank_1d_generic(object in_arr, bint retry=1, ties_method='average', if not retry: raise - valid_locs = (-mask).nonzero()[0] + valid_locs = (~mask).nonzero()[0] ranks.put(valid_locs, rank_1d_generic(values.take(valid_locs), 0, ties_method=ties_method, ascending=ascending)) diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py index 1f57c459149ad..59d3860032cff 100644 --- a/pandas/computation/ops.py +++ b/pandas/computation/ops.py @@ -42,9 +42,7 @@ class Term(StringMixin): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, string_types) else cls supr_new = super(Term, klass).__new__ - if PY3: - return supr_new(klass) - return supr_new(klass, name, env, side=side, encoding=encoding) + return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): self._name = name diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py index 8fc842d958075..9a1e61ad30386 100644 --- a/pandas/computation/pytables.py +++ b/pandas/computation/pytables.py @@ -33,9 +33,7 @@ class Term(ops.Term): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, string_types) else cls supr_new = StringMixin.__new__ - if PY3: - return supr_new(klass) - return supr_new(klass, name, env, side=side, encoding=encoding) + return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): super(Term, self).__init__(name, env, side=side, encoding=encoding) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 5efba4a9738af..f0ecce0235b49 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -329,7 +329,7 @@ def quantile(x, q, interpolation_method='fraction'): x = np.asarray(x) mask = com.isnull(x) - x = x[-mask] + x = x[~mask] values = 
np.sort(x) @@ -339,7 +339,7 @@ def _get_score(at): idx = at * (len(values) - 1) if idx % 1 == 0: - score = values[idx] + score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], diff --git a/pandas/core/common.py b/pandas/core/common.py index 18a3dba1a44a4..d7a7c1d798731 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -248,7 +248,7 @@ def _isnull_ndarraylike_old(obj): # this is the NaT pattern result = values.view('i8') == tslib.iNaT else: - result = -np.isfinite(values) + result = ~np.isfinite(values) # box if isinstance(obj, ABCSeries): @@ -280,12 +280,22 @@ def notnull(obj): res = isnull(obj) if np.isscalar(res): return not res - return -res + return ~res def _is_null_datelike_scalar(other): """ test whether the object is a null datelike, e.g. Nat but guard against passing a non-scalar """ - return (np.isscalar(other) and (isnull(other) or other == tslib.iNaT)) or other is pd.NaT or other is None + if other is pd.NaT or other is None: + return True + elif np.isscalar(other): + + # a timedelta + if hasattr(other,'dtype'): + return other.view('i8') == tslib.iNaT + elif is_integer(other) and other == tslib.iNaT: + return True + return isnull(other) + return False def array_equivalent(left, right): """ @@ -363,7 +373,7 @@ def mask_missing(arr, values_to_mask): values_to_mask = np.array(values_to_mask, dtype=object) na_mask = isnull(values_to_mask) - nonna = values_to_mask[-na_mask] + nonna = values_to_mask[~na_mask] mask = None for x in nonna: diff --git a/pandas/core/format.py b/pandas/core/format.py index 43eb0e890aa62..0a4f65b6bf0e6 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1861,7 +1861,7 @@ def _get_format_timedelta64(values): def impl(x): if x is None or lib.checknull(x): return 'NaT' - elif format_short and x == 0: + elif format_short and com.is_integer(x) and x.view('int64') == 0: return "0 days" if even_days else "00:00:00" else: return 
lib.repr_timedelta64(x, format=format) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fcd2e65afddcb..10a0c9050af50 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3041,7 +3041,7 @@ def update(self, other, join='left', overwrite=True, filter_func=None, this = self[col].values that = other[col].values if filter_func is not None: - mask = -filter_func(this) | isnull(that) + mask = ~filter_func(this) | isnull(that) else: if raise_conflict: mask_this = notnull(that) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3f2ecd8afd2d4..3aed34c7f188e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -606,7 +606,11 @@ def _indexed_same(self, other): for a in self._AXIS_ORDERS]) def __neg__(self): - arr = operator.neg(_values_from_object(self)) + values = _values_from_object(self) + if values.dtype == np.bool_: + arr = operator.inv(values) + else: + arr = operator.neg(values) return self._wrap_array(arr, self.axes, copy=False) def __invert__(self): @@ -1459,10 +1463,10 @@ def drop(self, labels, axis=0, level=None, inplace=False, **kwargs): if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') - indexer = -lib.ismember(axis.get_level_values(level), + indexer = ~lib.ismember(axis.get_level_values(level), set(labels)) else: - indexer = -axis.isin(labels) + indexer = ~axis.isin(labels) slicer = [slice(None)] * self.ndim slicer[self._get_axis_number(axis_name)] = indexer diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index f650b41ff12be..0a4739f586e40 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1698,7 +1698,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, labels = np.empty(len(inds), dtype=inds.dtype) labels[mask] = ok_labels - labels[-mask] = -1 + labels[~mask] = -1 if len(uniques) < len(level_index): level_index = level_index.take(uniques) diff --git a/pandas/core/index.py 
b/pandas/core/index.py index 8748d0081d2e9..43d0129220a75 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -480,10 +480,10 @@ def to_int(): if is_integer(key): return key elif is_float(key): - if not self.is_floating(): - warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format( - type(self).__name__),FutureWarning) - return to_int() + key = to_int() + warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format( + type(self).__name__),FutureWarning) + return key return self._convert_indexer_error(key, 'label') if is_float(key): @@ -498,17 +498,9 @@ def _validate_slicer(self, key, f): """ validate and raise if needed on a slice indexers according to the passed in function """ - if not f(key.start): - self._convert_indexer_error(key.start, 'slice start value') - if not f(key.stop): - self._convert_indexer_error(key.stop, 'slice stop value') - if not f(key.step): - self._convert_indexer_error(key.step, 'slice step value') - - def _convert_slice_indexer_iloc(self, key): - """ convert a slice indexer for iloc only """ - self._validate_slicer(key, lambda v: v is None or is_integer(v)) - return key + for c in ['start','stop','step']: + if not f(getattr(key,c)): + self._convert_indexer_error(key.start, 'slice {0} value'.format(c)) def _convert_slice_indexer_getitem(self, key, is_index_slice=False): """ called from the getitem slicers, determine how to treat the key @@ -520,6 +512,25 @@ def _convert_slice_indexer_getitem(self, key, is_index_slice=False): def _convert_slice_indexer(self, key, typ=None): """ convert a slice indexer. 
disallow floats in the start/stop/step """ + # validate iloc + if typ == 'iloc': + + # need to coerce to_int if needed + def f(c): + v = getattr(key,c) + if v is None or is_integer(v): + return v + + # warn if its a convertible float + if v == int(v): + warnings.warn("slice indexers when using iloc should be integers " + "and not floating point",FutureWarning) + return int(v) + + self._convert_indexer_error(v, 'slice {0} value'.format(c)) + + return slice(*[ f(c) for c in ['start','stop','step']]) + # validate slicers def validate(v): if v is None or is_integer(v): @@ -530,7 +541,6 @@ def validate(v): return False return True - self._validate_slicer(key, validate) # figure out if this is a positional indexer @@ -543,9 +553,7 @@ def is_int(v): is_index_slice = is_int(start) and is_int(stop) is_positional = is_index_slice and not self.is_integer() - if typ == 'iloc': - return self._convert_slice_indexer_iloc(key) - elif typ == 'getitem': + if typ == 'getitem': return self._convert_slice_indexer_getitem( key, is_index_slice=is_index_slice) @@ -1980,7 +1988,7 @@ def _convert_slice_indexer(self, key, typ=None): """ convert a slice indexer, by definition these are labels unless we are iloc """ if typ == 'iloc': - return self._convert_slice_indexer_iloc(key) + return super(Float64Index, self)._convert_slice_indexer(key, typ=typ) # allow floats here self._validate_slicer( @@ -2386,14 +2394,6 @@ def __unicode__(self): def __len__(self): return len(self.labels[0]) - def _convert_slice_indexer(self, key, typ=None): - """ convert a slice indexer. 
disallow floats in the start/stop/step """ - - if typ == 'iloc': - return self._convert_slice_indexer_iloc(key) - - return super(MultiIndex, self)._convert_slice_indexer(key, typ=typ) - def _get_names(self): return FrozenList(level.name for level in self.levels) @@ -2997,7 +2997,7 @@ def _drop_from_level(self, labels, level): index = self.levels[i] values = index.get_indexer(labels) - mask = -lib.ismember(self.labels[i], set(values)) + mask = ~lib.ismember(self.labels[i], set(values)) return self[mask] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 459c6abbe334e..63988a5976fc9 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -7,7 +7,7 @@ import pandas.core.common as com from pandas.core.common import (_is_bool_indexer, is_integer_dtype, _asarray_tuplesafe, is_list_like, isnull, - ABCSeries, ABCDataFrame, ABCPanel) + ABCSeries, ABCDataFrame, ABCPanel, is_float) import pandas.lib as lib import numpy as np @@ -1319,6 +1319,7 @@ def _get_slice_axis(self, slice_obj, axis=0): if not _need_slice(slice_obj): return obj + slice_obj = self._convert_slice_indexer(slice_obj, axis) if isinstance(slice_obj, slice): return self._slice(slice_obj, axis=axis, typ='iloc') else: @@ -1363,7 +1364,15 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False): def _convert_to_indexer(self, obj, axis=0, is_setter=False): """ much simpler as we only have to deal with our valid types """ - if self._has_valid_type(obj, axis): + + # make need to convert a float key + if isinstance(obj, slice): + return self._convert_slice_indexer(obj, axis) + + elif is_float(obj): + return self._convert_scalar_indexer(obj, axis) + + elif self._has_valid_type(obj, axis): return obj raise ValueError("Can only index by location with a [%s]" % diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 17c0bf283a8a2..887f7562421d7 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1049,7 +1049,7 @@ def to_native_types(self, 
slicer=None, na_rep='', float_format=None, mask = isnull(values) values[mask] = na_rep if float_format: - imask = (-mask).ravel() + imask = (~mask).ravel() values.flat[imask] = np.array( [float_format % val for val in values.ravel()[imask]]) return values.tolist() @@ -1181,7 +1181,7 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs): if na_rep is None: na_rep = 'NaT' rvalues[mask] = na_rep - imask = (-mask).ravel() + imask = (~mask).ravel() rvalues.flat[imask] = np.array([lib.repr_timedelta64(val) for val in values.ravel()[imask]], dtype=object) @@ -1531,7 +1531,7 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None, if na_rep is None: na_rep = 'NaT' rvalues[mask] = na_rep - imask = (-mask).ravel() + imask = (~mask).ravel() if date_format is None: date_formatter = lambda x: Timestamp(x)._repr_base diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 9c2df9b5dde9d..43ececae1b737 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -190,9 +190,9 @@ def _isfinite(values): if issubclass(values.dtype.type, (np.timedelta64, np.datetime64)): return isnull(values) elif isinstance(values.dtype, object): - return -np.isfinite(values.astype('float64')) + return ~np.isfinite(values.astype('float64')) - return -np.isfinite(values) + return ~np.isfinite(values) def _na_ok_dtype(dtype): diff --git a/pandas/core/ops.py b/pandas/core/ops.py index b8e92fb25cec5..d4e756371001b 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -452,7 +452,7 @@ def na_op(x, y): mask = notnull(x) result[mask] = op(x[mask], y) - result, changed = com._maybe_upcast_putmask(result, -mask, pa.NA) + result, changed = com._maybe_upcast_putmask(result, ~mask, pa.NA) result = com._fill_zeros(result, x, y, name, fill_zeros) return result @@ -746,7 +746,7 @@ def na_op(x, y): if np.prod(xrav.shape): result[mask] = op(xrav, y) - result, changed = com._maybe_upcast_putmask(result, -mask, np.nan) + result, changed = com._maybe_upcast_putmask(result, 
~mask, np.nan) result = result.reshape(x.shape) result = com._fill_zeros(result, x, y, name, fill_zeros) @@ -817,9 +817,9 @@ def na_op(x, y): result[mask] = op(np.array(list(xrav[mask])), y) if op == operator.ne: # pragma: no cover - np.putmask(result, -mask, True) + np.putmask(result, ~mask, True) else: - np.putmask(result, -mask, False) + np.putmask(result, ~mask, False) result = result.reshape(x.shape) return result @@ -911,7 +911,7 @@ def na_op(x, y): result = pa.empty(len(x), dtype=x.dtype) mask = notnull(x) result[mask] = op(x[mask], y) - result, changed = com._maybe_upcast_putmask(result, -mask, pa.NA) + result, changed = com._maybe_upcast_putmask(result, ~mask, pa.NA) result = com._fill_zeros(result, x, y, name, fill_zeros) return result @@ -947,9 +947,9 @@ def na_op(x, y): result[mask] = op(np.array(list(xrav[mask])), y) if op == operator.ne: # pragma: no cover - np.putmask(result, -mask, True) + np.putmask(result, ~mask, True) else: - np.putmask(result, -mask, False) + np.putmask(result, ~mask, False) result = result.reshape(x.shape) return result diff --git a/pandas/core/series.py b/pandas/core/series.py index 637b2e8bfc67d..c94d7dc9acefd 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -961,7 +961,11 @@ def iteritems(self): # inversion def __neg__(self): - arr = operator.neg(self.values) + values = self.values + if values.dtype == np.bool_: + arr = operator.inv(values) + else: + arr = operator.neg(values) return self._constructor(arr, self.index).__finalize__(self) def __invert__(self): @@ -1646,7 +1650,7 @@ def argsort(self, axis=0, kind='quicksort', order=None): if mask.any(): result = Series( -1, index=self.index, name=self.name, dtype='int64') - notmask = -mask + notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) return self._constructor(result, index=self.index).__finalize__(self) @@ -1767,7 +1771,7 @@ def _try_kind_sort(arr): bad = isnull(arr) - good = -bad + good = ~bad idx = pa.arange(len(self)) argsorted = 
_try_kind_sort(arr[good]) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 7bcc534a34a1f..6d40aa175cdb6 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -51,7 +51,7 @@ def str_cat(arr, others=None, sep=None, na_rep=None): result = np.empty(n, dtype=object) np.putmask(result, na_mask, np.nan) - notmask = -na_mask + notmask = ~na_mask tuples = zip(*[x[notmask] for x in arrays]) cats = [sep.join(tup) for tup in tuples] diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index 7b23b306d2927..38a5688ed96e8 100644 --- a/pandas/sparse/array.py +++ b/pandas/sparse/array.py @@ -509,7 +509,7 @@ def make_sparse(arr, kind='block', fill_value=nan): length = len(arr) if np.isnan(fill_value): - mask = -np.isnan(arr) + mask = ~np.isnan(arr) else: mask = arr != fill_value diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index eac1e0373f24d..34060d0c57a4e 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -152,7 +152,18 @@ def infer_dtype_list(list values): cdef inline bint is_null_datetimelike(v): - return util._checknull(v) or (util.is_integer_object(v) and v == iNaT) or v is NaT + # determine if we have a null for a timedelta/datetime (or integer versions)x + if util._checknull(v): + return True + elif util.is_timedelta64_object(v): + return v.view('int64') == iNaT + elif util.is_datetime64_object(v): + return v.view('int64') == iNaT + elif util.is_integer_object(v): + return v == iNaT + elif v is NaT: + return True + return False cdef inline bint is_datetime(object o): return PyDateTime_Check(o) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 2aac364d16770..11c065a52d78e 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -1270,7 +1270,9 @@ def test_getitem_setitem_float_labels(self): df = DataFrame(np.random.randn(5, 5), index=index) # positional slicing only via iloc! 
- result = df.iloc[1.0:5] + with tm.assert_produces_warning(FutureWarning): + result = df.iloc[1.0:5] + expected = df.reindex([2.5, 3.5, 4.5, 5.0]) assert_frame_equal(result, expected) self.assertEqual(len(result), 4) @@ -1280,15 +1282,26 @@ def test_getitem_setitem_float_labels(self): assert_frame_equal(result, expected) self.assertEqual(len(result), 1) + # GH 4892, float indexers in iloc are deprecated + import warnings + warnings.filterwarnings(action='error', category=FutureWarning) + cp = df.copy() - cp.iloc[1.0:5] = 0 - self.assert_((cp.iloc[1.0:5] == 0).values.all()) - self.assert_((cp.iloc[0:1] == df.iloc[0:1]).values.all()) + def f(): + cp.iloc[1.0:5] = 0 + self.assertRaises(FutureWarning, f) + def f(): + result = cp.iloc[1.0:5] == 0 + self.assertRaises(FutureWarning, f) + self.assertTrue(result.values.all()) + self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all()) + + warnings.filterwarnings(action='ignore', category=FutureWarning) cp = df.copy() cp.iloc[4:5] = 0 - self.assert_((cp.iloc[4:5] == 0).values.all()) - self.assert_((cp.iloc[0:4] == df.iloc[0:4]).values.all()) + self.assertTrue((cp.iloc[4:5] == 0).values.all()) + self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all()) # float slicing result = df.ix[1.0:5] @@ -1313,7 +1326,8 @@ def test_getitem_setitem_float_labels(self): cp = df.copy() cp.ix[1.0:5.0] = 0 - self.assert_((cp.ix[1.0:5.0] == 0).values.all()) + result = cp.ix[1.0:5.0] + self.assertTrue((result == 0).values.all()) def test_setitem_single_column_mixed(self): df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'], @@ -4786,7 +4800,8 @@ def _check_unary_op(op): _check_bin_op(operator.or_) _check_bin_op(operator.xor) - _check_unary_op(operator.neg) + # operator.neg is deprecated in numpy >= 1.9 + _check_unary_op(operator.inv) def test_logical_typeerror(self): if not compat.PY3: @@ -11110,7 +11125,7 @@ def test_rank2(self): exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. 
]}) assert_frame_equal(df.rank(), exp) - + def test_rank_na_option(self): from pandas.compat.scipy import rankdata diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 91bca01ab73b5..42bb76930d783 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -526,7 +526,7 @@ def test_interpolate_index_values(self): expected = s.copy() bad = isnull(expected.values) - good = -bad + good = ~bad expected = Series( np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index a105b17795398..d89a88138b8fb 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3229,48 +3229,92 @@ def test_deprecate_float_indexers(self): tm.makeDateIndex, tm.makePeriodIndex ]: i = index(5) + + for s in [ Series(np.arange(len(i)),index=i), DataFrame(np.random.randn(len(i),len(i)),index=i,columns=i) ]: + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0]) + + # setting + def f(): + s.iloc[3.0] = 0 + self.assertRaises(FutureWarning, f) + + # fallsback to position selection ,series only s = Series(np.arange(len(i)),index=i) - self.assertRaises(FutureWarning, lambda : - s.iloc[3.0]) + s[3] self.assertRaises(FutureWarning, lambda : s[3.0]) - # this is ok! - s[3] - # ints i = index(5) - s = Series(np.arange(len(i))) - self.assertRaises(FutureWarning, lambda : - s.iloc[3.0]) + for s in [ Series(np.arange(len(i))), DataFrame(np.random.randn(len(i),len(i)),index=i,columns=i) ]: + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0]) - # on some arch's this doesn't provide a warning (and thus raise) - # and some it does - try: - s[3.0] - except: - pass + # on some arch's this doesn't provide a warning (and thus raise) + # and some it does + try: + s[3.0] + except: + pass + + # setting + def f(): + s.iloc[3.0] = 0 + self.assertRaises(FutureWarning, f) # floats: these are all ok! i = np.arange(5.) 
- s = Series(np.arange(len(i)),index=i) - with tm.assert_produces_warning(False): - s[3.0] - with tm.assert_produces_warning(False): - s[3] + for s in [ Series(np.arange(len(i)),index=i), DataFrame(np.random.randn(len(i),len(i)),index=i,columns=i) ]: + with tm.assert_produces_warning(False): + s[3.0] + + with tm.assert_produces_warning(False): + s[3] + + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0]) + + with tm.assert_produces_warning(False): + s.iloc[3] + + with tm.assert_produces_warning(False): + s.loc[3.0] - with tm.assert_produces_warning(False): - s.iloc[3.0] + with tm.assert_produces_warning(False): + s.loc[3] - with tm.assert_produces_warning(False): - s.iloc[3] + def f(): + s.iloc[3.0] = 0 + self.assertRaises(FutureWarning, f) - with tm.assert_produces_warning(False): - s.loc[3.0] + # slices + for index in [ tm.makeIntIndex, tm.makeFloatIndex, + tm.makeStringIndex, tm.makeUnicodeIndex, + tm.makeDateIndex, tm.makePeriodIndex ]: - with tm.assert_produces_warning(False): - s.loc[3] + index = index(5) + for s in [ Series(range(5),index=index), DataFrame(np.random.randn(5,2),index=index) ]: + + # getitem + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0:4]) + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0:4.0]) + self.assertRaises(FutureWarning, lambda : + s.iloc[3:4.0]) + + # setitem + def f(): + s.iloc[3.0:4] = 0 + self.assertRaises(FutureWarning, f) + def f(): + s.iloc[3:4.0] = 0 + self.assertRaises(FutureWarning, f) + def f(): + s.iloc[3.0:4.0] = 0 + self.assertRaises(FutureWarning, f) warnings.filterwarnings(action='ignore', category=FutureWarning) diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index 1915136f11e47..0bf3f1bec9706 100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -338,7 +338,7 @@ def test_rank(): from pandas.compat.scipy import rankdata def _check(arr): - mask = -np.isfinite(arr) + mask = ~np.isfinite(arr) arr = arr.copy() result = algos.rank_1d_float64(arr) 
arr[mask] = np.inf diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 5fca119c14e83..6d9e32433cd1e 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -1065,7 +1065,7 @@ def _format_native_types(self, na_rep=u('NaT'), **kwargs): mask = isnull(self.values) values[mask] = na_rep - imask = -mask + imask = ~mask values[imask] = np.array([u('%s') % dt for dt in values[imask]]) return values.tolist() diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 215e6e62c685e..5585233f4a9a5 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -122,8 +122,8 @@ def conv(v): def test_nat_converters(self): _skip_if_numpy_not_friendly() - self.assertEqual(to_timedelta('nat',box=False), tslib.iNaT) - self.assertEqual(to_timedelta('nan',box=False), tslib.iNaT) + self.assertEqual(to_timedelta('nat',box=False).astype('int64'), tslib.iNaT) + self.assertEqual(to_timedelta('nan',box=False).astype('int64'), tslib.iNaT) def test_to_timedelta(self): _skip_if_numpy_not_friendly() @@ -137,7 +137,7 @@ def conv(v): # empty string result = to_timedelta('',box=False) - self.assertEqual(result, tslib.iNaT) + self.assertEqual(result.astype('int64'), tslib.iNaT) result = to_timedelta(['', '']) self.assert_(isnull(result).all()) @@ -302,10 +302,10 @@ def test_to_timedelta_on_missing_values(self): assert_series_equal(actual, expected) actual = pd.to_timedelta(np.nan) - self.assertEqual(actual, timedelta_NaT) + self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64')) actual = pd.to_timedelta(pd.NaT) - self.assertEqual(actual, timedelta_NaT) + self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64')) def test_timedelta_ops_with_missing_values(self): _skip_if_numpy_not_friendly() diff --git a/pandas/tseries/tests/test_util.py b/pandas/tseries/tests/test_util.py index b10c4351c8725..df556cdc77d08 100644 --- a/pandas/tseries/tests/test_util.py +++ 
b/pandas/tseries/tests/test_util.py @@ -24,7 +24,7 @@ def test_daily(self): annual = pivot_annual(ts, 'D') doy = ts.index.dayofyear - doy[(-isleapyear(ts.index.year)) & (doy >= 60)] += 1 + doy[(~isleapyear(ts.index.year)) & (doy >= 60)] += 1 for i in range(1, 367): subset = ts[doy == i] @@ -47,7 +47,7 @@ def test_hourly(self): grouped = ts_hourly.groupby(ts_hourly.index.year) hoy = grouped.apply(lambda x: x.reset_index(drop=True)) hoy = hoy.index.droplevel(0).values - hoy[-isleapyear(ts_hourly.index.year) & (hoy >= 1416)] += 24 + hoy[~isleapyear(ts_hourly.index.year) & (hoy >= 1416)] += 24 hoy += 1 annual = pivot_annual(ts_hourly) diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 6761b5cbb04b0..d01ad56165880 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -334,7 +334,7 @@ def calc(carg): def calc_with_mask(carg,mask): result = np.empty(carg.shape, dtype='M8[ns]') iresult = result.view('i8') - iresult[-mask] = tslib.iNaT + iresult[~mask] = tslib.iNaT result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).astype('M8[ns]') return result diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py index 664a42543822d..72b12ea495ba0 100644 --- a/pandas/tseries/util.py +++ b/pandas/tseries/util.py @@ -52,7 +52,7 @@ def pivot_annual(series, freq=None): offset = index.dayofyear - 1 # adjust for leap year - offset[(-isleapyear(year)) & (offset >= 59)] += 1 + offset[(~isleapyear(year)) & (offset >= 59)] += 1 columns = lrange(1, 367) # todo: strings like 1/1, 1/25, etc.? 
@@ -66,7 +66,7 @@ def pivot_annual(series, freq=None): defaulted = grouped.apply(lambda x: x.reset_index(drop=True)) defaulted.index = defaulted.index.droplevel(0) offset = np.asarray(defaulted.index) - offset[-isleapyear(year) & (offset >= 1416)] += 24 + offset[~isleapyear(year) & (offset >= 1416)] += 24 columns = lrange(1, 8785) else: raise NotImplementedError(freq) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index ba6f03fd9bbb0..e76a2d0cb6cf1 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -402,17 +402,17 @@ class Timestamp(_Timestamp): if month <= 2: year -= 1 month += 12 - return (day + - np.fix((153*month - 457)/5) + - 365*year + - np.floor(year / 4) - - np.floor(year / 100) + - np.floor(year / 400) + - 1721118.5 + - (self.hour + - self.minute/60.0 + - self.second/3600.0 + - self.microsecond/3600.0/1e+6 + + return (day + + np.fix((153*month - 457)/5) + + 365*year + + np.floor(year / 4) - + np.floor(year / 100) + + np.floor(year / 400) + + 1721118.5 + + (self.hour + + self.minute/60.0 + + self.second/3600.0 + + self.microsecond/3600.0/1e+6 + self.nanosecond/3600.0/1e+9 )/24.0) @@ -1114,7 +1114,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, continue raise elif util.is_datetime64_object(val): - if val == np_NaT: + if val is np_NaT or val.view('i8') == iNaT: iresult[i] = iNaT else: try: @@ -1190,7 +1190,11 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, oresult = np.empty(n, dtype=object) for i in range(n): val = values[i] - if util.is_datetime64_object(val): + + # set as nan if is even a datetime NaT + if _checknull_with_nat(val) or val is np_NaT: + oresult[i] = np.nan + elif util.is_datetime64_object(val): oresult[i] = val.item() else: oresult[i] = val diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 8abbb37646b49..95fcc7848c433 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -54,16 +54,19 @@ K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False +# set 
testing_mode +testing_mode = os.environ.get('PANDAS_TESTING_MODE','None') +if 'numpy_deprecate' in testing_mode: + warnings.simplefilter('always', DeprecationWarning) + class TestCase(unittest.TestCase): @classmethod def setUpClass(cls): pd.set_option('chained_assignment','raise') - #print("setting up: {0}".format(cls)) @classmethod def tearDownClass(cls): - #print("tearing down up: {0}".format(cls)) pass def assert_numpy_array_equal(self, np_array, assert_equal):
COMPAT: change neg of boolean to inv (deprecation in numpy 1.9) COMPAT: remove deprecation warnings on **new** (in computation/pytables) COMPAT: fix array_to_datetime on invalid comparison to NaT related #6958 add ability to set an environmental variable to actually show the DeprecationWarnings just `setenv PANDAS_TESTING_MODE 'numpy_deprecate'`
https://api.github.com/repos/pandas-dev/pandas/pulls/6960
2014-04-25T00:43:53Z
2014-04-27T17:47:19Z
2014-04-27T17:47:19Z
2014-06-17T08:26:48Z
MAINT: Numpy deprecated boolean unary '-' operator
diff --git a/pandas/core/common.py b/pandas/core/common.py index 18a3dba1a44a4..ad5d14989b742 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -280,7 +280,7 @@ def notnull(obj): res = isnull(obj) if np.isscalar(res): return not res - return -res + return ~res def _is_null_datelike_scalar(other): """ test whether the object is a null datelike, e.g. Nat
I'm sure there are more of these, but I just caught this one when I was raising on all warnings.
https://api.github.com/repos/pandas-dev/pandas/pulls/6958
2014-04-24T18:06:43Z
2014-04-25T00:46:38Z
null
2014-06-18T02:07:24Z
BUG: fix handling of color argument for variety of plotting functions
diff --git a/doc/source/release.rst b/doc/source/release.rst index a6aa842940bc0..4e4d61c3e971c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -229,6 +229,14 @@ Deprecations returned if possible, otherwise a copy will be made. Previously the user could think that ``copy=False`` would ALWAYS return a view. (:issue:`6894`) +- The :func:`parallel_coordinates` function now takes argument ``color`` + instead of ``colors``. A ``FutureWarning`` is raised to alert that + the old ``colors`` argument will not be supported in a future release + +- The :func:`parallel_coordinates` and :func:`andrews_curves` functions now take + positional argument ``frame`` instead of ``data``. A ``FutureWarning`` is + raised if the old ``data`` argument is used by name. + Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -460,6 +468,10 @@ Bug Fixes - Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) - Bug surfaced in groupby.plot when using a ``Float64Index`` (:issue:`7025`) - Stopped tests from failing if options data isn't able to be downloaded from Yahoo (:issue:`7034`) +- Bug in ``parallel_coordinates`` and ``radviz`` where reordering of class column + caused possible color/class mismatch +- Bug in ``radviz`` and ``andrews_curves`` where multiple values of 'color' + were being passed to plotting method pandas 0.13.1 ------------- diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index b5df39df3b617..f5e018b6141fe 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -382,6 +382,14 @@ Plotting Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coodinates as line plot. However, bar plot may differs unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. 
In this cases, please modify your script to meet with new coordinates. +- The :func:`parallel_coordinates` function now takes argument ``color`` + instead of ``colors``. A ``FutureWarning`` is raised to alert that + the old ``colors`` argument will not be supported in a future release + +- The :func:`parallel_coordinates` and :func:`andrews_curves` functions now take + positional argument ``frame`` instead of ``data``. A ``FutureWarning`` is + raised if the old ``data`` argument is used by name. + .. _whatsnew_0140.prior_deprecations: Prior Version Deprecations/Changes diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 829b2b296155f..e3f49e14400d1 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1220,11 +1220,29 @@ def scat2(x, y, by=None, ax=None, figsize=None): def test_andrews_curves(self): from pandas import read_csv from pandas.tools.plotting import andrews_curves - + from matplotlib import cm + path = os.path.join(curpath(), 'data', 'iris.csv') df = read_csv(path) _check_plot_works(andrews_curves, df, 'Name') + _check_plot_works(andrews_curves, df, 'Name', + color=('#556270', '#4ECDC4', '#C7F464')) + _check_plot_works(andrews_curves, df, 'Name', + color=['dodgerblue', 'aquamarine', 'seagreen']) + _check_plot_works(andrews_curves, df, 'Name', colormap=cm.jet) + + colors = ['b', 'g', 'r'] + df = DataFrame({"A": [1, 2, 3], + "B": [1, 2, 3], + "C": [1, 2, 3], + "Name": colors}) + ax = andrews_curves(df, 'Name', color=colors) + legend_colors = [l.get_color() for l in ax.legend().get_lines()] + self.assertEqual(colors, legend_colors) + + with tm.assert_produces_warning(FutureWarning): + andrews_curves(data=df, class_column='Name') @slow def test_parallel_coordinates(self): @@ -1235,13 +1253,9 @@ def test_parallel_coordinates(self): df = read_csv(path) _check_plot_works(parallel_coordinates, df, 'Name') _check_plot_works(parallel_coordinates, df, 'Name', - colors=('#556270', '#4ECDC4', '#C7F464')) - 
_check_plot_works(parallel_coordinates, df, 'Name', - colors=['dodgerblue', 'aquamarine', 'seagreen']) + color=('#556270', '#4ECDC4', '#C7F464')) _check_plot_works(parallel_coordinates, df, 'Name', - colors=('#556270', '#4ECDC4', '#C7F464')) - _check_plot_works(parallel_coordinates, df, 'Name', - colors=['dodgerblue', 'aquamarine', 'seagreen']) + color=['dodgerblue', 'aquamarine', 'seagreen']) _check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet) df = read_csv(path, header=None, skiprows=1, names=[1, 2, 4, 8, @@ -1249,6 +1263,21 @@ def test_parallel_coordinates(self): _check_plot_works(parallel_coordinates, df, 'Name', use_columns=True) _check_plot_works(parallel_coordinates, df, 'Name', xticks=[1, 5, 25, 125]) + + colors = ['b', 'g', 'r'] + df = DataFrame({"A": [1, 2, 3], + "B": [1, 2, 3], + "C": [1, 2, 3], + "Name": colors}) + ax = parallel_coordinates(df, 'Name', color=colors) + legend_colors = [l.get_color() for l in ax.legend().get_lines()] + self.assertEqual(colors, legend_colors) + + with tm.assert_produces_warning(FutureWarning): + parallel_coordinates(df, 'Name', colors=colors) + + with tm.assert_produces_warning(FutureWarning): + parallel_coordinates(data=df, class_column='Name') @slow def test_radviz(self): @@ -1259,8 +1288,24 @@ def test_radviz(self): path = os.path.join(curpath(), 'data', 'iris.csv') df = read_csv(path) _check_plot_works(radviz, df, 'Name') + _check_plot_works(radviz, df, 'Name', + color=('#556270', '#4ECDC4', '#C7F464')) + _check_plot_works(radviz, df, 'Name', + color=['dodgerblue', 'aquamarine', 'seagreen']) _check_plot_works(radviz, df, 'Name', colormap=cm.jet) + colors = [[0., 0., 1., 1.], + [0., 0.5, 1., 1.], + [1., 0., 0., 1.]] + df = DataFrame({"A": [1, 2, 3], + "B": [2, 1, 3], + "C": [3, 2, 1], + "Name": ['b', 'g', 'r']}) + ax = radviz(df, 'Name', color=colors) + legend_colors = [c.get_facecolor().squeeze().tolist() + for c in ax.collections] + self.assertEqual(colors, legend_colors) + @slow def 
test_plot_int_columns(self): df = DataFrame(randn(100, 4)).cumsum() diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 4453b1db359e9..b11d71f48baf2 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -8,7 +8,7 @@ import numpy as np -from pandas.util.decorators import cache_readonly +from pandas.util.decorators import cache_readonly, deprecate_kwarg import pandas.core.common as com from pandas.core.index import MultiIndex from pandas.core.series import Series, remove_na @@ -354,19 +354,22 @@ def _get_marker_compat(marker): return 'o' return marker - -def radviz(frame, class_column, ax=None, colormap=None, **kwds): +def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): """RadViz - a multivariate data visualization algorithm Parameters: ----------- - frame: DataFrame object - class_column: Column name that contains information about class membership + frame: DataFrame + class_column: str + Column name containing class names ax: Matplotlib axis object, optional + color: list or tuple, optional + Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. 
- kwds: Matplotlib scatter method keyword arguments, optional + kwds: keywords + Options to pass to matplotlib scatter plotting method Returns: -------- @@ -380,44 +383,42 @@ def normalize(series): b = max(series) return (series - a) / (b - a) - column_names = [column_name for column_name in frame.columns - if column_name != class_column] - - df = frame[column_names].apply(normalize) + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + df = frame.drop(class_column, axis=1).apply(normalize) if ax is None: ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) - classes = set(frame[class_column]) to_plot = {} - colors = _get_standard_colors(num_colors=len(classes), colormap=colormap, - color_type='random', color=kwds.get('color')) + color_type='random', color=color) - for class_ in classes: - to_plot[class_] = [[], []] + for kls in classes: + to_plot[kls] = [[], []] n = len(frame.columns) - 1 s = np.array([(np.cos(t), np.sin(t)) for t in [2.0 * np.pi * (i / float(n)) for i in range(n)]]) - for i in range(len(frame)): - row = df.irow(i).values + for i in range(n): + row = df.iloc[i].values row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) y = (s * row_).sum(axis=0) / row.sum() - class_name = frame[class_column].iget(i) - to_plot[class_name][0].append(y[0]) - to_plot[class_name][1].append(y[1]) + kls = class_col.iat[i] + to_plot[kls][0].append(y[0]) + to_plot[kls][1].append(y[1]) - for i, class_ in enumerate(classes): - ax.scatter(to_plot[class_][0], to_plot[class_][1], color=colors[i], - label=com.pprint_thing(class_), **kwds) + for i, kls in enumerate(classes): + ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i], + label=com.pprint_thing(kls), **kwds) ax.legend() ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none')) - for xy, name in zip(s, column_names): + for xy, name in zip(s, df.columns): ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray')) @@ -437,21 +438,24 @@ def 
normalize(series): ax.axis('equal') return ax - -def andrews_curves(data, class_column, ax=None, samples=200, colormap=None, - **kwds): +@deprecate_kwarg(old_arg_name='data', new_arg_name='frame') +def andrews_curves(frame, class_column, ax=None, samples=200, color=None, + colormap=None, **kwds): """ Parameters: ----------- - data : DataFrame + frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve + color: list or tuple, optional + Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. - kwds : Optional plotting arguments to be passed to matplotlib + kwds: keywords + Options to pass to matplotlib plotting method Returns: -------- @@ -475,30 +479,31 @@ def f(x): return result return f - n = len(data) - class_col = data[class_column] - uniq_class = class_col.drop_duplicates() - columns = [data[col] for col in data.columns if (col != class_column)] + n = len(frame) + class_col = frame[class_column] + classes = frame[class_column].drop_duplicates() + df = frame.drop(class_column, axis=1) x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)] used_legends = set([]) - colors = _get_standard_colors(num_colors=len(uniq_class), colormap=colormap, - color_type='random', color=kwds.get('color')) - col_dict = dict([(klass, col) for klass, col in zip(uniq_class, colors)]) + color_values = _get_standard_colors(num_colors=len(classes), + colormap=colormap, color_type='random', + color=color) + colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca(xlim=(-pi, pi)) for i in range(n): - row = [columns[c][i] for c in range(len(columns))] + row = df.iloc[i].values f = function(row) y = [f(t) for t in x] - label = None - if com.pprint_thing(class_col[i]) not in 
used_legends: - label = com.pprint_thing(class_col[i]) + kls = class_col.iat[i] + label = com.pprint_thing(kls) + if label not in used_legends: used_legends.add(label) - ax.plot(x, y, color=col_dict[class_col[i]], label=label, **kwds) + ax.plot(x, y, color=colors[kls], label=label, **kwds) else: - ax.plot(x, y, color=col_dict[class_col[i]], **kwds) - + ax.plot(x, y, color=colors[kls], **kwds) + ax.legend(loc='upper right') ax.grid() return ax @@ -564,22 +569,23 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): plt.setp(axis.get_yticklabels(), fontsize=8) return fig - -def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None, - use_columns=False, xticks=None, colormap=None, **kwds): +@deprecate_kwarg(old_arg_name='colors', new_arg_name='color') +@deprecate_kwarg(old_arg_name='data', new_arg_name='frame') +def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None, + use_columns=False, xticks=None, colormap=None, + **kwds): """Parallel coordinates plotting. Parameters ---------- - data: DataFrame - A DataFrame containing data to be plotted + frame: DataFrame class_column: str Column name containing class names cols: list, optional A list of column names to use ax: matplotlib.axis, optional matplotlib axis object - colors: list or tuple, optional + color: list or tuple, optional Colors to use for the different classes use_columns: bool, optional If true, columns will be used as xticks @@ -587,8 +593,8 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None, A list of values to use for xticks colormap: str or matplotlib colormap, default None Colormap to use for line colors. 
- kwds: list, optional - A list of keywords for matplotlib plot method + kwds: keywords + Options to pass to matplotlib plotting method Returns ------- @@ -600,20 +606,19 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None, >>> from pandas.tools.plotting import parallel_coordinates >>> from matplotlib import pyplot as plt >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv') - >>> parallel_coordinates(df, 'Name', colors=('#556270', '#4ECDC4', '#C7F464')) + >>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464')) >>> plt.show() """ import matplotlib.pyplot as plt - - n = len(data) - classes = set(data[class_column]) - class_col = data[class_column] + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] if cols is None: - df = data.drop(class_column, axis=1) + df = frame.drop(class_column, axis=1) else: - df = data[cols] + df = frame[cols] used_legends = set([]) @@ -638,19 +643,17 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None, color_values = _get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', - color=colors) + color=color) colors = dict(zip(classes, color_values)) for i in range(n): - row = df.irow(i).values - y = row - kls = class_col.iget_value(i) - if com.pprint_thing(kls) not in used_legends: - label = com.pprint_thing(kls) + y = df.iloc[i].values + kls = class_col.iat[i] + label = com.pprint_thing(kls) + if label not in used_legends: used_legends.add(label) - ax.plot(x, y, color=colors[kls], - label=label, **kwds) + ax.plot(x, y, color=colors[kls], label=label, **kwds) else: ax.plot(x, y, color=colors[kls], **kwds)
parallel_coordinates - fix reordering of class column (from set) causing possible color/class mismatch - deprecated use of argument colors in favor of color radviz - fix reordering of class column (from set) causing possible color/class mismatch - added explicit color keyword argument (avoids multiple values 'color' being passed to plotting method) andrews_curves - added explicit color keyword argument (avoids multiple values 'color' being passed to plotting method) To recreate the bug: ``` python import pandas as pd from pandas.tools.plotting import parallel_coordinates, radviz, andrews_curves x = pd.DataFrame([[1,2,3,'b'], [2,2,1,'g'], [3,3,1,'r']], columns=[0,1,2,'name']) parallel_coordinates(x, 'name', colors=['b','g','r']) # TypeError from scatter radviz(x, 'name', color=['b','g','r']) # TypeError from plot andrews_curves(x, 'name', color=['b','g','r']) ``` parallel_coordinates before: ![ppl_old](https://cloud.githubusercontent.com/assets/3686522/2791463/28c84c9a-cbc7-11e3-9099-9e064d31e995.png) parallel_coordinates after: ![ppl_new](https://cloud.githubusercontent.com/assets/3686522/2791462/28c73e0e-cbc7-11e3-804e-73dc43cbd6ca.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/6956
2014-04-24T15:52:32Z
2014-05-05T23:12:09Z
2014-05-05T23:12:09Z
2014-06-21T08:53:32Z
ENH: Quantiles accepts an array
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index a5001e840f471..e63728e22d23a 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -482,6 +482,8 @@ Enhancements - Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) - ``CustomBuisnessMonthBegin`` and ``CustomBusinessMonthEnd`` are now available (:issue:`6866`) +- :meth:`Series.quantile` and :meth:`DataFrame.quantile` now accept an array of + quantiles. Performance ~~~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fcbd0688792fb..23736dafe3556 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4145,22 +4145,41 @@ def mode(self, axis=0, numeric_only=False): def quantile(self, q=0.5, axis=0, numeric_only=True): """ Return values at the given quantile over requested axis, a la - scoreatpercentile in scipy.stats + numpy.percentile. Parameters ---------- - q : quantile, default 0.5 (50% quantile) - 0 <= q <= 1 + q : float or array-like, default 0.5 (50% quantile) + 0 <= q <= 1, the quantile(s) to compute axis : {0, 1} 0 for row-wise, 1 for column-wise Returns ------- - quantiles : Series + quantiles : Series or DataFrame + If ``q`` is an array, a DataFrame will be returned where the + index is ``q``, the columns are the columns of self, and the + values are the quantiles. + If ``q`` is a float, a Series will be returned where the + index is the columns of self and the values are the quantiles. 
+ + Examples + -------- + + >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), + columns=['a', 'b']) + >>> df.quantile(.1) + a 1.3 + b 3.7 + dtype: float64 + >>> df.quantile([.1, .5]) + a b + 0.1 1.3 3.7 + 0.5 2.5 55.0 """ - per = q * 100 + per = np.asarray(q) * 100 - def f(arr): + def f(arr, per): arr = arr.values if arr.dtype != np.float_: arr = arr.astype(float) @@ -4171,7 +4190,12 @@ def f(arr): return _quantile(arr, per) data = self._get_numeric_data() if numeric_only else self - return data.apply(f, axis=axis) + if com.is_list_like(per): + from pandas.tools.merge import concat + return concat([data.apply(f, axis=axis, args=(x,)) for x in per], + axis=1, keys=per/100.).T + else: + return data.apply(f, axis=axis, args=(per,)) def rank(self, axis=0, numeric_only=None, method='average', na_option='keep', ascending=True, pct=False): diff --git a/pandas/core/series.py b/pandas/core/series.py index 6172f87ead246..637b2e8bfc67d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1220,26 +1220,51 @@ def round(self, decimals=0, out=None): def quantile(self, q=0.5): """ - Return value at the given quantile, a la scoreatpercentile in - scipy.stats + Return value at the given quantile, a la numpy.percentile. Parameters ---------- - q : quantile - 0 <= q <= 1 + q : float or array-like, default 0.5 (50% quantile) + 0 <= q <= 1, the quantile(s) to compute Returns ------- - quantile : float + quantile : float or Series + if ``q`` is an array, a Series will be returned where the + index is ``q`` and the values are the quantiles. 
+ + Examples + -------- + + >>> s = Series([1, 2, 3, 4]) + >>> s.quantile(.5) + 2.5 + >>> s.quantile([.25, .5, .75]) + 0.25 1.75 + 0.50 2.50 + 0.75 3.25 + dtype: float64 """ valid_values = self.dropna().values if len(valid_values) == 0: return pa.NA + + def multi(values, qs): + if com.is_list_like(qs): + return Series([_quantile(values, x*100) + for x in qs], index=qs) + else: + return _quantile(values, qs*100) + if com.is_datetime64_dtype(self): values = _values_from_object(self).view('i8') - result = lib.Timestamp(_quantile(values, q * 100)) + result = multi(values, q) + if com.is_list_like(q): + result = result.map(lib.Timestamp) + else: + result = lib.Timestamp(result) else: - result = _quantile(valid_values, q * 100) + result = multi(valid_values, q) return result diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index b9692214dcb74..3a3d5a822163f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10945,6 +10945,25 @@ def test_quantile(self): xp = df.median() assert_series_equal(rs, xp) + def test_quantile_multi(self): + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], + columns=['a', 'b', 'c']) + result = df.quantile([.25, .5]) + expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]], + index=[.25, .5], columns=['a', 'b', 'c']) + assert_frame_equal(result, expected) + + # axis = 1 + result = df.quantile([.25, .5], axis=1) + expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]], + index=[.25, .5], columns=[0, 1, 2]) + + # empty + result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0) + expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]}, + index=[.1, .9]) + assert_frame_equal(result, expected) + def test_cumsum(self): self.tsframe.ix[5:10, 0] = nan self.tsframe.ix[10:15, 1] = nan @@ -12728,7 +12747,6 @@ def check_query_with_unnamed_multiindex(self, parser, engine): df = DataFrame(randn(10, 2), index=index) ind = Series(df.index.get_level_values(0).values, index=index) - #import ipdb; 
ipdb.set_trace() res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine) res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine) exp = df[ind == 'red'] diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 5dd3201ee5214..839804be4437c 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2203,6 +2203,22 @@ def test_quantile(self): q = tds.quantile(.25) self.assertEqual(q, pd.to_timedelta('24:00:00')) + def test_quantile_multi(self): + from numpy import percentile + + qs = [.1, .9] + result = self.ts.quantile(qs) + expected = pd.Series([percentile(self.ts.valid(), 10), + percentile(self.ts.valid(), 90)], + index=qs) + assert_series_equal(result, expected) + + dts = self.ts.index.to_series() + result = dts.quantile((.2, .2)) + assert_series_equal(result, Series([Timestamp('2000-01-10 19:12:00'), + Timestamp('2000-01-10 19:12:00')], + index=[.2, .2])) + def test_describe(self): _ = self.series.describe() _ = self.ts.describe()
Doesn't quite finish #4196, but it should be easy not that `quantile` takes arrays. I did this on top of #6953, so that should go in first.
https://api.github.com/repos/pandas-dev/pandas/pulls/6955
2014-04-24T15:13:23Z
2014-04-25T11:55:06Z
2014-04-25T11:55:06Z
2014-07-16T09:03:02Z
BUG: Bug in sum/mean on 32-bit platforms on overflows (GH6915)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 4a7ef0ed70828..a888f03b9d8e7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -422,6 +422,7 @@ Bug Fixes - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) - Bug in ``DataFrame.apply`` with functions that used *args or **kwargs and returned an empty result (:issue:`6952`) +- Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) pandas 0.13.1 ------------- diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a47c7f82d9199..9c2df9b5dde9d 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -75,7 +75,7 @@ def f(values, axis=None, skipna=True, **kwds): result.fill(0) return result - if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype): + if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): result = bn_func(values, axis=axis, **kwds) # prefer to treat inf/-inf as NA, but must compute the func @@ -92,11 +92,18 @@ def f(values, axis=None, skipna=True, **kwds): return f -def _bn_ok_dtype(dt): +def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 - time_types = np.datetime64, np.timedelta64 - return dt != np.object_ and not issubclass(dt.type, time_types) + if dt != np.object_ and not issubclass(dt.type, (np.datetime64, np.timedelta64)): + # bottleneck does not properly upcast during the sum + # so can overflow + if name == 'nansum': + if dt != np.bool_ and dt.itemsize < 8: + return False + + return True + return False def _has_infs(result): if isinstance(result, np.ndarray): @@ -165,7 +172,18 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, values = values.copy() values = _view_if_needed(values) - return values, mask, dtype + + # return a platform independent precision dtype + dtype_max = dtype + if dtype.kind == 'i' and not issubclass( + dtype.type, (np.bool, np.datetime64, np.timedelta64)): + dtype_max = np.int64 + 
elif dtype.kind in ['b'] or issubclass(dtype.type, np.bool): + dtype_max = np.int64 + elif dtype.kind in ['f']: + dtype_max = np.float64 + + return values, mask, dtype, dtype_max def _isfinite(values): @@ -216,20 +234,20 @@ def _wrap_results(result, dtype): def nanany(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna, False, copy=skipna) + values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna) return values.any(axis) def nanall(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna, True, copy=skipna) + values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna) return values.all(axis) @disallow('M8') @bottleneck_switch(zero_value=0) def nansum(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna, 0) - the_sum = values.sum(axis) + values, mask, dtype, dtype_max = _get_values(values, skipna, 0) + the_sum = values.sum(axis,dtype=dtype_max) the_sum = _maybe_null_out(the_sum, axis, mask) return _wrap_results(the_sum, dtype) @@ -238,8 +256,8 @@ def nansum(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() def nanmean(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna, 0) - the_sum = _ensure_numeric(values.sum(axis)) + values, mask, dtype, dtype_max = _get_values(values, skipna, 0) + the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max)) count = _get_counts(mask, axis) if axis is not None: @@ -257,7 +275,7 @@ def nanmean(values, axis=None, skipna=True): @bottleneck_switch() def nanmedian(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna) + values, mask, dtype, dtype_max = _get_values(values, skipna) def get_median(x): mask = notnull(x) @@ -325,7 +343,7 @@ def nanvar(values, axis=None, skipna=True, ddof=1): @bottleneck_switch() def nanmin(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna, fill_value_typ='+inf') + values, mask, 
dtype, dtype_max = _get_values(values, skipna, fill_value_typ='+inf') # numpy 1.6.1 workaround in Python 3.x if (values.dtype == np.object_ and compat.PY3): @@ -341,7 +359,7 @@ def nanmin(values, axis=None, skipna=True): if ((axis is not None and values.shape[axis] == 0) or values.size == 0): try: - result = com.ensure_float(values.sum(axis)) + result = com.ensure_float(values.sum(axis,dtype=dtype_max)) result.fill(np.nan) except: result = np.nan @@ -354,7 +372,7 @@ def nanmin(values, axis=None, skipna=True): @bottleneck_switch() def nanmax(values, axis=None, skipna=True): - values, mask, dtype = _get_values(values, skipna, fill_value_typ='-inf') + values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='-inf') # numpy 1.6.1 workaround in Python 3.x if (values.dtype == np.object_ and compat.PY3): @@ -371,7 +389,7 @@ def nanmax(values, axis=None, skipna=True): if ((axis is not None and values.shape[axis] == 0) or values.size == 0): try: - result = com.ensure_float(values.sum(axis)) + result = com.ensure_float(values.sum(axis, dtype=dtype_max)) result.fill(np.nan) except: result = np.nan @@ -386,7 +404,7 @@ def nanargmax(values, axis=None, skipna=True): """ Returns -1 in the NA case """ - values, mask, dtype = _get_values(values, skipna, fill_value_typ='-inf', + values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf', isfinite=True) result = values.argmax(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) @@ -397,7 +415,7 @@ def nanargmin(values, axis=None, skipna=True): """ Returns -1 in the NA case """ - values, mask, dtype = _get_values(values, skipna, fill_value_typ='+inf', + values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf', isfinite=True) result = values.argmin(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index d8eafc7cb8eab..5dd3201ee5214 100644 --- a/pandas/tests/test_series.py +++ 
b/pandas/tests/test_series.py @@ -314,6 +314,49 @@ def test_nansum_buglet(self): result = np.nansum(s) assert_almost_equal(result, 1) + def test_overflow(self): + + # GH 6915 + # overflowing on the smaller int dtypes + for dtype in ['int32','int64']: + v = np.arange(5000000,dtype=dtype) + s = Series(v) + + # no bottleneck + result = s.sum(skipna=False) + self.assertEqual(int(result),v.sum(dtype='int64')) + result = s.min(skipna=False) + self.assertEquals(int(result),0) + result = s.max(skipna=False) + self.assertEquals(int(result),v[-1]) + + # use bottleneck if available + result = s.sum() + self.assertEqual(int(result),v.sum(dtype='int64')) + result = s.min() + self.assertEquals(int(result),0) + result = s.max() + self.assertEquals(int(result),v[-1]) + + for dtype in ['float32','float64']: + v = np.arange(5000000,dtype=dtype) + s = Series(v) + + # no bottleneck + result = s.sum(skipna=False) + self.assertTrue(np.allclose(float(result),v.sum(dtype='float64'))) + result = s.min(skipna=False) + self.assertTrue(np.allclose(float(result),0.0)) + result = s.max(skipna=False) + self.assertTrue(np.allclose(float(result),v[-1])) + + # use bottleneck if available + result = s.sum() + self.assertTrue(np.allclose(float(result),v.sum(dtype='float64'))) + result = s.min() + self.assertTrue(np.allclose(float(result),0.0)) + result = s.max() + self.assertTrue(np.allclose(float(result),v[-1])) class SafeForSparse(object): pass
closes #6915 overflow when doing sum (and mean) using bottleneck/numpy on 32-bit platforms with 32-bit dtypes
https://api.github.com/repos/pandas-dev/pandas/pulls/6954
2014-04-24T14:49:48Z
2014-04-24T22:43:18Z
2014-04-24T22:43:18Z
2014-07-16T09:03:00Z
BUG: Pass args and kwargs to empty
diff --git a/doc/source/release.rst b/doc/source/release.rst index 49656046129ca..4a7ef0ed70828 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -420,6 +420,8 @@ Bug Fixes - Bug in C parser with leading whitespace (:issue:`3374`) - Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines - Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) +- Bug in ``DataFrame.apply`` with functions that used *args or **kwargs and returned + an empty result (:issue:`6952`) pandas 0.13.1 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bce09b673ad75..fcbd0688792fb 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3313,7 +3313,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, f = func if len(self.columns) == 0 and len(self.index) == 0: - return self._apply_empty_result(func, axis, reduce) + return self._apply_empty_result(func, axis, reduce, *args, **kwds) if isinstance(f, np.ufunc): results = f(self.values) @@ -3322,7 +3322,8 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, else: if not broadcast: if not all(self.shape): - return self._apply_empty_result(func, axis, reduce) + return self._apply_empty_result(func, axis, reduce, *args, + **kwds) if raw and not self._is_mixed_type: return self._apply_raw(f, axis) @@ -3333,11 +3334,12 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, else: return self._apply_broadcast(f, axis) - def _apply_empty_result(self, func, axis, reduce): + def _apply_empty_result(self, func, axis, reduce, *args, **kwds): if reduce is None: reduce = False try: - reduce = not isinstance(func(_EMPTY_SERIES), Series) + reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds), + Series) except Exception: pass
Closes #6952 I wasn't sure what the best way to test this was. I went with calling `_apply_empty_result` directly and checking for a SystemExit to get around the try: except Exception block in `_apply_empty_result`. I still don't think I've 100% ensured that `_apply_empty_result` will always be _called with_ _args and *_kwargs (if there are any), but if they are passed things will be handled correctly.
https://api.github.com/repos/pandas-dev/pandas/pulls/6953
2014-04-24T14:11:52Z
2014-04-24T16:10:54Z
2014-04-24T16:10:54Z
2017-04-05T02:08:47Z
BUG: enabling subplots works unexpectedly
diff --git a/doc/source/release.rst b/doc/source/release.rst index 38e95eaba0b0f..011e29fdf015e 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -432,6 +432,8 @@ Bug Fixes - Bug in ``DataFrame.apply`` with functions that used *args or **kwargs and returned an empty result (:issue:`6952`) - Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) +- Bug in enabling ``subplots=True`` in ``DataFrame.plot`` only has single column raises ``TypeError``, and ``Series.plot`` raises ``AttributeError`` (:issue:`6951`) +- Bug in ``DataFrame.plot`` draws unnecessary axes when enabling ``subplots`` and ``kind=scatter`` (:issue:`6951`) pandas 0.13.1 ------------- diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index e81cfd39ba78e..a1b6c7b7c518e 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -62,6 +62,9 @@ def test_plot(self): _check_plot_works(self.series[:10].plot, kind='barh') _check_plot_works(Series(randn(10)).plot, kind='bar', color='black') + # GH 6951 + _check_plot_works(self.ts.plot, subplots=True) + @slow def test_plot_figsize_and_title(self): # figsize and title @@ -367,6 +370,11 @@ def test_plot(self): index=index) _check_plot_works(df.plot, title=u('\u03A3')) + # GH 6951 + # Test with single column + df = DataFrame({'x': np.random.rand(10)}) + _check_plot_works(df.plot, kind='bar', subplots=True) + def test_nonnumeric_exclude(self): import matplotlib.pyplot as plt df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}) @@ -665,6 +673,10 @@ def test_plot_scatter(self): with tm.assertRaises(ValueError): df.plot(y='y', kind='scatter') + # GH 6951 + axes = df.plot(x='x', y='y', kind='scatter', subplots=True) + self.assertEqual(len(axes[0].figure.axes), 1) + @slow def test_plot_bar(self): from matplotlib.pylab import close @@ -1271,6 +1283,11 @@ def test_hexbin_basic(self): # TODO: need better way to test. This just does existence. 
self.assertEqual(len(ax.collections), 1) + # GH 6951 + axes = df.plot(x='A', y='B', kind='hexbin', subplots=True) + # hexbin should have 2 axes, 1 for plotting and another is colorbar + self.assertEqual(len(axes[0].figure.axes), 2) + @slow def test_hexbin_with_c(self): df = DataFrame({"A": np.random.uniform(size=20), diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 971aa7848c2fa..d79177e3db0d3 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -966,18 +966,13 @@ def _maybe_right_yaxis(self, ax): def _setup_subplots(self): if self.subplots: nrows, ncols = self._get_layout() - if self.ax is None: - fig, axes = _subplots(nrows=nrows, ncols=ncols, - sharex=self.sharex, sharey=self.sharey, - figsize=self.figsize, - secondary_y=self.secondary_y, - data=self.data) - else: - fig, axes = _subplots(nrows=nrows, ncols=ncols, - sharex=self.sharex, sharey=self.sharey, - figsize=self.figsize, ax=self.ax, - secondary_y=self.secondary_y, - data=self.data) + fig, axes = _subplots(nrows=nrows, ncols=ncols, + sharex=self.sharex, sharey=self.sharey, + figsize=self.figsize, ax=self.ax, + secondary_y=self.secondary_y, + data=self.data) + if not com.is_list_like(axes): + axes = np.array([axes]) else: if self.ax is None: fig = self.plt.figure(figsize=self.figsize) @@ -1000,7 +995,11 @@ def _setup_subplots(self): self.axes = axes def _get_layout(self): - return (len(self.data.columns), 1) + from pandas.core.frame import DataFrame + if isinstance(self.data, DataFrame): + return (len(self.data.columns), 1) + else: + return (1, 1) def _compute_plot_data(self): numeric_data = self.data.convert_objects()._get_numeric_data() @@ -1403,6 +1402,8 @@ def __init__(self, data, x, y, **kwargs): self.x = x self.y = y + def _get_layout(self): + return (1, 1) def _make_plot(self): x, y, data = self.x, self.y, self.data @@ -1442,6 +1443,9 @@ def __init__(self, data, x, y, C=None, **kwargs): self.y = y self.C = C + def _get_layout(self): + return (1, 1) + def 
_make_plot(self): import matplotlib.pyplot as plt
Found 2 minor issues related to plotting with `subplots=True`. 1. If `DataFrame` only has single column, plot raises `TypeError`. It is because `_subplots` returns `Axes` instance, not array. 2. If `kind=scatter` or `kind=hexbin` is specified with `subplots=True`, additional blank axes is drawn.
https://api.github.com/repos/pandas-dev/pandas/pulls/6951
2014-04-24T13:02:12Z
2014-04-29T16:29:19Z
null
2014-06-13T14:50:07Z
Update indexing.py
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 1f284a9b7a7ff..459c6abbe334e 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -438,7 +438,7 @@ def can_do_equal_len(): else: if len(labels) != len(value): - raise ValueError('Must have equal len keys and value' + raise ValueError('Must have equal len keys and value ' 'when setting with an iterable') for item, v in zip(labels, value):
Adding a space
https://api.github.com/repos/pandas-dev/pandas/pulls/6946
2014-04-23T23:47:25Z
2014-04-24T00:12:51Z
2014-04-24T00:12:51Z
2014-07-16T09:02:57Z
REGR/API: accept TextFileReader in concat (GH6583)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 848495b13828a..271daa1623a4b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -175,6 +175,7 @@ API Changes - Replace ``pandas.compat.scipy.scoreatpercentile`` with ``numpy.percentile`` (:issue:`6810`) - ``.quantile`` on a ``datetime[ns]`` series now returns ``Timestamp`` instead of ``np.datetime64`` objects (:issue:`6810`) +- change ``AssertionError`` to ``TypeError`` for invalid types passed to ``concat`` (:issue:`6583`) Deprecations ~~~~~~~~~~~~ @@ -400,6 +401,7 @@ Bug Fixes `header` kwarg (:issue:`6186`) - Bug in `DataFrame.plot` and `Series.plot` legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) - Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) +- accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) pandas 0.13.1 ------------- diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ea7ff27a4ea05..a5001e840f471 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -209,6 +209,8 @@ API changes - default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` (and numpy defaults) - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) +- accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`), this was a regression + from 0.13.1 .. 
_whatsnew_0140.sql: diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index ef43df98b9235..935dfb65a0807 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -20,6 +20,7 @@ from pandas.core.common import (PandasError, ABCSeries, is_timedelta64_dtype, is_datetime64_dtype, is_integer_dtype, isnull) +from pandas.io.parsers import TextFileReader import pandas.core.common as com @@ -938,10 +939,10 @@ class _Concatenator(object): def __init__(self, objs, axis=0, join='outer', join_axes=None, keys=None, levels=None, names=None, ignore_index=False, verify_integrity=False): - if not isinstance(objs, (list,tuple,types.GeneratorType,dict)): - raise AssertionError('first argument must be a list-like of pandas ' - 'objects, you passed an object of type ' - '"{0}"'.format(type(objs).__name__)) + if not isinstance(objs, (list,tuple,types.GeneratorType,dict,TextFileReader)): + raise TypeError('first argument must be a list-like of pandas ' + 'objects, you passed an object of type ' + '"{0}"'.format(type(objs).__name__)) if join == 'outer': self.intersect = False diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index c3fa5b49fa28b..146c244e7d775 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -16,7 +16,7 @@ assert_almost_equal, rands, makeCustomDataframe as mkdf, assertRaisesRegexp) -from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table +from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table, read_csv import pandas.algos as algos import pandas.util.testing as tm @@ -2048,11 +2048,27 @@ def test_concat_invalid(self): def test_concat_invalid_first_argument(self): df1 = mkdf(10, 2) df2 = mkdf(10, 2) - self.assertRaises(AssertionError, concat, df1, df2) + self.assertRaises(TypeError, concat, df1, df2) # generator ok though concat(DataFrame(np.random.rand(5,5)) for _ in range(3)) + # text reader ok + # GH6583 + 
data = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + + reader = read_csv(StringIO(data), chunksize=1) + result = concat(reader, ignore_index=True) + expected = read_csv(StringIO(data)) + assert_frame_equal(result,expected) + class TestOrderedMerge(tm.TestCase): def setUp(self):
closes #6583 - API: change AssertionError to TypeError for invalid types passed to concat - REGR: TextFileReader in concat, which was affecting a common user idiom
https://api.github.com/repos/pandas-dev/pandas/pulls/6941
2014-04-23T16:00:53Z
2014-04-23T16:26:27Z
2014-04-23T16:26:27Z
2014-06-17T14:27:30Z
pandas.io.gbq Version 2
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt index d101ab9d6876f..117d14005e175 100644 --- a/ci/requirements-2.6.txt +++ b/ci/requirements-2.6.txt @@ -4,7 +4,6 @@ python-dateutil==1.5 pytz==2013b http://www.crummy.com/software/BeautifulSoup/bs4/download/4.2/beautifulsoup4-4.2.0.tar.gz html5lib==1.0b2 -bigquery==2.0.17 numexpr==1.4.2 sqlalchemy==0.7.1 pymysql==0.6.0 diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index 2e0e20b047ee0..fccc7a0ffdea7 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -19,5 +19,7 @@ lxml==3.2.1 scipy==0.13.3 beautifulsoup4==4.2.1 statsmodels==0.5.0 -bigquery==2.0.17 boto==2.26.1 +httplib2==0.8 +python-gflags==2.0 +google-api-python-client==1.2 \ No newline at end of file diff --git a/doc/source/install.rst b/doc/source/install.rst index 56ab7b70407bc..fe56b53d7cb82 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -112,7 +112,9 @@ Optional Dependencies :func:`~pandas.io.clipboard.read_clipboard`. Most package managers on Linux distributions will have xclip and/or xsel immediately available for installation. - * `Google bq Command Line Tool <https://developers.google.com/bigquery/bq-command-line-tool/>`__ + * Google's `python-gflags` and `google-api-python-client` + * Needed for :mod:`~pandas.io.gbq` + * `httplib2` * Needed for :mod:`~pandas.io.gbq` * One of the following combinations of libraries is needed to use the top-level :func:`~pandas.io.html.read_html` function: diff --git a/doc/source/io.rst b/doc/source/io.rst index bc58b04de4473..129d74ac92df1 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3373,83 +3373,79 @@ Google BigQuery (Experimental) The :mod:`pandas.io.gbq` module provides a wrapper for Google's BigQuery analytics web service to simplify retrieving results from BigQuery tables using SQL-like queries. Result sets are parsed into a pandas -DataFrame with a shape derived from the source table. 
Additionally, -DataFrames can be uploaded into BigQuery datasets as tables -if the source datatypes are compatible with BigQuery ones. +DataFrame with a shape and data types derived from the source table. +Additionally, DataFrames can be appended to existing BigQuery tables if +the destination table is the same shape as the DataFrame. For specifics on the service itself, see `here <https://developers.google.com/bigquery/>`__ -As an example, suppose you want to load all data from an existing table -: `test_dataset.test_table` -into BigQuery and pull it into a DataFrame. +As an example, suppose you want to load all data from an existing BigQuery +table : `test_dataset.test_table` into a DataFrame using the :func:`~pandas.io.read_gbq` +function. .. code-block:: python - - from pandas.io import gbq - # Insert your BigQuery Project ID Here - # Can be found in the web console, or - # using the command line tool `bq ls` + # Can be found in the Google web console projectid = "xxxxxxxx" - data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table', project_id = projectid) + data_frame = pd.read_gbq('SELECT * FROM test_dataset.test_table', project_id = projectid) -The user will then be authenticated by the `bq` command line client - -this usually involves the default browser opening to a login page, -though the process can be done entirely from command line if necessary. -Datasets and additional parameters can be either configured with `bq`, -passed in as options to `read_gbq`, or set using Google's gflags (this -is not officially supported by this module, though care was taken -to ensure that they should be followed regardless of how you call the -method). +You will then be authenticated to the specified BigQuery account +via Google's Oauth2 mechanism. In general, this is as simple as following the +prompts in a browser window which will be opened for you. 
Should the browser not +be available, or fail to launch, a code will be provided to complete the process +manually. Additional information on the authentication mechanism can be found +`here <https://developers.google.com/accounts/docs/OAuth2#clientside/>`__ -Additionally, you can define which column to use as an index as well as a preferred column order as follows: +You can define which column from BigQuery to use as an index in the +destination DataFrame as well as a preferred column order as follows: .. code-block:: python - data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table', + data_frame = pd.read_gbq('SELECT * FROM test_dataset.test_table', index_col='index_column_name', - col_order='[col1, col2, col3,...]', project_id = projectid) - -Finally, if you would like to create a BigQuery table, `my_dataset.my_table`, from the rows of DataFrame, `df`: + col_order=['col1', 'col2', 'col3'], project_id = projectid) + +Finally, you can append data to a BigQuery table from a pandas DataFrame +using the :func:`~pandas.io.to_gbq` function. This function uses the +Google streaming API which requires that your destination table exists in +BigQuery. Given the BigQuery table already exists, your DataFrame should +match the destination table in column order, structure, and data types. +DataFrame indexes are not supported. By default, rows are streamed to +BigQuery in chunks of 10,000 rows, but you can pass other chuck values +via the ``chunksize`` argument. You can also see the progess of your +post via the ``verbose`` flag which defaults to ``True``. The http +response code of Google BigQuery can be successful (200) even if the +append failed. For this reason, if there is a failure to append to the +table, the complete error response from BigQuery is returned which +can be quite long given it provides a status for each row. 
You may want +to start with smaller chuncks to test that the size and types of your +dataframe match your destination table to make debugging simpler. .. code-block:: python df = pandas.DataFrame({'string_col_name' : ['hello'], 'integer_col_name' : [1], 'boolean_col_name' : [True]}) - schema = ['STRING', 'INTEGER', 'BOOLEAN'] - data_frame = gbq.to_gbq(df, 'my_dataset.my_table', - if_exists='fail', schema = schema, project_id = projectid) - -To add more rows to this, simply: - -.. code-block:: python - - df2 = pandas.DataFrame({'string_col_name' : ['hello2'], - 'integer_col_name' : [2], - 'boolean_col_name' : [False]}) - data_frame = gbq.to_gbq(df2, 'my_dataset.my_table', if_exists='append', project_id = projectid) + df.to_gbq('my_dataset.my_table', project_id = projectid) -.. note:: +The BigQuery SQL query language has some oddities, see `here <https://developers.google.com/bigquery/query-reference>`__ - A default project id can be set using the command line: - `bq init`. +While BigQuery uses SQL-like syntax, it has some important differences +from traditional databases both in functionality, API limitations (size and +qunatity of queries or uploads), and how Google charges for use of the service. +You should refer to Google documentation often as the service seems to +be changing and evolving. BiqQuery is best for analyzing large sets of +data quickly, but it is not a direct replacement for a transactional database. - There is a hard cap on BigQuery result sets, at 128MB compressed. Also, the BigQuery SQL query language has some oddities, - see `here <https://developers.google.com/bigquery/query-reference>`__ - - You can access the management console to determine project id's by: - <https://code.google.com/apis/console/b/0/?noredirect> +You can access the management console to determine project id's by: +<https://code.google.com/apis/console/b/0/?noredirect> .. warning:: - To use this module, you will need a BigQuery account. 
See - <https://cloud.google.com/products/big-query> for details. - - As of 1/28/14, a known bug is present that could possibly cause data duplication in the resultant dataframe. A fix is imminent, - but any client changes will not make it into 0.13.1. See: - http://stackoverflow.com/questions/20984592/bigquery-results-not-including-page-token/21009144?noredirect=1#comment32090677_21009144 + To use this module, you will need a valid BigQuery account. See + <https://cloud.google.com/products/big-query> for details on the + service. .. _io.stata: diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 45a5d55ca047d..d5b1c96ab4e8b 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -154,14 +154,11 @@ Performance Experimental ~~~~~~~~~~~~ -``pandas.io.data.Options`` has gained a ``get_all_data method``, and now consistently returns a multi-indexed ``DataFrame`` (:issue:`5602`). See :ref:`the docs<remote_data.yahoo_options>` - - .. ipython:: python - - from pandas.io.data import Options - aapl = Options('aapl', 'yahoo') - data = aapl.get_all_data() - data.iloc[0:5, 0:5] +- ``io.gbq.read_gbq`` and ``io.gbq.to_gbq`` were refactored to remove the + dependency on the Google ``bq.py`` command line client. This submodule + now uses ``httplib2`` and the Google ``apiclient`` and ``oauth2client`` API client + libraries which should be more stable and, therefore, reliable than + ``bq.py`` (:issue:`6937`). .. 
_whatsnew_0141.bug_fixes: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b4e69e2056507..dd1d87dfa468e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -669,47 +669,43 @@ def to_dict(self, outtype='dict'): else: # pragma: no cover raise ValueError("outtype %s not understood" % outtype) - def to_gbq(self, destination_table, schema=None, col_order=None, - if_exists='fail', **kwargs): + def to_gbq(self, destination_table, project_id=None, chunksize=10000, + verbose=True, reauth=False): """Write a DataFrame to a Google BigQuery table. - If the table exists, the DataFrame will be appended. If not, a new - table will be created, in which case the schema will have to be - specified. By default, rows will be written in the order they appear - in the DataFrame, though the user may specify an alternative order. + THIS IS AN EXPERIMENTAL LIBRARY + + If the table exists, the dataframe will be written to the table using + the defined table schema and column types. For simplicity, this method + uses the Google BigQuery streaming API. The to_gbq method chunks data + into a default chunk size of 10,000. Failures return the complete error + response which can be quite long depending on the size of the insert. + There are several important limitations of the Google streaming API + which are detailed at: + https://developers.google.com/bigquery/streaming-data-into-bigquery. Parameters - --------------- + ---------- + dataframe : DataFrame + DataFrame to be written destination_table : string - name of table to be written, in the form 'dataset.tablename' - schema : sequence (optional) - list of column types in order for data to be inserted, e.g. - ['INTEGER', 'TIMESTAMP', 'BOOLEAN'] - col_order : sequence (optional) - order which columns are to be inserted, e.g. ['primary_key', - 'birthday', 'username'] - if_exists : {'fail', 'replace', 'append'} (optional) - - fail: If table exists, do nothing. 
- - replace: If table exists, drop it, recreate it, and insert data. - - append: If table exists, insert data. Create if does not exist. - kwargs are passed to the Client constructor - - Raises - ------ - SchemaMissing : - Raised if the 'if_exists' parameter is set to 'replace', but no - schema is specified - TableExists : - Raised if the specified 'destination_table' exists but the - 'if_exists' parameter is set to 'fail' (the default) - InvalidSchema : - Raised if the 'schema' parameter does not match the provided - DataFrame + Name of table to be written, in the form 'dataset.tablename' + project_id : str + Google BigQuery Account project ID. + chunksize : int (default 10000) + Number of rows to be inserted in each chunk from the dataframe. + verbose : boolean (default True) + Show percentage complete + reauth : boolean (default False) + Force Google BigQuery to reauthenticate the user. This is useful + if multiple accounts are used. + """ from pandas.io import gbq - return gbq.to_gbq(self, destination_table, schema=None, col_order=None, - if_exists='fail', **kwargs) + return gbq.to_gbq(self, destination_table, project_id=project_id, + chunksize=chunksize, verbose=verbose, + reauth=reauth) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 60381a2a628c2..76848a62d0d5f 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -1,68 +1,104 @@ -""" -Pandas module to interface with Google BigQuery. 
-""" -import os -import sys -import tempfile -import csv -import logging from datetime import datetime -import pkg_resources -from distutils.version import LooseVersion -from pandas.compat import u +import json +import logging +import sys +from time import sleep +import uuid -import pandas as pd import numpy as np +import pkg_resources -from pandas.core.common import PandasError -from pandas.core.frame import DataFrame +from distutils.version import LooseVersion +from pandas import compat +from pandas.core.api import DataFrame from pandas.tools.merge import concat +from pandas.core.common import PandasError -try: - import bq - import bigquery_client - import gflags as flags - _BQ_INSTALLED = True - _BQ_VERSION = pkg_resources.get_distribution('bigquery').version - if LooseVersion(_BQ_VERSION) >= '2.0.17': - _BQ_VALID_VERSION = True - else: - _BQ_VALID_VERSION = False +_GOOGLE_API_CLIENT_INSTALLED = False +_GOOGLE_API_CLIENT_VALID_VERSION = False +_GOOGLE_FLAGS_INSTALLED = False +_GOOGLE_FLAGS_VALID_VERSION = False +_HTTPLIB2_INSTALLED = False -except ImportError: - _BQ_INSTALLED = False +if not compat.PY3: + + try: + from apiclient.discovery import build + from apiclient.http import MediaFileUpload + from apiclient.errors import HttpError + from oauth2client.client import OAuth2WebServerFlow + from oauth2client.client import AccessTokenRefreshError + from oauth2client.client import flow_from_clientsecrets + from oauth2client.file import Storage + from oauth2client.tools import run + _GOOGLE_API_CLIENT_INSTALLED=True + _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version -# Setup the logger -logger = logging.getLogger('pandas.io.gbq') + if LooseVersion(_GOOGLE_API_CLIENT_VERSION >= '1.2.0'): + _GOOGLE_API_CLIENT_VALID_VERSION = True + + except ImportError: + _GOOGLE_API_CLIENT_INSTALLED = False + + + try: + import gflags as flags + _GOOGLE_FLAGS_INSTALLED = True -# These are some custom exceptions that the -# to_gbq() 
method can throw + _GOOGLE_FLAGS_VERSION = pkg_resources.get_distribution('python-gflags').version + if LooseVersion(_GOOGLE_FLAGS_VERSION >= '2.0.0'): + _GOOGLE_FLAGS_VALID_VERSION = True -class SchemaMissing(PandasError, IOError): + except ImportError: + _GOOGLE_FLAGS_INSTALLED = False + + try: + import httplib2 + _HTTPLIB2_INSTALLED = True + except ImportError: + _HTTPLIB2_INSTALLED = False + + +logger = logging.getLogger('pandas.io.gbq') +logger.setLevel(logging.ERROR) + +class InvalidPageToken(PandasError, IOError): + """ + Raised when Google BigQuery fails to return, + or returns a duplicate page token. + """ + pass + +class InvalidQueryException(PandasError, IOError): """ - Raised when attempting to write a DataFrame to - a new table in Google BigQuery without specifying - a schema describing the DataFrame. + Raised when a malformed query is given to read_gbq. """ pass +class AccessDeniedException(PandasError, IOError): + """ + Raised when invalid credentials are provided, or tokens have expired. + """ + pass -class InvalidSchema(PandasError, IOError): +class NotFoundException(PandasError, IOError): """ - Raised when attempting to write a DataFrame to - Google BigQuery with an invalid table schema. + Raised when the project_id/table provided in the query could not be found. """ pass +class TermsOfServiceNotAcceptedException(PandasError, IOError): + """ + Raised when the terms of service were not accepted or have been unaccepted. + """ + pass -class TableExistsFail(PandasError, IOError): +class UnknownGBQException(PandasError, IOError): """ - Raised when attempting to write a DataFrame to - an existing Google BigQuery table without specifying - that a replace/update action be taken. + Raised when an unrecognized Google API Error occurs. 
""" pass @@ -75,253 +111,263 @@ class InvalidColumnOrder(PandasError, IOError): """ pass +class GbqConnector: + def __init__(self, project_id, reauth=False): + self.project_id = project_id + self.reauth = reauth + self.credentials = self.get_credentials() + self.service = self.get_service(self.credentials) + + def get_credentials(self): + flow = OAuth2WebServerFlow(client_id='495642085510-k0tmvj2m941jhre2nbqka17vqpjfddtd.apps.googleusercontent.com', + client_secret='kOc9wMptUtxkcIFbtZCcrEAc', + scope='https://www.googleapis.com/auth/bigquery', + redirect_uri='urn:ietf:wg:oauth:2.0:oob') + + storage = Storage('bigquery_credentials.dat') + credentials = storage.get() + + if credentials is None or credentials.invalid or self.reauth: + credentials = run(flow, storage) + + return credentials + + def get_service(self, credentials): + http = httplib2.Http() + http = credentials.authorize(http) + bigquery_service = build('bigquery', 'v2', http=http) + + return bigquery_service + + def run_query(self, query): + job_collection = self.service.jobs() + job_data = { + 'configuration': { + 'query': { + 'query': query + #'allowLargeResults', 'createDisposition', 'preserveNulls', destinationTable, useQueryCache + } + } + } + + try: + query_reply = job_collection.insert(projectId=self.project_id, + body=job_data).execute() + status = query_reply['status'] + except AccessTokenRefreshError: + raise AccessDeniedException("The credentials have been revoked or expired, please re-run" + "the application to re-authorize") + except HttpError as ex: + status = json.loads(ex.content)['error'] + + + errors = status.get('errors', None) + + if errors: + reasons = [error['reason'] for error in errors] + if 'accessDenied' in reasons: + raise AccessDeniedException + if 'invalidQuery' in reasons: + raise InvalidQueryException + if 'notFound' in reasons: + raise NotFoundException + if 'termsOfServiceNotAccepted' in reasons: + raise TermsOfServiceNotAcceptedException + else: + raise 
UnknownGBQException(errors) + + job_reference = query_reply['jobReference'] + + while(not 'jobComplete' in query_reply): + print('Job not yet complete...') + query_reply = job_collection.getQueryResults( + projectId=job_reference['projectId'], + jobId=job_reference['jobId']).execute() + + total_rows = int(query_reply['totalRows']) + result_pages = list() + seen_page_tokens = list() + current_row = 0 + #Only read schema on first page + schema = query_reply['schema'] + + # Loop through each page of data + while('rows' in query_reply and current_row < total_rows): + page = query_reply['rows'] + result_pages.append(page) + current_row += len(page) + page_token = query_reply.get('pageToken', None) + + if not page_token and current_row < total_rows: + raise InvalidPageToken("Required pageToken was missing. Recieved {0} of {1} rows".format(current_row,total_rows)) + + elif page_token in seen_page_tokens: + raise InvalidPageToken("A duplicate pageToken was returned") + + seen_page_tokens.append(page_token) + query_reply = job_collection.getQueryResults( + projectId = job_reference['projectId'], + jobId = job_reference['jobId'], + pageToken = page_token).execute() + + if (current_row < total_rows): + raise InvalidPageToken() + + return schema, result_pages + + def load_data(self, dataframe, dataset_id, table_id, chunksize, verbose): + job_id = uuid.uuid4().hex + rows = [] + remaining_rows = len(dataframe) + + if verbose: + total_rows = remaining_rows + sys.stdout.write("\n\n") + sys.stdout.flush() + + for index, row in dataframe.reset_index(drop=True).iterrows(): + row_dict = dict() + row_dict['json'] = json.loads(row.to_json(force_ascii = False, + date_unit = 's', + date_format = 'iso')) + row_dict['insertId'] = job_id + str(index) + rows.append(row_dict) + remaining_rows -= 1 + + if (len(rows) % chunksize == 0) or (remaining_rows == 0): + if verbose: + sys.stdout.write("\rStreaming Insert is {0}% Complete".format(((total_rows - remaining_rows) * 100) / total_rows)) + 
sys.stdout.flush() + + body = {'rows': rows} + response = self.service.tabledata().insertAll( + projectId = self.project_id, + datasetId = dataset_id, + tableId = table_id, + body = body).execute() + if 'insertErrors' in response: + raise UnknownGBQException(response) + + sleep(1) # Maintains the inserts "per second" rate per API + rows = [] + + if verbose: + sys.stdout.write("\n") + sys.stdout.flush() + +def _parse_data(schema, rows): + # see: http://pandas.pydata.org/pandas-docs/dev/missing_data.html#missing-data-casting-rules-and-indexing + dtype_map = {'INTEGER': np.dtype(float), + 'FLOAT': np.dtype(float), + 'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without + # nanosecond indicator -def _authenticate(): - """ - For testing, we abstract the authentication to BigQuery API. - Presently this is implemented using the bq.py Client.Get() - method. Any exceptions raised are considered fatal, so we - do not process them. + fields = schema['fields'] + col_types = [field['type'] for field in fields] + col_names = [field['name'].encode('ascii', 'ignore') for field in fields] + col_dtypes = [dtype_map.get(field['type'], object) for field in fields] + page_array = np.zeros((len(rows),), + dtype=zip(col_names, col_dtypes)) - Returns - ------- - BigqueryClient : Configured connection to Google BigQuery - """ - return bq.Client.Get() + for row_num, raw_row in enumerate(rows): + entries = raw_row.get('f', []) + for col_num, field_type in enumerate(col_types): + field_value = _parse_entry(entries[col_num].get('v', ''), + field_type) + page_array[row_num][col_num] = field_value + return DataFrame(page_array) def _parse_entry(field_value, field_type): - """ - Given a value and the corresponding BigQuery data type, - perform any operations needed and return in a format - appropriate for a numpy record dictionary - - Parameters - ---------- - field_value : Source object to be transformed - field_type : String representation of Google BigQuery - data type (per schema) - - 
Returns - ------- - field_value : object or primitive of type corresponding - to field_type - """ - - # Avoid any casting problems if field_value is None or field_value == 'null': return None if field_type == 'INTEGER' or field_type == 'FLOAT': - field_value = float(field_value) + return float(field_value) elif field_type == 'TIMESTAMP': timestamp = datetime.utcfromtimestamp(float(field_value)) - field_value = np.datetime64(timestamp) + return np.datetime64(timestamp) elif field_type == 'BOOLEAN': - field_value = field_value == 'true' - elif field_type == 'STRING': - field_value = field_value - else: - field_value = str(field_value) + return field_value == 'true' return field_value +def _test_imports(): + _GOOGLE_API_CLIENT_INSTALLED + _GOOGLE_API_CLIENT_VALID_VERSION + _GOOGLE_FLAGS_INSTALLED + _GOOGLE_FLAGS_VALID_VERSION + _HTTPLIB2_INSTALLED -def _parse_page(raw_page, col_names, col_types, col_dtypes): - """ - Given a list of rows produced by the client.apiclient.tabledata().list(), - build a numpy array with proper dtypes and column names as specified - by the arguments. + if compat.PY3: + raise NotImplementedError("Google's libraries do not support Python 3 yet") - Parameters - ---------- - raw_page : Resulting list of rows from a page retrieved via - bigquery API - client.apiclient.tabledata().list().execute()['rows'] - col_names: An ordered list of names for the columns - col_types: String representation of the BigQuery DataType for that - column - col_dtypes: Target numpy.dtype for the column + if not _GOOGLE_API_CLIENT_INSTALLED: + raise ImportError('Could not import Google API Client.') - Returns - ------- - page_array : numpy record array corresponding - to the page data - """ + if not _GOOGLE_FLAGS_INSTALLED: + raise ImportError('Could not import Google Command Line Flags Module.') - # Should be at most 100,000 per the API, but this could - # be increased in the future. 
Should only be less than - # this for the last page to reduce API calls - page_row_count = len(raw_page) + if not _GOOGLE_API_CLIENT_VALID_VERSION: + raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google " + "BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION) - # Place to hold the results for a page of data - page_array = np.zeros((page_row_count,), dtype=zip(col_names, col_dtypes)) - for row_num, raw_row in enumerate(raw_page): - entries = raw_row.get('f', []) - # Iterate over each entry - setting proper field types - for col_num, field_type in enumerate(col_types): - # Process the field's types using schema - field_value = _parse_entry(entries[col_num].get('v', ''), - field_type) - # Fill the value into the final array - page_array[row_num][col_num] = field_value + if not _GOOGLE_FLAGS_VALID_VERSION: + raise ImportError("pandas requires python-gflags >= 2.0.0 for Google " + "BigQuery support, current version " + _GOOGLE_FLAGS_VERSION) - return page_array + if not _HTTPLIB2_INSTALLED: + raise ImportError("pandas requires httplib2 for Google BigQuery support") +def read_gbq(query, project_id = None, index_col=None, col_order=None, reauth=False): + """Load data from Google BigQuery. -def _parse_data(client, job, index_col=None, col_order=None): - """ - Iterate through the query results and piece together the - final DataFrame. Builds a DataFrame for each page of - results, then concatenates them together when finished. - To save memory, we use numpy record arrays to build these - DataFrames. + THIS IS AN EXPERIMENTAL LIBRARY + + The main method a user calls to execute a Query in Google BigQuery and read results + into a pandas DataFrame using the v2 Google API client for Python. Documentation for + the API is available at https://developers.google.com/api-client-library/python/. + Authentication to the Google BigQuery service is via OAuth 2.0 using the product name + 'pandas GBQ'. 
Parameters ---------- - client: An instance of bq.Client - job: An array containing the job info for a completed query - index_col: str (optional) + query : str + SQL-Like Query to return data values + project_id : str + Google BigQuery Account project ID. + index_col : str (optional) Name of result column to use for index in results DataFrame - col_order: list() (optional) + col_order : list(str) (optional) List of BigQuery column names in the desired order for results DataFrame + reauth : boolean (default False) + Force Google BigQuery to reauthenticate the user. This is useful + if multiple accounts are used. Returns ------- - df: pandas DataFrame + df: DataFrame DataFrame representing results of query - Raises: - ------ - InvalidColumnOrder: - Raised if 'col_order' parameter doesn't match returned DataFrame - BigqueryError: - Raised by bigquery_client if a Google API error is encountered - - - Notes: - ----- - This script relies on Google being consistent with their - pagination API. We are using the most flexible iteration method - that we could find in the bq.py/bigquery_client.py API's, but - these have undergone large amounts of change recently. """ - # dtype Map - - # see: http://pandas.pydata.org/pandas-docs/dev/missing_data.html#missing-data-casting-rules-and-indexing - dtype_map = {'INTEGER': np.dtype(float), - 'FLOAT': np.dtype(float), - 'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without - # nanosecond indicator - - # We first need the schema to get information about the columns of - # our dataframe. - - table_dict = job['configuration']['query']['destinationTable'] - fields = client.GetTableSchema(table_dict)['fields'] - - # Get the schema into a format useable to create our - # dataframe - col_dtypes = [] - col_types = [] - col_names = [] - - # TODO: Do this in one clean step - for field in fields: - col_types.append(field['type']) - # Note the encoding... 
numpy doesn't like titles that are UTF8, which - # is the return type from the API - col_names.append(field['name'].encode('ascii', 'ignore')) - # Note, it would be nice to use 'str' types, but BigQuery doesn't have - # a fixed length in mind - just maxes out at 64k - col_dtypes.append(dtype_map.get(field['type'], object)) - - # How many columns are there - num_columns = len(col_names) - - # Iterate over the result rows. - # Since Google's API now requires pagination of results, - # we do that here. The following is repurposed from - # bigquery_client.py :: Client._JobTableReader._ReadOnePage - - # TODO: Enable Reading From Table, - # see Client._TableTableReader._ReadOnePage - - # Initially, no page token is set - page_token = None - - # This number is the current max results per page - max_rows = bigquery_client._MAX_ROWS_PER_REQUEST - - # How many rows in result set? Initialize to max_rows - total_rows = max_rows - - # This is the starting row for a particular page... - # is ignored if page_token is present, though - # it may be useful if we wish to implement SQL like LIMITs - # with minimums - start_row = 0 - - # Keep our page DataFrames until the end when we concatenate them - dataframe_list = list() - - current_job = job['jobReference'] - - # Iterate over all rows - while start_row < total_rows: - # Setup the parameters for getQueryResults() API Call - kwds = dict(current_job) - kwds['maxResults'] = max_rows - # Sets the timeout to 0 because we assume the table is already ready. - # This is because our previous call to Query() is synchronous - # and will block until it's actually done - kwds['timeoutMs'] = 0 - # Use start row if there's no page_token ... in other words, the - # user requested to start somewhere other than the beginning... - # presently this is not a parameter to read_gbq(), but it will be - # added eventually. 
- if page_token: - kwds['pageToken'] = page_token - else: - kwds['startIndex'] = start_row - data = client.apiclient.jobs().getQueryResults(**kwds).execute() - if not data['jobComplete']: - raise bigquery_client.BigqueryError('Job was not completed, or was invalid') - - # How many rows are there across all pages? - # Note: This is presently the only reason we don't just use - # _ReadOnePage() directly - total_rows = int(data['totalRows']) - - page_token = data.get('pageToken', None) - raw_page = data.get('rows', []) - page_array = _parse_page(raw_page, col_names, col_types, col_dtypes) - - start_row += len(raw_page) - if total_rows > 0: - completed = (100 * start_row) / total_rows - logger.info('Remaining Rows: ' + str(total_rows - start_row) + '(' - + str(completed) + '% Complete)') - else: - logger.info('No Rows') + _test_imports() - dataframe_list.append(DataFrame(page_array)) + if not project_id: + raise TypeError("Missing required parameter: project_id") - # Did we get enough rows? Note: gbq.py stopped checking for this - # but we felt it was still a good idea. - if not page_token and not raw_page and start_row != total_rows: - raise bigquery_client.BigqueryInterfaceError( - 'Not enough rows returned by server. 
Expected: {0} Rows, But ' - 'Received {1}'.format(total_rows, start_row) - ) + connector = GbqConnector(project_id, reauth = reauth) + schema, pages = connector.run_query(query) + dataframe_list = [] + while len(pages) > 0: + page = pages.pop() + dataframe_list.append(_parse_data(schema, page)) - # Build final dataframe - final_df = concat(dataframe_list, ignore_index=True) + final_df = concat(dataframe_list, ignore_index = True) # Reindex the DataFrame on the provided column if index_col is not None: - if index_col in col_names: - final_df.set_index(index_col, inplace=True) - col_names.remove(index_col) + if index_col in final_df.columns: + final_df.set_index(index_col, inplace = True) else: raise InvalidColumnOrder( 'Index column "{0}" does not exist in DataFrame.' @@ -330,7 +376,7 @@ def _parse_data(client, job, index_col=None, col_order=None): # Change the order of columns in the DataFrame based on provided list if col_order is not None: - if sorted(col_order) == sorted(col_names): + if sorted(col_order) == sorted(final_df.columns): final_df = final_df[col_order] else: raise InvalidColumnOrder( @@ -343,188 +389,47 @@ def _parse_data(client, job, index_col=None, col_order=None): final_df._data = final_df._data.downcast(dtypes='infer') return final_df - -def to_gbq(dataframe, destination_table, schema=None, col_order=None, - if_exists='fail', **kwargs): +def to_gbq(dataframe, destination_table, project_id=None, chunksize=10000, + verbose=True, reauth=False): """Write a DataFrame to a Google BigQuery table. THIS IS AN EXPERIMENTAL LIBRARY - If the table exists, the DataFrame will be appended. If not, a new table - will be created, in which case the schema will have to be specified. By - default, rows will be written in the order they appear in the DataFrame, - though the user may specify an alternative order. + If the table exists, the dataframe will be written to the table using + the defined table schema and column types. 
For simplicity, this method + uses the Google BigQuery streaming API. The to_gbq method chunks data + into a default chunk size of 10,000. Failures return the complete error + response which can be quite long depending on the size of the insert. + There are several important limitations of the Google streaming API + which are detailed at: + https://developers.google.com/bigquery/streaming-data-into-bigquery. Parameters ---------- dataframe : DataFrame DataFrame to be written destination_table : string - name of table to be written, in the form 'dataset.tablename' - schema : sequence (optional) - list of column types in order for data to be inserted, - e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN'] - col_order : sequence (optional) - order which columns are to be inserted, - e.g. ['primary_key', 'birthday', 'username'] - if_exists : {'fail', 'replace', 'append'} (optional) - - fail: If table exists, do nothing. - - replace: If table exists, drop it, recreate it, and insert data. - - append: If table exists, insert data. Create if does not exist. 
- kwargs are passed to the Client constructor - - Raises - ------ - SchemaMissing : - Raised if the 'if_exists' parameter is set to 'replace', but no schema - is specified - TableExists : - Raised if the specified 'destination_table' exists but the 'if_exists' - parameter is set to 'fail' (the default) - InvalidSchema : - Raised if the 'schema' parameter does not match the provided DataFrame - """ - - if not _BQ_INSTALLED: - if sys.version_info >= (3, 0): - raise NotImplementedError('gbq module does not support Python 3 ' - 'yet') - else: - raise ImportError('Could not import Google BigQuery Client.') - - if not _BQ_VALID_VERSION: - raise ImportError("pandas requires bigquery >= 2.0.17 for Google " - "BigQuery support, current version " + _BQ_VERSION) - - ALLOWED_TYPES = ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'TIMESTAMP', - 'RECORD'] - - if if_exists == 'replace' and schema is None: - raise SchemaMissing('Cannot replace a table without specifying the ' - 'data schema') - else: - client = _authenticate() - table_reference = client.GetTableReference(destination_table) - if client.TableExists(table_reference): - if if_exists == 'fail': - raise TableExistsFail('Cannot overwrite existing tables if ' - '\'if_exists="fail"\'') - else: - # Build up a string representation of the - # table's schema. Since the table already - # exists, we ask ask the API for it, which - # is returned in a list of dictionaries - # describing column data. Iterate over these - # and build up a string of form: - # "col_name1 : col_type1, col_name2 : col_type2..." 
- schema_full = client.GetTableSchema( - dict(table_reference) - )['fields'] - schema = '' - for count, row in enumerate(schema_full): - if count > 0: - schema += ', ' - schema += row['name'] + ':' + row['type'] - else: - logger.info('Creating New Table') - if schema is None: - raise SchemaMissing('Cannot create a new table without ' - 'specifying the data schema') - else: - columns = dataframe.columns - if len(schema) != len(columns): - raise InvalidSchema('Incorrect number of columns in ' - 'schema') - else: - schema_string = '' - for count, name in enumerate(columns): - if count > 0: - schema_string += ', ' - column_type = schema[count].upper() - if column_type in ALLOWED_TYPES: - schema_string += name + ':' + schema[count].lower() - else: - raise InvalidSchema('Invalid Type: ' + column_type - + ". Must be one of: " + - str(ALLOWED_TYPES)) - schema = schema_string - - opts = kwargs - opts['sync'] = True - opts['skip_leading_rows'] = 1 - opts['encoding'] = 'UTF-8' - opts['max_bad_records'] = 0 - - # See: https://developers.google.com/bigquery/docs/reference/v2/jobs - if if_exists == 'replace': - opts['write_disposition'] = 'WRITE_TRUNCATE' - elif if_exists == 'append': - opts['write_disposition'] = 'WRITE_APPEND' - - with tempfile.NamedTemporaryFile() as csv_file: - dataframe.to_csv(csv_file.name, index=False, encoding='utf-8') - job = client.Load(table_reference, csv_file.name, schema=schema, - **opts) - - -def read_gbq(query, project_id=None, destination_table=None, index_col=None, - col_order=None, **kwargs): - """Load data from Google BigQuery. - - THIS IS AN EXPERIMENTAL LIBRARY - - The main method a user calls to load data from Google BigQuery into a - pandas DataFrame. This is a simple wrapper for Google's bq.py and - bigquery_client.py, which we use to get the source data. Because of this, - this script respects the user's bq settings file, '~/.bigqueryrc', if it - exists. Such a file can be generated using 'bq init'. 
Further, additional - parameters for the query can be specified as either ``**kwds`` in the - command, or using FLAGS provided in the 'gflags' module. Particular options - can be found in bigquery_client.py. - - Parameters - ---------- - query : str - SQL-Like Query to return data values - project_id : str (optional) - Google BigQuery Account project ID. Optional, since it may be - located in ~/.bigqueryrc - index_col : str (optional) - Name of result column to use for index in results DataFrame - col_order : list(str) (optional) - List of BigQuery column names in the desired order for results - DataFrame - destination_table : string (optional) - If provided, send the results to the given table. - **kwargs : - To be passed to bq.Client.Create(). Particularly: 'trace', - 'sync', 'api', 'api_version' - - Returns - ------- - df: DataFrame - DataFrame representing results of query + Name of table to be written, in the form 'dataset.tablename' + project_id : str + Google BigQuery Account project ID. + chunksize : int (default 10000) + Number of rows to be inserted in each chunk from the dataframe. + verbose : boolean (default True) + Show percentage complete + reauth : boolean (default False) + Force Google BigQuery to reauthenticate the user. This is useful + if multiple accounts are used. """ - if not _BQ_INSTALLED: - if sys.version_info >= (3, 0): - raise NotImplementedError('gbq module does not support Python 3 ' - 'yet') - else: - raise ImportError('Could not import Google BigQuery Client.') - - if not _BQ_VALID_VERSION: - raise ImportError('pandas requires bigquery >= 2.0.17 for Google ' - 'BigQuery support, current version ' + _BQ_VERSION) + _test_imports() - query_args = kwargs - query_args['project_id'] = project_id - query_args['query'] = query - query_args['destination_table'] = destination_table - query_args['sync'] = True + if not project_id: + raise TypeError("Missing required parameter: project_id") - client = _authenticate() + if not '.' 
in destination_table: + raise NotFoundException("Invalid Table Name. Should be of the form 'datasetId.tableId' ") - job = client.Query(**query_args) + connector = GbqConnector(project_id, reauth = reauth) + dataset_id, table_id = destination_table.rsplit('.',1) - return _parse_data(client, job, index_col=index_col, col_order=col_order) + connector.load_data(dataframe, dataset_id, table_id, chunksize, verbose) diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 124658ac80234..0f595f75bc66f 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -1,474 +1,290 @@ import ast +import datetime +import json import nose import os +import pytz import shutil import subprocess +import sys +import platform +from time import sleep import numpy as np +from pandas import NaT +from pandas.compat import u +from pandas.core.frame import DataFrame import pandas.io.gbq as gbq import pandas.util.testing as tm -from pandas.core.frame import DataFrame -from pandas.util.testing import with_connectivity_check -from pandas.compat import u -from pandas import NaT +PROJECT_ID = None +VERSION = platform.python_version() -try: - import bq - import bigquery_client - import gflags as flags -except ImportError: - raise nose.SkipTest - -#################################################################################### -# Fake Google BigQuery Client - -class FakeClient: - def __init__(self): - self.apiclient = FakeApiClient() - def GetTableSchema(self,table_dict): - retval = {'fields': [ - {'type': 'STRING', 'name': 'corpus', 'mode': 'NULLABLE'}, - {'type': 'INTEGER', 'name': 'corpus_date', 'mode': 'NULLABLE'}, - {'type': 'STRING', 'name': 'word', 'mode': 'NULLABLE'}, - {'type': 'INTEGER', 'name': 'word_count', 'mode': 'NULLABLE'} - ]} - return retval - -# Fake Google BigQuery API Client -class FakeApiClient: - def __init__(self): - self._fakejobs = FakeJobs() - - - def jobs(self): - return self._fakejobs - -class FakeJobs: - def __init__(self): - 
self._fakequeryresults = FakeResults() - - def getQueryResults(self, job_id=None, project_id=None, - max_results=None, timeout_ms=None, **kwargs): - return self._fakequeryresults - -class FakeResults: - def execute(self): - return {'rows': [ {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brave'}, {'v': '3'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'attended'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'treason'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'islanders'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'heed'}, {'v': '3'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'alehouse'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'corrigible'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brawl'}, {'v': '2'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': "'"}, {'v': '17'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'troubled'}, {'v': '1'}]} - ], - 'kind': 'bigquery#tableDataList', - 'etag': '"4PTsVxg68bQkQs1RJ1Ndewqkgg4/hoRHzb4qfhJAIa2mEewC-jhs9Bg"', - 'totalRows': '10', - 'jobComplete' : True} - -#################################################################################### - -class TestGbq(tm.TestCase): +def missing_bq(): + try: + subprocess.call('bq') + return False + except OSError: + return True + +def test_requirements(): + try: + gbq._test_imports() + except (ImportError, NotImplementedError) as import_exception: + raise nose.SkipTest(import_exception) + +class TestGBQConnectorIntegration(tm.TestCase): def setUp(self): - with open(self.fake_job_path, 'r') as fin: - self.fake_job = ast.literal_eval(fin.read()) - - self.test_data_small = [{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brave'}, {'v': '3'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'attended'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'treason'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'islanders'}, {'v': '1'}]}, - 
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'heed'}, {'v': '3'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'alehouse'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'corrigible'}, {'v': '1'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brawl'}, {'v': '2'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': "'"}, {'v': '17'}]}, - {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'troubled'}, - {'v': '1'}]}] - - self.correct_data_small = np.array( - [('othello', 1603, 'brave', 3), - ('othello', 1603, 'attended', 1), - ('othello', 1603, 'treason', 1), - ('othello', 1603, 'islanders', 1), - ('othello', 1603, 'heed', 3), - ('othello', 1603, 'alehouse', 1), - ('othello', 1603, 'corrigible', 1), - ('othello', 1603, 'brawl', 2), - ('othello', 1603, "'", 17), - ('othello', 1603, 'troubled', 1) - ], - dtype=[('corpus', 'S16'), - ('corpus_date', '<i8'), - ('word', 'S16'), - ('word_count', '<i8')] - ) + test_requirements() - self.correct_test_datatype = DataFrame( - {'VALID_STRING' : ['PI'], - 'EMPTY_STRING' : [""], - 'NULL_STRING' : [None], - 'VALID_INTEGER' : [3], - 'NULL_INTEGER' : [np.nan], - 'VALID_FLOAT' : [3.141592653589793], - 'NULL_FLOAT' : [np.nan], - 'UNIX_EPOCH' : [np.datetime64('1970-01-01T00:00:00.000000Z')], - 'VALID_TIMESTAMP' : [np.datetime64('2004-09-15T05:00:00.000000Z')], - 'NULL_TIMESTAMP' :[NaT], - 'TRUE_BOOLEAN' : [True], - 'FALSE_BOOLEAN' : [False], - 'NULL_BOOLEAN' : [None] - } - )[['VALID_STRING', - 'EMPTY_STRING', - 'NULL_STRING', - 'VALID_INTEGER', - 'NULL_INTEGER', - 'VALID_FLOAT', - 'NULL_FLOAT', - 'UNIX_EPOCH', - 'VALID_TIMESTAMP', - 'NULL_TIMESTAMP', - 'TRUE_BOOLEAN', - 'FALSE_BOOLEAN', - 'NULL_BOOLEAN']] + if not PROJECT_ID: + raise nose.SkipTest("Cannot run integration tests without a project id") + + self.sut = gbq.GbqConnector(PROJECT_ID) - @classmethod - def setUpClass(cls): - # Integration tests require a valid bigquery token - # be present in the user's home directory. 
This - # can be generated with 'bq init' in the command line - super(TestGbq, cls).setUpClass() - cls.dirpath = tm.get_data_path() - home = os.path.expanduser("~") - cls.bq_token = os.path.join(home, '.bigquery.v2.token') - cls.fake_job_path = os.path.join(cls.dirpath, 'gbq_fake_job.txt') - - # If we're using a valid token, make a test dataset - # Note, dataset functionality is beyond the scope - # of the module under test, so we rely on the command - # line utility for this. - if os.path.exists(cls.bq_token): - subprocess.call(['bq','mk', '-d', 'pandas_testing_dataset']) + def test_should_be_able_to_make_a_connector(self): + self.assertTrue(self.sut is not None, 'Could not create a GbqConnector') - @classmethod - def tearDownClass(cls): - super(TestGbq, cls).tearDownClass() + def test_should_be_able_to_get_valid_credentials(self): + credentials = self.sut.get_credentials() + self.assertFalse(credentials.invalid, 'Returned credentials invalid') - # If we're using a valid token, remove the test dataset - # created. 
- if os.path.exists(cls.bq_token): - subprocess.call(['bq', 'rm', '-r', '-f', '-d', 'pandas_testing_dataset']) + def test_should_be_able_to_get_a_bigquery_service(self): + credentials = self.sut.get_credentials() + bigquery_service = self.sut.get_service(credentials) + self.assertTrue(bigquery_service is not None, 'No service returned') - @with_connectivity_check - def test_valid_authentication(self): - # If the user has a token file, they should recieve a client from gbq._authenticate - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') + def test_should_be_able_to_get_schema_from_query(self): + schema, pages = self.sut.run_query('SELECT 1') + self.assertTrue(schema is not None) - self.assertTrue(gbq._authenticate is not None, 'Authentication To GBQ Failed') + def test_should_be_able_to_get_results_from_query(self): + schema, pages = self.sut.run_query('SELECT 1') + self.assertTrue(pages is not None) - @with_connectivity_check - def test_malformed_query(self): - # If the user has a connection file, performing an invalid query should raise an error - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - else: - self.assertRaises(bigquery_client.BigqueryInvalidQueryError, - gbq.read_gbq, "SELCET * FORM [publicdata:samples.shakespeare]") - - def test_type_conversion(self): - # All BigQuery Types should be cast into appropriate numpy types - sample_input = [('1.095292800E9', 'TIMESTAMP'), - ('false', 'BOOLEAN'), - ('2', 'INTEGER'), - ('3.14159', 'FLOAT'), - ('Hello World', 'STRING')] - actual_output = [gbq._parse_entry(result[0],result[1]) for result in sample_input] - sample_output = [np.datetime64('2004-09-16T00:00:00.000000Z'), - np.bool(False), - np.int('2'), - np.float('3.14159'), - u('Hello World')] - self.assertEqual(actual_output, sample_output, 'A format conversion failed') - - @with_connectivity_check - def 
test_unicode_string_conversion(self): - # Strings from BigQuery Should be converted to UTF-8 properly - - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') +class TestReadGBQUnitTests(tm.TestCase): + def setUp(self): + test_requirements() - correct_test_datatype = DataFrame( - {'UNICODE_STRING' : [u("\xe9\xfc")]} - ) + def test_should_return_bigquery_integers_as_python_floats(self): + result = gbq._parse_entry(1, 'INTEGER') + tm.assert_equal(result, float(1)) - query = """SELECT '\xc3\xa9\xc3\xbc' as UNICODE_STRING""" + def test_should_return_bigquery_floats_as_python_floats(self): + result = gbq._parse_entry(1, 'FLOAT') + tm.assert_equal(result, float(1)) - client = gbq._authenticate() - a = gbq.read_gbq(query) - tm.assert_frame_equal(a, correct_test_datatype) + def test_should_return_bigquery_timestamps_as_numpy_datetime(self): + result = gbq._parse_entry('0e9', 'TIMESTAMP') + tm.assert_equal(result, np.datetime64('1970-01-01T00:00:00Z')) + def test_should_return_bigquery_booleans_as_python_booleans(self): + result = gbq._parse_entry('false', 'BOOLEAN') + tm.assert_equal(result, False) + def test_should_return_bigquery_strings_as_python_strings(self): + result = gbq._parse_entry('STRING', 'STRING') + tm.assert_equal(result, 'STRING') - def test_data_small(self): - # Parsing a fixed page of data should return the proper fixed np.array() - result_frame = gbq._parse_page(self.test_data_small, - ['corpus','corpus_date','word','word_count'], - ['STRING','INTEGER','STRING','INTEGER'], - [object,np.dtype(int),object,np.dtype(int)] - ) - tm.assert_frame_equal(DataFrame(result_frame), DataFrame(self.correct_data_small), - 'An element in the result DataFrame didn\'t match the sample set') + def test_to_gbq_should_fail_if_invalid_table_name_passed(self): + with tm.assertRaises(gbq.NotFoundException): + gbq.to_gbq(DataFrame(), 'invalid_table_name', project_id="1234") + + def 
test_to_gbq_with_no_project_id_given_should_fail(self): + with tm.assertRaises(TypeError): + gbq.to_gbq(DataFrame(), 'dataset.tablename') + + def test_read_gbq_with_no_project_id_given_should_fail(self): + with tm.assertRaises(TypeError): + gbq.read_gbq('SELECT "1" as NUMBER_1') + + def test_that_parse_data_works_properly(self): + test_schema = {'fields': [{'mode': 'NULLABLE', + 'name': 'VALID_STRING', + 'type': 'STRING'}]} + test_page = [{'f': [{'v': 'PI'}]}] + + test_output = gbq._parse_data(test_schema, test_page) + correct_output = DataFrame({'VALID_STRING' : ['PI']}) + tm.assert_frame_equal(test_output, correct_output) + +class TestReadGBQIntegration(tm.TestCase): + def setUp(self): + test_requirements() + + if not PROJECT_ID: + raise nose.SkipTest("Cannot run integration tests without a project id") + + def test_should_properly_handle_valid_strings(self): + query = 'SELECT "PI" as VALID_STRING' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'VALID_STRING' : ['PI']})) + + def test_should_properly_handle_empty_strings(self): + query = 'SELECT "" as EMPTY_STRING' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING' : [""]})) + + def test_should_properly_handle_null_strings(self): + query = 'SELECT STRING(NULL) as NULL_STRING' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'NULL_STRING' : [None]})) + + def test_should_properly_handle_valid_integers(self): + query = 'SELECT INTEGER(3) as VALID_INTEGER' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER' : [3]})) + + def test_should_properly_handle_null_integers(self): + query = 'SELECT INTEGER(NULL) as NULL_INTEGER' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER' : [np.nan]})) + + def test_should_properly_handle_valid_floats(self): + query = 'SELECT PI() as VALID_FLOAT' + df 
= gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'VALID_FLOAT' : [3.141592653589793]})) + + def test_should_properly_handle_null_floats(self): + query = 'SELECT FLOAT(NULL) as NULL_FLOAT' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT' : [np.nan]})) + + def test_should_properly_handle_timestamp_unix_epoch(self): + query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'UNIX_EPOCH' : [np.datetime64('1970-01-01T00:00:00.000000Z')]})) + + def test_should_properly_handle_arbitrary_timestamp(self): + query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'VALID_TIMESTAMP' : [np.datetime64('2004-09-15T05:00:00.000000Z')]})) + + def test_should_properly_handle_null_timestamp(self): + query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP' :[NaT]})) + + def test_should_properly_handle_true_boolean(self): + query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN' : [True]})) + + def test_should_properly_handle_false_boolean(self): + query = 'SELECT BOOLEAN(FALSE) as FALSE_BOOLEAN' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN' : [False]})) + + def test_should_properly_handle_null_boolean(self): + query = 'SELECT BOOLEAN(NULL) as NULL_BOOLEAN' + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN' : [None]})) + + def test_unicode_string_conversion_and_normalization(self): + correct_test_datatype = DataFrame( + {'UNICODE_STRING' : [u("\xe9\xfc")]} + ) + + query = 'SELECT "\xc3\xa9\xc3\xbc" as 
UNICODE_STRING' + + df = gbq.read_gbq(query, project_id=PROJECT_ID) + tm.assert_frame_equal(df, correct_test_datatype) def test_index_column(self): - # A user should be able to specify an index column for return - result_frame = gbq._parse_data(FakeClient(), self.fake_job, index_col='word') - correct_frame = DataFrame(self.correct_data_small) - correct_frame.set_index('word', inplace=True) - self.assertTrue(result_frame.index.name == correct_frame.index.name) + query = "SELECT 'a' as STRING_1, 'b' as STRING_2" + result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, index_col="STRING_1") + correct_frame = DataFrame({'STRING_1' : ['a'], 'STRING_2' : ['b']}).set_index("STRING_1") + tm.assert_equal(result_frame.index.name, correct_frame.index.name) def test_column_order(self): - # A User should be able to specify the order in which columns are returned in the dataframe - col_order = ['corpus_date', 'word_count', 'corpus', 'word'] - result_frame = gbq._parse_data(FakeClient(), self.fake_job, col_order=col_order) - tm.assert_index_equal(result_frame.columns, DataFrame(self.correct_data_small)[col_order].columns) + query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3" + col_order = ['STRING_3', 'STRING_1', 'STRING_2'] + result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, col_order=col_order) + correct_frame = DataFrame({'STRING_1' : ['a'], 'STRING_2' : ['b'], 'STRING_3' : ['c']})[col_order] + tm.assert_frame_equal(result_frame, correct_frame) def test_column_order_plus_index(self): - # A User should be able to specify an index and the order of THE REMAINING columns.. 
they should be notified - # if they screw up - col_order = ['corpus_date', 'word', 'corpus'] - result_frame = gbq._parse_data(FakeClient(), self.fake_job, index_col='word_count', col_order=col_order) - correct_frame_small = DataFrame(self.correct_data_small) - correct_frame_small.set_index('word_count',inplace=True) - correct_frame_small = DataFrame(correct_frame_small)[col_order] - tm.assert_index_equal(result_frame.columns, correct_frame_small.columns) - - @with_connectivity_check - def test_download_dataset_larger_than_100k_rows(self): + query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3" + col_order = ['STRING_3', 'STRING_2'] + result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, index_col='STRING_1', col_order=col_order) + correct_frame = DataFrame({'STRING_1' : ['a'], 'STRING_2' : ['b'], 'STRING_3' : ['c']}) + correct_frame.set_index('STRING_1', inplace=True) + correct_frame = correct_frame[col_order] + tm.assert_frame_equal(result_frame, correct_frame) + + def test_malformed_query(self): + with tm.assertRaises(gbq.InvalidQueryException): + gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]", project_id=PROJECT_ID) + + def test_bad_project_id(self): + with tm.assertRaises(gbq.NotFoundException): + gbq.read_gbq("SELECT 1", project_id='001') + + def test_bad_table_name(self): + with tm.assertRaises(gbq.NotFoundException): + gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]", project_id=PROJECT_ID) + + def test_download_dataset_larger_than_200k_rows(self): # Test for known BigQuery bug in datasets larger than 100k rows # http://stackoverflow.com/questions/19145587/bq-py-not-paging-results - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - client = gbq._authenticate() - a = gbq.read_gbq("SELECT id, FROM [publicdata:samples.wikipedia] LIMIT 100005") - self.assertTrue(len(a) == 100005) - - @with_connectivity_check - def 
test_download_all_data_types(self): - # Test that all available data types from BigQuery (as of now) - # are handled properly - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - query = """SELECT "PI" as VALID_STRING, - "" as EMPTY_STRING, - STRING(NULL) as NULL_STRING, - INTEGER(3) as VALID_INTEGER, - INTEGER(NULL) as NULL_INTEGER, - PI() as VALID_FLOAT, - FLOAT(NULL) as NULL_FLOAT, - TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH, - TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP, - TIMESTAMP(NULL) as NULL_TIMESTAMP, - BOOLEAN(TRUE) as TRUE_BOOLEAN, - BOOLEAN(FALSE) as FALSE_BOOLEAN, - BOOLEAN(NULL) as NULL_BOOLEAN""" - - client = gbq._authenticate() - a = gbq.read_gbq(query, col_order = ['VALID_STRING', - 'EMPTY_STRING', - 'NULL_STRING', - 'VALID_INTEGER', - 'NULL_INTEGER', - 'VALID_FLOAT', - 'NULL_FLOAT', - 'UNIX_EPOCH', - 'VALID_TIMESTAMP', - 'NULL_TIMESTAMP', - 'TRUE_BOOLEAN', - 'FALSE_BOOLEAN', - 'NULL_BOOLEAN']) - - tm.assert_frame_equal(a, self.correct_test_datatype) - - @with_connectivity_check - def test_table_exists(self): - # Given a table name in the format {dataset}.{tablename}, if a table exists, - # the GetTableReference should accurately indicate this. - # This could possibly change in future implementations of bq, - # but it is the simplest way to provide users with appropriate - # error messages regarding schemas. 
- if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - client = gbq._authenticate() - table_reference = client.GetTableReference("publicdata:samples.shakespeare") - self.assertTrue(client.TableExists(table_reference)) - - @with_connectivity_check - def test_table__not_exists(self): - # Test the inverse of `test_table_exists` - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - client = gbq._authenticate() - table_reference = client.GetTableReference("publicdata:samples.does_not_exist") - self.assertFalse(client.TableExists(table_reference)) - - @with_connectivity_check - def test_upload_new_table_schema_error(self): - # Attempting to upload to a non-existent table without a schema should fail - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - df = DataFrame(self.correct_data_small) - with self.assertRaises(gbq.SchemaMissing): - gbq.to_gbq(df, 'pandas_testing_dataset.test_database', schema=None, col_order=None, if_exists='fail') - - @with_connectivity_check - def test_upload_replace_schema_error(self): - # Attempting to replace an existing table without specifying a schema should fail - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - df = DataFrame(self.correct_data_small) - with self.assertRaises(gbq.SchemaMissing): - gbq.to_gbq(df, 'pandas_testing_dataset.test_database', schema=None, col_order=None, if_exists='replace') - - @with_connectivity_check - def test_upload_public_data_error(self): - # Attempting to upload to a public, read-only, dataset should fail - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - array = [['TESTING_GBQ', 999999999, 'hi', 0, True, 9999999999, 
'00.000.00.000', 1, 'hola', - 99999999, False, False, 1, 'Jedi', 11210]] - df = DataFrame(array) - with self.assertRaises(bigquery_client.BigqueryServiceError): - gbq.to_gbq(df, 'publicdata:samples.wikipedia', schema=None, col_order=None, if_exists='append') - - @with_connectivity_check - def test_upload_new_table(self): - # Attempting to upload to a new table with valid data and a valid schema should succeed - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - schema = ['STRING', 'INTEGER', 'STRING', 'INTEGER', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER', - 'STRING', 'INTEGER', 'BOOLEAN', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER'] - - array = [['TESTING_GBQ', 999999999, 'hi', 0, True, 9999999999, '00.000.00.000', 1, 'hola', - 99999999, False, False, 1, 'Jedi', 11210]] - df = DataFrame(array, columns=['title','id','language','wp_namespace','is_redirect','revision_id', - 'contributor_ip','contributor_id','contributor_username','timestamp', - 'is_minor','is_bot','reversion_id','comment','num_characters']) - gbq.to_gbq(df, 'pandas_testing_dataset.test_data2', schema=schema, col_order=None, if_exists='append') - a = gbq.read_gbq("SELECT * FROM pandas_testing_dataset.test_data2") - self.assertTrue((a == df).all().all()) - - @with_connectivity_check - def test_upload_bad_data_table(self): - # Attempting to upload data that does not match schema should fail - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - schema = ['STRING', 'INTEGER', 'STRING', 'INTEGER', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER', - 'STRING', 'INTEGER', 'BOOLEAN', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER'] - - array = [['TESTING_GBQ\',', False, 'hi', 0, True, 'STRING IN INTEGER', '00.000.00.000', 1, 'hola', - 99999999, -100, 1000, 1, 'Jedi', 11210]] - df = DataFrame(array, 
columns=['title','id','language','wp_namespace','is_redirect','revision_id', - 'contributor_ip','contributor_id','contributor_username','timestamp', - 'is_minor','is_bot','reversion_id','comment','num_characters']) - with self.assertRaises(bigquery_client.BigqueryServiceError): - gbq.to_gbq(df, 'pandas_testing_dataset.test_data1', schema=schema, col_order=None, if_exists='append') - - @with_connectivity_check - def test_invalid_column_name_schema(self): - # Specifying a schema that contains an invalid column name should fail - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - schema = ['INCORRECT'] - df = DataFrame([[1]],columns=['fake']) - with self.assertRaises(gbq.InvalidSchema): - gbq.to_gbq(df, 'pandas_testing_dataset.test_data', schema=schema, col_order=None, if_exists='append') - - @with_connectivity_check - def test_invalid_number_of_columns_schema(self): - # Specifying a schema that does not have same shape as dataframe should fail - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - schema = ['INTEGER'] - df = DataFrame([[1, 'STRING']],columns=['fake','fake']) - with self.assertRaises(gbq.InvalidSchema): - gbq.to_gbq(df, 'pandas_testing_dataset.test_data4', schema=schema, col_order=None, if_exists='append') - - @with_connectivity_check - def test_upload_fail_if_exists(self): - # Attempting to upload to a new table with valid data and a valid schema should succeed - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - schema = ['STRING', 'INTEGER', 'STRING', 'INTEGER', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER', - 'STRING', 'INTEGER', 'BOOLEAN', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER'] - - array = [['TESTING_GBQ', 999999999, 'hi', 0, True, 9999999999, '00.000.00.000', 1, 'hola', - 99999999, False, False, 1, 'Jedi', 11210]] - 
df = DataFrame(array, columns=['title','id','language','wp_namespace','is_redirect','revision_id', - 'contributor_ip','contributor_id','contributor_username','timestamp', - 'is_minor','is_bot','reversion_id','comment','num_characters']) - gbq.to_gbq(df, 'pandas_testing_dataset.test_data3', schema=schema, col_order=None, if_exists='fail') - - with self.assertRaises(gbq.TableExistsFail): - gbq.to_gbq(df, 'pandas_testing_dataset.test_data3', schema=schema, col_order=None, if_exists='fail') - - @with_connectivity_check - def test_upload_replace(self): - # Attempting to overwrite an existing table with valid data and a valid schema should succeed - if not os.path.exists(self.bq_token): - raise nose.SkipTest('Skipped because authentication information is not available.') - - schema = ['STRING', 'INTEGER', 'STRING', 'INTEGER', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER', - 'STRING', 'INTEGER', 'BOOLEAN', 'BOOLEAN', - 'INTEGER', 'STRING', 'INTEGER'] - - # Setup an existing table - array1 = [['', 1, '', 1, False, 1, '00.111.00.111', 1, 'hola', - 1, True, True, 1, 'Sith', 1]] - df1 = DataFrame(array1, columns=['title','id','language','wp_namespace','is_redirect','revision_id', - 'contributor_ip','contributor_id','contributor_username','timestamp', - 'is_minor','is_bot','reversion_id','comment','num_characters']) - gbq.to_gbq(df1, 'pandas_testing_dataset.test_data5', schema=schema, col_order=None, if_exists='fail') - - array2 = [['TESTING_GBQ', 999999999, 'hi', 0, True, 9999999999, '00.000.00.000', 1, 'hola', - 99999999, False, False, 1, 'Jedi', 11210]] - - # Overwrite the existing table with different data - df2 = DataFrame(array2, columns=['title','id','language','wp_namespace','is_redirect','revision_id', - 'contributor_ip','contributor_id','contributor_username','timestamp', - 'is_minor','is_bot','reversion_id','comment','num_characters']) - gbq.to_gbq(df2, 'pandas_testing_dataset.test_data5', schema=schema, col_order=None, if_exists='replace') - - # Read the table and 
confirm the new data is all that is there - a = gbq.read_gbq("SELECT * FROM pandas_testing_dataset.test_data5") - self.assertTrue((a == df2).all().all()) + df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] GROUP EACH BY id ORDER BY id ASC LIMIT 200005", project_id=PROJECT_ID) + self.assertEqual(len(df.drop_duplicates()), 200005) +class TestToGBQIntegration(tm.TestCase): + # This class requires bq.py to be installed for setup/teardown. + # It will also need to be preconfigured with a default dataset, + # so, be sure to `bq init` in terminal before running. + + def setUp(self): + test_requirements() + + if not PROJECT_ID: + raise nose.SkipTest("Cannot run integration tests without a project id") + if missing_bq(): + raise nose.SkipTest("Cannot run to_gbq tests without bq command line client") + + @classmethod + def setUpClass(cls): + if PROJECT_ID and not missing_bq(): + subprocess.call(['bq','mk','pydata_pandas_bq_testing']) + subprocess.call(['bq','mk','pydata_pandas_bq_testing.new_test','bools:BOOLEAN,flts:FLOAT,ints:INTEGER,strs:STRING,times:TIMESTAMP']) + + def test_upload_data(self): + test_size = 1000001 + #create df to test for all BQ datatypes except RECORD + bools = np.random.randint(2, size=(1,test_size)).astype(bool) + flts = np.random.randn(1,test_size) + ints = np.random.randint(1,10, size=(1,test_size)) + strs = np.random.randint(1,10, size=(1,test_size)).astype(str) + times = [datetime.datetime.now(pytz.timezone('US/Arizona')) for t in xrange(test_size)] + df = DataFrame({'bools':bools[0], 'flts':flts[0], 'ints':ints[0], 'strs':strs[0], 'times':times[0]}, index=range(test_size)) + gbq.to_gbq(df,"pydata_pandas_bq_testing.new_test", project_id=PROJECT_ID, chunksize=10000) + sleep(60) # <- Curses Google!!! 
+ + result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM pydata_pandas_bq_testing.new_test", project_id=PROJECT_ID) + self.assertEqual(result['NUM_ROWS'][0], test_size) + + def test_google_upload_errors_should_raise_exception(self): + test_timestamp = datetime.datetime.now(pytz.timezone('US/Arizona')) + bad_df = DataFrame( {'bools': [False, False], + 'flts': [0.0,1.0], + 'ints': [0,'1'], + 'strs': ['a', 1], + 'times': [test_timestamp, test_timestamp] + }, index=range(2)) + with tm.assertRaises(gbq.UnknownGBQException): + gbq.to_gbq(bad_df, 'pydata_pandas_bq_testing.new_test', project_id = PROJECT_ID) + + + @classmethod + def tearDownClass(cls): + if PROJECT_ID and not missing_bq(): + subprocess.call(['bq','rm','-f','pydata_pandas_bq_testing.new_test']) + subprocess.call(['bq','rm','-f','pydata_pandas_bq_testing']) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) + diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py index 5e2230ca526b9..3d793698c7caa 100644 --- a/pandas/util/print_versions.py +++ b/pandas/util/print_versions.py @@ -82,7 +82,7 @@ def show_versions(as_json=False): ("lxml", lambda mod: mod.etree.__version__), ("bs4", lambda mod: mod.__version__), ("html5lib", lambda mod: mod.__version__), - ("bq", lambda mod: mod._VersionNumber()), + ("httplib2", lambda mod: mod.__version__), ("apiclient", lambda mod: mod.__version__), ("rpy2", lambda mod: mod.__version__), ("sqlalchemy", lambda mod: mod.__version__),
closes #5840 (as new interface makes it obsolete) closes #6096 @jreback : We still have some documentation to work on, but we would like your initial thoughts on what we have so far. The key change for this version is the removal of bq.py as a dependency (except as a setup method for a test case). Instead, we rely entirely on the BigQuery python API. We also simplified to_gbq() significantly. Though it cost a few features, the code is much more manageable and less error prone (thanks Google!). Test cases are much more granular and run significantly faster. To use the test cases fully, a BigQuery project_id is still required, though there are some unittests offline.
https://api.github.com/repos/pandas-dev/pandas/pulls/6937
2014-04-23T03:24:11Z
2014-06-30T19:30:22Z
2014-06-30T19:30:22Z
2014-07-08T13:11:14Z
BUG: pass bins arg in hist_frame
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index a9425400bedb3..b1faf3047beea 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -126,6 +126,12 @@ def test_hist(self): with tm.assertRaises(ValueError): self.ts.hist(by=self.ts.index, figure=fig) + @slow + def test_hist_bins(self): + df = DataFrame(np.random.randn(10, 2)) + ax = df.hist(bins=2)[0][0] + self.assertEqual(len(ax.patches), 2) + @slow def test_hist_layout(self): n = 10 diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 158cfeb5e3a6f..4d348c37ed927 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2342,7 +2342,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, ax = axes[i / cols, i % cols] ax.xaxis.set_visible(True) ax.yaxis.set_visible(True) - ax.hist(data[col].dropna().values, **kwds) + ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid)
Came up in the comments at https://github.com/pydata/pandas/pull/6850#issuecomment-41119013 Missed passing the `bins` kwarg to a call to matplotlib's hist.
https://api.github.com/repos/pandas-dev/pandas/pulls/6935
2014-04-23T02:43:49Z
2014-04-23T12:44:59Z
2014-04-23T12:44:59Z
2017-04-05T02:08:48Z
BUG: Fix to read decimal seconds from Excel.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 47407eedb17bd..49656046129ca 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -285,6 +285,7 @@ Improvements to existing features - Translate ``sep='\s+'`` to ``delim_whitespace=True`` in :func:`read_csv`/:func:`read_table` if no other C-unsupported options specified (:issue:`6607`) +- ``read_excel`` can now read milliseconds in Excel dates and times with xlrd >= 0.9.3. (:issue:`5945`) .. _release.bug_fixes-0.14.0: diff --git a/pandas/io/excel.py b/pandas/io/excel.py index fef5a24e6ea20..f4f40c8be7855 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -18,6 +18,7 @@ import pandas.compat as compat import pandas.core.common as com from warnings import warn +from distutils.version import LooseVersion __all__ = ["read_excel", "ExcelWriter", "ExcelFile"] @@ -250,11 +251,19 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0, parse_dates=False, date_parser=None, na_values=None, thousands=None, chunksize=None, convert_float=True, **kwds): - from xlrd import (xldate_as_tuple, XL_CELL_DATE, + import xlrd + from xlrd import (xldate, XL_CELL_DATE, XL_CELL_ERROR, XL_CELL_BOOLEAN, XL_CELL_NUMBER) - datemode = self.book.datemode + epoch1904 = self.book.datemode + + # xlrd >= 0.9.3 can return datetime objects directly. + if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"): + xlrd_0_9_3 = True + else: + xlrd_0_9_3 = False + if isinstance(sheetname, compat.string_types): sheet = self.book.sheet_by_name(sheetname) else: # assume an integer if not a string @@ -271,12 +280,29 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0, if parse_cols is None or should_parse[j]: if typ == XL_CELL_DATE: - dt = xldate_as_tuple(value, datemode) - # how to produce this first case? - if dt[0] < datetime.MINYEAR: # pragma: no cover - value = datetime.time(*dt[3:]) + if xlrd_0_9_3: + # Use the newer xlrd datetime handling. 
+ value = xldate.xldate_as_datetime(value, epoch1904) + + # Excel doesn't distinguish between dates and time, + # so we treat dates on the epoch as times only. + # Also, Excel supports 1900 and 1904 epochs. + year = (value.timetuple())[0:3] + if ((not epoch1904 and year == (1899, 12, 31)) + or (epoch1904 and year == (1904, 1, 1))): + value = datetime.time(value.hour, + value.minute, + value.second, + value.microsecond) else: - value = datetime.datetime(*dt) + # Use the xlrd <= 0.9.2 date handling. + dt = xldate.xldate_as_tuple(value, epoch1904) + + if dt[0] < datetime.MINYEAR: + value = datetime.time(*dt[3:]) + else: + value = datetime.datetime(*dt) + elif typ == XL_CELL_ERROR: value = np.nan elif typ == XL_CELL_BOOLEAN: @@ -727,8 +753,9 @@ def __init__(self, path, engine=None, import xlsxwriter super(_XlsxWriter, self).__init__(path, engine=engine, - date_format=date_format, datetime_format=datetime_format, - **engine_kwargs) + date_format=date_format, + datetime_format=datetime_format, + **engine_kwargs) self.book = xlsxwriter.Workbook(path, **engine_kwargs) diff --git a/pandas/io/tests/data/times_1900.xls b/pandas/io/tests/data/times_1900.xls new file mode 100644 index 0000000000000..e9a62b2c25da9 Binary files /dev/null and b/pandas/io/tests/data/times_1900.xls differ diff --git a/pandas/io/tests/data/times_1904.xls b/pandas/io/tests/data/times_1904.xls new file mode 100644 index 0000000000000..ac70787c358a5 Binary files /dev/null and b/pandas/io/tests/data/times_1904.xls differ diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index fde5764993e76..eb245c12c5e30 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1,8 +1,9 @@ # pylint: disable=E1101 from pandas.compat import u, range, map -from datetime import datetime, date +from datetime import datetime, date, time import os +from distutils.version import LooseVersion import nose @@ -360,6 +361,49 @@ def test_reader_special_dtypes(self): 
convert_float=False) tm.assert_frame_equal(actual, no_convert_float) + def test_reader_seconds(self): + # Test reading times with and without milliseconds. GH5945. + _skip_if_no_xlrd() + import xlrd + + if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"): + # Xlrd >= 0.9.3 can handle Excel milliseconds. + expected = DataFrame.from_items([("Time", + [time(1, 2, 3), + time(2, 45, 56, 100000), + time(4, 29, 49, 200000), + time(6, 13, 42, 300000), + time(7, 57, 35, 400000), + time(9, 41, 28, 500000), + time(11, 25, 21, 600000), + time(13, 9, 14, 700000), + time(14, 53, 7, 800000), + time(16, 37, 0, 900000), + time(18, 20, 54)])]) + else: + # Xlrd < 0.9.3 rounds Excel milliseconds. + expected = DataFrame.from_items([("Time", + [time(1, 2, 3), + time(2, 45, 56), + time(4, 29, 49), + time(6, 13, 42), + time(7, 57, 35), + time(9, 41, 29), + time(11, 25, 22), + time(13, 9, 15), + time(14, 53, 8), + time(16, 37, 1), + time(18, 20, 54)])]) + + epoch_1900 = os.path.join(self.dirpath, 'times_1900.xls') + epoch_1904 = os.path.join(self.dirpath, 'times_1904.xls') + + actual = read_excel(epoch_1900, 'Sheet1') + tm.assert_frame_equal(actual, expected) + + actual = read_excel(epoch_1904, 'Sheet1') + tm.assert_frame_equal(actual, expected) + class ExcelWriterBase(SharedItems): # Base class for test cases to run with different Excel writers. 
@@ -400,7 +444,7 @@ def test_excel_deprecated_options(self): with ensure_clean(self.ext) as path: with tm.assert_produces_warning(FutureWarning): self.frame.to_excel(path, 'test1', cols=['A', 'B']) - + with tm.assert_produces_warning(False): self.frame.to_excel(path, 'test1', columns=['A', 'B']) @@ -832,9 +876,9 @@ def test_to_excel_output_encoding(self): index=[u('A\u0192'), 'B'], columns=[u('X\u0193'), 'Y', 'Z']) with ensure_clean(filename) as filename: - df.to_excel(filename, sheet_name = 'TestSheet', encoding='utf8') - result = read_excel(filename, 'TestSheet', encoding = 'utf8') - tm.assert_frame_equal(result,df) + df.to_excel(filename, sheet_name='TestSheet', encoding='utf8') + result = read_excel(filename, 'TestSheet', encoding='utf8') + tm.assert_frame_equal(result, df) def test_to_excel_unicode_filename(self):
Fix to allow decimal seconds to be read from Excel dates and times into datetime objects. #5945. This required a fix to the `xlrd` module to return milliseconds from Excel dates and times. That fix was recently released to PyPI in xlrd version 0.9.3. Tests, version updates and release note included.
https://api.github.com/repos/pandas-dev/pandas/pulls/6934
2014-04-23T00:34:18Z
2014-04-24T01:24:01Z
2014-04-24T01:24:01Z
2014-06-18T06:59:55Z
BUG/INT: Internal tests for patching __finalize__ / bug in concat not finalizing (GH6927)
diff --git a/doc/source/release.rst b/doc/source/release.rst index e38d5e00a31ad..7824a69c92561 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -396,7 +396,7 @@ Bug Fixes - Bug in ``DataFrame.to_csv`` where setting `index` to `False` ignored the `header` kwarg (:issue:`6186`) - Bug in `DataFrame.plot` and `Series.plot` legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) -- Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`) +- Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) pandas 0.13.1 ------------- diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 9556f5d9f9a62..91bca01ab73b5 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -440,6 +440,32 @@ def test_metadata_propagation_indiv(self): result = ts.resample('1T',how=lambda x: x.sum()) self.check_metadata(ts,result) + _metadata = Series._metadata + _finalize = Series.__finalize__ + Series._metadata = ['name','filename'] + o.filename = 'foo' + o2.filename = 'bar' + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == 'concat' and name == 'filename': + value = '+'.join([ getattr(o,name) for o in other.objs if getattr(o,name,None) ]) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, None)) + + return self + + Series.__finalize__ = finalize + + result = pd.concat([o, o2]) + self.assertEquals(result.filename,'foo+bar') + self.assertIsNone(result.name) + + # reset + Series._metadata = _metadata + Series.__finalize__ = _finalize + def test_interpolate(self): ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index) @@ -894,6 +920,28 @@ def finalize(self, other, method=None, **kwargs): result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') self.assertEquals(result.filename,'fname1.csv|fname2.csv') + # 
concat + # GH 6927 + DataFrame._metadata = ['filename'] + df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab')) + df1.filename = 'foo' + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == 'concat': + value = '+'.join([ getattr(o,name) for o in other.objs if getattr(o,name,None) ]) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, None)) + + return self + + DataFrame.__finalize__ = finalize + + result = pd.concat([df1, df1]) + self.assertEquals(result.filename,'foo+foo') + + # reset DataFrame._metadata = _metadata DataFrame.__finalize__ = _finalize diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 42cea510121ee..ef43df98b9235 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -1064,18 +1064,18 @@ def get_result(self): new_data = com._concat_compat([x.get_values() for x in self.objs]) name = com._consensus_name_attr(self.objs) new_data = self._post_merge(new_data) - return Series(new_data, index=self.new_axes[0], name=name) + return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat') elif self._is_series: data = dict(zip(range(len(self.objs)), self.objs)) index, columns = self.new_axes tmpdf = DataFrame(data, index=index) if columns is not None: tmpdf.columns = columns - return tmpdf + return tmpdf.__finalize__(self, method='concat') else: new_data = self._get_concatenated_data() new_data = self._post_merge(new_data) - return self.objs[0]._from_axes(new_data, self.new_axes) + return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat') def _post_merge(self, data): if isinstance(data, BlockManager):
closes #6927
https://api.github.com/repos/pandas-dev/pandas/pulls/6931
2014-04-22T19:21:40Z
2014-04-22T19:44:47Z
2014-04-22T19:44:47Z
2014-07-16T09:02:47Z
API/DEPR: Deprecate order kwarg in factorize
diff --git a/doc/source/release.rst b/doc/source/release.rst index e38d5e00a31ad..0ad7793285000 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -206,6 +206,8 @@ Deprecations ``periods`` with a default value of 1. A ``FutureWarning`` is raised if the old argument ``lags`` is used by name. (:issue:`6910`) +- The ``order`` keyword argument of :func:`factorize` will be removed. (:issue:`6926`). + Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index b3ee1fdef30a8..242c27ebbf9b0 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -420,6 +420,7 @@ Deprecations The old positional argument ``lags`` has been changed to a keyword argument ``periods`` with a default value of 1. A ``FutureWarning`` is raised if the old argument ``lags`` is used by name. (:issue:`6910`) +- The ``order`` keyword argument of :func:`factorize` will be removed. (:issue:`6926`). .. _whatsnew_0140.enhancements: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e2ef178c62e71..5efba4a9738af 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -11,6 +11,7 @@ import pandas.hashtable as htable import pandas.compat as compat from pandas.compat import filter, string_types +from pandas.util.decorators import deprecate_kwarg def match(to_match, values, na_sentinel=-1): """ @@ -104,7 +105,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): Sequence sort : boolean, default False Sort by values - order : + order : deprecated na_sentinel: int, default -1 Value to mark "not found" @@ -115,6 +116,10 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex """ + if order is not None: + warn("order is deprecated." 
+ "See https://github.com/pydata/pandas/issues/6926", FutureWarning) + from pandas.tseries.period import PeriodIndex vals = np.asarray(values) is_datetime = com.is_datetime64_dtype(vals) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ebc41ea457b52..07bf247e5aafe 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -47,6 +47,12 @@ def test_strings(self): class TestFactorize(tm.TestCase): _multiprocess_can_split_ = True + def test_warn(self): + + s = Series([1, 2, 3]) + with tm.assert_produces_warning(FutureWarning): + algos.factorize(s, order='A') + def test_basic(self): labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
Closes https://github.com/pydata/pandas/issues/6926 `order` wasn't being used at all. I couldn't use the deprecate_kwarg decorator since the kwarg is being removed, not changed.
https://api.github.com/repos/pandas-dev/pandas/pulls/6930
2014-04-22T18:33:03Z
2014-04-22T20:54:18Z
2014-04-22T20:54:18Z
2017-04-05T02:08:49Z
API/DEPR: Match Panel.shift()'s signature to generic
diff --git a/doc/source/release.rst b/doc/source/release.rst index 245dd71886f63..e38d5e00a31ad 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -201,6 +201,11 @@ Deprecations - Indexers will warn ``FutureWarning`` when used with a scalar indexer and a non-floating point Index (:issue:`4892`) +- :meth:`Panel.shift` now has a function signature that matches :meth:`DataFrame.shift`. + The old positional argument ``lags`` has been changed to a keyword argument + ``periods`` with a default value of 1. A ``FutureWarning`` is raised if the + old argument ``lags`` is used by name. (:issue:`6910`) + Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 49507f2b6dd8f..b3ee1fdef30a8 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -416,6 +416,11 @@ Deprecations In [4]: Series(1,np.arange(5.))[3.0] Out[4]: 1 +- :meth:`Panel.shift` now has a function signature that matches :meth:`DataFrame.shift`. + The old positional argument ``lags`` has been changed to a keyword argument + ``periods`` with a default value of 1. A ``FutureWarning`` is raised if the + old argument ``lags`` is used by name. (:issue:`6910`) + .. 
_whatsnew_0140.enhancements: Enhancements diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9c3b3fab9f455..d894289c87eee 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3489,7 +3489,8 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, else: data = self.fillna(method=fill_method, limit=limit) - rs = data.div(data.shift(periods, freq=freq, axis=axis, **kwds)) - 1 + rs = (data.div(data.shift(periods=periods, freq=freq, + axis=axis, **kwds)) - 1) if freq is None: mask = com.isnull(_values_from_object(self)) np.putmask(rs.values, mask, np.nan) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 5a2a2f1d17d16..8f1a416a77a7e 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -22,7 +22,8 @@ from pandas.core.generic import NDFrame, _shared_docs from pandas.tools.util import cartesian_product from pandas import compat -from pandas.util.decorators import deprecate, Appender, Substitution +from pandas.util.decorators import (deprecate, Appender, Substitution, + deprecate_kwarg) import pandas.core.common as com import pandas.core.ops as ops import pandas.core.nanops as nanops @@ -1150,7 +1151,8 @@ def count(self, axis='major'): return self._wrap_result(result, axis) - def shift(self, lags, freq=None, axis='major'): + @deprecate_kwarg(old_arg_name='lags', new_arg_name='periods') + def shift(self, periods=1, freq=None, axis='major'): """ Shift major or minor axis by specified number of leads/lags. 
@@ -1164,12 +1166,12 @@ def shift(self, lags, freq=None, axis='major'): shifted : Panel """ if freq: - return self.tshift(lags, freq, axis=axis) + return self.tshift(periods, freq, axis=axis) if axis == 'items': raise ValueError('Invalid axis') - return super(Panel, self).shift(lags, freq=freq, axis=axis) + return super(Panel, self).shift(periods, freq=freq, axis=axis) def tshift(self, periods=1, freq=None, axis='major', **kwds): return super(Panel, self).tshift(periods, freq, axis, **kwds) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 6aff61c4e2167..21207a6f97ddd 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -824,6 +824,20 @@ def setUp(self): self.panel.minor_axis.name = None self.panel.items.name = None + def test_panel_warnings(self): + with tm.assert_produces_warning(FutureWarning): + shifted1 = self.panel.shift(lags=1) + + with tm.assert_produces_warning(False): + shifted2 = self.panel.shift(periods=1) + + tm.assert_panel_equal(shifted1, shifted2) + + with tm.assert_produces_warning(False): + shifted3 = self.panel.shift() + + tm.assert_panel_equal(shifted1, shifted3) + def test_constructor(self): # with BlockManager wp = Panel(self.panel._data)
Closes https://github.com/pydata/pandas/issues/6910 Also switches the shift implementation back to using keyword arguments.
https://api.github.com/repos/pandas-dev/pandas/pulls/6928
2014-04-22T17:03:48Z
2014-04-22T18:29:10Z
2014-04-22T18:29:10Z
2017-04-05T02:08:53Z
BUG/INT: Internal tests for patching __finalize__ / bug in merge not finalizing (GH6923)
diff --git a/doc/source/release.rst b/doc/source/release.rst index d2de554f1d054..245dd71886f63 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -391,6 +391,7 @@ Bug Fixes - Bug in ``DataFrame.to_csv`` where setting `index` to `False` ignored the `header` kwarg (:issue:`6186`) - Bug in `DataFrame.plot` and `Series.plot` legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) +- Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`) pandas 0.13.1 ------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 919ea4c0e74ce..9c3b3fab9f455 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1827,8 +1827,9 @@ def __finalize__(self, other, method=None, **kwargs): types of propagation actions based on this """ - for name in self._metadata: - object.__setattr__(self, name, getattr(other, name, None)) + if isinstance(other, NDFrame): + for name in self._metadata: + object.__setattr__(self, name, getattr(other, name, None)) return self def __getattr__(self, name): diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 6c6e70b86105f..9556f5d9f9a62 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -3,7 +3,7 @@ from datetime import datetime, timedelta import operator import nose - +import copy import numpy as np from numpy import nan import pandas as pd @@ -866,6 +866,37 @@ def test_metadata_propagation_indiv(self): result = df.resample('1T') self.check_metadata(df,result) + # merging with override + # GH 6923 + _metadata = DataFrame._metadata + _finalize = DataFrame.__finalize__ + + np.random.seed(10) + df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) + df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) + DataFrame._metadata = ['filename'] + df1.filename = 'fname1.csv' + df2.filename = 'fname2.csv' + + def finalize(self, other, method=None, **kwargs): + + for name in 
self._metadata: + if method == 'merge': + left, right = other.left, other.right + value = getattr(left, name, '') + '|' + getattr(right, name, '') + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, '')) + + return self + + DataFrame.__finalize__ = finalize + result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') + self.assertEquals(result.filename,'fname1.csv|fname2.csv') + + DataFrame._metadata = _metadata + DataFrame.__finalize__ = _finalize + class TestPanel(tm.TestCase, Generic): _typ = Panel _comparator = lambda self, x, y: assert_panel_equal(x, y) diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 90e713d72bdda..42cea510121ee 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -195,7 +195,7 @@ def get_result(self): copy=self.copy) result_data = join_op.get_result() - result = DataFrame(result_data) + result = DataFrame(result_data).__finalize__(self, method='merge') self._maybe_add_join_keys(result, left_indexer, right_indexer)
closes #6923
https://api.github.com/repos/pandas-dev/pandas/pulls/6924
2014-04-22T14:01:12Z
2014-04-22T14:24:08Z
2014-04-22T14:24:08Z
2014-07-09T18:05:32Z
API: remove the copy kw from .xs to prevent an expectation of a view (which may not be possible)
diff --git a/doc/source/api.rst b/doc/source/api.rst index 5e5b84e0e80b2..7918d6930341a 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -78,7 +78,7 @@ SQL .. autosummary:: :toctree: generated/ - + read_sql_table read_sql_query read_sql diff --git a/doc/source/release.rst b/doc/source/release.rst index 03b89f9077994..ec92613b67075 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -176,6 +176,10 @@ API Changes - ``.quantile`` on a ``datetime[ns]`` series now returns ``Timestamp`` instead of ``np.datetime64`` objects (:issue:`6810`) +- Remove the ``copy`` keyword from ``DataFrame.xs``,``Panel.major_xs``,``Panel.minor_xs``. A view will be + returned if possible, otherwise a copy will be made. Previously the user could think that ``copy=False`` would + ALWAYS return a view. (:issue:`6894`) + Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 187a757f53d45..d44c5b7a2f392 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -214,6 +214,10 @@ API changes (and numpy defaults) - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) +- Remove the ``copy`` keyword from ``DataFrame.xs``,``Panel.major_xs``,``Panel.minor_xs``. A view will be + returned if possible, otherwise a copy will be made. Previously the user could think that ``copy=False`` would + ALWAYS return a view. (:issue:`6894`) + .. 
_whatsnew_0140.sql: SQL diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7346fd522f50d..bce09b673ad75 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1564,7 +1564,7 @@ def irow(self, i, copy=False): def icol(self, i): return self._ixs(i, axis=1) - def _ixs(self, i, axis=0, copy=False): + def _ixs(self, i, axis=0): """ i : int, slice, or sequence of integers axis : int @@ -1588,7 +1588,10 @@ def _ixs(self, i, axis=0, copy=False): result = self.take(i, axis=axis) copy=True else: - new_values, copy = self._data.fast_xs(i, copy=copy) + new_values = self._data.fast_xs(i) + + # if we are a copy, mark as such + copy = isinstance(new_values,np.ndarray) and new_values.base is None result = Series(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ebcdc600f6751..919ea4c0e74ce 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1240,7 +1240,7 @@ def take(self, indices, axis=0, convert=True, is_copy=True): return result - def xs(self, key, axis=0, level=None, copy=True, drop_level=True): + def xs(self, key, axis=0, level=None, copy=None, drop_level=True): """ Returns a cross-section (row(s) or column(s)) from the Series/DataFrame. Defaults to cross-section on the rows (axis=0). @@ -1254,7 +1254,7 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True): level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. - copy : boolean, default True + copy : boolean [deprecated] Whether to make a copy of the data drop_level : boolean, default True If False, returns object with same levels as self. 
@@ -1276,14 +1276,6 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True): b 9 c 3 Name: C - >>> s = df.xs('a', copy=False) - >>> s['A'] = 100 - >>> df - A B C - a 100 5 2 - b 4 0 9 - c 9 7 3 - >>> df A B C D @@ -1310,16 +1302,24 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True): ------- xs : Series or DataFrame + Notes + ----- + xs is only for getting, not setting values. + + MultiIndex Slicers is a generic way to get/set values on any level or levels + it is a superset of xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>` + """ + if copy is not None: + warnings.warn("copy keyword is deprecated, " + "default is to return a copy or a view if possible") + axis = self._get_axis_number(axis) labels = self._get_axis(axis) if level is not None: loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) - if not copy and not isinstance(loc, slice): - raise ValueError('Cannot retrieve view (copy=False)') - # convert to a label indexer if needed if isinstance(loc, slice): lev_num = labels._get_level_number(level) @@ -1336,10 +1336,7 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True): return result if axis == 1: - data = self[key] - if copy: - data = data.copy() - return data + return self[key] self._consolidate_inplace() @@ -1362,7 +1359,7 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True): if np.isscalar(loc): from pandas import Series - new_values, copy = self._data.fast_xs(loc, copy=copy) + new_values = self._data.fast_xs(loc) # may need to box a datelike-scalar # diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e0c5fa573ff69..1f284a9b7a7ff 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -76,17 +76,14 @@ def _get_label(self, label, axis=0): # but will fail when the index is not present # see GH5667 try: - return self.obj._xs(label, axis=axis, copy=False) + return self.obj._xs(label, axis=axis) except: return self.obj[label] 
elif (isinstance(label, tuple) and isinstance(label[axis], slice)): raise IndexingError('no slices here, handle elsewhere') - try: - return self.obj._xs(label, axis=axis, copy=False) - except Exception: - return self.obj._xs(label, axis=axis, copy=True) + return self.obj._xs(label, axis=axis) def _get_loc(self, key, axis=0): return self.obj._ixs(key, axis=axis) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 8d5fddc6b6029..792a310c8a554 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2861,9 +2861,7 @@ def xs(self, key, axis=1, copy=True, takeable=False): new_blocks = [] if len(self.blocks) > 1: - if not copy: - raise Exception('cannot get view of mixed-type or ' - 'non-consolidated DataFrame') + # we must copy here as we are mixed type for blk in self.blocks: newb = make_block(blk.values[slicer], blk.items, @@ -2884,18 +2882,16 @@ def xs(self, key, axis=1, copy=True, takeable=False): return self.__class__(new_blocks, new_axes) - def fast_xs(self, loc, copy=False): + def fast_xs(self, loc): """ get a cross sectional for a given location in the items ; handle dups - return the result and a flag if a copy was actually made + return the result, is *could* be a view in the case of a + single block """ if len(self.blocks) == 1: - result = self.blocks[0].values[:, loc] - if copy: - result = result.copy() - return result, copy + return self.blocks[0].values[:, loc] items = self.items @@ -2904,7 +2900,7 @@ def fast_xs(self, loc, copy=False): result = self._interleave(items) if self.ndim == 2: result = result.T - return result[loc], True + return result[loc] # unique dtype = _interleaved_dtype(self.blocks) @@ -2915,7 +2911,7 @@ def fast_xs(self, loc, copy=False): i = items.get_loc(item) result[i] = blk._try_coerce_result(blk.iget((j, loc))) - return result, True + return result def consolidate(self): """ @@ -3829,12 +3825,12 @@ def _consolidate_check(self): def _consolidate_inplace(self): pass - def fast_xs(self, loc, 
copy=False): + def fast_xs(self, loc): """ fast path for getting a cross-section + return a view of the data """ - result = self._block.values[loc] - return result, False + return self._block.values[loc] def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ diff --git a/pandas/core/panel.py b/pandas/core/panel.py index f1c52a8facc0a..5a2a2f1d17d16 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -688,7 +688,7 @@ def _combine_panel(self, other, func): return self._constructor(result_values, items, major, minor) - def major_xs(self, key, copy=True): + def major_xs(self, key, copy=None): """ Return slice of panel along major axis @@ -696,17 +696,29 @@ def major_xs(self, key, copy=True): ---------- key : object Major axis label - copy : boolean, default True - Copy data + copy : boolean [deprecated] + Whether to make a copy of the data Returns ------- y : DataFrame index -> minor axis, columns -> items + + Notes + ----- + major_xs is only for getting, not setting values. + + MultiIndex Slicers is a generic way to get/set values on any level or levels + it is a superset of major_xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>` + """ - return self.xs(key, axis=self._AXIS_LEN - 2, copy=copy) + if copy is not None: + warnings.warn("copy keyword is deprecated, " + "default is to return a copy or a view if possible") - def minor_xs(self, key, copy=True): + return self.xs(key, axis=self._AXIS_LEN - 2) + + def minor_xs(self, key, copy=None): """ Return slice of panel along minor axis @@ -714,17 +726,29 @@ def minor_xs(self, key, copy=True): ---------- key : object Minor axis label - copy : boolean, default True - Copy data + copy : boolean [deprecated] + Whether to make a copy of the data Returns ------- y : DataFrame index -> major axis, columns -> items + + Notes + ----- + minor_xs is only for getting, not setting values. 
+ + MultiIndex Slicers is a generic way to get/set values on any level or levels + it is a superset of minor_xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>` + """ - return self.xs(key, axis=self._AXIS_LEN - 1, copy=copy) + if copy is not None: + warnings.warn("copy keyword is deprecated, " + "default is to return a copy or a view if possible") + + return self.xs(key, axis=self._AXIS_LEN - 1) - def xs(self, key, axis=1, copy=True): + def xs(self, key, axis=1, copy=None): """ Return slice of panel along selected axis @@ -733,24 +757,36 @@ def xs(self, key, axis=1, copy=True): key : object Label axis : {'items', 'major', 'minor}, default 1/'major' - copy : boolean, default True - Copy data + copy : boolean [deprecated] + Whether to make a copy of the data Returns ------- y : ndim(self)-1 + + Notes + ----- + xs is only for getting, not setting values. + + MultiIndex Slicers is a generic way to get/set values on any level or levels + it is a superset of xs functionality, see :ref:`MultiIndex Slicers <indexing.mi_slicers>` + """ + if copy is not None: + warnings.warn("copy keyword is deprecated, " + "default is to return a copy or a view if possible") + axis = self._get_axis_number(axis) if axis == 0: - data = self[key] - if copy: - data = data.copy() - return data + return self[key] self._consolidate_inplace() axis_number = self._get_axis_number(axis) - new_data = self._data.xs(key, axis=axis_number, copy=copy) - return self._construct_return_type(new_data) + new_data = self._data.xs(key, axis=axis_number, copy=False) + result = self._construct_return_type(new_data) + copy = new_data.is_mixed_type + result._set_is_copy(self, copy=copy) + return result _xs = xs diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 83fd56c676f8e..a9e48c62f9693 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -8371,12 +8371,8 @@ def test_xs(self): expected = self.frame['A'] assert_series_equal(series, expected) - # no 
view by default - series[:] = 5 - self.assert_((expected != 5).all()) - - # view - series = self.frame.xs('A', axis=1, copy=False) + # view is returned if possible + series = self.frame.xs('A', axis=1) series[:] = 5 self.assert_((expected == 5).all()) @@ -11888,25 +11884,16 @@ def test_boolean_set_uncons(self): assert_almost_equal(expected, self.frame.values) def test_xs_view(self): + """ + in 0.14 this will return a view if possible + a copy otherwise, but this is numpy dependent + """ + dm = DataFrame(np.arange(20.).reshape(4, 5), index=lrange(4), columns=lrange(5)) - dm.xs(2, copy=False)[:] = 5 - self.assert_((dm.xs(2) == 5).all()) - dm.xs(2)[:] = 10 - self.assert_((dm.xs(2) == 5).all()) - - # prior to chained assignment (GH5390) - # this would raise, but now just returns a copy (and sets is_copy) - # TODO (?): deal with mixed-type fiasco? - # with assertRaisesRegexp(TypeError, 'cannot get view of mixed-type'): - # self.mixed_frame.xs(self.mixed_frame.index[2], copy=False) - - # unconsolidated - dm['foo'] = 6. 
- dm.xs(3, copy=False)[:] = 10 - self.assert_((dm.xs(3) == 10).all()) + self.assert_((dm.xs(2) == 10).all()) def test_boolean_indexing(self): idx = lrange(3) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index aef4e3a72c099..ac420ee5d78cd 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -444,9 +444,15 @@ def test_xs_level(self): expected = df[1:2] expected.index = expected.index.droplevel(2) assert_frame_equal(result, expected) - # can't produce a view of a multiindex with a level without copying - with assertRaisesRegexp(ValueError, 'Cannot retrieve view'): - self.frame.xs('two', level='second', copy=False) + + # this is a copy in 0.14 + result = self.frame.xs('two', level='second') + + # setting this will give a SettingWithCopyError + # as we are trying to write a view + def f(x): + x[:] = 10 + self.assertRaises(com.SettingWithCopyError, f, result) def test_xs_level_multiple(self): from pandas import read_table @@ -461,8 +467,15 @@ def test_xs_level_multiple(self): result = df.xs(('a', 4), level=['one', 'four']) expected = df.xs('a').xs(4, level='four') assert_frame_equal(result, expected) - with assertRaisesRegexp(ValueError, 'Cannot retrieve view'): - df.xs(('a', 4), level=['one', 'four'], copy=False) + + # this is a copy in 0.14 + result = df.xs(('a', 4), level=['one', 'four']) + + # setting this will give a SettingWithCopyError + # as we are trying to write a view + def f(x): + x[:] = 10 + self.assertRaises(com.SettingWithCopyError, f, result) # GH2107 dates = lrange(20111201, 20111205) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 176ef13d23d94..6aff61c4e2167 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -536,19 +536,15 @@ def test_xs(self): expected = self.panel['ItemA'] assert_frame_equal(itemA, expected) - # not view by default - itemA.values[:] = np.nan - self.assert_(not np.isnan(self.panel['ItemA'].values).all()) - - # but 
can get view - itemA_view = self.panel.xs('ItemA', axis=0, copy=False) + # get a view by default + itemA_view = self.panel.xs('ItemA', axis=0) itemA_view.values[:] = np.nan self.assert_(np.isnan(self.panel['ItemA'].values).all()) - # mixed-type + # mixed-type yields a copy self.panel['strings'] = 'foo' - self.assertRaises(Exception, self.panel.xs, 'D', axis=2, - copy=False) + result = self.panel.xs('D', axis=2) + self.assertIsNotNone(result.is_copy) def test_getitem_fancy_labels(self): p = self.panel diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index a7a87c998d839..77b70132d4bfb 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -452,19 +452,15 @@ def test_xs(self): expected = self.panel4d['l1'] assert_panel_equal(l1, expected) - # not view by default - l1.values[:] = np.nan - self.assert_(not np.isnan(self.panel4d['l1'].values).all()) - - # but can get view - l1_view = self.panel4d.xs('l1', axis=0, copy=False) + # view if possible + l1_view = self.panel4d.xs('l1', axis=0) l1_view.values[:] = np.nan self.assert_(np.isnan(self.panel4d['l1'].values).all()) # mixed-type self.panel4d['strings'] = 'foo' - self.assertRaises(Exception, self.panel4d.xs, 'D', axis=2, - copy=False) + result = self.panel4d.xs('D', axis=3) + self.assertIsNotNone(result.is_copy) def test_getitem_fancy_labels(self): panel4d = self.panel4d
closes #6894 Deprecate passing keyword `copy` to: - `DataFrame/Series/Panel/.xs` - `Panel.minor_xs` - `Panel.major_xs` `.xs` returns a view (as does `.loc`) **IF POSSIBLE** MultiIndexing with Slicers eliminates the need for this (and `.xs` for the too, but for some cases its 'simpler').
https://api.github.com/repos/pandas-dev/pandas/pulls/6919
2014-04-22T00:21:38Z
2014-04-22T13:09:16Z
2014-04-22T13:09:16Z
2014-06-14T14:25:16Z
DOC: Add recipe for shifting groups of values based on the index
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 41a270a37d187..5f0b65218d374 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -191,6 +191,19 @@ The :ref:`grouping <groupby>` docs. `Create a value counts column and reassign back to the DataFrame <http://stackoverflow.com/questions/17709270/i-want-to-create-a-column-of-value-counts-in-my-pandas-dataframe>`__ +`Shift groups of the values in a column based on the index +<http://stackoverflow.com/q/23198053/190597>`__ + +.. ipython:: python + + df = pd.DataFrame( + {u'line_race': [10L, 10L, 8L, 10L, 10L, 8L], + u'beyer': [99L, 102L, 103L, 103L, 88L, 100L]}, + index=[u'Last Gunfighter', u'Last Gunfighter', u'Last Gunfighter', + u'Paynter', u'Paynter', u'Paynter']); df + + df['beyer_shifted'] = df.groupby(level=0)['beyer'].apply(lambda grp: grp.shift(1)) + df Expanding Data ~~~~~~~~~~~~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/6917
2014-04-21T20:55:11Z
2014-04-21T21:54:40Z
2014-04-21T21:54:40Z
2014-07-16T09:02:33Z
to_csv `headers` kwarg now works regardless of `index` kwarg
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6d8f915e2ebb8..857a4d237f423 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -381,6 +381,8 @@ Bug Fixes - Bug in arithmetic operations affecting to NaT (:issue:`6873`) - Bug in ``Series.str.extract`` where the resulting ``Series`` from a single group match wasn't renamed to the group name +- Bug in ``DataFrame.to_csv`` where setting `index` to `False` ignored the + `header` kwarg (:issue:`6186`) pandas 0.13.1 ------------- diff --git a/pandas/core/format.py b/pandas/core/format.py index a7cbf2c70a5d3..117b686b02d75 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1194,6 +1194,14 @@ def _save_header(self): has_aliases = isinstance(header, (tuple, list, np.ndarray)) if not (has_aliases or self.header): return + if has_aliases: + if len(header) != len(cols): + raise ValueError(('Writing %d cols but got %d aliases' + % (len(cols), len(header)))) + else: + write_cols = header + else: + write_cols = cols if self.index: # should write something for index label @@ -1219,22 +1227,8 @@ def _save_header(self): else: encoded_labels = [] - if has_aliases: - if len(header) != len(cols): - raise ValueError(('Writing %d cols but got %d aliases' - % (len(cols), len(header)))) - else: - write_cols = header - else: - write_cols = cols - - if not has_mi_columns: - encoded_labels += list(write_cols) - - else: - - if not has_mi_columns: - encoded_labels += list(cols) + if not has_mi_columns: + encoded_labels += list(write_cols) # write out the mi if has_mi_columns: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 01b42457e72f5..83fd56c676f8e 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5836,6 +5836,22 @@ def test_to_csv_no_index(self): result = read_csv(path) assert_frame_equal(df,result) + def test_to_csv_headers(self): + # GH6186, the presence or absence of `index` incorrectly + # causes to_csv to have different header 
semantics. + pname = '__tmp_to_csv_headers__' + from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y']) + with ensure_clean(pname) as path: + from_df.to_csv(path, header=['X', 'Y']) + recons = DataFrame.from_csv(path) + assert_frame_equal(to_df, recons) + + from_df.to_csv(path, index=False, header=['X', 'Y']) + recons = DataFrame.from_csv(path) + recons.reset_index(inplace=True) + assert_frame_equal(to_df, recons) + def test_to_csv_multiindex(self): pname = '__tmp_to_csv_multiindex__'
Addresses issue https://github.com/pydata/pandas/issues/6186
https://api.github.com/repos/pandas-dev/pandas/pulls/6916
2014-04-20T16:21:06Z
2014-04-21T05:43:53Z
2014-04-21T05:43:53Z
2014-06-12T04:44:39Z
BUG: GroupBy.get_group doesnt work with TimeGrouper
diff --git a/doc/source/release.rst b/doc/source/release.rst index 49656046129ca..7ed01e42ff7aa 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -399,6 +399,7 @@ Bug Fixes - Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) - Bug in `Series.__unicode__` when `max_rows` is `None` and the Series has more than 1000 rows. (:issue:`6863`) - Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) +- Bug in ``groupBy.get_group`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`6914`) - Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` affects to NaT (:issue:`5546`) - Bug in arithmetic operations affecting to NaT (:issue:`6873`) - Bug in ``Series.str.extract`` where the resulting ``Series`` from a single diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c0222ad248e0c..494251ee97044 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2,6 +2,7 @@ from functools import wraps import numpy as np import datetime +import collections from pandas.compat import( zip, builtins, range, long, lrange, lzip, @@ -1556,6 +1557,17 @@ def apply(self, f, data, axis=0): return result_keys, result_values, mutated + @cache_readonly + def indices(self): + indices = collections.defaultdict(list) + + i = 0 + for label, bin in zip(self.binlabels, self.bins): + if i < bin: + indices[label] = list(range(i, bin)) + i = bin + return indices + @cache_readonly def ngroups(self): return len(self.binlabels) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 22d92c7b19fe1..fde9156017c4e 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -3140,6 +3140,55 @@ def test_timegrouper_with_reg_groups(self): result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum() assert_series_equal(result2, expected) + def test_timegrouper_get_group(self): + # GH 6914 + + df_original = DataFrame({ + 
'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(), + 'Quantity': [18,3,5,1,9,3], + 'Date' : [datetime(2013,9,1,13,0), datetime(2013,9,1,13,5), + datetime(2013,10,1,20,0), datetime(2013,10,3,10,0), + datetime(2013,12,2,12,0), datetime(2013,9,2,14,0),]}) + df_reordered = df_original.sort(columns='Quantity') + + # single grouping + expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]], + df_original.iloc[[4]]] + dt_list = ['2013-09-30', '2013-10-31', '2013-12-31'] + + for df in [df_original, df_reordered]: + grouped = df.groupby(pd.Grouper(freq='M', key='Date')) + for t, expected in zip(dt_list, expected_list): + dt = pd.Timestamp(t) + result = grouped.get_group(dt) + assert_frame_equal(result, expected) + + # multiple grouping + expected_list = [df_original.iloc[[1]], df_original.iloc[[3]], + df_original.iloc[[4]]] + g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'), ('Joe', '2013-12-31')] + + for df in [df_original, df_reordered]: + grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')]) + for (b, t), expected in zip(g_list, expected_list): + dt = pd.Timestamp(t) + result = grouped.get_group((b, dt)) + assert_frame_equal(result, expected) + + # with index + df_original = df_original.set_index('Date') + df_reordered = df_original.sort(columns='Quantity') + + expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]], + df_original.iloc[[4]]] + + for df in [df_original, df_reordered]: + grouped = df.groupby(pd.Grouper(freq='M')) + for t, expected in zip(dt_list, expected_list): + dt = pd.Timestamp(t) + result = grouped.get_group(dt) + assert_frame_equal(result, expected) + def test_cumcount(self): df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A']) g = df.groupby('A')
`get_group` raises `AttributeError` when the group is created by `TimeGrouper`. ``` >>> df = pd.DataFrame({'Branch' : 'A A A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), 'Quantity': [1,3,5,1,8,1,9,3], 'Date' : [ datetime(2013,1,1,13,0), datetime(2013,1,1,13,5), datetime(2013,10,1,20,0), datetime(2013,10,2,10,0), datetime(2013,10,1,20,0), datetime(2013,10,2,10,0), datetime(2013,12,2,12,0), datetime(2013,12,2,14,0),]}) >>> grouped = df.groupby(pd.Grouper(freq='1M',key='Date')) >>> grouped.get_group(pd.Timestamp('2013-12-31')) AttributeError: 'DataFrameGroupBy' object has no attribute 'indices' ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6914
2014-04-19T18:23:39Z
2014-04-28T14:07:40Z
2014-04-28T14:07:40Z
2014-06-14T08:31:02Z
ENH: pivot_table can now accept Grouper
diff --git a/doc/source/release.rst b/doc/source/release.rst index a888f03b9d8e7..c975143b0ef67 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -286,6 +286,7 @@ Improvements to existing features :func:`read_csv`/:func:`read_table` if no other C-unsupported options specified (:issue:`6607`) - ``read_excel`` can now read milliseconds in Excel dates and times with xlrd >= 0.9.3. (:issue:`5945`) +- ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index b872c8a60e34e..436055ffe37d1 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -264,19 +264,24 @@ It takes a number of arguments - ``data``: A DataFrame object - ``values``: a column or a list of columns to aggregate -- ``rows``: list of columns to group by on the table rows -- ``cols``: list of columns to group by on the table columns +- ``index``: a column, Grouper, array which has the same length as data, or list of them. + Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. +- ``columns``: a column, Grouper, array which has the same length as data, or list of them. + Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. - ``aggfunc``: function to use for aggregation, defaulting to ``numpy.mean`` Consider a data set like this: .. 
ipython:: python + import datetime df = DataFrame({'A' : ['one', 'one', 'two', 'three'] * 6, 'B' : ['A', 'B', 'C'] * 8, 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 4, 'D' : np.random.randn(24), - 'E' : np.random.randn(24)}) + 'E' : np.random.randn(24), + 'F' : [datetime.datetime(2013, i, 1) for i in range(1, 13)] + + [datetime.datetime(2013, i, 15) for i in range(1, 13)]}) df We can produce pivot tables from this data very easily: @@ -296,6 +301,12 @@ hierarchy in the columns: pivot_table(df, index=['A', 'B'], columns=['C']) +Also, you can use ``Grouper`` for ``index`` and ``columns`` keywords. For detail of ``Grouper``, see :ref:`Grouping with a Grouper specification <groupby.specify>`. + +.. ipython:: python + + pivot_table(df, values='D', index=Grouper(freq='M', key='F'), columns='C') + You can render a nice output of the table omitting the missing values by calling ``to_string`` if you wish: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index e63728e22d23a..34480668df8c9 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -484,6 +484,26 @@ Enhancements - ``CustomBuisnessMonthBegin`` and ``CustomBusinessMonthEnd`` are now available (:issue:`6866`) - :meth:`Series.quantile` and :meth:`DataFrame.quantile` now accept an array of quantiles. +- ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) + + .. 
ipython:: python + + import datetime + df = DataFrame({ + 'Branch' : 'A A A A A B'.split(), + 'Buyer': 'Carl Mark Carl Carl Joe Joe'.split(), + 'Quantity': [1, 3, 5, 1, 8, 1], + 'Date' : [datetime.datetime(2013,11,1,13,0), datetime.datetime(2013,9,1,13,5), + datetime.datetime(2013,10,1,20,0), datetime.datetime(2013,10,2,10,0), + datetime.datetime(2013,11,1,20,0), datetime.datetime(2013,10,2,10,0)], + 'PayDay' : [datetime.datetime(2013,10,4,0,0), datetime.datetime(2013,10,15,13,5), + datetime.datetime(2013,9,5,20,0), datetime.datetime(2013,11,2,10,0), + datetime.datetime(2013,10,7,20,0), datetime.datetime(2013,9,5,10,0)]}) + df + + pivot_table(df, index=Grouper(freq='M', key='Date'), + columns=Grouper(freq='M', key='PayDay'), + values='Quantity', aggfunc=np.sum) Performance ~~~~~~~~~~~ diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 6c4f55ae8a3b5..9132fea089fe7 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -4,6 +4,7 @@ from pandas import Series, DataFrame from pandas.core.index import MultiIndex +from pandas.core.groupby import Grouper from pandas.tools.merge import concat from pandas.tools.util import cartesian_product from pandas.compat import range, lrange, zip @@ -25,10 +26,12 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', ---------- data : DataFrame values : column to aggregate, optional - index : list of column names or arrays to group on - Keys to group on the x-axis of the pivot table - columns : list of column names or arrays to group on - Keys to group on the y-axis of the pivot table + index : a column, Grouper, array which has the same length as data, or list of them. + Keys to group by on the pivot table index. + If an array is passed, it is being used as the same manner as column values. + columns : a column, Grouper, array which has the same length as data, or list of them. + Keys to group by on the pivot table column. 
+ If an array is passed, it is being used as the same manner as column values. aggfunc : function, default numpy.mean, or list of functions If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred @@ -98,6 +101,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', if values_passed: to_filter = [] for x in keys + values: + if isinstance(x, Grouper): + x = x.key try: if x in data: to_filter.append(x) @@ -297,7 +302,7 @@ def _all_key(): def _convert_by(by): if by is None: by = [] - elif (np.isscalar(by) or isinstance(by, (np.ndarray, Series)) + elif (np.isscalar(by) or isinstance(by, (np.ndarray, Series, Grouper)) or hasattr(by, '__call__')): by = [by] else: diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index 2255fdebc9fe3..6fe32b5b85080 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -1,12 +1,10 @@ import datetime -import unittest -import warnings import numpy as np from numpy.testing import assert_equal import pandas -from pandas import DataFrame, Series, Index, MultiIndex +from pandas import DataFrame, Series, Index, MultiIndex, Grouper from pandas.tools.merge import concat from pandas.tools.pivot import pivot_table, crosstab from pandas.compat import range, u, product @@ -288,8 +286,7 @@ def test_pivot_columns_lexsorted(self): iproduct = np.random.randint(0, len(products), n) items['Index'] = products['Index'][iproduct] items['Symbol'] = products['Symbol'][iproduct] - dr = pandas.date_range(datetime.date(2000, 1, 1), - datetime.date(2010, 12, 31)) + dr = pandas.date_range(datetime.date(2000, 1, 1), datetime.date(2010, 12, 31)) dates = dr[np.random.randint(0, len(dr), n)] items['Year'] = dates.year items['Month'] = dates.month @@ -333,6 +330,128 @@ def test_margins_no_values_two_row_two_cols(self): result = self.data[['A', 'B', 'C', 'D']].pivot_table(index=['A', 'B'], columns=['C', 'D'], 
aggfunc=len, margins=True) self.assertEqual(result.All.tolist(), [3.0, 1.0, 4.0, 3.0, 11.0]) + def test_pivot_timegrouper(self): + df = DataFrame({ + 'Branch' : 'A A A A A A A B'.split(), + 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), + 'Quantity': [1, 3, 5, 1, 8, 1, 9, 3], + 'Date' : [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 1, 1), + datetime.datetime(2013, 10, 1), datetime.datetime(2013, 10, 2), + datetime.datetime(2013, 10, 1), datetime.datetime(2013, 10, 2), + datetime.datetime(2013, 12, 2), datetime.datetime(2013, 12, 2),]}).set_index('Date') + + expected = DataFrame(np.array([10, 18, 3]).reshape(1, 3), + index=[datetime.datetime(2013, 12, 31)], + columns='Carl Joe Mark'.split()) + expected.index.name = 'Date' + expected.columns.name = 'Buyer' + + result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer', + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result,expected) + + result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'), + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result,expected.T) + + expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan]).reshape(2, 3), + index=[datetime.datetime(2013, 1, 1), datetime.datetime(2013, 7, 1)], + columns='Carl Joe Mark'.split()) + expected.index.name = 'Date' + expected.columns.name = 'Buyer' + + result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer', + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected) + + result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'), + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected.T) + + # passing the name + df = df.reset_index() + result = pivot_table(df, index=Grouper(freq='6MS', key='Date'), columns='Buyer', + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected) + + result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS', key='Date'), + values='Quantity', aggfunc=np.sum) + 
tm.assert_frame_equal(result, expected.T) + + self.assertRaises(KeyError, lambda : pivot_table(df, index=Grouper(freq='6MS', key='foo'), + columns='Buyer', values='Quantity', aggfunc=np.sum)) + self.assertRaises(KeyError, lambda : pivot_table(df, index='Buyer', + columns=Grouper(freq='6MS', key='foo'), values='Quantity', aggfunc=np.sum)) + + # passing the level + df = df.set_index('Date') + result = pivot_table(df, index=Grouper(freq='6MS', level='Date'), columns='Buyer', + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected) + + result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS', level='Date'), + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected.T) + + self.assertRaises(ValueError, lambda : pivot_table(df, index=Grouper(freq='6MS', level='foo'), + columns='Buyer', values='Quantity', aggfunc=np.sum)) + self.assertRaises(ValueError, lambda : pivot_table(df, index='Buyer', + columns=Grouper(freq='6MS', level='foo'), values='Quantity', aggfunc=np.sum)) + + # double grouper + df = DataFrame({ + 'Branch' : 'A A A A A A A B'.split(), + 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), + 'Quantity': [1,3,5,1,8,1,9,3], + 'Date' : [datetime.datetime(2013,11,1,13,0), datetime.datetime(2013,9,1,13,5), + datetime.datetime(2013,10,1,20,0), datetime.datetime(2013,10,2,10,0), + datetime.datetime(2013,11,1,20,0), datetime.datetime(2013,10,2,10,0), + datetime.datetime(2013,10,2,12,0), datetime.datetime(2013,12,5,14,0)], + 'PayDay' : [datetime.datetime(2013,10,4,0,0), datetime.datetime(2013,10,15,13,5), + datetime.datetime(2013,9,5,20,0), datetime.datetime(2013,11,2,10,0), + datetime.datetime(2013,10,7,20,0), datetime.datetime(2013,9,5,10,0), + datetime.datetime(2013,12,30,12,0), datetime.datetime(2013,11,20,14,0),]}) + + result = pivot_table(df, index=Grouper(freq='M', key='Date'), + columns=Grouper(freq='M', key='PayDay'), + values='Quantity', aggfunc=np.sum) + expected = DataFrame(np.array([np.nan, 3, 
np.nan, np.nan, 6, np.nan, 1, 9, + np.nan, 9, np.nan, np.nan, np.nan, np.nan, 3, np.nan]).reshape(4, 4), + index=[datetime.datetime(2013, 9, 30), datetime.datetime(2013, 10, 31), + datetime.datetime(2013, 11, 30), datetime.datetime(2013, 12, 31)], + columns=[datetime.datetime(2013, 9, 30), datetime.datetime(2013, 10, 31), + datetime.datetime(2013, 11, 30), datetime.datetime(2013, 12, 31)]) + expected.index.name = 'Date' + expected.columns.name = 'PayDay' + + tm.assert_frame_equal(result, expected) + + result = pivot_table(df, index=Grouper(freq='M', key='PayDay'), + columns=Grouper(freq='M', key='Date'), + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected.T) + + tuples = [(datetime.datetime(2013, 9, 30), datetime.datetime(2013, 10, 31)), + (datetime.datetime(2013, 10, 31), datetime.datetime(2013, 9, 30)), + (datetime.datetime(2013, 10, 31), datetime.datetime(2013, 11, 30)), + (datetime.datetime(2013, 10, 31), datetime.datetime(2013, 12, 31)), + (datetime.datetime(2013, 11, 30), datetime.datetime(2013, 10, 31)), + (datetime.datetime(2013, 12, 31), datetime.datetime(2013, 11, 30)),] + idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay']) + expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan, + 9, np.nan, 9, np.nan, np.nan, 3]).reshape(6, 2), + index=idx, columns=['A', 'B']) + expected.columns.name = 'Branch' + + result = pivot_table(df, index=[Grouper(freq='M', key='Date'), + Grouper(freq='M', key='PayDay')], columns=['Branch'], + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected) + + result = pivot_table(df, index=['Branch'], columns=[Grouper(freq='M', key='Date'), + Grouper(freq='M', key='PayDay')], + values='Quantity', aggfunc=np.sum) + tm.assert_frame_equal(result, expected.T) class TestCrosstab(tm.TestCase):
`pivot_table` can accept `Grouper` by `index` and `columns` kw. ``` >>> df = pd.DataFrame({ 'Branch' : 'A A A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), 'Quantity': [1,3,5,1,8,1,9,3], 'Date' : [datetime(2013,11,1,13,0), datetime(2013,9,1,13,5), datetime(2013,10,1,20,0), datetime(2013,10,2,10,0), datetime(2013,11,1,20,0), datetime(2013,10,2,10,0), datetime(2013,10,2,12,0), datetime(2013,12,5,14,0)], 'PayDay' : [datetime(2013,10,4,0,0), datetime(2013,10,15,13,5), datetime(2013,9,5,20,0), datetime(2013,11,2,10,0), datetime(2013,10,7,20,0), datetime(2013,9,5,10,0), datetime(2013,12,30,12,0), datetime(2013,11,20,14,0),]}) >>> pd.pivot_table(df, index=pd.Grouper(freq='M', key='Date'), columns=pd.Grouper(freq='M', key='PayDay'), values='Quantity', aggfunc=np.sum) PayDay 2013-09-30 2013-10-31 2013-11-30 2013-12-31 Date 2013-09-30 NaN 3 NaN NaN 2013-10-31 6 NaN 1 9 2013-11-30 NaN 9 NaN NaN 2013-12-31 NaN NaN 3 NaN ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6913
2014-04-19T17:12:53Z
2014-04-27T13:28:37Z
2014-04-27T13:28:37Z
2014-06-13T01:24:57Z
TST: restructure sql tests
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 83978a0e0b8f7..9622f9d8790cb 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -1,3 +1,20 @@ +"""SQL io tests + +The SQL tests are broken down in different classes: + +- `PandasSQLTest`: base class with common methods for all test classes +- Tests for the public API (only tests with sqlite3) + - `_TestSQLApi` base class + - `TestSQLApi`: test the public API with sqlalchemy engine + - `TesySQLLegacyApi`: test the public API with DBAPI connection +- Tests for the different SQL flavors (flavor specific type conversions) + - Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with + common methods, the different tested flavors (sqlite3, MySQL, PostgreSQL) + derive from the base class + - Tests for the legacy mode (`TestSQLiteLegacy` and `TestMySQLLegacy`) + +""" + from __future__ import print_function import unittest import sqlite3 @@ -8,7 +25,7 @@ import numpy as np from pandas import DataFrame, Series, MultiIndex -from pandas.compat import range, lrange, iteritems +from pandas.compat import range #from pandas.core.datetools import format as date_format import pandas.io.sql as sql @@ -24,11 +41,11 @@ SQL_STRINGS = { 'create_iris': { 'sqlite': """CREATE TABLE iris ( - `SepalLength` REAL, - `SepalWidth` REAL, - `PetalLength` REAL, - `PetalWidth` REAL, - `Name` TEXT + "SepalLength" REAL, + "SepalWidth" REAL, + "PetalLength" REAL, + "PetalWidth" REAL, + "Name" TEXT )""", 'mysql': """CREATE TABLE iris ( `SepalLength` DOUBLE, @@ -52,14 +69,14 @@ }, 'create_test_types': { 'sqlite': """CREATE TABLE types_test_data ( - `TextCol` TEXT, - `DateCol` TEXT, - `IntDateCol` INTEGER, - `FloatCol` REAL, - `IntCol` INTEGER, - `BoolCol` INTEGER, - `IntColWithNull` INTEGER, - `BoolColWithNull` INTEGER + "TextCol" TEXT, + "DateCol" TEXT, + "IntDateCol" INTEGER, + "FloatCol" REAL, + "IntCol" INTEGER, + "BoolCol" INTEGER, + "IntColWithNull" INTEGER, + "BoolColWithNull" 
INTEGER )""", 'mysql': """CREATE TABLE types_test_data ( `TextCol` TEXT, @@ -118,9 +135,9 @@ class PandasSQLTest(unittest.TestCase): + """ + Base class with common private methods for SQLAlchemy and fallback cases. - """Base class with common private methods for - SQLAlchemy and fallback cases. """ def drop_table(self, table_name): @@ -285,10 +302,18 @@ def _tquery(self): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) +#------------------------------------------------------------------------------ +#--- Testing the public API + class _TestSQLApi(PandasSQLTest): - """Test the public API as it would be used - directly, including legacy names + """ + Base class to test the public API. + + From this two classes are derived to run these tests for both the + sqlalchemy mode (`TestSQLApi`) and the legacy mode (`TestSQLLegacyApi`). + These tests are run with sqlite3. Specific tests for the different + sql flavours are included in `_TestSQLAlchemy`. Notes: flavor can always be passed even in SQLAlchemy mode, @@ -311,7 +336,6 @@ def test_read_sql_iris(self): self._check_iris_loaded_frame(iris_frame) def test_legacy_read_frame(self): - """Test legacy name read_frame""" iris_frame = sql.read_frame( "SELECT * FROM iris", self.conn, flavor='sqlite') self._check_iris_loaded_frame(iris_frame) @@ -337,7 +361,8 @@ def test_to_sql_replace(self): sql.to_sql(self.test_frame1, 'test_frame3', self.conn, flavor='sqlite', if_exists='replace') self.assertTrue( - sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB') + sql.has_table('test_frame3', self.conn, flavor='sqlite'), + 'Table not written to DB') num_entries = len(self.test_frame1) num_rows = self._count_rows('test_frame3') @@ -353,7 +378,8 @@ def test_to_sql_append(self): sql.to_sql(self.test_frame1, 'test_frame4', self.conn, flavor='sqlite', if_exists='append') self.assertTrue( - sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB') + 
sql.has_table('test_frame4', self.conn, flavor='sqlite'), + 'Table not written to DB') num_entries = 2 * len(self.test_frame1) num_rows = self._count_rows('test_frame4') @@ -374,12 +400,13 @@ def test_to_sql_panel(self): 'test_panel', self.conn, flavor='sqlite') def test_legacy_write_frame(self): - """Test legacy write frame name. - Assume that functionality is already tested above so just do quick check that it basically works""" - sql.write_frame( - self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite') + # Assume that functionality is already tested above so just do + # quick check that it basically works + sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn, + flavor='sqlite') self.assertTrue( - sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB') + sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), + 'Table not written to DB') def test_roundtrip(self): sql.to_sql(self.test_frame1, 'test_frame_roundtrip', @@ -410,7 +437,7 @@ def test_tquery(self): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) def test_date_parsing(self): - """ Test date parsing in read_sql """ + # Test date parsing in read_sq # No Parsing df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, flavor='sqlite') @@ -444,19 +471,17 @@ def test_date_parsing(self): "IntDateCol loaded with incorrect type") def test_date_and_index(self): - """ Test case where same column appears in parse_date and index_col""" + # Test case where same column appears in parse_date and index_col df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, flavor='sqlite', index_col='DateCol', parse_dates=['DateCol', 'IntDateCol']) - self.assertTrue( - issubclass(df.index.dtype.type, np.datetime64), - "DateCol loaded with incorrect type") + self.assertTrue(issubclass(df.index.dtype.type, np.datetime64), + "DateCol loaded with incorrect type") - self.assertTrue( - issubclass(df.IntDateCol.dtype.type, np.datetime64), 
- "IntDateCol loaded with incorrect type") + self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), + "IntDateCol loaded with incorrect type") def test_to_sql_index_label(self): temp_frame = DataFrame({'col1': range(4)}) @@ -491,7 +516,7 @@ def test_to_sql_index_label(self): def test_to_sql_index_label_multiindex(self): temp_frame = DataFrame({'col1': range(4)}, index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')])) - + # no index name, defaults to 'level_0' and 'level_1' sql.to_sql(temp_frame, 'test_index_label', self.conn) frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) @@ -527,8 +552,12 @@ def test_to_sql_index_label_multiindex(self): class TestSQLApi(_TestSQLApi): + """ + Test the public API as it would be used directly + + Tests for `read_sql_table` are included here, as this is specific for the + sqlalchemy mode. - """Test the public API as it would be used directly """ flavor = 'sqlite' @@ -574,15 +603,16 @@ def test_read_sql_delegate(self): tm.assert_frame_equal(iris_frame1, iris_frame2, "read_sql and read_sql_query have not the same" " result with a query") - + iris_frame1 = sql.read_sql_table('iris', self.conn) iris_frame2 = sql.read_sql('iris', self.conn) tm.assert_frame_equal(iris_frame1, iris_frame2) class TestSQLLegacyApi(_TestSQLApi): + """ + Test the public legacy API - """Test the public legacy API """ flavor = 'sqlite' @@ -600,35 +630,21 @@ def _load_test2_data(self): self.test_frame2 = DataFrame(data, columns=columns) def test_sql_open_close(self): - """ - Test if the IO in the database still work if the connection - is closed between the writing and reading (as in many real - situations). - """ + # Test if the IO in the database still work if the connection closed + # between the writing and reading (as in many real situations). 
self._load_test2_data() with tm.ensure_clean() as name: conn = self.connect(name) - - sql.to_sql( - self.test_frame2, - "test_frame2_legacy", - conn, - flavor="sqlite", - index=False, - ) - + sql.to_sql(self.test_frame2, "test_frame2_legacy", conn, + flavor="sqlite", index=False) conn.close() - conn = self.connect(name) - - result = sql.read_sql_query( - "SELECT * FROM test_frame2_legacy;", - conn, - flavor="sqlite", - ) + conn = self.connect(name) + result = sql.read_sql_query("SELECT * FROM test_frame2_legacy;", + conn, flavor="sqlite") conn.close() tm.assert_frame_equal(self.test_frame2, result) @@ -641,19 +657,55 @@ def test_read_sql_delegate(self): tm.assert_frame_equal(iris_frame1, iris_frame2, "read_sql and read_sql_query have not the same" " result with a query") - + self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn, flavor=self.flavor) +#------------------------------------------------------------------------------ +#--- Database flavor specific tests + class _TestSQLAlchemy(PandasSQLTest): """ - Base class for testing the sqlalchemy backend. Subclasses for specific - database types are created below. - Assume that sqlalchemy takes case of the DB specifics + Base class for testing the sqlalchemy backend. + + Subclasses for specific database types are created below. Tests that + deviate for each flavor are overwritten there. 
+ """ + flavor = None - def test_read_sql(self): + def setUp(self): + self.setup_import() + self.setup_driver() + self.setup_connect() + + self._load_iris_data() + self._load_raw_sql() + self._load_test1_data() + + def setup_import(self): + # Skip this test if SQLAlchemy not available + if not SQLALCHEMY_INSTALLED: + raise nose.SkipTest('SQLAlchemy not installed') + + def setup_driver(self): + raise NotImplementedError() + + def connect(self): + raise NotImplementedError() + + def setup_connect(self): + try: + self.conn = self.connect() + self.pandasSQL = sql.PandasSQLAlchemy(self.conn) + except sqlalchemy.exc.OperationalError: + raise nose.SkipTest("Can't connect to {0} server".format(self.flavor)) + + def tearDown(self): + raise NotImplementedError() + + def test_aread_sql(self): self._read_sql_iris() def test_read_sql_parameter(self): @@ -744,22 +796,22 @@ def test_default_date_load(self): # IMPORTANT - sqlite has no native date type, so shouldn't parse, but # MySQL SHOULD be converted. 
- self.assertTrue( - issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") + self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64), + "DateCol loaded with incorrect type") def test_date_parsing(self): # No Parsing df = sql.read_sql_table("types_test_data", self.conn) - df = sql.read_sql_table( - "types_test_data", self.conn, parse_dates=['DateCol']) - self.assertTrue( - issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") + df = sql.read_sql_table("types_test_data", self.conn, + parse_dates=['DateCol']) + self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64), + "DateCol loaded with incorrect type") - df = sql.read_sql_table( - "types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) - self.assertTrue( - issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") + df = sql.read_sql_table("types_test_data", self.conn, + parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) + self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64), + "DateCol loaded with incorrect type") df = sql.read_sql_table("types_test_data", self.conn, parse_dates={ 'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}}) @@ -794,27 +846,23 @@ def test_mixed_dtype_insert(self): tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) -class TestSQLAlchemy(_TestSQLAlchemy): +class TestSQLiteAlchemy(_TestSQLAlchemy): """ Test the sqlalchemy backend against an in-memory sqlite database. 
+ """ flavor = 'sqlite' def connect(self): return sqlalchemy.create_engine('sqlite:///:memory:') - def setUp(self): - # Skip this test if SQLAlchemy not available - if not SQLALCHEMY_INSTALLED: - raise nose.SkipTest('SQLAlchemy not installed') - - self.conn = self.connect() - self.pandasSQL = sql.PandasSQLAlchemy(self.conn) - - self._load_iris_data() - self._load_raw_sql() + def setup_driver(self): + # sqlite3 is built-in + pass - self._load_test1_data() + def tearDown(self): + # in memory so tables should not be removed explicitly + pass def test_default_type_conversion(self): df = sql.read_sql_table("types_test_data", self.conn) @@ -842,13 +890,82 @@ def test_default_date_load(self): "DateCol loaded with incorrect type") -# --- Test SQLITE fallback -class TestSQLite(PandasSQLTest): +class TestMySQLAlchemy(_TestSQLAlchemy): + """ + Test the sqlalchemy backend against an MySQL database. - ''' - Test the sqlalchemy backend against an in-memory sqlite database. - Assume that sqlalchemy takes case of the DB specifics - ''' + """ + flavor = 'mysql' + + def connect(self): + return sqlalchemy.create_engine( + 'mysql+{driver}://root@localhost/pandas_nosetest'.format(driver=self.driver)) + + def setup_driver(self): + try: + import pymysql + self.driver = 'pymysql' + except ImportError: + raise nose.SkipTest('pymysql not installed') + + def tearDown(self): + c = self.conn.execute('SHOW TABLES') + for table in c.fetchall(): + self.conn.execute('DROP TABLE %s' % table[0]) + + def test_default_type_conversion(self): + df = sql.read_sql_table("types_test_data", self.conn) + + self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), + "FloatCol loaded with incorrect type") + self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer), + "IntCol loaded with incorrect type") + # MySQL has no real BOOL type (it's an alias for TINYINT) + self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer), + "BoolCol loaded with incorrect type") + + # Int column with NA values 
stays as float + self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), + "IntColWithNull loaded with incorrect type") + # Bool column with NA = int column with NA values => becomes float + self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), + "BoolColWithNull loaded with incorrect type") + + +class TestPostgreSQLAlchemy(_TestSQLAlchemy): + """ + Test the sqlalchemy backend against an PostgreSQL database. + + """ + flavor = 'postgresql' + + def connect(self): + return sqlalchemy.create_engine( + 'postgresql+{driver}://postgres@localhost/pandas_nosetest'.format(driver=self.driver)) + + def setup_driver(self): + try: + import psycopg2 + self.driver = 'psycopg2' + except ImportError: + raise nose.SkipTest('psycopg2 not installed') + + def tearDown(self): + c = self.conn.execute( + "SELECT table_name FROM information_schema.tables" + " WHERE table_schema = 'public'") + for table in c.fetchall(): + self.conn.execute("DROP TABLE %s" % table[0]) + + +#------------------------------------------------------------------------------ +#--- Test Sqlite / MySQL fallback + +class TestSQLiteLegacy(PandasSQLTest): + """ + Test the legacy mode against an in-memory sqlite database. 
+ + """ flavor = 'sqlite' def connect(self): @@ -898,13 +1015,13 @@ def test_create_and_drop_table(self): self.pandasSQL.to_sql(temp_frame, 'drop_test_frame') - self.assertTrue(self.pandasSQL.has_table( - 'drop_test_frame'), 'Table not written to DB') + self.assertTrue(self.pandasSQL.has_table('drop_test_frame'), + 'Table not written to DB') self.pandasSQL.drop_table('drop_test_frame') - self.assertFalse(self.pandasSQL.has_table( - 'drop_test_frame'), 'Table not deleted from DB') + self.assertFalse(self.pandasSQL.has_table('drop_test_frame'), + 'Table not deleted from DB') def test_roundtrip(self): self._roundtrip() @@ -916,7 +1033,11 @@ def test_tquery(self): self._tquery() -class TestMySQL(TestSQLite): +class TestMySQLLegacy(TestSQLiteLegacy): + """ + Test the legacy mode against a MySQL database. + + """ flavor = 'mysql' def drop_table(self, table_name): @@ -960,93 +1081,6 @@ def tearDown(self): self.conn.close() -class TestMySQLAlchemy(_TestSQLAlchemy): - flavor = 'mysql' - - def connect(self): - return sqlalchemy.create_engine( - 'mysql+{driver}://root@localhost/pandas_nosetest'.format(driver=self.driver)) - - def setUp(self): - if not SQLALCHEMY_INSTALLED: - raise nose.SkipTest('SQLAlchemy not installed') - - try: - import pymysql - self.driver = 'pymysql' - except ImportError: - raise nose.SkipTest('pymysql not installed') - - try: - self.conn = self.connect() - self.pandasSQL = sql.PandasSQLAlchemy(self.conn) - except sqlalchemy.exc.OperationalError: - raise nose.SkipTest("Can't connect to MySQL server") - - self._load_iris_data() - self._load_raw_sql() - - self._load_test1_data() - - def tearDown(self): - c = self.conn.execute('SHOW TABLES') - for table in c.fetchall(): - self.conn.execute('DROP TABLE %s' % table[0]) - - def test_default_type_conversion(self): - df = sql.read_sql_table("types_test_data", self.conn) - - self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), - "FloatCol loaded with incorrect type") - 
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer), - "IntCol loaded with incorrect type") - # MySQL has no real BOOL type (it's an alias for TINYINT) - self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer), - "BoolCol loaded with incorrect type") - - # Int column with NA values stays as float - self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), - "IntColWithNull loaded with incorrect type") - # Bool column with NA = int column with NA values => becomes float - self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), - "BoolColWithNull loaded with incorrect type") - - -class TestPostgreSQLAlchemy(_TestSQLAlchemy): - flavor = 'postgresql' - - def connect(self): - return sqlalchemy.create_engine( - 'postgresql+{driver}://postgres@localhost/pandas_nosetest'.format(driver=self.driver)) - - def setUp(self): - if not SQLALCHEMY_INSTALLED: - raise nose.SkipTest('SQLAlchemy not installed') - - try: - import psycopg2 - self.driver = 'psycopg2' - except ImportError: - raise nose.SkipTest('psycopg2 not installed') - - try: - self.conn = self.connect() - self.pandasSQL = sql.PandasSQLAlchemy(self.conn) - except sqlalchemy.exc.OperationalError: - raise nose.SkipTest("Can't connect to PostgreSQL server") - - self._load_iris_data() - self._load_raw_sql() - - self._load_test1_data() - - def tearDown(self): - c = self.conn.execute( - "SELECT table_name FROM information_schema.tables" - " WHERE table_schema = 'public'") - for table in c.fetchall(): - self.conn.execute("DROP TABLE %s" % table[0]) - if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
- add some explanation of structure of tests - general clean-up (some pep8, docstrings, etc) - move some methods to _TestSQLAlchemy - use standard quoting for sqlite I didn't change any tests, so nothing controversial I think. Wanted to do this first, and then in a subsequent PR try to increase the test coverage.
https://api.github.com/repos/pandas-dev/pandas/pulls/6912
2014-04-19T14:34:29Z
2014-04-21T19:50:30Z
2014-04-21T19:50:30Z
2014-06-25T08:40:32Z
ENH: Implement Panel pct_change
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index fb616c5267e3c..ebda0cde9fb5c 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -26,9 +26,9 @@ Statistical functions Percent Change ~~~~~~~~~~~~~~ -Both ``Series`` and ``DataFrame`` has a method ``pct_change`` to compute the +``Series``, ``DataFrame``, and ``Panel`` all have a method ``pct_change`` to compute the percent change over a given number of periods (using ``fill_method`` to fill -NA/null values). +NA/null values *before* computing the percent change). .. ipython:: python diff --git a/doc/source/release.rst b/doc/source/release.rst index 857a4d237f423..12dc1fec5f969 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -63,7 +63,7 @@ New features noon, January 1, 4713 BC. Because nanoseconds are used to define the time in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) - Added error bar support to the ``.plot`` method of ``DataFrame`` and ``Series`` (:issue:`3796`) - +- Implemented ``Panel.pct_change`` (:issue:`6904`) API Changes ~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 11296a43e230d..187a757f53d45 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -470,6 +470,7 @@ Enhancements - :ref:`Holidays Calendars<timeseries.holiday>` are now available and can be used with CustomBusinessDay (:issue:`6719`) - ``Float64Index`` is now backed by a ``float64`` dtype ndarray instead of an ``object`` dtype array (:issue:`6471`). 
+- Implemented ``Panel.pct_change`` (:issue:`6904`) Performance ~~~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7e5e125034189..ebcdc600f6751 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3455,10 +3455,8 @@ def _convert_timedeltas(x): return np.abs(self) - def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, - **kwds): - """ - Percent change over given number of periods + _shared_docs['pct_change'] = """ + Percent change over given number of periods. Parameters ---------- @@ -3473,14 +3471,27 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, Returns ------- - chg : same type as caller + chg : %(klass)s + + Notes + ----- + + By default, the percentage change is calculated along the stat + axis: 0, or ``Index``, for ``DataFrame`` and 1, or ``minor`` for + ``Panel``. You can change this with the ``axis`` keyword argument. """ + + @Appender(_shared_docs['pct_change'] % _shared_doc_kwargs) + def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, + **kwds): # TODO: Not sure if above is correct - need someone to confirm. + axis = self._get_axis_number(kwds.pop('axis', self._stat_axis_name)) if fill_method is None: data = self else: data = self.fillna(method=fill_method, limit=limit) - rs = data / data.shift(periods=periods, freq=freq, **kwds) - 1 + + rs = data.div(data.shift(periods, freq=freq, axis=axis, **kwds)) - 1 if freq is None: mask = com.isnull(_values_from_object(self)) np.putmask(rs.values, mask, np.nan) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index eeb0e292c01d4..f1c52a8facc0a 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1116,7 +1116,7 @@ def count(self, axis='major'): def shift(self, lags, freq=None, axis='major'): """ - Shift major or minor axis by specified number of leads/lags. + Shift major or minor axis by specified number of leads/lags. 
Parameters ---------- diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 198e600e8edc7..176ef13d23d94 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1684,6 +1684,60 @@ def test_tshift(self): no_freq = panel.ix[:, [0, 5, 7], :] self.assertRaises(ValueError, no_freq.tshift) + def test_pct_change(self): + df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]}) + df2 = df1 + 1 + df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]}) + wp = Panel({'i1': df1, 'i2': df2, 'i3': df3}) + # major, 1 + result = wp.pct_change() # axis='major' + expected = Panel({'i1': df1.pct_change(), + 'i2': df2.pct_change(), + 'i3': df3.pct_change()}) + assert_panel_equal(result, expected) + result = wp.pct_change(axis=1) + assert_panel_equal(result, expected) + # major, 2 + result = wp.pct_change(periods=2) + expected = Panel({'i1': df1.pct_change(2), + 'i2': df2.pct_change(2), + 'i3': df3.pct_change(2)}) + assert_panel_equal(result, expected) + # minor, 1 + result = wp.pct_change(axis='minor') + expected = Panel({'i1': df1.pct_change(axis=1), + 'i2': df2.pct_change(axis=1), + 'i3': df3.pct_change(axis=1)}) + assert_panel_equal(result, expected) + result = wp.pct_change(axis=2) + assert_panel_equal(result, expected) + # minor, 2 + result = wp.pct_change(periods=2, axis='minor') + expected = Panel({'i1': df1.pct_change(periods=2, axis=1), + 'i2': df2.pct_change(periods=2, axis=1), + 'i3': df3.pct_change(periods=2, axis=1)}) + assert_panel_equal(result, expected) + # items, 1 + result = wp.pct_change(axis='items') + expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan], + 'c2': [np.nan, np.nan, np.nan]}), + 'i2': DataFrame({'c1': [1, 0.5, .2], + 'c2': [1./3, 0.25, 1./6]}), + 'i3': DataFrame({'c1': [.5, 1./3, 1./6], + 'c2': [.25, .2, 1./7]})}) + assert_panel_equal(result, expected) + result = wp.pct_change(axis=0) + assert_panel_equal(result, expected) + # items, 2 + result = wp.pct_change(periods=2, axis='items') + expected = 
Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan], + 'c2': [np.nan, np.nan, np.nan]}), + 'i2': DataFrame({'c1': [np.nan, np.nan, np.nan], + 'c2': [np.nan, np.nan, np.nan]}), + 'i3': DataFrame({'c1': [2, 1, .4], + 'c2': [2./3, .5, 1./3]})}) + assert_panel_equal(result, expected) + def test_multiindex_get(self): ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)], names=['first', 'second']) diff --git a/vb_suite/panel_methods.py b/vb_suite/panel_methods.py index 6710be760e2df..45790feb4a58e 100644 --- a/vb_suite/panel_methods.py +++ b/vb_suite/panel_methods.py @@ -17,3 +17,12 @@ panel_shift_minor = Benchmark('panel.shift(1, axis=minor)', setup, start_date=datetime(2012, 1, 12)) + +panel_pct_change_major = Benchmark('panel.pct_change(1, axis="major")', setup, + start_date=datetime(2014, 4, 19)) + +panel_pct_change_minor = Benchmark('panel.pct_change(1, axis="minor")', setup, + start_date=datetime(2014, 4, 19)) + +panel_pct_change_items = Benchmark('panel.pct_change(1, axis="items")', setup, + start_date=datetime(2014, 4, 19))
Closes https://github.com/pydata/pandas/issues/6904 There's just a bit of extra index handling that needs to be done before moving on to `generic.pct_change()`. I had to adjust that to use the `.div` and `.sub` ops instead of `/` and `-` to work with panels. I wasn't sure why axis wasn't included as an actual names keyword arg. `generic` just looks for it in **kwargs. I did the same in panel. A related issue was the `Panel.shift()` has a different argument signature than `generic.shift()`. I can make those consistent and put in a deprecation warning in this issue or in a new one.
https://api.github.com/repos/pandas-dev/pandas/pulls/6909
2014-04-18T13:10:39Z
2014-04-21T14:15:49Z
2014-04-21T14:15:49Z
2017-04-05T02:08:52Z
BUG: TimeGrouper outputs different result by column order
diff --git a/doc/source/release.rst b/doc/source/release.rst index cc8e271d62183..6d8f915e2ebb8 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -294,6 +294,7 @@ Bug Fixes - Bug in TimeGrouper/resample when presented with a non-monotonic DatetimeIndex would return invalid results. (:issue:`4161`) - Bug in index name propogation in TimeGrouper/resample (:issue:`4161`) - TimeGrouper has a more compatible API to the rest of the groupers (e.g. ``groups`` was missing) (:issue:`3881`) +- Bug in multiple grouping with a TimeGrouper depending on target column order (:issue:`6764`) - Bug in ``pd.eval`` when parsing strings with possible tokens like ``'&'`` (:issue:`6351`) - Bug correctly handle placements of ``-inf`` in Panels when dividing by integer 0 (:issue:`6178`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index a32b25312d4ba..c0222ad248e0c 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -263,11 +263,11 @@ def _set_grouper(self, obj, sort=False): if not (level == 0 or level == ax.name): raise ValueError("The grouper level {0} is not valid".format(level)) - # possibly sort - if (self.sort or sort) and not ax.is_monotonic: - indexer = self.indexer = ax.argsort(kind='quicksort') - ax = ax.take(indexer) - obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False) + # possibly sort + if (self.sort or sort) and not ax.is_monotonic: + indexer = self.indexer = ax.argsort(kind='quicksort') + ax = ax.take(indexer) + obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False) self.obj = obj self.grouper = ax diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index c0b7425485cba..22d92c7b19fe1 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2914,7 +2914,7 @@ def test_groupby_with_timegrouper(self): # TimeGrouper requires a sorted index # also verifies that the resultant index has the correct name import datetime as DT - df = DataFrame({ + df_original 
= DataFrame({ 'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(), 'Quantity': [18,3,5,1,9,3], 'Date' : [ @@ -2925,29 +2925,34 @@ def test_groupby_with_timegrouper(self): DT.datetime(2013,12,2,12,0), DT.datetime(2013,9,2,14,0), ]}) - df = df.set_index(['Date']) + + # GH 6908 change target column's order + df_reordered = df_original.sort(columns='Quantity') - expected = DataFrame({ 'Quantity' : np.nan }, - index=date_range('20130901 13:00:00','20131205 13:00:00', - freq='5D',name='Date',closed='left')) - expected.iloc[[0,6,18],0] = np.array([24.,6.,9.],dtype='float64') + for df in [df_original, df_reordered]: + df = df.set_index(['Date']) - result1 = df.resample('5D',how=sum) - assert_frame_equal(result1, expected) + expected = DataFrame({ 'Quantity' : np.nan }, + index=date_range('20130901 13:00:00','20131205 13:00:00', + freq='5D',name='Date',closed='left')) + expected.iloc[[0,6,18],0] = np.array([24.,6.,9.],dtype='float64') - df_sorted = df.sort_index() - result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum() - assert_frame_equal(result2, expected) + result1 = df.resample('5D',how=sum) + assert_frame_equal(result1, expected) - result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum() - assert_frame_equal(result3, expected) + df_sorted = df.sort_index() + result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum() + assert_frame_equal(result2, expected) + + result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum() + assert_frame_equal(result3, expected) def test_groupby_with_timegrouper_methods(self): # GH 3881 # make sure API of timegrouper conforms import datetime as DT - df = pd.DataFrame({ + df_original = pd.DataFrame({ 'Branch' : 'A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(), 'Quantity': [1,3,5,8,9,3], @@ -2960,13 +2965,16 @@ def test_groupby_with_timegrouper_methods(self): DT.datetime(2013,12,2,14,0), ]}) - df = df.set_index('Date', drop=False) - g = df.groupby(pd.TimeGrouper('6M')) - self.assertTrue(g.group_keys) - 
self.assertTrue(isinstance(g.grouper,pd.core.groupby.BinGrouper)) - groups = g.groups - self.assertTrue(isinstance(groups,dict)) - self.assertTrue(len(groups) == 3) + df_sorted = df_original.sort(columns='Quantity', ascending=False) + + for df in [df_original, df_sorted]: + df = df.set_index('Date', drop=False) + g = df.groupby(pd.TimeGrouper('6M')) + self.assertTrue(g.group_keys) + self.assertTrue(isinstance(g.grouper,pd.core.groupby.BinGrouper)) + groups = g.groups + self.assertTrue(isinstance(groups,dict)) + self.assertTrue(len(groups) == 3) def test_timegrouper_with_reg_groups(self): @@ -2975,7 +2983,7 @@ def test_timegrouper_with_reg_groups(self): import datetime as DT - df = DataFrame({ + df_original = DataFrame({ 'Branch' : 'A A A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), 'Quantity': [1,3,5,1,8,1,9,3], @@ -2990,32 +2998,34 @@ def test_timegrouper_with_reg_groups(self): DT.datetime(2013,12,2,14,0), ]}).set_index('Date') - expected = DataFrame({ - 'Buyer': 'Carl Joe Mark'.split(), - 'Quantity': [10,18,3], - 'Date' : [ - DT.datetime(2013,12,31,0,0), - DT.datetime(2013,12,31,0,0), - DT.datetime(2013,12,31,0,0), - ]}).set_index(['Date','Buyer']) - - result = df.groupby([pd.Grouper(freq='A'),'Buyer']).sum() - assert_frame_equal(result,expected) - - expected = DataFrame({ - 'Buyer': 'Carl Mark Carl Joe'.split(), - 'Quantity': [1,3,9,18], - 'Date' : [ - DT.datetime(2013,1,1,0,0), - DT.datetime(2013,1,1,0,0), - DT.datetime(2013,7,1,0,0), - DT.datetime(2013,7,1,0,0), - ]}).set_index(['Date','Buyer']) - - result = df.groupby([pd.Grouper(freq='6MS'),'Buyer']).sum() - assert_frame_equal(result,expected) - - df = DataFrame({ + df_sorted = df_original.sort(columns='Quantity', ascending=False) + + for df in [df_original, df_sorted]: + expected = DataFrame({ + 'Buyer': 'Carl Joe Mark'.split(), + 'Quantity': [10,18,3], + 'Date' : [ + DT.datetime(2013,12,31,0,0), + DT.datetime(2013,12,31,0,0), + DT.datetime(2013,12,31,0,0), + 
]}).set_index(['Date','Buyer']) + + result = df.groupby([pd.Grouper(freq='A'),'Buyer']).sum() + assert_frame_equal(result,expected) + + expected = DataFrame({ + 'Buyer': 'Carl Mark Carl Joe'.split(), + 'Quantity': [1,3,9,18], + 'Date' : [ + DT.datetime(2013,1,1,0,0), + DT.datetime(2013,1,1,0,0), + DT.datetime(2013,7,1,0,0), + DT.datetime(2013,7,1,0,0), + ]}).set_index(['Date','Buyer']) + result = df.groupby([pd.Grouper(freq='6MS'),'Buyer']).sum() + assert_frame_equal(result,expected) + + df_original = DataFrame({ 'Branch' : 'A A A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), 'Quantity': [1,3,5,1,8,1,9,3], @@ -3030,81 +3040,105 @@ def test_timegrouper_with_reg_groups(self): DT.datetime(2013,10,2,14,0), ]}).set_index('Date') - expected = DataFrame({ - 'Buyer': 'Carl Joe Mark Carl Joe'.split(), - 'Quantity': [6,8,3,4,10], - 'Date' : [ - DT.datetime(2013,10,1,0,0), - DT.datetime(2013,10,1,0,0), - DT.datetime(2013,10,1,0,0), - DT.datetime(2013,10,2,0,0), - DT.datetime(2013,10,2,0,0), - ]}).set_index(['Date','Buyer']) - - result = df.groupby([pd.Grouper(freq='1D'),'Buyer']).sum() - assert_frame_equal(result,expected) - - result = df.groupby([pd.Grouper(freq='1M'),'Buyer']).sum() - expected = DataFrame({ - 'Buyer': 'Carl Joe Mark'.split(), - 'Quantity': [10,18,3], - 'Date' : [ - DT.datetime(2013,10,31,0,0), - DT.datetime(2013,10,31,0,0), - DT.datetime(2013,10,31,0,0), - ]}).set_index(['Date','Buyer']) - assert_frame_equal(result,expected) - - # passing the name - df = df.reset_index() - result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum() - assert_frame_equal(result,expected) - - self.assertRaises(KeyError, lambda : df.groupby([pd.Grouper(freq='1M',key='foo'),'Buyer']).sum()) - - # passing the level - df = df.set_index('Date') - result = df.groupby([pd.Grouper(freq='1M',level='Date'),'Buyer']).sum() - assert_frame_equal(result,expected) - result = df.groupby([pd.Grouper(freq='1M',level=0),'Buyer']).sum() - 
assert_frame_equal(result,expected) - - self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',level='foo'),'Buyer']).sum()) - - # multi names - df = df.copy() - df['Date'] = df.index + pd.offsets.MonthEnd(2) - result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum() - expected = DataFrame({ - 'Buyer': 'Carl Joe Mark'.split(), - 'Quantity': [10,18,3], - 'Date' : [ - DT.datetime(2013,11,30,0,0), - DT.datetime(2013,11,30,0,0), - DT.datetime(2013,11,30,0,0), - ]}).set_index(['Date','Buyer']) - assert_frame_equal(result,expected) - - # error as we have both a level and a name! - self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',key='Date',level='Date'),'Buyer']).sum()) - + df_sorted = df_original.sort(columns='Quantity', ascending=False) + for df in [df_original, df_sorted]: + + expected = DataFrame({ + 'Buyer': 'Carl Joe Mark Carl Joe'.split(), + 'Quantity': [6,8,3,4,10], + 'Date' : [ + DT.datetime(2013,10,1,0,0), + DT.datetime(2013,10,1,0,0), + DT.datetime(2013,10,1,0,0), + DT.datetime(2013,10,2,0,0), + DT.datetime(2013,10,2,0,0), + ]}).set_index(['Date','Buyer']) + + result = df.groupby([pd.Grouper(freq='1D'),'Buyer']).sum() + assert_frame_equal(result,expected) + + result = df.groupby([pd.Grouper(freq='1M'),'Buyer']).sum() + expected = DataFrame({ + 'Buyer': 'Carl Joe Mark'.split(), + 'Quantity': [10,18,3], + 'Date' : [ + DT.datetime(2013,10,31,0,0), + DT.datetime(2013,10,31,0,0), + DT.datetime(2013,10,31,0,0), + ]}).set_index(['Date','Buyer']) + assert_frame_equal(result,expected) + + # passing the name + df = df.reset_index() + result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum() + assert_frame_equal(result,expected) + + self.assertRaises(KeyError, lambda : df.groupby([pd.Grouper(freq='1M',key='foo'),'Buyer']).sum()) + + # passing the level + df = df.set_index('Date') + result = df.groupby([pd.Grouper(freq='1M',level='Date'),'Buyer']).sum() + assert_frame_equal(result,expected) + result = 
df.groupby([pd.Grouper(freq='1M',level=0),'Buyer']).sum() + assert_frame_equal(result,expected) + + self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',level='foo'),'Buyer']).sum()) + + # multi names + df = df.copy() + df['Date'] = df.index + pd.offsets.MonthEnd(2) + result = df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum() + expected = DataFrame({ + 'Buyer': 'Carl Joe Mark'.split(), + 'Quantity': [10,18,3], + 'Date' : [ + DT.datetime(2013,11,30,0,0), + DT.datetime(2013,11,30,0,0), + DT.datetime(2013,11,30,0,0), + ]}).set_index(['Date','Buyer']) + assert_frame_equal(result,expected) + + # error as we have both a level and a name! + self.assertRaises(ValueError, lambda : df.groupby([pd.Grouper(freq='1M',key='Date',level='Date'),'Buyer']).sum()) + + + # single groupers + expected = DataFrame({ 'Quantity' : [31], + 'Date' : [DT.datetime(2013,10,31,0,0)] }).set_index('Date') + result = df.groupby(pd.Grouper(freq='1M')).sum() + assert_frame_equal(result, expected) - # single groupers - expected = DataFrame({ 'Quantity' : [31], - 'Date' : [DT.datetime(2013,10,31,0,0)] }).set_index('Date') - result = df.groupby(pd.Grouper(freq='1M')).sum() - assert_frame_equal(result, expected) + result = df.groupby([pd.Grouper(freq='1M')]).sum() + assert_frame_equal(result, expected) - result = df.groupby([pd.Grouper(freq='1M')]).sum() - assert_frame_equal(result, expected) + expected = DataFrame({ 'Quantity' : [31], + 'Date' : [DT.datetime(2013,11,30,0,0)] }).set_index('Date') + result = df.groupby(pd.Grouper(freq='1M',key='Date')).sum() + assert_frame_equal(result, expected) - expected = DataFrame({ 'Quantity' : [31], - 'Date' : [DT.datetime(2013,11,30,0,0)] }).set_index('Date') - result = df.groupby(pd.Grouper(freq='1M',key='Date')).sum() - assert_frame_equal(result, expected) + result = df.groupby([pd.Grouper(freq='1M',key='Date')]).sum() + assert_frame_equal(result, expected) - result = df.groupby([pd.Grouper(freq='1M',key='Date')]).sum() - 
assert_frame_equal(result, expected) + # GH 6764 multiple grouping with/without sort + df = DataFrame({ + 'date' : pd.to_datetime([ + '20121002','20121007','20130130','20130202','20130305','20121002', + '20121207','20130130','20130202','20130305','20130202','20130305']), + 'user_id' : [1,1,1,1,1,3,3,3,5,5,5,5], + 'whole_cost' : [1790,364,280,259,201,623,90,312,359,301,359,801], + 'cost1' : [12,15,10,24,39,1,0,90,45,34,1,12] }).set_index('date') + + for freq in ['D', 'M', 'A', 'Q-APR']: + expected = df.groupby('user_id')['whole_cost'].resample( + freq, how='sum').dropna().reorder_levels( + ['date','user_id']).sortlevel().astype('int64') + expected.name = 'whole_cost' + + result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum() + assert_series_equal(result1, expected) + + result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])['whole_cost'].sum() + assert_series_equal(result2, expected) def test_cumcount(self): df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A']) diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 7f243c20fe56e..23a6ae0982771 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -138,7 +138,8 @@ def _get_binner_for_grouping(self, obj): # since we may have had to sort # may need to reorder groups here if self.indexer is not None: - grouper = grouper.take(self.indexer) + indexer = self.indexer.argsort(kind='quicksort') + grouper = grouper.take(indexer) return grouper def _get_time_bins(self, ax): @@ -161,7 +162,7 @@ def _get_time_bins(self, ax): # a little hack trimmed = False - if (len(binner) > 2 and binner[-2] == ax[-1] and + if (len(binner) > 2 and binner[-2] == ax.max() and self.closed == 'right'): binner = binner[:-1] @@ -204,7 +205,7 @@ def _adjust_bin_edges(self, binner, ax_values): bin_edges = bin_edges + day_nanos - 1 # intraday values on last day - if bin_edges[-2] > ax_values[-1]: + if bin_edges[-2] > ax_values.max(): bin_edges = 
bin_edges[:-1] binner = binner[:-1] @@ -320,8 +321,8 @@ def _resample_periods(self): # Get the fill indexer indexer = memb.get_indexer(new_index, method=self.fill_method, limit=self.limit) - return _take_new_index(obj, indexer, new_index, axis=self.axis) + else: raise ValueError('Frequency %s cannot be resampled to %s' % (axlabels.freq, self.freq)) @@ -352,7 +353,7 @@ def _get_range_edges(axis, offset, closed='left', base=0): return _adjust_dates_anchored(axis[0], axis[-1], offset, closed=closed, base=base) - first, last = axis[0], axis[-1] + first, last = axis.min(), axis.max() if not isinstance(offset, Tick): # and first.time() != last.time(): # hack! first = tools.normalize_date(first)
closes #6764 `TimeGrouper` may output incorrect results depending on the target column order. The problem seems to be caused by 2 parts. - `TimeGrouper._get_time_bins` and related methods expects sorted values input. - `BinGrouper.get_iterator` expects sorted data input. ``` >>> df = pd.DataFrame({'Branch' : 'A A A A A A A B'.split(), 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), 'Quantity': [1,3,5,1,8,1,9,3], 'Date' : [ datetime(2013,1,1,13,0), datetime(2013,1,1,13,5), datetime(2013,10,1,20,0), datetime(2013,10,2,10,0), datetime(2013,10,1,20,0), datetime(2013,10,2,10,0), datetime(2013,12,2,12,0), datetime(2013,12,2,14,0),]}) # correct >>> df.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum() Quantity Date Buyer 2013-01-31 Carl 1 Mark 3 2013-10-31 Carl 6 Joe 9 2013-12-31 Carl 3 Joe 9 [6 rows x 1 columns] >>> df_sorted = df.sort('Quantity') # change "Date" column unsorted # incorrect >>> df_sorted.groupby([pd.Grouper(freq='1M',key='Date'),'Buyer']).sum() Quantity Date Buyer 2013-01-31 Carl 1 2013-10-31 Carl 1 Joe 1 Mark 3 2013-12-31 Carl 8 Joe 17 [6 rows x 1 columns] >>> df_sorted.groupby([pd.Grouper(freq='1M',key='Date', sort=True),'Buyer']).sum() # same incorrect result ``` ``` # correct >>> df.groupby([pd.Grouper(freq='6M',key='Date'),'Buyer']).sum() Quantity Date Buyer 2013-01-31 Carl 1 Mark 3 2014-01-31 Carl 9 Joe 18 [4 rows x 1 columns] # incorrect >>> df_sorted.groupby([pd.Grouper(freq='6M',key='Date'),'Buyer']).sum() Quantity Date Buyer 2013-01-31 Carl 1 2014-01-31 Carl 9 Joe 18 Mark 3 [4 rows x 1 columns] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6908
2014-04-18T12:34:42Z
2014-04-19T12:47:08Z
2014-04-19T12:47:08Z
2014-06-21T21:47:54Z
DOC: note about query strings in HDFStore selection
diff --git a/doc/source/io.rst b/doc/source/io.rst index b6bb5718e37f9..b0043228471b6 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2474,6 +2474,37 @@ The right-hand side of the sub-expression (after a comparsion operator) can be: - lists, e.g. ``"['A','B']"`` - variables that are defined in the local names space, e.g. ``date`` +.. note:: + + Passing a string to a query by interpolating it into the query + expression is not recommended. Simply assign the string of interest to a + variable and use that variable in an expression. For example, do this + + .. code-block:: python + + string = "HolyMoly'" + store.select('df', 'index == string') + + instead of this + + .. code-block:: python + + string = "HolyMoly'" + store.select('df', 'index == %s' % string) + + The latter will **not** work and will raise a ``SyntaxError``.Note that + there's a single quote followed by a double quote in the ``string`` + variable. + + If you *must* interpolate, use the ``'%r'`` format specifier + + .. code-block:: python + + store.select('df', 'index == %r' % string) + + which will quote ``string``. + + Here are some examples: .. ipython:: python
closes #6901
https://api.github.com/repos/pandas-dev/pandas/pulls/6905
2014-04-17T22:05:28Z
2014-04-17T23:49:30Z
2014-04-17T23:49:29Z
2014-07-16T09:02:14Z
API: Stop modifying SQL column and names, and warn when pertinent.
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index bed4c2da61c59..158ef7b7ed791 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -7,6 +7,7 @@ import warnings import itertools +import re import numpy as np import pandas.core.common as com @@ -38,11 +39,6 @@ def _convert_params(sql, params): return args -def _safe_col_name(col_name): - #TODO: probably want to forbid database reserved names, such as "database" - return col_name.strip().replace(' ', '_') - - def _handle_date_column(col, format=None): if isinstance(format, dict): return to_datetime(col, **format) @@ -587,11 +583,11 @@ def _index_name(self, index, index_label): def _create_table_statement(self): from sqlalchemy import Table, Column - safe_columns = map(_safe_col_name, self.frame.dtypes.index) + columns = list(map(str, self.frame.columns)) column_types = map(self._sqlalchemy_type, self.frame.dtypes) columns = [Column(name, typ) - for name, typ in zip(safe_columns, column_types)] + for name, typ in zip(columns, column_types)] if self.index is not None: for i, idx_label in enumerate(self.index[::-1]): @@ -836,6 +832,11 @@ def _create_sql_schema(self, frame, table_name): } +_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed." + "In pandas versions < 0.14, spaces were converted to " + "underscores.") + + class PandasSQLTableLegacy(PandasSQLTable): """Patch the PandasSQLTable for legacy support. Instead of a table variable just use the Create Table @@ -847,19 +848,18 @@ def create(self): self.pd_sql.execute(self.table) def insert_statement(self): - # Replace spaces in DataFrame column names with _. 
- safe_names = [_safe_col_name(n) for n in self.frame.dtypes.index] + names = list(map(str, self.frame.columns)) flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote char br_r = _SQL_SYMB[flv]['br_r'] # right val quote char wld = _SQL_SYMB[flv]['wld'] # wildcard char if self.index is not None: - [safe_names.insert(0, idx) for idx in self.index[::-1]] + [names.insert(0, idx) for idx in self.index[::-1]] - bracketed_names = [br_l + column + br_r for column in safe_names] + bracketed_names = [br_l + column + br_r for column in names] col_names = ','.join(bracketed_names) - wildcards = ','.join([wld] * len(safe_names)) + wildcards = ','.join([wld] * len(names)) insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % ( self.name, col_names, wildcards) return insert_statement @@ -881,13 +881,15 @@ def insert(self): def _create_table_statement(self): "Return a CREATE TABLE statement to suit the contents of a DataFrame." - # Replace spaces in DataFrame column names with _. - safe_columns = [_safe_col_name(n) for n in self.frame.dtypes.index] + columns = list(map(str, self.frame.columns)) + pat = re.compile('\s+') + if any(map(pat.search, columns)): + warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes] if self.index is not None: for i, idx_label in enumerate(self.index[::-1]): - safe_columns.insert(0, idx_label) + columns.insert(0, idx_label) column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype)) flv = self.pd_sql.flavor @@ -898,7 +900,7 @@ def _create_table_statement(self): col_template = br_l + '%s' + br_r + ' %s' columns = ',\n '.join(col_template % - x for x in zip(safe_columns, column_types)) + x for x in zip(columns, column_types)) template = """CREATE TABLE %(name)s ( %(columns)s )""" diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 9622f9d8790cb..ad3fa57ab48a7 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py 
@@ -550,6 +550,11 @@ def test_to_sql_index_label_multiindex(self): 'test_index_label', self.conn, if_exists='replace', index_label='C') + def test_integer_col_names(self): + df = DataFrame([[1, 2], [3, 4]], columns=[0, 1]) + sql.to_sql(df, "test_frame_integer_col_names", self.conn, + if_exists='replace') + class TestSQLApi(_TestSQLApi): """ @@ -661,10 +666,19 @@ def test_read_sql_delegate(self): self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn, flavor=self.flavor) + def test_safe_names_warning(self): + # GH 6798 + df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space + # warns on create table with spaces in names + with tm.assert_produces_warning(): + sql.to_sql(df, "test_frame3_legacy", self.conn, + flavor="sqlite", index=False) + #------------------------------------------------------------------------------ #--- Database flavor specific tests + class _TestSQLAlchemy(PandasSQLTest): """ Base class for testing the sqlalchemy backend.
Closes #6796 If any columns names written `to_sql` via a legacy connection _would_ have been modified by previous versions of pandas, a warning is issued, both on table creation and row insertion. But no names are modified. There is a stray warning message in nosetests. Otherwise good, I think. Does this reflect our consensus?
https://api.github.com/repos/pandas-dev/pandas/pulls/6902
2014-04-17T14:07:00Z
2014-04-22T14:33:16Z
2014-04-22T14:33:15Z
2014-08-21T07:57:05Z
ENH (GH6568) Add option info_verbose
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 0e449cb35eaaa..7d6efdb85b94a 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1666,3 +1666,35 @@ columns of DataFrame objects are shown by default. If ``max_columns`` is set to 0 (the default, in fact), the library will attempt to fit the DataFrame's string representation into the current terminal width, and defaulting to the summary view otherwise. + +The visualization of a dataframe can be controled directly with two +options - `large_repr` and `info_verbose`. Depending on the size of +the DataFrame it might be more convenient to print a summary +representation only. + +.. ipython:: python + + df_lrge = DataFrame(columns=['a','b','c'], + index=DatetimeIndex(start='19900101',end='20000101',freq='BM')) + +The default display for a DataFrame is 'truncate'. This prints all +elements of the df up to max_rows/max_columns. + +.. ipython:: python + + with option_context("display.large_repr",'truncate'): + print df_lrge + +To get a more concise representation of the df + +.. ipython:: python + + with option_context("display.large_repr",'info'): + print df_lrge + +Further reduce summary view to one line for all columns + +.. ipython:: python + + with option_context("display.large_repr",'info',"display.info_verbose",False): + print df_lrge diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 6460662553d6b..f311da9cfb402 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -45,6 +45,11 @@ As of 0.13, these are the relevant options, all under the `display` namespace, - large_repr (default 'truncate'): when a :class:`~pandas.DataFrame` exceeds max_columns or max_rows, it can be displayed either as a truncated table or, with this set to 'info', as a short summary view. +- info_verbose (default True): When large_repr is set to 'info', the + index of a :class:`~pandas.DataFrame` will be displayed as a short + summary, while columns are still listed individually. 
When + 'info_verbose' is set to False, the columns will be displayed in a + short summary view also. - max_columns (default 20): max dataframe columns to display. - max_rows (default 60): max dataframe rows display. - show_dimensions (default True): controls the display of the row/col counts footer. diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 9533c0921e1e3..be8c7ec897e37 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -183,8 +183,17 @@ df.info() (the behaviour in earlier versions of pandas). """ +pc_info_verbose_doc = """ +: boolean + + Relevant when `large_repr` set to `info`. Dictates whether a df + will be displayed as df.info(verbose=True) or + df.info(verbose=False). When `False`, columns are displayed in a + summary line, instead of listing all columns. +""" + pc_mpl_style_doc = """ -: bool +: boolean Setting this to 'default' will modify the rcParams used by matplotlib to give plots a more pleasing visual style by default. @@ -230,6 +239,8 @@ def mpl_style_cb(key): validator=is_instance_factory([type(None), int])) cf.register_option('large_repr', 'truncate', pc_large_repr_doc, validator=is_one_of_factory(['truncate', 'info'])) + cf.register_option('info_verbose', True, pc_info_verbose_doc, + validator=is_bool) cf.register_option('max_info_columns', 100, pc_max_info_cols_doc, validator=is_int) cf.register_option('colheader_justify', 'right', colheader_justify_doc, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2f8c70024a1e7..662e74aa67a7e 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1381,13 +1381,13 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, if buf is None: return formatter.buf.getvalue() - def info(self, verbose=True, buf=None, max_cols=None): + def info(self, verbose=None, buf=None, max_cols=None): """ Concise summary of a DataFrame. 
Parameters ---------- - verbose : boolean, default True + verbose : boolean, default None If False, don't print column count summary buf : writable buffer, defaults to sys.stdout max_cols : int, default None @@ -1395,6 +1395,9 @@ def info(self, verbose=True, buf=None, max_cols=None): """ from pandas.core.format import _put_lines + if verbose is None: + verbose = get_option("display.info_verbose") + if buf is None: # pragma: no cover buf = sys.stdout
This adds a info_verbose to the options. There's a small section in faq and basic introduction. Entry in v0.14 is still missing. Closes #6568
https://api.github.com/repos/pandas-dev/pandas/pulls/6890
2014-04-16T07:41:20Z
2014-05-14T22:25:15Z
null
2014-06-25T08:40:22Z
BUG/ENH: Add fallback warnings and correctly handle leading whitespace in C parser
diff --git a/doc/source/io.rst b/doc/source/io.rst index 32891e371a489..249cfaf62878f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -92,7 +92,8 @@ They can take a number of arguments: - ``dialect``: string or :class:`python:csv.Dialect` instance to expose more ways to specify the file format - ``dtype``: A data type name or a dict of column name to data type. If not - specified, data types will be inferred. + specified, data types will be inferred. (Unsupported with + ``engine='python'``) - ``header``: row number(s) to use as the column names, and the start of the data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be @@ -154,6 +155,7 @@ They can take a number of arguments: pieces. Will cause an ``TextFileReader`` object to be returned. More on this below in the section on :ref:`iterating and chunking <io.chunking>` - ``skip_footer``: number of lines to skip at bottom of file (default 0) + (Unsupported with ``engine='c'``) - ``converters``: a dictionary of functions for converting values in certain columns, where keys are either integers or column labels - ``encoding``: a string representing the encoding to use for decoding @@ -275,6 +277,11 @@ individual columns: df = pd.read_csv(StringIO(data), dtype={'b': object, 'c': np.float64}) df.dtypes +.. note:: + The ``dtype`` option is currently only supported by the C engine. + Specifying ``dtype`` with ``engine`` other than 'c' raises a + ``ValueError``. + .. _io.headers: Handling column names @@ -1029,6 +1036,22 @@ Specifying ``iterator=True`` will also return the ``TextFileReader`` object: os.remove('tmp.sv') os.remove('tmp2.sv') +Specifying the parser engine +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Under the hood pandas uses a fast and efficient parser implemented in C as well +as a python implementation which is currently more feature-complete. 
Where +possible pandas uses the C parser (specified as ``engine='c'``), but may fall +back to python if C-unsupported options are specified. Currently, C-unsupported +options include: + +- ``sep`` other than a single character (e.g. regex separators) +- ``skip_footer`` +- ``sep=None`` with ``delim_whitespace=False`` + +Specifying any of the above options will produce a ``ParserWarning`` unless the +python engine is selected explicitly using ``engine='python'``. + .. _io.store_in_csv: Writing to CSV format diff --git a/doc/source/release.rst b/doc/source/release.rst index 271daa1623a4b..b00d68688b02b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -176,6 +176,8 @@ API Changes - ``.quantile`` on a ``datetime[ns]`` series now returns ``Timestamp`` instead of ``np.datetime64`` objects (:issue:`6810`) - change ``AssertionError`` to ``TypeError`` for invalid types passed to ``concat`` (:issue:`6583`) +- Add :class:`~pandas.io.parsers.ParserWarning` class for fallback and option + validation warnings in :func:`read_csv`/:func:`read_table` (:issue:`6607`) Deprecations ~~~~~~~~~~~~ @@ -280,6 +282,9 @@ Improvements to existing features - Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) - ``pd.stats.moments.rolling_var`` now uses Welford's method for increased numerical stability (:issue:`6817`) +- Translate ``sep='\s+'`` to ``delim_whitespace=True`` in + :func:`read_csv`/:func:`read_table` if no other C-unsupported options + specified (:issue:`6607`) .. 
_release.bug_fixes-0.14.0: @@ -402,6 +407,17 @@ Bug Fixes - Bug in `DataFrame.plot` and `Series.plot` legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) - Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) +- Raise :class:`ValueError` when ``sep`` specified with + ``delim_whitespace=True`` in :func:`read_csv`/:func:`read_table` + (:issue:`6607`) +- Raise :class:`ValueError` when `engine='c'` specified with unsupported + options (:issue:`6607`) +- Raise :class:`ValueError` when fallback to python parser causes options to be + ignored (:issue:`6607`) +- Produce :class:`~pandas.io.parsers.ParserWarning` on fallback to python + parser when no options are ignored (:issue:`6607`) +- Bug in C parser with leading whitespace (:issue:`3374`) +- Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines pandas 0.13.1 ------------- diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index b45b8929e7af3..b439ca5c61aeb 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -6,6 +6,7 @@ from pandas import compat import re import csv +import warnings import numpy as np @@ -24,6 +25,8 @@ import pandas.tslib as tslib import pandas.parser as _parser +class ParserWarning(Warning): + pass _parser_params = """Also supports optionally iterating or breaking of the file into chunks. @@ -50,6 +53,7 @@ One-character string used to escape delimiter when quoting is QUOTE_NONE. dtype : Type name or dict of column -> type Data type for data or columns. E.g. 
{'a': np.float64, 'b': np.int32} + (Unsupported with engine='python') compression : {'gzip', 'bz2', None}, default None For on-the-fly decompression of on-disk data dialect : string or csv.Dialect instance, default None @@ -113,7 +117,7 @@ chunksize : int, default None Return TextFileReader object for iteration skipfooter : int, default 0 - Number of line at bottom of file to skip + Number of lines at bottom of file to skip (Unsupported with engine='c') converters : dict. optional Dict of functions for converting values in certain columns. Keys can either be integers or column labels @@ -125,24 +129,24 @@ Encoding to use for UTF when reading/writing (ex. 'utf-8') squeeze : boolean, default False If the parsed data only contains one column then return a Series -na_filter: boolean, default True +na_filter : boolean, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file usecols : array-like Return a subset of the columns. Results in much faster parsing time and lower memory usage. -mangle_dupe_cols: boolean, default True +mangle_dupe_cols : boolean, default True Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X' -tupleize_cols: boolean, default False +tupleize_cols : boolean, default False Leave a list of tuples on columns as is (default is to convert to a Multi Index on the columns) -error_bad_lines: boolean, default True +error_bad_lines : boolean, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. If False, then these "bad lines" will dropped from the DataFrame that is - returned. (Only valid with C parser). -warn_bad_lines: boolean, default True + returned. 
(Only valid with C parser) +warn_bad_lines : boolean, default True If error_bad_lines is False, and warn_bad_lines is True, a warning for each "bad line" will be output. (Only valid with C parser). infer_datetime_format : boolean, default False @@ -154,25 +158,30 @@ result : DataFrame or TextParser """ -_csv_sep = """sep : string, default ',' +_csv_params = """sep : string, default ',' Delimiter to use. If sep is None, will try to automatically determine this. Regular expressions are accepted. -""" +engine : {'c', 'python'} + Parser engine to use. The C engine is faster while the python engine is + currently more feature-complete.""" -_table_sep = """sep : string, default \\t (tab-stop) - Delimiter to use. Regular expressions are accepted.""" +_table_params = """sep : string, default \\t (tab-stop) + Delimiter to use. Regular expressions are accepted. +engine : {'c', 'python'} + Parser engine to use. The C engine is faster while the python engine is + currently more feature-complete.""" _read_csv_doc = """ Read CSV (comma-separated) file into DataFrame %s -""" % (_parser_params % _csv_sep) +""" % (_parser_params % _csv_params) _read_table_doc = """ Read general delimited file into DataFrame %s -""" % (_parser_params % _table_sep) +""" % (_parser_params % _table_params) _fwf_widths = """\ colspecs : list of pairs (int, int) or 'infer'. 
optional @@ -297,6 +306,8 @@ def _read(filepath_or_buffer, kwds): def _make_parser_function(name, sep=','): + default_sep = sep + def parser_f(filepath_or_buffer, sep=sep, dialect=None, @@ -325,7 +336,7 @@ def parser_f(filepath_or_buffer, dtype=None, usecols=None, - engine='c', + engine=None, delim_whitespace=False, as_recarray=False, na_filter=True, @@ -362,10 +373,21 @@ def parser_f(filepath_or_buffer, if delimiter is None: delimiter = sep + if delim_whitespace and delimiter is not default_sep: + raise ValueError("Specified a delimiter with both sep and"\ + " delim_whitespace=True; you can only specify one.") + + if engine is not None: + engine_specified = True + else: + engine = 'c' + engine_specified = False + kwds = dict(delimiter=delimiter, engine=engine, dialect=dialect, compression=compression, + engine_specified=engine_specified, doublequote=doublequote, escapechar=escapechar, @@ -468,10 +490,18 @@ class TextFileReader(object): """ - def __init__(self, f, engine='python', **kwds): + def __init__(self, f, engine=None, **kwds): self.f = f + if engine is not None: + engine_specified = True + else: + engine = 'python' + engine_specified = False + + self._engine_specified = kwds.get('engine_specified', engine_specified) + if kwds.get('dialect') is not None: dialect = kwds['dialect'] kwds['delimiter'] = dialect.delimiter @@ -530,21 +560,36 @@ def _get_options_with_defaults(self, engine): def _clean_options(self, options, engine): result = options.copy() + engine_specified = self._engine_specified + fallback_reason = None + sep = options['delimiter'] delim_whitespace = options['delim_whitespace'] + # C engine not supported yet + if engine == 'c': + if options['skip_footer'] > 0: + fallback_reason = "the 'c' engine does not support"\ + " skip_footer" + engine = 'python' + if sep is None and not delim_whitespace: if engine == 'c': + fallback_reason = "the 'c' engine does not support"\ + " sep=None with delim_whitespace=False" engine = 'python' elif sep is not None 
and len(sep) > 1: - # wait until regex engine integrated - if engine not in ('python', 'python-fwf'): + if engine == 'c' and sep == '\s+': + result['delim_whitespace'] = True + del result['delimiter'] + elif engine not in ('python', 'python-fwf'): + # wait until regex engine integrated + fallback_reason = "the 'c' engine does not support"\ + " regex separators" engine = 'python' - # C engine not supported yet - if engine == 'c': - if options['skip_footer'] > 0: - engine = 'python' + if fallback_reason and engine_specified: + raise ValueError(fallback_reason) if engine == 'c': for arg in _c_unsupported: @@ -552,8 +597,23 @@ def _clean_options(self, options, engine): if 'python' in engine: for arg in _python_unsupported: + if fallback_reason and result[arg] != _c_parser_defaults[arg]: + msg = ("Falling back to the 'python' engine because" + " {reason}, but this causes {option!r} to be" + " ignored as it is not supported by the 'python'" + " engine.").format(reason=fallback_reason, option=arg) + if arg == 'dtype': + msg += " (Note the 'converters' option provides"\ + " similar functionality.)" + raise ValueError(msg) del result[arg] + if fallback_reason: + warnings.warn(("Falling back to the 'python' engine because" + " {0}; you can avoid this warning by specifying" + " engine='python'.").format(fallback_reason), + ParserWarning) + index_col = options['index_col'] names = options['names'] converters = options['converters'] diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py index 6cfe4bea01045..6204a441b347d 100644 --- a/pandas/io/tests/test_cparser.py +++ b/pandas/io/tests/test_cparser.py @@ -323,6 +323,9 @@ def _test(text, **kwargs): data = 'A B C\r 2 3\r4 5 6' _test(data, delim_whitespace=True) + data = 'A B C\r2 3\r4 5 6' + _test(data, delim_whitespace=True) + def test_empty_field_eof(self): data = 'a,b,c\n1,2,3\n4,,' diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 612840e82e3ff..872e719eaa630 100644 
--- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -35,7 +35,7 @@ from numpy.testing.decorators import slow from numpy.testing import assert_array_equal -from pandas.parser import OverflowError +from pandas.parser import OverflowError, CParserError class ParserTests(object): @@ -390,19 +390,26 @@ def test_multiple_date_col_timestamp_parse(self): self.assertEqual(result['0_1'][0], ex_val) def test_single_line(self): - # sniff separator - buf = StringIO() - sys.stdout = buf + # GH 6607 + # Test currently only valid with python engine because sep=None and + # delim_whitespace=False. Temporarily copied to TestPythonParser. + # Test for ValueError with other engines: - # printing warning message when engine == 'c' for now + with tm.assertRaisesRegexp(ValueError, + 'sep=None with delim_whitespace=False'): + # sniff separator + buf = StringIO() + sys.stdout = buf - try: - # it works! - df = self.read_csv(StringIO('1,2'), names=['a', 'b'], - header=None, sep=None) - tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df) - finally: - sys.stdout = sys.__stdout__ + # printing warning message when engine == 'c' for now + + try: + # it works! + df = self.read_csv(StringIO('1,2'), names=['a', 'b'], + header=None, sep=None) + tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df) + finally: + sys.stdout = sys.__stdout__ def test_multiple_date_cols_with_header(self): data = """\ @@ -534,11 +541,17 @@ def test_malformed(self): footer """ + # GH 6607 + # Test currently only valid with python engine because + # skip_footer != 0. Temporarily copied to TestPythonParser. 
+ # Test for ValueError with other engines: + try: - df = self.read_table( - StringIO(data), sep=',', header=1, comment='#', - skip_footer=1) - self.assert_(False) + with tm.assertRaisesRegexp(ValueError, 'skip_footer'): #XXX + df = self.read_table( + StringIO(data), sep=',', header=1, comment='#', + skip_footer=1) + self.assert_(False) except Exception as inst: self.assertIn('Expected 3 fields in line 4, saw 5', str(inst)) @@ -599,48 +612,55 @@ def test_malformed(self): self.assertIn('Expected 3 fields in line 6, saw 5', str(inst)) def test_passing_dtype(self): + # GH 6607 + # Passing dtype is currently only supported by the C engine. + # Temporarily copied to TestCParser*. + # Test for ValueError with other engines: - df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E']) + with tm.assertRaisesRegexp(ValueError, + "The 'dtype' option is not supported"): - with tm.ensure_clean('__passing_str_as_dtype__.csv') as path: - df.to_csv(path) + df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E']) - # GH 3795 - # passing 'str' as the dtype - result = pd.read_csv(path, dtype=str, index_col=0) - tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' })) + with tm.ensure_clean('__passing_str_as_dtype__.csv') as path: + df.to_csv(path) - # we expect all object columns, so need to convert to test for equivalence - result = result.astype(float) - tm.assert_frame_equal(result,df) + # GH 3795 + # passing 'str' as the dtype + result = self.read_csv(path, dtype=str, index_col=0) + tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' })) - # invalid dtype - self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' }, - index_col=0) + # we expect all object columns, so need to convert to test for equivalence + result = result.astype(float) + tm.assert_frame_equal(result,df) - # valid but we don't support it (date) - self.assertRaises(TypeError, 
pd.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, - index_col=0) - self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, - index_col=0, parse_dates=['B']) + # invalid dtype + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' }, + index_col=0) - # valid but we don't support it - self.assertRaises(TypeError, pd.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' }, - index_col=0) + # valid but we don't support it (date) + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, + index_col=0) + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, + index_col=0, parse_dates=['B']) + + # valid but we don't support it + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' }, + index_col=0) - def test_quoting(self): - bad_line_small = """printer\tresult\tvariant_name + def test_quoting(self): + bad_line_small = """printer\tresult\tvariant_name Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten"" Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" - self.assertRaises(Exception, self.read_table, StringIO(bad_line_small), - sep='\t') + self.assertRaises(Exception, self.read_table, StringIO(bad_line_small), + sep='\t') - good_line_small = bad_line_small + '"' - df = self.read_table(StringIO(good_line_small), sep='\t') - self.assertEqual(len(df), 3) + good_line_small = bad_line_small + '"' + df = self.read_table(StringIO(good_line_small), sep='\t') + self.assertEqual(len(df), 3) def test_non_string_na_values(self): # GH3611, na_values that are not a string 
are an issue @@ -1165,58 +1185,64 @@ def test_read_text_list(self): tm.assert_frame_equal(chunk, df) def test_iterator(self): - reader = self.read_csv(StringIO(self.data1), index_col=0, - iterator=True) - df = self.read_csv(StringIO(self.data1), index_col=0) + # GH 6607 + # Test currently only valid with python engine because + # skip_footer != 0. Temporarily copied to TestPythonParser. + # Test for ValueError with other engines: - chunk = reader.read(3) - tm.assert_frame_equal(chunk, df[:3]) + with tm.assertRaisesRegexp(ValueError, 'skip_footer'): + reader = self.read_csv(StringIO(self.data1), index_col=0, + iterator=True) + df = self.read_csv(StringIO(self.data1), index_col=0) - last_chunk = reader.read(5) - tm.assert_frame_equal(last_chunk, df[3:]) + chunk = reader.read(3) + tm.assert_frame_equal(chunk, df[:3]) - # pass list - lines = list(csv.reader(StringIO(self.data1))) - parser = TextParser(lines, index_col=0, chunksize=2) + last_chunk = reader.read(5) + tm.assert_frame_equal(last_chunk, df[3:]) - df = self.read_csv(StringIO(self.data1), index_col=0) + # pass list + lines = list(csv.reader(StringIO(self.data1))) + parser = TextParser(lines, index_col=0, chunksize=2) - chunks = list(parser) - tm.assert_frame_equal(chunks[0], df[:2]) - tm.assert_frame_equal(chunks[1], df[2:4]) - tm.assert_frame_equal(chunks[2], df[4:]) + df = self.read_csv(StringIO(self.data1), index_col=0) - # pass skiprows - parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1]) - chunks = list(parser) - tm.assert_frame_equal(chunks[0], df[1:3]) + chunks = list(parser) + tm.assert_frame_equal(chunks[0], df[:2]) + tm.assert_frame_equal(chunks[1], df[2:4]) + tm.assert_frame_equal(chunks[2], df[4:]) - # test bad parameter (skip_footer) - reader = self.read_csv(StringIO(self.data1), index_col=0, - iterator=True, skip_footer=True) - self.assertRaises(ValueError, reader.read, 3) + # pass skiprows + parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1]) + chunks = 
list(parser) + tm.assert_frame_equal(chunks[0], df[1:3]) - treader = self.read_table(StringIO(self.data1), sep=',', index_col=0, - iterator=True) - tm.assert_isinstance(treader, TextFileReader) + # test bad parameter (skip_footer) + reader = self.read_csv(StringIO(self.data1), index_col=0, + iterator=True, skip_footer=True) + self.assertRaises(ValueError, reader.read, 3) - # stopping iteration when on chunksize is specified, GH 3967 - data = """A,B,C + treader = self.read_table(StringIO(self.data1), sep=',', index_col=0, + iterator=True) + tm.assert_isinstance(treader, TextFileReader) + + # stopping iteration when on chunksize is specified, GH 3967 + data = """A,B,C foo,1,2,3 bar,4,5,6 baz,7,8,9 """ - reader = self.read_csv(StringIO(data), iterator=True) - result = list(reader) - expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) - tm.assert_frame_equal(result[0], expected) + reader = self.read_csv(StringIO(data), iterator=True) + result = list(reader) + expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) + tm.assert_frame_equal(result[0], expected) - # chunksize = 1 - reader = self.read_csv(StringIO(data), chunksize=1) - result = list(reader) - expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) - self.assertEqual(len(result), 3) - tm.assert_frame_equal(pd.concat(result), expected) + # chunksize = 1 + reader = self.read_csv(StringIO(data), chunksize=1) + result = list(reader) + expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) + self.assertEqual(len(result), 3) + tm.assert_frame_equal(pd.concat(result), expected) def test_header_not_first_line(self): data = """got,to,ignore,this,line @@ -1447,28 +1473,34 @@ def test_multi_index_parse_dates(self): (datetime, np.datetime64, Timestamp)) def test_skip_footer(self): - data = """A,B,C + # GH 6607 + # Test currently only valid with python engine because + # 
skip_footer != 0. Temporarily copied to TestPythonParser. + # Test for ValueError with other engines: + + with tm.assertRaisesRegexp(ValueError, 'skip_footer'): + data = """A,B,C 1,2,3 4,5,6 7,8,9 want to skip this also also skip this """ - result = self.read_csv(StringIO(data), skip_footer=2) - no_footer = '\n'.join(data.split('\n')[:-3]) - expected = self.read_csv(StringIO(no_footer)) + result = self.read_csv(StringIO(data), skip_footer=2) + no_footer = '\n'.join(data.split('\n')[:-3]) + expected = self.read_csv(StringIO(no_footer)) - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) - result = self.read_csv(StringIO(data), nrows=3) - tm.assert_frame_equal(result, expected) + result = self.read_csv(StringIO(data), nrows=3) + tm.assert_frame_equal(result, expected) - # skipfooter alias - result = read_csv(StringIO(data), skipfooter=2) - no_footer = '\n'.join(data.split('\n')[:-3]) - expected = read_csv(StringIO(no_footer)) + # skipfooter alias + result = read_csv(StringIO(data), skipfooter=2) + no_footer = '\n'.join(data.split('\n')[:-3]) + expected = read_csv(StringIO(no_footer)) - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) def test_no_unnamed_index(self): data = """ id c0 c1 c2 @@ -1536,15 +1568,21 @@ def test_converter_return_string_bug(self): self.assertEqual(df2['Number1'].dtype, float) def test_read_table_buglet_4x_multiindex(self): - text = """ A B C D E + # GH 6607 + # Parsing multiindex columns currently causes an error in the C parser. + # Temporarily copied to TestPythonParser. + # Here test that CParserError is raised: + + with tm.assertRaises(CParserError): + text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - # it works! 
- df = self.read_table(StringIO(text), sep='\s+') - self.assertEquals(df.index.names, ('one', 'two', 'three', 'four')) + # it works! + df = self.read_table(StringIO(text), sep='\s+') + self.assertEquals(df.index.names, ('one', 'two', 'three', 'four')) def test_read_csv_parse_simple_list(self): text = """foo @@ -1987,21 +2025,21 @@ def test_usecols_index_col_conflict(self): expected = DataFrame({'Price': [100, 101]}, index=[datetime(2013, 5, 11), datetime(2013, 5, 12)]) expected.index.name = 'Time' - df = pd.read_csv(StringIO(data), usecols=['Time', 'Price'], parse_dates=True, index_col=0) + df = self.read_csv(StringIO(data), usecols=['Time', 'Price'], parse_dates=True, index_col=0) tm.assert_frame_equal(expected, df) - df = pd.read_csv(StringIO(data), usecols=['Time', 'Price'], parse_dates=True, index_col='Time') + df = self.read_csv(StringIO(data), usecols=['Time', 'Price'], parse_dates=True, index_col='Time') tm.assert_frame_equal(expected, df) - df = pd.read_csv(StringIO(data), usecols=[1, 2], parse_dates=True, index_col='Time') + df = self.read_csv(StringIO(data), usecols=[1, 2], parse_dates=True, index_col='Time') tm.assert_frame_equal(expected, df) - df = pd.read_csv(StringIO(data), usecols=[1, 2], parse_dates=True, index_col=0) + df = self.read_csv(StringIO(data), usecols=[1, 2], parse_dates=True, index_col=0) tm.assert_frame_equal(expected, df) expected = DataFrame({'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)}) expected = expected.set_index(['Price', 'P2']) - df = pd.read_csv(StringIO(data), usecols=['Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2']) + df = self.read_csv(StringIO(data), usecols=['Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2']) tm.assert_frame_equal(expected, df) def test_chunks_have_consistent_numerical_type(self): @@ -2096,6 +2134,14 @@ def test_catch_too_many_names(self): 10,11,12\n""" tm.assertRaises(Exception, read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd']) + def 
test_ignore_leading_whitespace(self): + # GH 6607, GH 3374 + data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9' + result = self.read_table(StringIO(data), sep='\s+') + expected = DataFrame({'a':[1,4,7], 'b':[2,5,8], 'c': [3,6,9]}) + tm.assert_frame_equal(result, expected) + + class TestPythonParser(ParserTests, tm.TestCase): def test_negative_skipfooter_raises(self): @@ -2411,6 +2457,252 @@ def test_iteration_open_handle(self): expected = Series(['DDD', 'EEE', 'FFF', 'GGG']) tm.assert_series_equal(result, expected) + def test_iterator(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the issue with the C parser is fixed + + reader = self.read_csv(StringIO(self.data1), index_col=0, + iterator=True) + df = self.read_csv(StringIO(self.data1), index_col=0) + + chunk = reader.read(3) + tm.assert_frame_equal(chunk, df[:3]) + + last_chunk = reader.read(5) + tm.assert_frame_equal(last_chunk, df[3:]) + + # pass list + lines = list(csv.reader(StringIO(self.data1))) + parser = TextParser(lines, index_col=0, chunksize=2) + + df = self.read_csv(StringIO(self.data1), index_col=0) + + chunks = list(parser) + tm.assert_frame_equal(chunks[0], df[:2]) + tm.assert_frame_equal(chunks[1], df[2:4]) + tm.assert_frame_equal(chunks[2], df[4:]) + + # pass skiprows + parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1]) + chunks = list(parser) + tm.assert_frame_equal(chunks[0], df[1:3]) + + # test bad parameter (skip_footer) + reader = self.read_csv(StringIO(self.data1), index_col=0, + iterator=True, skip_footer=True) + self.assertRaises(ValueError, reader.read, 3) + + treader = self.read_table(StringIO(self.data1), sep=',', index_col=0, + iterator=True) + tm.assert_isinstance(treader, TextFileReader) + + # stopping iteration when on chunksize is specified, GH 3967 + data = """A,B,C +foo,1,2,3 +bar,4,5,6 +baz,7,8,9 +""" + reader = self.read_csv(StringIO(data), iterator=True) + result = list(reader) + expected = DataFrame(dict(A = [1,4,7], B = 
[2,5,8], C = [3,6,9]), index=['foo','bar','baz']) + tm.assert_frame_equal(result[0], expected) + + # chunksize = 1 + reader = self.read_csv(StringIO(data), chunksize=1) + result = list(reader) + expected = DataFrame(dict(A = [1,4,7], B = [2,5,8], C = [3,6,9]), index=['foo','bar','baz']) + self.assertEqual(len(result), 3) + tm.assert_frame_equal(pd.concat(result), expected) + + def test_single_line(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the issue with the C parser is fixed + + # sniff separator + buf = StringIO() + sys.stdout = buf + + # printing warning message when engine == 'c' for now + + try: + # it works! + df = self.read_csv(StringIO('1,2'), names=['a', 'b'], + header=None, sep=None) + tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df) + finally: + sys.stdout = sys.__stdout__ + + def test_malformed(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the issue with the C parser is fixed + + # all + data = """ignore +A,B,C +1,2,3 # comment +1,2,3,4,5 +2,3,4 +""" + + try: + df = self.read_table( + StringIO(data), sep=',', header=1, comment='#') + self.assert_(False) + except Exception as inst: + self.assertIn('Expected 3 fields in line 4, saw 5', str(inst)) + + # skip_footer + data = """ignore +A,B,C +1,2,3 # comment +1,2,3,4,5 +2,3,4 +footer +""" + + try: + df = self.read_table( + StringIO(data), sep=',', header=1, comment='#', + skip_footer=1) + self.assert_(False) + except Exception as inst: + self.assertIn('Expected 3 fields in line 4, saw 5', str(inst)) + + # first chunk + data = """ignore +A,B,C +skip +1,2,3 +3,5,10 # comment +1,2,3,4,5 +2,3,4 +""" + try: + it = self.read_table(StringIO(data), sep=',', + header=1, comment='#', iterator=True, chunksize=1, + skiprows=[2]) + df = it.read(5) + self.assert_(False) + except Exception as inst: + self.assertIn('Expected 3 fields in line 6, saw 5', str(inst)) + + # middle chunk + data = """ignore +A,B,C 
+skip +1,2,3 +3,5,10 # comment +1,2,3,4,5 +2,3,4 +""" + try: + it = self.read_table(StringIO(data), sep=',', header=1, + comment='#', iterator=True, chunksize=1, + skiprows=[2]) + df = it.read(1) + it.read(2) + self.assert_(False) + except Exception as inst: + self.assertIn('Expected 3 fields in line 6, saw 5', str(inst)) + + # last chunk + data = """ignore +A,B,C +skip +1,2,3 +3,5,10 # comment +1,2,3,4,5 +2,3,4 +""" + try: + it = self.read_table(StringIO(data), sep=',', + header=1, comment='#', iterator=True, chunksize=1, + skiprows=[2]) + df = it.read(1) + it.read() + self.assert_(False) + except Exception as inst: + self.assertIn('Expected 3 fields in line 6, saw 5', str(inst)) + + def test_skip_footer(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the issue with the C parser is fixed + + data = """A,B,C +1,2,3 +4,5,6 +7,8,9 +want to skip this +also also skip this +""" + result = self.read_csv(StringIO(data), skip_footer=2) + no_footer = '\n'.join(data.split('\n')[:-3]) + expected = self.read_csv(StringIO(no_footer)) + + tm.assert_frame_equal(result, expected) + + result = self.read_csv(StringIO(data), nrows=3) + tm.assert_frame_equal(result, expected) + + # skipfooter alias + result = self.read_csv(StringIO(data), skipfooter=2) + no_footer = '\n'.join(data.split('\n')[:-3]) + expected = self.read_csv(StringIO(no_footer)) + + tm.assert_frame_equal(result, expected) + + def test_decompression_regex_sep(self): + # GH 6607 + # This is a copy which should eventually be moved to ParserTests + # when the issue with the C parser is fixed + + try: + import gzip + import bz2 + except ImportError: + raise nose.SkipTest('need gzip and bz2 to run') + + data = open(self.csv1, 'rb').read() + data = data.replace(b',', b'::') + expected = self.read_csv(self.csv1) + + with tm.ensure_clean() as path: + tmp = gzip.GzipFile(path, mode='wb') + tmp.write(data) + tmp.close() + + result = self.read_csv(path, sep='::', 
compression='gzip') + tm.assert_frame_equal(result, expected) + + with tm.ensure_clean() as path: + tmp = bz2.BZ2File(path, mode='wb') + tmp.write(data) + tmp.close() + + result = self.read_csv(path, sep='::', compression='bz2') + tm.assert_frame_equal(result, expected) + + self.assertRaises(ValueError, self.read_csv, + path, compression='bz3') + + def test_read_table_buglet_4x_multiindex(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the issue with multiindex columns is fixed in the C parser. + + text = """ A B C D E +one two three four +a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 +a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 +x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" + + # it works! + df = self.read_table(StringIO(text), sep='\s+') + self.assertEquals(df.index.names, ('one', 'two', 'three', 'four')) class TestFwfColspaceSniffing(tm.TestCase): def test_full_file(self): @@ -2545,12 +2837,58 @@ def test_compact_ints(self): def test_parse_dates_empty_string(self): # #2263 s = StringIO("Date, test\n2012-01-01, 1\n,2") - result = pd.read_csv(s, parse_dates=["Date"], na_filter=False) + result = self.read_csv(s, parse_dates=["Date"], na_filter=False) self.assertTrue(result['Date'].isnull()[1]) def test_usecols(self): raise nose.SkipTest("Usecols is not supported in C High Memory engine.") + def test_passing_dtype(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the dtype argument is supported by all engines. 
+ + df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E']) + + with tm.ensure_clean('__passing_str_as_dtype__.csv') as path: + df.to_csv(path) + + # GH 3795 + # passing 'str' as the dtype + result = self.read_csv(path, dtype=str, index_col=0) + tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' })) + + # we expect all object columns, so need to convert to test for equivalence + result = result.astype(float) + tm.assert_frame_equal(result,df) + + # invalid dtype + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' }, + index_col=0) + + # valid but we don't support it (date) + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, + index_col=0) + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, + index_col=0, parse_dates=['B']) + + # valid but we don't support it + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'timedelta64', 'B' : 'float64' }, + index_col=0) + + def test_fallback_to_python(self): + # GH 6607 + data = 'a b c\n1 2 3' + + # specify C engine with unsupported options (raise) + with tm.assertRaisesRegexp(ValueError, 'does not support'): + self.read_table(StringIO(data), engine='c', sep=None, + delim_whitespace=False) + with tm.assertRaisesRegexp(ValueError, 'does not support'): + self.read_table(StringIO(data), engine='c', sep='\s') + with tm.assertRaisesRegexp(ValueError, 'does not support'): + self.read_table(StringIO(data), engine='c', skip_footer=1) + class TestCParserLowMemory(ParserTests, tm.TestCase): @@ -2706,16 +3044,24 @@ def test_decompression_regex_sep(self): tmp.write(data) tmp.close() - result = self.read_csv(path, sep='::', compression='gzip') - tm.assert_frame_equal(result, expected) + # GH 6607 + # Test currently only valid with the python engine because of + # regex sep. Temporarily copied to TestPythonParser. 
+ # Here test for ValueError when passing regex sep: + + with tm.assertRaisesRegexp(ValueError, 'regex sep'): #XXX + result = self.read_csv(path, sep='::', compression='gzip') + tm.assert_frame_equal(result, expected) with tm.ensure_clean() as path: tmp = bz2.BZ2File(path, mode='wb') tmp.write(data) tmp.close() - result = self.read_csv(path, sep='::', compression='bz2') - tm.assert_frame_equal(result, expected) + # GH 6607 + with tm.assertRaisesRegexp(ValueError, 'regex sep'): #XXX + result = self.read_csv(path, sep='::', compression='bz2') + tm.assert_frame_equal(result, expected) self.assertRaises(ValueError, self.read_csv, path, compression='bz3') @@ -2879,6 +3225,95 @@ def test_invalid_c_parser_opts_with_not_c_parser(self): engine)): read_csv(StringIO(data), engine=engine, **kwargs) + def test_passing_dtype(self): + # GH 6607 + # This is a copy which should eventually be merged into ParserTests + # when the dtype argument is supported by all engines. + + df = DataFrame(np.random.rand(5,2),columns=list('AB'),index=['1A','1B','1C','1D','1E']) + + with tm.ensure_clean('__passing_str_as_dtype__.csv') as path: + df.to_csv(path) + + # GH 3795 + # passing 'str' as the dtype + result = self.read_csv(path, dtype=str, index_col=0) + tm.assert_series_equal(result.dtypes,Series({ 'A' : 'object', 'B' : 'object' })) + + # we expect all object columns, so need to convert to test for equivalence + result = result.astype(float) + tm.assert_frame_equal(result,df) + + # invalid dtype + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'foo', 'B' : 'float64' }, + index_col=0) + + # valid but we don't support it (date) + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, + index_col=0) + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 'datetime64', 'B' : 'float64' }, + index_col=0, parse_dates=['B']) + + # valid but we don't support it + self.assertRaises(TypeError, self.read_csv, path, dtype={'A' : 
'timedelta64', 'B' : 'float64' }, + index_col=0) + + def test_fallback_to_python(self): + # GH 6607 + data = 'a b c\n1 2 3' + + # specify C engine with C-unsupported options (raise) + with tm.assertRaisesRegexp(ValueError, 'does not support'): + self.read_table(StringIO(data), engine='c', sep=None, + delim_whitespace=False) + with tm.assertRaisesRegexp(ValueError, 'does not support'): + self.read_table(StringIO(data), engine='c', sep='\s') + with tm.assertRaisesRegexp(ValueError, 'does not support'): + self.read_table(StringIO(data), engine='c', skip_footer=1) + + def test_raise_on_sep_with_delim_whitespace(self): + # GH 6607 + data = 'a b c\n1 2 3' + with tm.assertRaisesRegexp(ValueError, 'you can only specify one'): + self.read_table(StringIO(data), sep='\s', delim_whitespace=True) + + +class TestMiscellaneous(tm.TestCase): + + # for tests that don't fit into any of the other classes, e.g. those that + # compare results for different engines or test the behavior when 'engine' + # is not passed + + def test_compare_whitespace_regex(self): + # GH 6607 + data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9' + result_c = pd.read_table(StringIO(data), sep='\s+', engine='c') + result_py = pd.read_table(StringIO(data), sep='\s+', engine='python') + tm.assert_frame_equal(result_c, result_py) + + def test_fallback_to_python(self): + # GH 6607 + data = 'a b c\n1 2 3' + + # specify C-unsupported options with python-unsupported option + # (options will be ignored on fallback, raise) + with tm.assertRaisesRegexp(ValueError, 'Falling back'): + pd.read_table(StringIO(data), sep=None, + delim_whitespace=False, dtype={'a': float}) + with tm.assertRaisesRegexp(ValueError, 'Falling back'): + pd.read_table(StringIO(data), sep='\s', dtype={'a': float}) + with tm.assertRaisesRegexp(ValueError, 'Falling back'): + pd.read_table(StringIO(data), skip_footer=1, dtype={'a': float}) + + # specify C-unsupported options without python-unsupported options + with 
tm.assert_produces_warning(parsers.ParserWarning): + pd.read_table(StringIO(data), sep=None, delim_whitespace=False) + with tm.assert_produces_warning(parsers.ParserWarning): + pd.read_table(StringIO(data), sep='\s') + with tm.assert_produces_warning(parsers.ParserWarning): + pd.read_table(StringIO(data), skip_footer=1) + + class TestParseSQL(tm.TestCase): def test_convert_sql_column_floats(self): diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c index da991ec23c373..f3da2175092e7 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -1162,7 +1162,6 @@ int tokenize_whitespace(parser_t *self, size_t line_limit) self->state = EAT_CRNL; break; } else if (IS_WHITESPACE(c)) { - END_FIELD(); self->state = EAT_WHITESPACE; break; } else { @@ -1319,10 +1318,14 @@ int tokenize_whitespace(parser_t *self, size_t line_limit) /* self->state = START_RECORD; */ } else if (IS_WHITESPACE(c)){ // Handle \r-delimited files - END_LINE_AND_FIELD_STATE(EAT_WHITESPACE); + END_LINE_STATE(EAT_WHITESPACE); } else { - PUSH_CHAR(c); - END_LINE_STATE(IN_FIELD); + /* XXX + * first character of a new record--need to back up and reread + * to handle properly... + */ + i--; buf--; /* back up one character (HACK!) 
*/ + END_LINE_STATE(START_RECORD); } break; diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a9e48c62f9693..63355e6ef4a30 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -13501,7 +13501,7 @@ def check_query_with_nested_strings(self, parser, engine): 6 "page 3 load" 2/1/2014 1:02:01 6 "page 3 exit" 2/1/2014 1:02:31 """ - df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', + df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python', parse_dates=['timestamp']) expected = df[df.event == '"page 1 load"'] res = df.query("""'"page 1 load"' in event""", parser=parser, diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index ac420ee5d78cd..1eb43237c3185 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -462,7 +462,7 @@ def test_xs_level_multiple(self): a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - df = read_table(StringIO(text), sep='\s+') + df = read_table(StringIO(text), sep='\s+', engine='python') result = df.xs(('a', 4), level=['one', 'four']) expected = df.xs('a').xs(4, level='four') @@ -495,7 +495,7 @@ def test_xs_level0(self): a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - df = read_table(StringIO(text), sep='\s+') + df = read_table(StringIO(text), sep='\s+', engine='python') result = df.xs('a', level=0) expected = df.xs('a')
closes #6607 closes #3374 Currently, specifying options that are incompatible with the C parser in `read_csv` and `read_table` causes a silent fallback to the python engine. This can be confusing if the user has also passed options that are only supported by the C engine, which are then silently ignored. (See #6607) For example, the commonly used option `sep='\s+'` causes a fallback to python which could be avoided by automatically translating this to the equivalent `delim_whitespace=True`, which is supported by the C engine. There are some issues with the C parser that need to be fixed in order not to break tests with `sep='\s+'` which previously fell back to python: The C parser does not correctly handle leading whitespace with `delim_whitespace=True` (#3374). There is a related bug when parsing files with \r-delimited lines and missing values: ``` python In [5]: data = 'a b c\r2 3\r4 5 6' In [6]: pd.read_table(StringIO(data), delim_whitespace=True) Out[6]: a b c 0 2 3 4 1 NaN 5 6 [2 rows x 3 columns] ``` # Summary of changes - **Raise `ValueError` when user specifies `engine='c'` with C-unsupported options:** ``` python In [1]: import pandas as pd In [2]: from pandas.compat import StringIO In [3]: data = ' a\tb c\n 1\t2 3\n 4\t5 6' In [4]: pd.read_table(StringIO(data), engine='c', sep='\s') --------------------------------------------------------------------------- ValueError Traceback (most recent call last) . . . ValueError: the 'c' engine does not support regex separators ``` - **Raise `ValueError` when fallback to python parser causes python-unsupported options to be ignored:** ``` python In [5]: pd.read_table(StringIO(data), sep='\s', dtype={'a': float}) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) . . . 
ValueError: Falling back to the 'python' engine because the 'c' engine does not support regex separators, but this causes 'dtype' to be ignored as it is not supported by the 'python' engine. (Note the 'converters' option provides similar functionality.) ``` - **Warn (new class ParserWarning) when C-unsupported options cause a fallback to python:** ``` python In [6]: pd.read_table(StringIO(data), skip_footer=1) pandas/io/parsers.py:615: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support skip_footer; you can avoid this warning by specifying engine='python'. ParserWarning) Out[6]: a b c 0 1 2 3 [1 rows x 2 columns] ``` - **Raise ValueError when the user specifies both `sep` and `delim_whitespace=True`:** ``` python In [7]: pd.read_table(StringIO(data), sep='\s', delim_whitespace=True) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) . . . ValueError: Specified a delimiter with both sep and delim_whitespace=True; you can only specify one. 
``` - **Translate `sep='\s+'` to `delim_whitespace=True` when there are no other C-unsupported options:** ``` python In [8]: pd.read_table(StringIO(data), sep='\s+', engine='c') Out[8]: a b c 0 1 2 3 1 4 5 6 [2 rows x 3 columns] ``` - **Fix handling of leading whitespace in the C parser (#3374) (add test)** ``` python # Old behavior In [3]: pd.__version__ Out[3]: '0.13.1-663-g21565a3' In [4]: pd.read_table(StringIO(data), delim_whitespace=True) Out[4]: Unnamed: 0 a b c 0 1 2 3 NaN 1 4 5 6 NaN [2 rows x 4 columns] ``` ``` python # New behavior In [9]: pd.read_table(StringIO(data), delim_whitespace=True) Out[9]: a b c 0 1 2 3 1 4 5 6 [2 rows x 3 columns] ``` - **Fix bug in handling of \r-delimited files (add test)** (Old behavior shown above) ``` python # New behavior In [10]: data = 'a b c\r2 3\r4 5 6' In [11]: pd.read_table(StringIO(data), delim_whitespace=True) Out[11]: a b c 0 2 3 NaN 1 4 5 6 [2 rows x 3 columns] ``` - **Copy tests in `ParserTests` that fall back to python to `TestPythonParser`; leave copies of these tests in `ParserTests` with the assertion that they raise a `ValueError` when run under other engines** - **Add description of `engine` option to docstrings of `read_table` and `read_csv`**
https://api.github.com/repos/pandas-dev/pandas/pulls/6889
2014-04-16T01:16:37Z
2014-04-23T22:23:11Z
2014-04-23T22:23:11Z
2014-06-16T17:30:28Z
BUG: properly rename single group match in Series.str.extract()
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2fea35a887f34..fb4f06ac03ff9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -374,6 +374,8 @@ Bug Fixes - Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) - Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` affects to NaT (:issue:`5546`) - Bug in arithmetic operations affecting to NaT (:issue:`6873`) +- Bug in ``Series.str.extract`` where the resulting ``Series`` from a single + group match wasn't renamed to the group name pandas 0.13.1 ------------- diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 6add1767a05d6..7bcc534a34a1f 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -387,6 +387,13 @@ def f(x): return _na_map(f, arr, na) +def _get_single_group_name(rx): + try: + return list(rx.groupindex.keys()).pop() + except IndexError: + return None + + def str_extract(arr, pat, flags=0): """ Find groups in each string using passed regular expression @@ -452,7 +459,7 @@ def f(x): return empty_row if regex.groups == 1: result = Series([f(val)[0] for val in arr], - name=regex.groupindex.get(1), + name=_get_single_group_name(regex), index=arr.index) else: names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 2721edcc89e59..412f2c62e55f2 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -556,6 +556,11 @@ def test_extract(self): exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]], columns=['letter', 'number']) tm.assert_frame_equal(result, exp) + # single group renames series properly + s = Series(['A1', 'A2']) + result = s.str.extract(r'(?P<uno>A)\d') + tm.assert_equal(result.name, 'uno') + # GH6348 # not passing index to the extractor def check_index(index):
null
https://api.github.com/repos/pandas-dev/pandas/pulls/6888
2014-04-16T00:11:03Z
2014-04-16T01:22:38Z
2014-04-16T01:22:38Z
2014-07-16T09:01:58Z
Series.rank() doesn't handle small floats correctly
diff --git a/doc/source/release.rst b/doc/source/release.rst index 271daa1623a4b..248e18034c400 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -402,6 +402,7 @@ Bug Fixes - Bug in `DataFrame.plot` and `Series.plot` legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) - Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) +- Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) pandas 0.13.1 ------------- diff --git a/pandas/algos.pyx b/pandas/algos.pyx index bba6b46c52e37..4628853df3953 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -7,6 +7,7 @@ cimport cython import_array() cdef float64_t FP_ERR = 1e-13 +cdef float64_t REL_TOL = 1e-07 cimport util @@ -132,6 +133,18 @@ cdef _take_2d_object(ndarray[object, ndim=2] values, return result +cdef inline bint float64_are_diff(float64_t left, float64_t right): + cdef double abs_diff, allowed + if right == MAXfloat64 or right == -MAXfloat64: + if left == right: + return False + else: + return True + else: + abs_diff = fabs(left - right) + allowed = REL_TOL * fabs(right) + return abs_diff > allowed + def rank_1d_float64(object in_arr, ties_method='average', ascending=True, na_option='keep', pct=False): """ @@ -186,7 +199,7 @@ def rank_1d_float64(object in_arr, ties_method='average', ascending=True, ranks[argsorted[i]] = nan continue count += 1.0 - if i == n - 1 or fabs(sorted_data[i + 1] - val) > FP_ERR: + if i == n - 1 or float64_are_diff(sorted_data[i + 1], val): if tiebreak == TIEBREAK_AVERAGE: for j in range(i - dups + 1, i + 1): ranks[argsorted[j]] = sum_ranks / dups @@ -345,7 +358,7 @@ def rank_2d_float64(object in_arr, axis=0, ties_method='average', ranks[i, argsorted[i, j]] = nan continue count += 1.0 - if j == k - 1 or fabs(values[i, j 
+ 1] - val) > FP_ERR: + if j == k - 1 or float64_are_diff(values[i, j + 1], val): if tiebreak == TIEBREAK_AVERAGE: for z in range(j - dups + 1, j + 1): ranks[i, argsorted[i, z]] = sum_ranks / dups diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a9e48c62f9693..178a73e3d5967 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11031,6 +11031,7 @@ def test_rank(self): exp = df.astype(float).rank(1) assert_frame_equal(result, exp) + def test_rank2(self): from datetime import datetime df = DataFrame([[1, 3, 2], [1, 2, 3]]) @@ -11084,6 +11085,11 @@ def test_rank2(self): expected = self.mixed_frame.rank(1, numeric_only=True) assert_frame_equal(result, expected) + df = DataFrame({"a":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]}) + exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]}) + assert_frame_equal(df.rank(), exp) + + def test_rank_na_option(self): from pandas.compat.scipy import rankdata diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index d1775177d3c1d..d8eafc7cb8eab 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4047,14 +4047,35 @@ def test_rank(self): exp = iseries / 4.0 iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) - rng = date_range('1/1/1990', periods=5) + rng = date_range('1/1/1990', periods=5) iseries = Series(np.arange(5), rng) + 1 iseries.ix[4] = np.nan exp = iseries / 4.0 iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) + iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1]) + exp = Series([2, 1, 3.5, 5, 3.5, 6]) + iranks = iseries.rank() + assert_series_equal(iranks, exp) + + values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64') + random_order = np.random.permutation(len(values)) + iseries = Series(values[random_order]) + exp = Series(random_order + 1.0, dtype='float64') + iranks = iseries.rank() + assert_series_equal(iranks, exp) + + def 
test_rank_inf(self): + raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly') + + values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64') + random_order = np.random.permutation(len(values)) + iseries = Series(values[random_order]) + exp = Series(random_order + 1.0, dtype='float64') + iranks = iseries.rank() + assert_series_equal(iranks, exp) def test_from_csv(self):
Okay, this fixes #6868 but does so with a bit of a performance penalty. The current pandas version performs just a tad slower than `scipy.stats.rankdata()`, but after these changes, it's about 2-3x slower. On the plus side, it does (what I think is) the right thing. I'm no cython expert, so there may well be things that can be done to improve the speed. Here's some quick benchmarking code (couldn't figure out how to work with the vbench suite): ``` import timeit setup2 = """import pandas import scipy.stats import numpy numpy.random.seed(154) s = pandas.Series(numpy.random.normal(size=10000))""" print "pandas:", timeit.repeat(stmt='s.rank()', setup=setup2, repeat=3, number=10000) print "scipy:", timeit.repeat(stmt='scipy.stats.rankdata(s)', setup=setup2, repeat=3, number=10000) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6886
2014-04-15T16:24:16Z
2014-04-23T23:24:14Z
null
2014-09-24T12:59:05Z
ENH: Allow aggregate numeric operations on timedelta64.
diff --git a/pandas/core/common.py b/pandas/core/common.py index f8f5928ca7d51..171ce9462452f 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1271,14 +1271,23 @@ def _possibly_downcast_to_dtype(result, dtype): dtype = np.dtype(dtype) try: - # don't allow upcasts here (except if empty) + print dtype.kind, result.dtype.kind if dtype.kind == result.dtype.kind: if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape): return result if issubclass(dtype.type, np.floating): return result.astype(dtype) + + # a datetimelike + elif ((dtype.kind == 'M' and result.dtype.kind == 'i') or + dtype.kind == 'm'): + try: + result = result.astype(dtype) + except: + pass + elif dtype == np.bool_ or issubclass(dtype.type, np.integer): # if we don't have any elements, just astype it @@ -1309,13 +1318,6 @@ def _possibly_downcast_to_dtype(result, dtype): if (new_result == result).all(): return new_result - # a datetimelike - elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']: - try: - result = result.astype(dtype) - except: - pass - except: pass diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index cb5dedc887bca..4b55b8cced559 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1083,12 +1083,24 @@ def _try_cast(self, result, obj): def _cython_agg_general(self, how, numeric_only=True): output = {} for name, obj in self._iterate_slices(): - is_numeric = is_numeric_dtype(obj.dtype) + if is_numeric_dtype(obj.dtype): + obj = com.ensure_float(obj) + is_numeric = True + out_dtype = 'f%d' % obj.dtype.itemsize + values = obj.values + else: + is_numeric = issubclass(obj.dtype.type, (np.datetime64, + np.timedelta64)) + if is_numeric: + values = obj.view('int64') + else: + values = obj.astype(object) + if numeric_only and not is_numeric: continue try: - result, names = self.grouper.aggregate(obj.values, how) + result, names = self.grouper.aggregate(values, how) except AssertionError as e: raise GroupByError(str(e)) output[name] = 
self._try_cast(result, obj) @@ -2567,9 +2579,9 @@ def _cython_agg_blocks(self, how, numeric_only=True): data = data.get_numeric_data(copy=False) for block in data.blocks: - values = block._try_operate(block.values) + # TODO DAN if block.is_numeric: values = _algos.ensure_float64(values) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 4077f468d8b1f..734287baaa50d 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -41,6 +41,7 @@ def _skip_if_mpl_not_installed(): except ImportError: raise nose.SkipTest("matplotlib not installed") + def commonSetUp(self): self.dateRange = bdate_range('1/1/2005', periods=250) self.stringIndex = Index([rands(8).upper() for x in range(250)]) @@ -603,6 +604,28 @@ def f(grp): e.name = None assert_series_equal(result,e) + # ...and with timedeltas + df1 = df.copy() + df1['D'] = pd.to_timedelta(['00:00:01', '00:00:02', '00:00:03', + '00:00:04', '00:00:05', '00:00:06', + '00:00:07']) + result = df1.groupby('A').apply(f)[['D']] + e = df1.groupby('A').first()[['D']] + e.loc['Pony'] = np.nan + print(type(result)) + print(type(e)) + assert_frame_equal(result, e) + + def f(grp): + if grp.name == 'Pony': + return None + return grp.iloc[0].loc['D'] + result = df1.groupby('A').apply(f)['D'] + e = df1.groupby('A').first()['D'].copy() + e.loc['Pony'] = np.nan + e.name = None + assert_series_equal(result, e) + def test_agg_api(self): # GH 6337 @@ -4365,6 +4388,19 @@ def test_index_label_overlaps_location(self): expected = ser.take([1, 3, 4]) assert_series_equal(actual, expected) + def test_groupby_methods_on_timedelta64(self): + df = self.df.copy().iloc[:4] + df['E'] = pd.to_timedelta(['00:00:01', '00:00:02', '00:00:03', '00:00:04']) + # DataFrameGroupBy + actual = df.groupby('A').mean()['E'] + expected = pd.to_timedelta(Series(['00:00:03', '00:00:02'], index=['bar', 'foo'], name='E')) + assert_series_equal(actual, expected) + + ser = df['E'] + # SeriesGroupBy + actual = 
ser.groupby(df['A']).mean() + assert_series_equal(actual, expected) + def test_groupby_selection_with_methods(self): # some methods which require DatetimeIndex rng = pd.date_range('2014', periods=len(self.df))
closes #5724 Currently, `timedelta64` columns can't be used in aggregate group operations. They are quietly dropped or, if no other columns are present, an exception is raised: ``` In [2]: df Out[2]: A E 0 foo 00:00:01 1 bar 00:00:02 2 foo 00:00:03 3 bar 00:00:04 In [3]: df.groupby('A').mean() Out[3]: (...) DataError: No numeric types to aggregate ``` But any operation that works on numerical data is also well defined on timedeltas. This PR relaxes the restriction on those methods designated "numeric only" to accept numeric dtypes _and_ `timedelta64`. I include a unit test with one simple case each for `DataFrameGroupBy` and `SeriesGroupBy`. ``` In [2]: df.groupby('A').mean() Out[2]: E A bar 00:00:03 foo 00:00:02 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6884
2014-04-14T21:29:37Z
2015-05-09T16:08:30Z
null
2024-03-20T18:43:59Z
SQL: add multi-index support to legacy mode
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index a80e8049ae627..5052f057871b0 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -436,10 +436,7 @@ def maybe_asscalar(self, i): except AttributeError: return i - def insert(self): - ins = self.insert_statement() - data_list = [] - + def insert_data(self): if self.index is not None: temp = self.frame.copy() temp.index.names = self.index @@ -451,6 +448,12 @@ def insert(self): else: temp = self.frame + return temp + + def insert(self): + ins = self.insert_statement() + data_list = [] + temp = self.insert_data() keys = temp.columns for t in temp.itertuples(): @@ -785,7 +788,7 @@ def insert_statement(self): wld = _SQL_SYMB[flv]['wld'] # wildcard char if self.index is not None: - safe_names.insert(0, self.index) + [safe_names.insert(0, idx) for idx in self.index[::-1]] bracketed_names = [br_l + column + br_r for column in safe_names] col_names = ','.join(bracketed_names) @@ -796,26 +799,18 @@ def insert_statement(self): def insert(self): ins = self.insert_statement() + temp = self.insert_data() + data_list = [] + + for t in temp.itertuples(): + data = tuple((self.maybe_asscalar(v) for v in t[1:])) + data_list.append(data) + cur = self.pd_sql.con.cursor() - for r in self.frame.itertuples(): - data = [self.maybe_asscalar(v) for v in r[1:]] - if self.index is not None: - data.insert(0, self.maybe_asscalar(r[0])) - cur.execute(ins, tuple(data)) + cur.executemany(ins, data_list) cur.close() self.pd_sql.con.commit() - def _index_name(self, index, index_label): - if index is True: - if self.frame.index.name is not None: - return _safe_col_name(self.frame.index.name) - else: - return 'pandas_index' - elif isinstance(index, string_types): - return index - else: - return None - def _create_table_statement(self): "Return a CREATE TABLE statement to suit the contents of a DataFrame." 
@@ -824,8 +819,10 @@ def _create_table_statement(self): column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes] if self.index is not None: - safe_columns.insert(0, self.index) - column_types.insert(0, self._sql_type_name(self.frame.index.dtype)) + for i, idx_label in enumerate(self.index[::-1]): + safe_columns.insert(0, idx_label) + column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype)) + flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote char @@ -935,15 +932,16 @@ def to_sql(self, frame, name, if_exists='fail', index=True, ---------- frame: DataFrame name: name of SQL table - flavor: {'sqlite', 'mysql', 'postgres'}, default 'sqlite' + flavor: {'sqlite', 'mysql'}, default 'sqlite' if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if does not exist. - index_label : ignored (only used in sqlalchemy mode) + """ table = PandasSQLTableLegacy( - name, self, frame=frame, index=index, if_exists=if_exists) + name, self, frame=frame, index=index, if_exists=if_exists, + index_label=index_label) table.insert() def has_table(self, name): @@ -991,13 +989,47 @@ def read_frame(*args, **kwargs): return read_sql(*args, **kwargs) -def write_frame(*args, **kwargs): +def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): """DEPRECIATED - use to_sql + + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame + name : string + con : DBAPI2 connection + flavor : {'sqlite', 'mysql'}, default 'sqlite' + The flavor of SQL to use. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. 
+ index : boolean, default False + Write DataFrame index as a column + + Notes + ----- + This function is deprecated in favor of ``to_sql``. There are however + two differences: + + - With ``to_sql`` the index is written to the sql database by default. To + keep the behaviour this function you need to specify ``index=False``. + - The new ``to_sql`` function supports sqlalchemy engines to work with + different sql flavors. + + See also + -------- + pandas.DataFrame.to_sql + """ warnings.warn("write_frame is depreciated, use to_sql", DeprecationWarning) - return to_sql(*args, **kwargs) + + # for backwards compatibility, set index=False when not specified + index = kwargs.pop('index', False) + return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists, + index=index, **kwargs) # Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ -write_frame.__doc__ += to_sql.__doc__ diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index aa1b2516e4fb6..f05f6fe3c1d14 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -460,31 +460,18 @@ def test_date_and_index(self): issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") - -class TestSQLApi(_TestSQLApi): - - """Test the public API as it would be used directly - """ - flavor = 'sqlite' - - def connect(self): - if SQLALCHEMY_INSTALLED: - return sqlalchemy.create_engine('sqlite:///:memory:') - else: - raise nose.SkipTest('SQLAlchemy not installed') - def test_to_sql_index_label(self): temp_frame = DataFrame({'col1': range(4)}) # no index name, defaults to 'index' sql.to_sql(temp_frame, 'test_index_label', self.conn) - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'index') # specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label='other_label') - frame = 
sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'other_label', "Specified index_label not written to database") @@ -492,14 +479,14 @@ def test_to_sql_index_label(self): temp_frame.index.name = 'index_name' sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace') - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'index_name', "Index name not written to database") # has index name, but specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label='other_label') - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'other_label', "Specified index_label not written to database") @@ -509,14 +496,14 @@ def test_to_sql_index_label_multiindex(self): # no index name, defaults to 'level_0' and 'level_1' sql.to_sql(temp_frame, 'test_index_label', self.conn) - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'level_0') self.assertEqual(frame.columns[1], 'level_1') # specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label=['A', 'B']) - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'], "Specified index_labels not written to database") @@ -524,14 +511,14 @@ def test_to_sql_index_label_multiindex(self): temp_frame.index.names = ['A', 'B'] sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace') - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM 
test_index_label', self.conn) self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'], "Index names not written to database") # has index name, but specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label=['C', 'D']) - frame = sql.read_table('test_index_label', self.conn) + frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'], "Specified index_labels not written to database") @@ -540,6 +527,19 @@ def test_to_sql_index_label_multiindex(self): 'test_index_label', self.conn, if_exists='replace', index_label='C') + +class TestSQLApi(_TestSQLApi): + + """Test the public API as it would be used directly + """ + flavor = 'sqlite' + + def connect(self): + if SQLALCHEMY_INSTALLED: + return sqlalchemy.create_engine('sqlite:///:memory:') + else: + raise nose.SkipTest('SQLAlchemy not installed') + def test_read_table_columns(self): # test columns argument in read_table sql.to_sql(self.test_frame1, 'test_frame', self.conn) @@ -622,23 +622,6 @@ def test_sql_open_close(self): tm.assert_frame_equal(self.test_frame2, result) - def test_roundtrip(self): - # this test otherwise fails, Legacy mode still uses 'pandas_index' - # as default index column label - sql.to_sql(self.test_frame1, 'test_frame_roundtrip', - con=self.conn, flavor='sqlite') - result = sql.read_sql( - 'SELECT * FROM test_frame_roundtrip', - con=self.conn, - flavor='sqlite') - - # HACK! 
- result.index = self.test_frame1.index - result.set_index('pandas_index', inplace=True) - result.index.astype(int) - result.index.name = None - tm.assert_frame_equal(result, self.test_frame1) - class _TestSQLAlchemy(PandasSQLTest): """ @@ -861,16 +844,6 @@ def setUp(self): self._load_test1_data() - def _roundtrip(self): - # overwrite parent function (level_0 -> pandas_index in legacy mode) - self.drop_table('test_frame_roundtrip') - self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip') - result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip') - result.set_index('pandas_index', inplace=True) - result.index.name = None - - tm.assert_frame_equal(result, self.test_frame1) - def test_invalid_flavor(self): self.assertRaises( NotImplementedError, sql.PandasSQLLegacy, self.conn, 'oracle')
Closes #6881. @danielballan Added multi-index support to legacy mode, wasn't actually not that difficult (all tests seems to pass). And at once also `index_label` kwarg is supported in legacy mode. This just leaves the issue that this is actually an api change, so could possibly break code.
https://api.github.com/repos/pandas-dev/pandas/pulls/6883
2014-04-14T20:25:59Z
2014-04-15T22:32:19Z
2014-04-15T22:32:19Z
2014-07-16T09:01:52Z
ENH: Float64Index now uses Float64Hashtable as a backend
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 3c15fdea70858..322c58115de3c 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1261,6 +1261,15 @@ numpy array. For instance, Float64Index ------------ +.. note:: + + As of 0.14.0, ``Float64Index`` is backed by a native ``float64`` dtype + array. Prior to 0.14.0, ``Float64Index`` was backed by an ``object`` dtype + array. Using a ``float64`` dtype in the backend speeds up arithmetic + operations by about 30x and boolean indexing operations on the + ``Float64Index`` itself are about 2x as fast. + + .. versionadded:: 0.13.0 By default a ``Float64Index`` will be automatically created when passing floating, or mixed-integer-floating values in index creation. diff --git a/doc/source/release.rst b/doc/source/release.rst index 08d7bf9b8728b..2fea35a887f34 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -258,6 +258,8 @@ Improvements to existing features - Performance improvement for ``DataFrame.from_records`` when reading a specified number of rows from an iterable (:issue:`6700`) - :ref:`Holidays and holiday calendars<timeseries.holiday>` are now available and can be used with CustomBusinessDay (:issue:`6719`) +- ``Float64Index`` is now backed by a ``float64`` dtype ndarray instead of an + ``object`` dtype array (:issue:`6471`). .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ded10fd75e8d4..11296a43e230d 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -468,6 +468,8 @@ Enhancements file. (:issue:`6545`) - ``pandas.io.gbq`` now handles reading unicode strings properly. (:issue:`5940`) - :ref:`Holidays Calendars<timeseries.holiday>` are now available and can be used with CustomBusinessDay (:issue:`6719`) +- ``Float64Index`` is now backed by a ``float64`` dtype ndarray instead of an + ``object`` dtype array (:issue:`6471`). 
Performance ~~~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d330d4309b13e..a00b729f1735a 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1843,7 +1843,6 @@ def eval(self, expr, **kwargs): kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers return _eval(expr, **kwargs) - def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: @@ -2566,7 +2565,7 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, ------- sorted : DataFrame """ - + from pandas.core.groupby import _lexsort_indexer, _nargsort axis = self._get_axis_number(axis) if axis not in [0, 1]: # pragma: no cover @@ -2622,7 +2621,7 @@ def trans(v): else: indexer = _nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) - + if inplace: if axis == 1: new_data = self._data.reindex_items( @@ -3285,7 +3284,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first column/row. 
- + Examples -------- >>> df.apply(numpy.sqrt) # returns DataFrame diff --git a/pandas/core/index.py b/pandas/core/index.py index a581a8753ae51..8748d0081d2e9 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -16,7 +16,8 @@ from pandas.util.decorators import cache_readonly, deprecate from pandas.core.common import isnull, array_equivalent import pandas.core.common as com -from pandas.core.common import _values_from_object, is_float, is_integer, ABCSeries +from pandas.core.common import (_values_from_object, is_float, is_integer, + ABCSeries) from pandas.core.config import get_option # simplify @@ -27,6 +28,13 @@ __all__ = ['Index'] +def _try_get_item(x): + try: + return x.item() + except AttributeError: + return x + + def _indexOp(opname): """ Wrapper function for index comparison operations, to avoid @@ -1911,11 +1919,17 @@ class Float64Index(Index): Notes ----- - An Index instance can **only** contain hashable objects + An Float64Index instance can **only** contain hashable objects """ # when this is not longer object dtype this can be changed - #_engine_type = _index.Float64Engine + _engine_type = _index.Float64Engine + _groupby = _algos.groupby_float64 + _arrmap = _algos.arrmap_float64 + _left_indexer_unique = _algos.left_join_indexer_unique_float64 + _left_indexer = _algos.left_join_indexer_float64 + _inner_indexer = _algos.inner_join_indexer_float64 + _outer_indexer = _algos.outer_join_indexer_float64 def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False): @@ -1938,9 +1952,9 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False): raise TypeError('Unsafe NumPy casting, you must ' 'explicitly cast') - # coerce to object for storage - if not subarr.dtype == np.object_: - subarr = subarr.astype(object) + # coerce to float64 for storage + if subarr.dtype != np.float64: + subarr = subarr.astype(np.float64) subarr = subarr.view(cls) subarr.name = name @@ -1951,13 +1965,12 @@ def inferred_type(self): return 
'floating' def astype(self, dtype): - if np.dtype(dtype) != np.object_: - raise TypeError('Setting %s dtype to anything other than object ' - 'is not supported' % self.__class__) - return Index(self.values, name=self.name, dtype=object) + if np.dtype(dtype) not in (np.object, np.float64): + raise TypeError('Setting %s dtype to anything other than ' + 'float64 or object is not supported' % self.__class__) + return Index(self.values, name=self.name, dtype=dtype) def _convert_scalar_indexer(self, key, typ=None): - if typ == 'iloc': return super(Float64Index, self)._convert_scalar_indexer(key, typ=typ) @@ -1968,8 +1981,6 @@ def _convert_slice_indexer(self, key, typ=None): unless we are iloc """ if typ == 'iloc': return self._convert_slice_indexer_iloc(key) - elif typ == 'getitem': - pass # allow floats here self._validate_slicer( @@ -2008,13 +2019,75 @@ def equals(self, other): try: if not isinstance(other, Float64Index): other = self._constructor(other) - if self.dtype != other.dtype or self.shape != other.shape: return False + if self.dtype != other.dtype or self.shape != other.shape: + return False left, right = self.values, other.values - return ((left == right) | (isnull(left) & isnull(right))).all() + return ((left == right) | (self._isnan & other._isnan)).all() except TypeError: # e.g. 
fails in numpy 1.6 with DatetimeIndex #1681 return False + def __contains__(self, other): + if super(Float64Index, self).__contains__(other): + return True + + try: + # if other is a sequence this throws a ValueError + return np.isnan(other) and self._hasnans + except ValueError: + try: + return len(other) <= 1 and _try_get_item(other) in self + except TypeError: + return False + + def get_loc(self, key): + if np.isnan(key): + try: + return self._nan_idxs.item() + except ValueError: + return self._nan_idxs + return super(Float64Index, self).get_loc(key) + + @property + def is_all_dates(self): + """ + Checks that all the labels are datetime objects + """ + return False + + @cache_readonly + def _nan_idxs(self): + w, = self._isnan.nonzero() + return w + + @cache_readonly + def _isnan(self): + return np.isnan(self.values) + + @cache_readonly + def _hasnans(self): + return self._isnan.any() + + @cache_readonly + def is_unique(self): + return super(Float64Index, self).is_unique and self._nan_idxs.size < 2 + + def isin(self, values): + """ + Compute boolean array of whether each index value is found in the + passed set of values + + Parameters + ---------- + values : set or sequence of values + + Returns + ------- + is_contained : ndarray (boolean dtype) + """ + value_set = set(values) + return lib.ismember_nans(self._array_values(), value_set, + self._hasnans) class MultiIndex(Index): diff --git a/pandas/hashtable.pxd b/pandas/hashtable.pxd index ac68dadd882de..97b6687d061e9 100644 --- a/pandas/hashtable.pxd +++ b/pandas/hashtable.pxd @@ -1,4 +1,4 @@ -from khash cimport * +from khash cimport kh_int64_t, kh_float64_t, kh_pymap_t, int64_t, float64_t # prototypes for sharing @@ -11,12 +11,11 @@ cdef class Int64HashTable(HashTable): cpdef get_item(self, int64_t val) cpdef set_item(self, int64_t key, Py_ssize_t val) - cdef class Float64HashTable(HashTable): cdef kh_float64_t *table - # cpdef get_item(self, float64_t val) - # cpdef set_item(self, float64_t key, Py_ssize_t 
val) + cpdef get_item(self, float64_t val) + cpdef set_item(self, float64_t key, Py_ssize_t val) cdef class PyObjectHashTable(HashTable): cdef kh_pymap_t *table diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx index d4ed7fac5d6b7..2b3aa7b52d6c1 100644 --- a/pandas/hashtable.pyx +++ b/pandas/hashtable.pyx @@ -145,10 +145,6 @@ cdef class HashTable: cdef class StringHashTable(HashTable): cdef kh_str_t *table - # def __init__(self, size_hint=1): - # if size_hint is not None: - # kh_resize_str(self.table, size_hint) - def __cinit__(self, int size_hint=1): self.table = kh_init_str() if size_hint is not None: @@ -539,8 +535,6 @@ cdef class Int64HashTable: #(HashTable): cdef class Float64HashTable(HashTable): - # cdef kh_float64_t *table - def __cinit__(self, size_hint=1): self.table = kh_init_float64() if size_hint is not None: @@ -549,9 +543,34 @@ cdef class Float64HashTable(HashTable): def __len__(self): return self.table.size + cpdef get_item(self, float64_t val): + cdef khiter_t k + k = kh_get_float64(self.table, val) + if k != self.table.n_buckets: + return self.table.vals[k] + else: + raise KeyError(val) + + cpdef set_item(self, float64_t key, Py_ssize_t val): + cdef: + khiter_t k + int ret = 0 + + k = kh_put_float64(self.table, key, &ret) + self.table.keys[k] = key + if kh_exist_float64(self.table, k): + self.table.vals[k] = val + else: + raise KeyError(key) + def __dealloc__(self): kh_destroy_float64(self.table) + def __contains__(self, object key): + cdef khiter_t k + k = kh_get_float64(self.table, key) + return k != self.table.n_buckets + def factorize(self, ndarray[float64_t] values): uniques = Float64Vector() labels = self.get_labels(values, uniques, 0, -1) diff --git a/pandas/index.pyx b/pandas/index.pyx index e5cfa3f7c6f16..ae209b58136e1 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -398,6 +398,9 @@ cdef class Float64Engine(IndexEngine): cdef _make_hash_table(self, n): return _hash.Float64HashTable(n) + cdef _get_index_values(self): + 
return algos.ensure_float64(self.vgetter()) + def _call_monotonic(self, values): return algos.is_monotonic_float64(values) diff --git a/pandas/lib.pyx b/pandas/lib.pyx index dccc68ab59ad3..a1fef095ea277 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -92,6 +92,22 @@ cpdef map_indices_list(list index): from libc.stdlib cimport malloc, free + +def ismember_nans(float64_t[:] arr, set values, bint hasnans): + cdef: + Py_ssize_t i, n + ndarray[uint8_t] result + float64_t val + + n = len(arr) + result = np.empty(n, dtype=np.uint8) + for i in range(n): + val = arr[i] + result[i] = val in values or hasnans and isnan(val) + + return result.view(np.bool_) + + def ismember(ndarray arr, set values): ''' Checks whether @@ -114,10 +130,7 @@ def ismember(ndarray arr, set values): result = np.empty(n, dtype=np.uint8) for i in range(n): val = util.get_value_at(arr, i) - if val in values: - result[i] = 1 - else: - result[i] = 0 + result[i] = val in values return result.view(np.bool_) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index f4d90b533a0f7..ecb09ac395417 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -37,6 +37,7 @@ def _skip_if_need_numpy_1_7(): if _np_version_under1p7: raise nose.SkipTest('numpy >= 1.7 required') + class TestIndex(tm.TestCase): _multiprocess_can_split_ = True @@ -835,15 +836,15 @@ def test_constructor(self): self.assertIsInstance(index, Float64Index) index = Float64Index(np.array([1.,2,3,4,5])) self.assertIsInstance(index, Float64Index) - self.assertEqual(index.dtype, object) + self.assertEqual(index.dtype, float) index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32) self.assertIsInstance(index, Float64Index) - self.assertEqual(index.dtype, object) + self.assertEqual(index.dtype, np.float64) index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32) self.assertIsInstance(index, Float64Index) - self.assertEqual(index.dtype, object) + self.assertEqual(index.dtype, np.float64) # nan handling 
result = Float64Index([np.nan, np.nan]) @@ -904,6 +905,15 @@ def test_equals(self): i2 = Float64Index([1.0,np.nan]) self.assertTrue(i.equals(i2)) + def test_contains_nans(self): + i = Float64Index([1.0, 2.0, np.nan]) + self.assertTrue(np.nan in i) + + def test_contains_not_nans(self): + i = Float64Index([1.0, 2.0, np.nan]) + self.assertTrue(1.0 in i) + + class TestInt64Index(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index b51ad3e15087c..261e1dd2a590c 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -4,23 +4,20 @@ import warnings from pandas.compat import range, lrange, lzip, StringIO, lmap, map -from numpy import random, nan +from numpy import nan from numpy.random import randn import numpy as np -from numpy.testing import assert_array_equal import pandas as pd import pandas.core.common as com -from pandas.core.api import (DataFrame, Index, Series, Panel, notnull, isnull, - MultiIndex, DatetimeIndex, Float64Index, Timestamp) +from pandas.core.api import (DataFrame, Index, Series, Panel, isnull, + MultiIndex, Float64Index, Timestamp) from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal) -from pandas import compat, concat +from pandas import concat import pandas.util.testing as tm -import pandas.lib as lib from pandas import date_range -from numpy.testing.decorators import slow _verbose = False @@ -1201,17 +1198,23 @@ def test_ix_general(self): # ix general issues # GH 2817 - data={'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444}, - 'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0}, - 'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}} - df = DataFrame(data).set_index(keys=['col','year']) + data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444}, + 'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0}, + 'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}} + df = 
DataFrame(data).set_index(keys=['col', 'year']) + key = 4.0, 2012 # this should raise correct error - self.assertRaises(KeyError, df.ix.__getitem__, tuple([4.0,2012])) + with tm.assertRaises(KeyError): + df.ix[key] # this is ok df.sortlevel(inplace=True) - df.ix[(4.0,2012)] + res = df.ix[key] + index = MultiIndex.from_arrays([[4] * 3, [2012] * 3], + names=['col', 'year']) + expected = DataFrame({'amount': [222, 333, 444]}, index=index) + tm.assert_frame_equal(res, expected) def test_ix_weird_slicing(self): ## http://stackoverflow.com/q/17056560/1240268 diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py index cb13d63cd726c..5a98481a689a6 100644 --- a/vb_suite/index_object.py +++ b/vb_suite/index_object.py @@ -59,3 +59,49 @@ index_str_slice_indexer_even = Benchmark('idx[::2]', setup) index_str_boolean_indexer = Benchmark('idx[mask]', setup) index_str_boolean_series_indexer = Benchmark('idx[series_mask]', setup) + +#---------------------------------------------------------------------- +# float64 index +#---------------------------------------------------------------------- +# construction +setup = common_setup + """ +baseidx = np.arange(1e6) +""" + +index_float64_construct = Benchmark('Index(baseidx)', setup, + name='index_float64_construct', + start_date=datetime(2014, 4, 13)) + +setup = common_setup + """ +idx = tm.makeFloatIndex(1000000) + +mask = np.arange(idx.size) % 3 == 0 +series_mask = Series(mask) +""" +#---------------------------------------------------------------------- +# getting +index_float64_get = Benchmark('idx[1]', setup, name='index_float64_get', + start_date=datetime(2014, 4, 13)) + + +#---------------------------------------------------------------------- +# slicing +index_float64_slice_indexer_basic = Benchmark('idx[:-1]', setup, + name='index_float64_slice_indexer_basic', + start_date=datetime(2014, 4, 13)) +index_float64_slice_indexer_even = Benchmark('idx[::2]', setup, + name='index_float64_slice_indexer_even', + 
start_date=datetime(2014, 4, 13)) +index_float64_boolean_indexer = Benchmark('idx[mask]', setup, + name='index_float64_boolean_indexer', + start_date=datetime(2014, 4, 13)) +index_float64_boolean_series_indexer = Benchmark('idx[series_mask]', setup, + name='index_float64_boolean_series_indexer', + start_date=datetime(2014, 4, 13)) + +#---------------------------------------------------------------------- +# arith ops +index_float64_mul = Benchmark('idx * 2', setup, name='index_float64_mul', + start_date=datetime(2014, 4, 13)) +index_float64_div = Benchmark('idx / 2', setup, name='index_float64_div', + start_date=datetime(2014, 4, 13))
closes #6471
https://api.github.com/repos/pandas-dev/pandas/pulls/6879
2014-04-13T23:12:00Z
2014-04-14T10:55:43Z
2014-04-14T10:55:43Z
2014-06-14T15:46:38Z
CLN: simplify series plotting
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index b1faf3047beea..e81cfd39ba78e 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -55,9 +55,10 @@ def test_plot(self): _check_plot_works(self.ts.plot, style='.', loglog=True) _check_plot_works(self.ts[:10].plot, kind='bar') _check_plot_works(self.iseries.plot) - _check_plot_works(self.series[:5].plot, kind='bar') - _check_plot_works(self.series[:5].plot, kind='line') - _check_plot_works(self.series[:5].plot, kind='barh') + + for kind in plotting._common_kinds: + _check_plot_works(self.series[:5].plot, kind=kind) + _check_plot_works(self.series[:10].plot, kind='barh') _check_plot_works(Series(randn(10)).plot, kind='bar', color='black') @@ -250,25 +251,19 @@ def test_bootstrap_plot(self): def test_invalid_plot_data(self): s = Series(list('abcd')) - kinds = 'line', 'bar', 'barh', 'kde', 'density' - - for kind in kinds: + for kind in plotting._common_kinds: with tm.assertRaises(TypeError): s.plot(kind=kind) @slow def test_valid_object_plot(self): s = Series(lrange(10), dtype=object) - kinds = 'line', 'bar', 'barh', 'kde', 'density' - - for kind in kinds: + for kind in plotting._common_kinds: _check_plot_works(s.plot, kind=kind) def test_partially_invalid_plot_data(self): s = Series(['a', 'b', 1.0, 2]) - kinds = 'line', 'bar', 'barh', 'kde', 'density' - - for kind in kinds: + for kind in plotting._common_kinds: with tm.assertRaises(TypeError): s.plot(kind=kind) @@ -1247,19 +1242,17 @@ def test_unordered_ts(self): assert_array_equal(ydata, np.array([1.0, 2.0, 3.0])) def test_all_invalid_plot_data(self): - kinds = 'line', 'bar', 'barh', 'kde', 'density' df = DataFrame(list('abcd')) - for kind in kinds: + for kind in plotting._common_kinds: with tm.assertRaises(TypeError): df.plot(kind=kind) @slow def test_partially_invalid_plot_data(self): with tm.RNGContext(42): - kinds = 'line', 'bar', 'barh', 'kde', 'density' df = DataFrame(randn(10, 2), dtype=object) 
df[np.random.rand(df.shape[0]) > 0.5] = 'a' - for kind in kinds: + for kind in plotting._common_kinds: with tm.assertRaises(TypeError): df.plot(kind=kind) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 4d348c37ed927..971aa7848c2fa 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -887,25 +887,31 @@ def _validate_color_args(self): " use one or the other or pass 'style' " "without a color symbol") - def _iter_data(self): - from pandas.core.frame import DataFrame - if isinstance(self.data, (Series, np.ndarray)): - yield self.label, np.asarray(self.data) - elif isinstance(self.data, DataFrame): - df = self.data + def _iter_data(self, data=None, keep_index=False): + if data is None: + data = self.data + from pandas.core.frame import DataFrame + if isinstance(data, (Series, np.ndarray)): + if keep_index is True: + yield self.label, data + else: + yield self.label, np.asarray(data) + elif isinstance(data, DataFrame): if self.sort_columns: - columns = com._try_sort(df.columns) + columns = com._try_sort(data.columns) else: - columns = df.columns + columns = data.columns for col in columns: # # is this right? 
# empty = df[col].count() == 0 # values = df[col].values if not empty else np.zeros(len(df)) - values = df[col].values - yield col, values + if keep_index is True: + yield col, data[col] + else: + yield col, data[col].values @property def nseries(self): @@ -1593,38 +1599,26 @@ def _plot(data, col_num, ax, label, style, **kwds): self._add_legend_handle(newlines[0], label, index=col_num) - if isinstance(data, Series): - ax = self._get_ax(0) # self.axes[0] - style = self.style or '' - label = com.pprint_thing(self.label) + it = self._iter_data(data=data, keep_index=True) + for i, (label, y) in enumerate(it): + ax = self._get_ax(i) + style = self._get_style(i, label) kwds = self.kwds.copy() - self._maybe_add_color(colors, kwds, style, 0) - - if 'yerr' in kwds: - kwds['yerr'] = kwds['yerr'][0] - _plot(data, 0, ax, label, self.style, **kwds) - - else: - for i, col in enumerate(data.columns): - label = com.pprint_thing(col) - ax = self._get_ax(i) - style = self._get_style(i, col) - kwds = self.kwds.copy() - - self._maybe_add_color(colors, kwds, style, i) + self._maybe_add_color(colors, kwds, style, i) - # key-matched DataFrame of errors - if 'yerr' in kwds: - yerr = kwds['yerr'] - if isinstance(yerr, (DataFrame, dict)): - if col in yerr.keys(): - kwds['yerr'] = yerr[col] - else: del kwds['yerr'] - else: - kwds['yerr'] = yerr[i] + # key-matched DataFrame of errors + if 'yerr' in kwds: + yerr = kwds['yerr'] + if isinstance(yerr, (DataFrame, dict)): + if label in yerr.keys(): + kwds['yerr'] = yerr[label] + else: del kwds['yerr'] + else: + kwds['yerr'] = yerr[i] - _plot(data[col], i, ax, label, style, **kwds) + label = com.pprint_thing(label) + _plot(y, i, ax, label, style, **kwds) def _maybe_convert_index(self, data): # tsplot converts automatically, but don't want to convert index @@ -1828,6 +1822,16 @@ class BoxPlot(MPLPlot): class HistPlot(MPLPlot): pass +# kinds supported by both dataframe and series +_common_kinds = ['line', 'bar', 'barh', 'kde', 'density'] +# kinds 
supported by dataframe +_dataframe_kinds = ['scatter', 'hexbin'] +_all_kinds = _common_kinds + _dataframe_kinds + +_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot, + 'kde': KdePlot, + 'scatter': ScatterPlot, 'hexbin': HexBinPlot} + def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, sharey=False, use_index=True, figsize=None, grid=None, @@ -1921,21 +1925,14 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, is a function of one argument that reduces all the values in a bin to a single number (e.g. `mean`, `max`, `sum`, `std`). """ + kind = _get_standard_kind(kind.lower().strip()) - if kind == 'line': - klass = LinePlot - elif kind in ('bar', 'barh'): - klass = BarPlot - elif kind == 'kde': - klass = KdePlot - elif kind == 'scatter': - klass = ScatterPlot - elif kind == 'hexbin': - klass = HexBinPlot + if kind in _dataframe_kinds or kind in _common_kinds: + klass = _plot_klass[kind] else: raise ValueError('Invalid chart type given %s' % kind) - if kind == 'scatter': + if kind in _dataframe_kinds: plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots, rot=rot,legend=legend, ax=ax, style=style, fontsize=fontsize, use_index=use_index, sharex=sharex, @@ -1944,16 +1941,6 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, figsize=figsize, logx=logx, logy=logy, sort_columns=sort_columns, secondary_y=secondary_y, **kwds) - elif kind == 'hexbin': - C = kwds.pop('C', None) # remove from kwargs so we can set default - plot_obj = klass(frame, x=x, y=y, kind=kind, subplots=subplots, - rot=rot,legend=legend, ax=ax, style=style, - fontsize=fontsize, use_index=use_index, sharex=sharex, - sharey=sharey, xticks=xticks, yticks=yticks, - xlim=xlim, ylim=ylim, title=title, grid=grid, - figsize=figsize, logx=logx, logy=logy, - sort_columns=sort_columns, secondary_y=secondary_y, - C=C, **kwds) else: if x is not None: if com.is_integer(x) and not frame.columns.holds_integer(): @@ -2051,14 +2038,9 
@@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, See matplotlib documentation online for more on this subject """ - from pandas import DataFrame kind = _get_standard_kind(kind.lower().strip()) - if kind == 'line': - klass = LinePlot - elif kind in ('bar', 'barh'): - klass = BarPlot - elif kind == 'kde': - klass = KdePlot + if kind in _common_kinds: + klass = _plot_klass[kind] else: raise ValueError('Invalid chart type given %s' % kind)
Modified these 2 points: - Simplify `LinePlot._make_ts_plot` to have single flow. - Simplify `plot_frame` and `plot_series` 's `kind` detection. (Also, setting hexbin default is not necessary because it is done in `HexBinPlot.__init__`)
https://api.github.com/repos/pandas-dev/pandas/pulls/6876
2014-04-13T00:23:08Z
2014-04-27T23:57:20Z
2014-04-27T23:57:20Z
2014-07-05T03:07:29Z
BUG: data written with to_sql legacy mode (sqlite/mysql) not persistent GH6846
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index ac90555526a5e..efb8ce07ab60e 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -785,6 +785,7 @@ def insert(self): data.insert(0, self.maybe_asscalar(r[0])) cur.execute(ins, tuple(data)) cur.close() + self.pd_sql.con.commit() def _create_table_statement(self): "Return a CREATE TABLE statement to suit the contents of a DataFrame." diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 80da7ae6bf391..57918e8315102 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -364,7 +364,7 @@ def test_to_sql_append(self): def test_to_sql_series(self): s = Series(np.arange(5, dtype='int64'), name='series') sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False) - s2 = sql.read_sql("SELECT * FROM test_series", self.conn, + s2 = sql.read_sql("SELECT * FROM test_series", self.conn, flavor='sqlite') tm.assert_frame_equal(s.to_frame(), s2) @@ -473,7 +473,7 @@ def connect(self): def test_to_sql_index_label(self): temp_frame = DataFrame({'col1': range(4)}) - + # no index name, defaults to 'pandas_index' sql.to_sql(temp_frame, 'test_index_label', self.conn) frame = sql.read_table('test_index_label', self.conn) @@ -507,8 +507,52 @@ class TestSQLLegacyApi(_TestSQLApi): """ flavor = 'sqlite' - def connect(self): - return sqlite3.connect(':memory:') + def connect(self, database=":memory:"): + return sqlite3.connect(database) + + def _load_test2_data(self): + columns = ['index', 'A', 'B'] + data = [( + '2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670), + ('2000-01-04 00:00:00', -29, -0.0412318367011), + ('2000-01-05 00:00:00', 20000, 0.731167677815), + ('2000-01-06 00:00:00', -290867, 1.56762092543)] + + self.test_frame2 = DataFrame(data, columns=columns) + + def test_sql_open_close(self): + """ + Test if the IO in the database still work if the connection + is closed between the writing and reading (as in many real + situations). 
+ """ + + self._load_test2_data() + + with tm.ensure_clean() as name: + + conn = self.connect(name) + + sql.to_sql( + self.test_frame2, + "test_frame2_legacy", + conn, + flavor="sqlite", + index=False, + ) + + conn.close() + conn = self.connect(name) + + result = sql.read_sql( + "SELECT * FROM test_frame2_legacy;", + conn, + flavor="sqlite", + ) + + conn.close() + + tm.assert_frame_equal(self.test_frame2, result) class _TestSQLAlchemy(PandasSQLTest): @@ -601,7 +645,7 @@ def test_default_type_conversion(self): self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), "IntColWithNull loaded with incorrect type") # Bool column with NA values becomes object - self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object), + self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object), "BoolColWithNull loaded with incorrect type") def test_default_date_load(self): @@ -696,14 +740,14 @@ def test_default_type_conversion(self): self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), "IntColWithNull loaded with incorrect type") # Non-native Bool column with NA values stays as float - self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), + self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), "BoolColWithNull loaded with incorrect type") def test_default_date_load(self): df = sql.read_table("types_test_data", self.conn) # IMPORTANT - sqlite has no native date type, so shouldn't parse, but - self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64), + self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") @@ -865,7 +909,7 @@ def test_default_type_conversion(self): "FloatCol loaded with incorrect type") self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer), "IntCol loaded with incorrect type") - # MySQL has no real BOOL type (it's an alias for TINYINT) + # MySQL has no real BOOL type (it's an alias for TINYINT) 
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer), "BoolCol loaded with incorrect type") @@ -873,13 +917,13 @@ def test_default_type_conversion(self): self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating), "IntColWithNull loaded with incorrect type") # Bool column with NA = int column with NA values => becomes float - self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), + self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), "BoolColWithNull loaded with incorrect type") class TestPostgreSQLAlchemy(_TestSQLAlchemy): flavor = 'postgresql' - + def connect(self): return sqlalchemy.create_engine( 'postgresql+{driver}://postgres@localhost/pandas_nosetest'.format(driver=self.driver))
I added a call to commit method for pushing data into the SQL database and not only in the memory. Tests are updated too in order to avoid future regression with a closed connection between writing and reading in the database. Since all the data in a memory based database in sqlite3 should be removed when closed, I create a temporary file used as database. The NamedTemporaryFile documentation states that it should be cross platforms but I don't have any Windows to test it. Normally doesn't break something. I never have done a pull request before, so let me know if something is bad. It's my first time and I'm happy to do it with pandas... Closes #6846
https://api.github.com/repos/pandas-dev/pandas/pulls/6875
2014-04-12T15:25:24Z
2014-04-13T19:39:44Z
2014-04-13T19:39:44Z
2014-06-30T19:39:24Z
BUG: Arithmetic, timezone and offsets operations affecting to NaT
diff --git a/doc/source/release.rst b/doc/source/release.rst index a23936ae154c0..08d7bf9b8728b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -370,6 +370,8 @@ Bug Fixes - Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) - Bug in `Series.__unicode__` when `max_rows` is `None` and the Series has more than 1000 rows. (:issue:`6863`) - Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) +- Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` affects to NaT (:issue:`5546`) +- Bug in arithmetic operations affecting to NaT (:issue:`6873`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 4de69639b8d7b..6ac21e60ea7f3 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -611,7 +611,10 @@ def __sub__(self, other): def _add_delta(self, delta): if isinstance(delta, (Tick, timedelta)): inc = offsets._delta_to_nanoseconds(delta) + mask = self.asi8 == tslib.iNaT new_values = (self.asi8 + inc).view(_NS_DTYPE) + new_values[mask] = tslib.iNaT + new_values = new_values.view(_NS_DTYPE) elif isinstance(delta, np.timedelta64): new_values = self.to_series() + delta else: diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index fdbfac1b4aae1..d5eaad61b2fda 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -13,6 +13,8 @@ from pandas import _np_version_under1p7 +import functools + __all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay', 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd', 'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd', @@ -35,6 +37,15 @@ def as_datetime(obj): obj = f() return obj +def apply_nat(func): + @functools.wraps(func) + def wrapper(self, other): + if other is tslib.NaT: + return tslib.NaT + else: + return func(self, other) + return wrapper + #---------------------------------------------------------------------- # DateOffset @@ -102,6 
+113,7 @@ def __init__(self, n=1, **kwds): else: self._offset = timedelta(1) + @apply_nat def apply(self, other): other = as_datetime(other) if len(self.kwds) > 0: @@ -382,6 +394,7 @@ def get_str(td): def isAnchored(self): return (self.n == 1) + @apply_nat def apply(self, other): if isinstance(other, datetime): n = self.n @@ -502,6 +515,7 @@ def __setstate__(self, state): self.__dict__ = state self._set_busdaycalendar() + @apply_nat def apply(self, other): if self.n <= 0: roll = 'forward' @@ -582,6 +596,7 @@ def name(self): class MonthEnd(MonthOffset): """DateOffset of one month end""" + @apply_nat def apply(self, other): other = datetime(other.year, other.month, other.day, tzinfo=other.tzinfo) @@ -606,6 +621,7 @@ def onOffset(cls, dt): class MonthBegin(MonthOffset): """DateOffset of one month at beginning""" + @apply_nat def apply(self, other): n = self.n @@ -628,6 +644,7 @@ class BusinessMonthEnd(MonthOffset): def isAnchored(self): return (self.n == 1) + @apply_nat def apply(self, other): other = datetime(other.year, other.month, other.day) @@ -653,6 +670,7 @@ def apply(self, other): class BusinessMonthBegin(MonthOffset): """DateOffset of one business month at beginning""" + @apply_nat def apply(self, other): n = self.n @@ -710,6 +728,7 @@ def __init__(self, n=1, **kwds): def isAnchored(self): return (self.n == 1 and self.weekday is not None) + @apply_nat def apply(self, other): if self.weekday is None: return as_timestamp(as_datetime(other) + self.n * self._inc) @@ -811,6 +830,7 @@ def __init__(self, n=1, **kwds): self.kwds = kwds + @apply_nat def apply(self, other): offsetOfMonth = self.getOffsetOfMonth(other) @@ -890,6 +910,7 @@ def __init__(self, n=1, **kwds): self.kwds = kwds + @apply_nat def apply(self, other): offsetOfMonth = self.getOffsetOfMonth(other) @@ -983,6 +1004,7 @@ class BQuarterEnd(QuarterOffset): _from_name_startingMonth = 12 _prefix = 'BQ' + @apply_nat def apply(self, other): n = self.n @@ -1037,6 +1059,7 @@ class BQuarterBegin(QuarterOffset): 
_from_name_startingMonth = 1 _prefix = 'BQS' + @apply_nat def apply(self, other): n = self.n other = as_datetime(other) @@ -1086,6 +1109,7 @@ def __init__(self, n=1, **kwds): def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) + @apply_nat def apply(self, other): n = self.n other = as_datetime(other) @@ -1117,6 +1141,7 @@ class QuarterBegin(QuarterOffset): def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) + @apply_nat def apply(self, other): n = self.n other = as_datetime(other) @@ -1166,6 +1191,7 @@ class BYearEnd(YearOffset): _default_month = 12 _prefix = 'BA' + @apply_nat def apply(self, other): n = self.n other = as_datetime(other) @@ -1203,6 +1229,7 @@ class BYearBegin(YearOffset): _default_month = 1 _prefix = 'BAS' + @apply_nat def apply(self, other): n = self.n other = as_datetime(other) @@ -1234,6 +1261,7 @@ class YearEnd(YearOffset): _default_month = 12 _prefix = 'A' + @apply_nat def apply(self, other): def _increment(date): if date.month == self.month: @@ -1290,6 +1318,7 @@ class YearBegin(YearOffset): _default_month = 1 _prefix = 'AS' + @apply_nat def apply(self, other): def _increment(date): year = date.year @@ -1410,6 +1439,7 @@ def onOffset(self, dt): else: return year_end == dt + @apply_nat def apply(self, other): n = self.n prev_year = self.get_year_end( @@ -1596,6 +1626,7 @@ def __init__(self, n=1, **kwds): def isAnchored(self): return self.n == 1 and self._offset.isAnchored() + @apply_nat def apply(self, other): other = as_datetime(other) n = self.n @@ -1693,6 +1724,7 @@ class Easter(DateOffset): def __init__(self, n=1, **kwds): super(Easter, self).__init__(n, **kwds) + @apply_nat def apply(self, other): currentEaster = easter(other.year) @@ -1786,6 +1818,7 @@ def delta(self): def nanos(self): return _delta_to_nanoseconds(self.delta) + @apply_nat def apply(self, other): if type(other) == date: other = datetime(other.year, other.month, other.day) diff --git a/pandas/tseries/tests/test_offsets.py 
b/pandas/tseries/tests/test_offsets.py index c306c9ff8b0e9..45ac1b3ac15a6 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -5,6 +5,7 @@ import nose from nose.tools import assert_raises + import numpy as np from pandas.core.datetools import ( @@ -20,7 +21,7 @@ from pandas.tseries.tools import parse_time_string import pandas.tseries.offsets as offsets -from pandas.tslib import monthrange, OutOfBoundsDatetime +from pandas.tslib import monthrange, OutOfBoundsDatetime, NaT from pandas.lib import Timestamp from pandas.util.testing import assertRaisesRegexp import pandas.util.testing as tm @@ -98,14 +99,33 @@ def test_to_m8(): class TestBase(tm.TestCase): _offset = None + offset_types = [getattr(offsets, o) for o in offsets.__all__] + skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.Nano] + + def _get_offset(self, klass, value=1): + # create instance from offset class + if klass is FY5253 or klass is FY5253Quarter: + klass = klass(n=value, startingMonth=1, weekday=1, + qtr_with_extra_week=1, variation='last') + elif klass is WeekOfMonth or klass is LastWeekOfMonth: + klass = LastWeekOfMonth(n=value, weekday=5) + else: + try: + klass = klass(value) + except: + klass = klass() + return klass + def test_apply_out_of_range(self): if self._offset is None: raise nose.SkipTest("_offset not defined to test out-of-range") + if self._offset in self.skip_np_u1p7: + raise nose.SkipTest('numpy >= 1.7 required') # try to create an out-of-bounds result timestamp; if we can't create the offset # skip try: - offset = self._offset(10000) + offset = self._get_offset(self._offset, value=10000) result = Timestamp('20080101') + offset self.assertIsInstance(result, datetime) @@ -114,16 +134,27 @@ def test_apply_out_of_range(self): except (ValueError, KeyError): raise nose.SkipTest("cannot create out_of_range offset") + +class TestOps(TestBase): + def test_return_type(self): + for offset in self.offset_types: + if 
_np_version_under1p7 and offset in self.skip_np_u1p7: + continue - # make sure that we are returning a Timestamp - try: - offset = self._offset(1) - except: - raise nose.SkipTest("_offset not defined to test return_type") + offset = self._get_offset(offset) + + # make sure that we are returning a Timestamp + result = Timestamp('20080101') + offset + self.assertIsInstance(result, Timestamp) + + # make sure that we are returning NaT + self.assert_(NaT + offset is NaT) + self.assert_(offset + NaT is NaT) + + self.assert_(NaT - offset is NaT) + self.assert_((-offset).apply(NaT) is NaT) - result = Timestamp('20080101') + offset - self.assertIsInstance(result, Timestamp) class TestDateOffset(TestBase): _multiprocess_can_split_ = True diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 5a265464e4e87..cc976488c19c1 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -975,6 +975,41 @@ def test_tzaware_offset(self): offset = dates + timedelta(hours=5) self.assert_(offset.equals(expected)) + def test_nat(self): + # GH 5546 + dates = [NaT] + idx = DatetimeIndex(dates) + idx = idx.tz_localize('US/Pacific') + self.assert_(idx.equals(DatetimeIndex(dates, tz='US/Pacific'))) + idx = idx.tz_convert('US/Eastern') + self.assert_(idx.equals(DatetimeIndex(dates, tz='US/Eastern'))) + idx = idx.tz_convert('UTC') + self.assert_(idx.equals(DatetimeIndex(dates, tz='UTC'))) + + dates = ['2010-12-01 00:00', '2010-12-02 00:00', NaT] + idx = DatetimeIndex(dates) + idx = idx.tz_localize('US/Pacific') + self.assert_(idx.equals(DatetimeIndex(dates, tz='US/Pacific'))) + idx = idx.tz_convert('US/Eastern') + expected = ['2010-12-01 03:00', '2010-12-02 03:00', NaT] + self.assert_(idx.equals(DatetimeIndex(expected, tz='US/Eastern'))) + + idx = idx + offsets.Hour(5) + expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT] + self.assert_(idx.equals(DatetimeIndex(expected, tz='US/Eastern'))) + idx = 
idx.tz_convert('US/Pacific') + expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT] + self.assert_(idx.equals(DatetimeIndex(expected, tz='US/Pacific'))) + + if not _np_version_under1p7: + idx = idx + np.timedelta64(3, 'h') + expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT] + self.assert_(idx.equals(DatetimeIndex(expected, tz='US/Pacific'))) + + idx = idx.tz_convert('US/Eastern') + expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT] + self.assert_(idx.equals(DatetimeIndex(expected, tz='US/Eastern'))) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 8d9e4d5069f61..5ccac52cbb535 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -269,6 +269,47 @@ def test_nanosecond_string_parsing(self): self.timestamp = Timestamp('2013-05-01 07:15:45.123456789') self.assertEqual(self.timestamp.value, 1367392545123456000) + def test_nat_arithmetic(self): + # GH 6873 + nat = tslib.NaT + t = Timestamp('2014-01-01') + dt = datetime.datetime(2014, 1, 1) + delta = datetime.timedelta(3600) + + # Timestamp / datetime + for (left, right) in [(nat, nat), (nat, t), (dt, nat)]: + # NaT + Timestamp-like should raise TypeError + with tm.assertRaises(TypeError): + left + right + with tm.assertRaises(TypeError): + right + left + + # NaT - Timestamp-like (or inverse) returns NaT + self.assert_((left - right) is tslib.NaT) + self.assert_((right - left) is tslib.NaT) + + # timedelta-like + # offsets are tested in test_offsets.py + for (left, right) in [(nat, delta)]: + # NaT + timedelta-like returns NaT + self.assert_((left + right) is tslib.NaT) + # timedelta-like + NaT should raise TypeError + with tm.assertRaises(TypeError): + right + left + + self.assert_((left - right) is tslib.NaT) + with tm.assertRaises(TypeError): + right - left + + if _np_version_under1p7: + self.assertEqual(nat + 
np.timedelta64(1, 'h'), tslib.NaT) + with tm.assertRaises(TypeError): + np.timedelta64(1, 'h') + nat + + self.assertEqual(nat - np.timedelta64(1, 'h'), tslib.NaT) + with tm.assertRaises(TypeError): + np.timedelta64(1, 'h') - nat + class TestTslib(tm.TestCase): diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 278e8effb534b..ba6f03fd9bbb0 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -717,7 +717,8 @@ cdef class _Timestamp(datetime): or isinstance(other, timedelta) or hasattr(other, 'delta'): neg_other = -other return self + neg_other - + elif other is NaT: + return NaT return datetime.__sub__(self, other) cpdef _get_field(self, field): @@ -762,6 +763,26 @@ cdef class _NaT(_Timestamp): (type(self).__name__, type(other).__name__)) return PyObject_RichCompare(other, self, _reverse_ops[op]) + def __add__(self, other): + try: + result = _Timestamp.__add__(self, other) + if result is NotImplemented: + return result + except (OverflowError, OutOfBoundsDatetime): + pass + return NaT + + def __sub__(self, other): + if type(self) is datetime: + other, self = self, other + try: + result = _Timestamp.__sub__(self, other) + if result is NotImplemented: + return result + except (OverflowError, OutOfBoundsDatetime): + pass + return NaT + def _delta_to_nanoseconds(delta): if hasattr(delta, 'delta'): @@ -1764,10 +1785,13 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): for i in range(n): v = utc_dates[i] - if (pos + 1) < trans_len and v >= trans[pos + 1]: - pos += 1 - offset = deltas[pos] - result[i] = v + offset + if vals[i] == NPY_NAT: + result[i] = vals[i] + else: + if (pos + 1) < trans_len and v >= trans[pos + 1]: + pos += 1 + offset = deltas[pos] + result[i] = v + offset return result @@ -1781,6 +1805,9 @@ def tz_convert_single(int64_t val, object tz1, object tz2): if not have_pytz: import pytz + if val == NPY_NAT: + return val + # Convert to UTC if _is_tzlocal(tz1): pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts) @@ -2006,7 +2033,9 @@ 
def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False): for i in range(n): left = result_a[i] right = result_b[i] - if left != NPY_NAT and right != NPY_NAT: + if vals[i] == NPY_NAT: + result[i] = vals[i] + elif left != NPY_NAT and right != NPY_NAT: if left == right: result[i] = left else:
NaT affected by some datetime related ops unexpectedly. # Arithmetic Applying arithmetic ops to `NaT` is not handled properly. Based on numpy results, I understand that results should be all `NaT` as long as valid data is passed. ``` # current results >>> pd.NaT + pd.offsets.Hour(1) 2262-04-11 01:12:43.145224192 >>> pd.NaT - pd.offsets.Hour(1)) OverflowError: Python int too large to convert to C long >>> pd.NaT - pd.Timestamp('2011-01-01') -734779 days, 0:00:00 # numpy >>> np.datetime64('nat') + np.timedelta64(1, 'h') NaT >>> np.datetime64('nat') - np.timedelta64(1, 'h') NaT >>> np.datetime64('nat') - np.datetime64('2011-01-01') NaT ``` # Timezone Closes #5546. ``` # current results >>> idx = pd.DatetimeIndex(['2011-01-01 00:00', '2011-01-02 00:00', pd.NaT]) >>> idx.tz_localize('Asia/Tokyo') <class 'pandas.tseries.index.DatetimeIndex'> [2011-01-01 00:00:00+09:00, ..., 2262-04-10 00:12:43.145224192+09:00] Length: 3, Freq: None, Timezone: Asia/Tokyo >>> idx = pd.DatetimeIndex(['2011-01-01 00:00', '2011-01-02 00:00', pd.NaT], tz='US/Eastern') >>> idx.tz_convert('Asia/Tokyo') <class 'pandas.tseries.index.DatetimeIndex'> [2011-01-01 14:00:00+09:00, ..., 2262-04-11 14:12:43.145224192+09:00] ``` _Note_ I fixed `DatetimeIndex`, and I leave `NatType` still doesn't have `tz_localize` and `tz_convert` methods `Timestamp` has. Is it should be added? # Offsets These have `apply` method which accepts `Timestamp`, but it cannot handle `Nat`. ``` # current result >>> pd.offsets.Hour(1).apply(pd.NaT) 2262-04-11 01:12:43.145224192 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6873
2014-04-11T23:28:59Z
2014-04-13T07:14:55Z
2014-04-13T07:14:55Z
2014-06-24T10:25:33Z
BUG: Bug in groupby.get_group where a datetlike wasn't always accepted (GH5267)
diff --git a/doc/source/release.rst b/doc/source/release.rst index e37a7c7eab861..0e5280f1c5306 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -365,6 +365,7 @@ Bug Fixes would only replace the first occurrence of a value (:issue:`6689`) - Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) - Bug in `Series.__unicode__` when `max_rows` is `None` and the Series has more than 1000 rows. (:issue:`6863`) +- Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) pandas 0.13.1 ------------- diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 033cdf5a81318..a32b25312d4ba 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1,9 +1,11 @@ import types from functools import wraps import numpy as np +import datetime from pandas.compat import( - zip, builtins, range, long, lrange, lzip, OrderedDict, callable + zip, builtins, range, long, lrange, lzip, + OrderedDict, callable ) from pandas import compat @@ -402,14 +404,32 @@ def indices(self): return self.grouper.indices def _get_index(self, name): - """ safe get index """ - try: - return self.indices[name] - except: - if isinstance(name, Timestamp): - name = name.value - return self.indices[name] - raise + """ safe get index, translate keys for datelike to underlying repr """ + + def convert(key, s): + # possibly convert to they actual key types + # in the indices, could be a Timestamp or a np.datetime64 + + if isinstance(s, (Timestamp,datetime.datetime)): + return Timestamp(key) + elif isinstance(s, np.datetime64): + return Timestamp(key).asm8 + return key + + sample = list(self.indices)[0] + if isinstance(sample, tuple): + if not isinstance(name, tuple): + raise ValueError("must supply a tuple to get_group with multiple grouping keys") + if not len(name) == len(sample): + raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys") + + name = tuple([ convert(n, k) for n, k 
in zip(name,sample) ]) + + else: + + name = convert(name, sample) + + return self.indices[name] @property def name(self): @@ -554,7 +574,7 @@ def apply(self, func, *args, **kwargs): path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first group. - + See also -------- diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 540ce1cc61929..a7f7223172848 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -17,7 +17,8 @@ assert_series_equal, assert_almost_equal, assert_index_equal) from pandas.compat import( - range, long, lrange, StringIO, lmap, lzip, map, zip, builtins, OrderedDict + range, long, lrange, StringIO, lmap, lzip, map, + zip, builtins, OrderedDict ) from pandas import compat from pandas.core.panel import Panel @@ -479,6 +480,36 @@ def test_get_group(self): expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1]) assert_panel_equal(gp, expected) + + # GH 5267 + # be datelike friendly + df = DataFrame({'DATE' : pd.to_datetime(['10-Oct-2013', '10-Oct-2013', '10-Oct-2013', + '11-Oct-2013', '11-Oct-2013', '11-Oct-2013']), + 'label' : ['foo','foo','bar','foo','foo','bar'], + 'VAL' : [1,2,3,4,5,6]}) + + g = df.groupby('DATE') + key = list(g.groups)[0] + result1 = g.get_group(key) + result2 = g.get_group(Timestamp(key).to_datetime()) + result3 = g.get_group(str(Timestamp(key))) + assert_frame_equal(result1,result2) + assert_frame_equal(result1,result3) + + g = df.groupby(['DATE','label']) + + key = list(g.groups)[0] + result1 = g.get_group(key) + result2 = g.get_group((Timestamp(key[0]).to_datetime(),key[1])) + result3 = g.get_group((str(Timestamp(key[0])),key[1])) + assert_frame_equal(result1,result2) + assert_frame_equal(result1,result3) + + # must pass a same-length tuple with multiple keys + self.assertRaises(ValueError, lambda : g.get_group('foo')) + self.assertRaises(ValueError, lambda : g.get_group(('foo'))) + 
self.assertRaises(ValueError, lambda : g.get_group(('foo','bar','baz'))) + def test_agg_apply_corner(self): # nothing to group, all NA grouped = self.ts.groupby(self.ts * np.nan)
closes #5267
https://api.github.com/repos/pandas-dev/pandas/pulls/6872
2014-04-11T19:08:27Z
2014-04-11T22:33:18Z
2014-04-11T22:33:18Z
2014-06-27T15:47:52Z
API: update SQL functional api (GH6300)
diff --git a/doc/source/api.rst b/doc/source/api.rst index f6dfd5cfaf0e7..5e5b84e0e80b2 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -78,17 +78,11 @@ SQL .. autosummary:: :toctree: generated/ - + + read_sql_table + read_sql_query read_sql -.. currentmodule:: pandas.io.sql - -.. autosummary:: - :toctree: generated/ - - read_frame - write_frame - Google BigQuery ~~~~~~~~~~~~~~~ .. currentmodule:: pandas.io.gbq diff --git a/doc/source/io.rst b/doc/source/io.rst index b6bb5718e37f9..891ad3ca1cf85 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3113,11 +3113,22 @@ DB-API <http://www.python.org/dev/peps/pep-0249/>`__. See also some :ref:`cookbook examples <cookbook.sql>` for some advanced strategies. The key functions are: -:func:`~pandas.io.sql.to_sql` -:func:`~pandas.io.sql.read_sql` -:func:`~pandas.io.sql.read_table` +.. autosummary:: + :toctree: generated/ + + read_sql_table + read_sql_query + read_sql + DataFrame.to_sql +.. note:: + + The function :func:`~pandas.read_sql` is a convenience wrapper around + :func:`~pandas.read_sql_table` and :func:`~pandas.read_sql_query` (and for + backward compatibility) and will delegate to specific function depending on + the provided input (database table name or sql query). + In the following example, we use the `SQlite <http://www.sqlite.org/>`__ SQL database engine. You can use a temporary SQLite database where data are stored in "memory". @@ -3129,7 +3140,7 @@ connecting to. For more information on :func:`create_engine` and the URI formatting, see the examples below and the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__ -.. code-block:: python +.. ipython:: python from sqlalchemy import create_engine from pandas.io import sql @@ -3140,8 +3151,7 @@ Writing DataFrames ~~~~~~~~~~~~~~~~~~ Assuming the following data is in a DataFrame ``data``, we can insert it into -the database using :func:`~pandas.io.sql.to_sql`. 
- +the database using :func:`~pandas.DataFrame.to_sql`. +-----+------------+-------+-------+-------+ | id | Date | Col_1 | Col_2 | Col_3 | @@ -3154,13 +3164,6 @@ the database using :func:`~pandas.io.sql.to_sql`. +-----+------------+-------+-------+-------+ -.. ipython:: python - :suppress: - - from sqlalchemy import create_engine - from pandas.io import sql - engine = create_engine('sqlite:///:memory:') - .. ipython:: python :suppress: @@ -3171,44 +3174,47 @@ the database using :func:`~pandas.io.sql.to_sql`. (63, datetime.datetime(2010,10,20), 'Z', 5.73, True)] data = DataFrame(d, columns=c) - sql.to_sql(data, 'data', engine) + +.. ipython:: python + + data.to_sql('data', engine) Reading Tables ~~~~~~~~~~~~~~ -:func:`~pandas.io.sql.read_table` will read a databse table given the +:func:`~pandas.read_sql_table` will read a database table given the table name and optionally a subset of columns to read. .. note:: - In order to use :func:`~pandas.io.sql.read_table`, you **must** have the + In order to use :func:`~pandas.read_sql_table`, you **must** have the SQLAlchemy optional dependency installed. .. ipython:: python - sql.read_table('data', engine) + pd.read_sql_table('data', engine) You can also specify the name of the column as the DataFrame index, and specify a subset of columns to be read. .. ipython:: python - sql.read_table('data', engine, index_col='id') - sql.read_table('data', engine, columns=['Col_1', 'Col_2']) + pd.read_sql_table('data', engine, index_col='id') + pd.read_sql_table('data', engine, columns=['Col_1', 'Col_2']) And you can explicitly force columns to be parsed as dates: .. ipython:: python - sql.read_table('data', engine, parse_dates=['Date']) + pd.read_sql_table('data', engine, parse_dates=['Date']) If needed you can explicitly specifiy a format string, or a dict of arguments -to pass to :func:`pandas.tseries.tools.to_datetime`. +to pass to :func:`pandas.to_datetime`: .. 
code-block:: python - sql.read_table('data', engine, parse_dates={'Date': '%Y-%m-%d'}) - sql.read_table('data', engine, parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}}) + pd.read_sql_table('data', engine, parse_dates={'Date': '%Y-%m-%d'}) + pd.read_sql_table('data', engine, parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}}) You can check if a table exists using :func:`~pandas.io.sql.has_table` @@ -3219,20 +3225,20 @@ instantiated directly for more manual control over the SQL interaction. Querying ~~~~~~~~ -You can query using raw SQL in the :func:`~pandas.io.sql.read_sql` function. +You can query using raw SQL in the :func:`~pandas.read_sql_query` function. In this case you must use the SQL variant appropriate for your database. When using SQLAlchemy, you can also pass SQLAlchemy Expression language constructs, which are database-agnostic. .. ipython:: python - sql.read_sql('SELECT * FROM data', engine) + pd.read_sql_query('SELECT * FROM data', engine) Of course, you can specify a more "complex" query. .. ipython:: python - sql.read_frame("SELECT id, Col_1, Col_2 FROM data WHERE id = 42;", engine) + pd.read_sql_query("SELECT id, Col_1, Col_2 FROM data WHERE id = 42;", engine) You can also run a plain query without creating a dataframe with @@ -3290,7 +3296,7 @@ you are using. .. 
code-block:: python - sql.to_sql(data, 'data', cnx, flavor='sqlite') + data.to_sql('data', cnx, flavor='sqlite') sql.read_sql("SELECT * FROM data", cnx, flavor='sqlite') diff --git a/pandas/io/api.py b/pandas/io/api.py index cf3615cd822cd..5fa8c7ef60074 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -8,7 +8,7 @@ from pandas.io.pytables import HDFStore, Term, get_store, read_hdf from pandas.io.json import read_json from pandas.io.html import read_html -from pandas.io.sql import read_sql +from pandas.io.sql import read_sql, read_sql_table, read_sql_query from pandas.io.stata import read_stata from pandas.io.pickle import read_pickle, to_pickle from pandas.io.packers import read_msgpack, to_msgpack diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 5052f057871b0..bed4c2da61c59 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -176,14 +176,66 @@ def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'): #------------------------------------------------------------------------------ # Read and write to DataFrames +def read_sql_table(table_name, con, meta=None, index_col=None, + coerce_float=True, parse_dates=None, columns=None): + """Read SQL database table into a DataFrame. + + Given a table name and an SQLAlchemy engine, returns a DataFrame. + This function does not support DBAPI connections. + + Parameters + ---------- + table_name : string + Name of SQL table in database + con : SQLAlchemy engine + Legacy mode not supported + meta : SQLAlchemy meta, optional + If omitted MetaData is reflected from engine + index_col : string, optional + Column to set as index + coerce_float : boolean, default True + Attempt to convert values to non-string, non-numeric objects (like + decimal.Decimal) to floating point. Can result in loss of Precision. 
+ parse_dates : list or dict + - List of column names to parse as dates + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite + columns : list + List of column names to select from sql table + + Returns + ------- + DataFrame + + See also + -------- + read_sql_query : Read SQL query into a DataFrame. + read_sql + -def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, - params=None, parse_dates=None): """ - Returns a DataFrame corresponding to the result set of the query - string. + pandas_sql = PandasSQLAlchemy(con, meta=meta) + table = pandas_sql.read_table( + table_name, index_col=index_col, coerce_float=coerce_float, + parse_dates=parse_dates, columns=columns) + + if table is not None: + return table + else: + raise ValueError("Table %s not found" % table_name, con) + + +def read_sql_query(sql, con, index_col=None, flavor='sqlite', + coerce_float=True, params=None, parse_dates=None): + """Read SQL query into a DataFrame. - Optionally provide an `index_col` parameter to use one of the + Returns a DataFrame corresponding to the result set of the query + string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. 
Parameters @@ -221,15 +273,83 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, See also -------- - read_table + read_sql_table : Read SQL database table into a DataFrame + read_sql """ pandas_sql = pandasSQL_builder(con, flavor=flavor) - return pandas_sql.read_sql(sql, - index_col=index_col, - params=params, - coerce_float=coerce_float, - parse_dates=parse_dates) + return pandas_sql.read_sql( + sql, index_col=index_col, params=params, coerce_float=coerce_float, + parse_dates=parse_dates) + + +def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, + params=None, parse_dates=None, columns=None): + """ + Read SQL query or database table into a DataFrame. + + Parameters + ---------- + sql : string + SQL query to be executed or database table name. + con : SQLAlchemy engine or DBAPI2 connection (legacy mode) + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object is given, a supported SQL flavor must also be provided + index_col : string, optional + column name to use for the returned DataFrame object. + flavor : string, {'sqlite', 'mysql'} + The flavor of SQL to use. Ignored when using + SQLAlchemy engine. Required when using DBAPI2 connection. + coerce_float : boolean, default True + Attempt to convert values to non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets + cur : depreciated, cursor is obtained from connection + params : list, tuple or dict, optional + List of parameters to pass to execute method. 
+ parse_dates : list or dict + - List of column names to parse as dates + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite + columns : list + List of column names to select from sql table + + Returns + ------- + DataFrame + + Notes + ----- + This function is a convenience wrapper around ``read_sql_table`` and + ``read_sql_query`` (and for backward compatibility) and will delegate + to the specific function depending on the provided input (database + table name or sql query). + + See also + -------- + read_sql_table : Read SQL database table into a DataFrame + read_sql_query : Read SQL query into a DataFrame + + """ + pandas_sql = pandasSQL_builder(con, flavor=flavor) + + if pandas_sql.has_table(sql): + if isinstance(pandas_sql, PandasSQLLegacy): + raise ValueError("Reading a table with read_sql is not supported " + "for a DBAPI2 connection. Use an SQLAlchemy " + "engine or specify an sql query") + return pandas_sql.read_table( + sql, index_col=index_col, coerce_float=coerce_float, + parse_dates=parse_dates, columns=columns) + else: + return pandas_sql.read_sql( + sql, index_col=index_col, params=params, coerce_float=coerce_float, + parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, @@ -296,59 +416,6 @@ def has_table(table_name, con, meta=None, flavor='sqlite'): return pandas_sql.has_table(table_name) -def read_table(table_name, con, meta=None, index_col=None, coerce_float=True, - parse_dates=None, columns=None): - """Given a table name and SQLAlchemy engine, return a DataFrame. - - Type convertions will be done automatically. 
- - Parameters - ---------- - table_name : string - Name of SQL table in database - con : SQLAlchemy engine - Legacy mode not supported - meta : SQLAlchemy meta, optional - If omitted MetaData is reflected from engine - index_col : string or sequence of strings, optional - Column(s) to set as index. - coerce_float : boolean, default True - Attempt to convert values to non-string, non-numeric objects (like - decimal.Decimal) to floating point. Can result in loss of Precision. - parse_dates : list or dict - - List of column names to parse as dates - - Dict of ``{column_name: format string}`` where format string is - strftime compatible in case of parsing string times or is one of - (D, s, ns, ms, us) in case of parsing integer timestamps - - Dict of ``{column_name: arg dict}``, where the arg dict corresponds - to the keyword arguments of :func:`pandas.to_datetime` - Especially useful with databases without native Datetime support, - such as SQLite - columns : list, optional - List of column names to select from sql table - - Returns - ------- - DataFrame - - See also - -------- - read_sql - - """ - pandas_sql = PandasSQLAlchemy(con, meta=meta) - table = pandas_sql.read_table(table_name, - index_col=index_col, - coerce_float=coerce_float, - parse_dates=parse_dates, - columns=columns) - - if table is not None: - return table - else: - raise ValueError("Table %s not found" % table_name, con) - - def pandasSQL_builder(con, flavor=None, meta=None): """ Convenience function to return the correct PandasSQL subclass based on the @@ -667,6 +734,13 @@ def uquery(self, *args, **kwargs): result = self.execute(*args, **kwargs) return result.rowcount + def read_table(self, table_name, index_col=None, coerce_float=True, + parse_dates=None, columns=None): + + table = PandasSQLTable(table_name, self, index=index_col) + return table.read(coerce_float=coerce_float, + parse_dates=parse_dates, columns=columns) + def read_sql(self, sql, index_col=None, coerce_float=True, parse_dates=None, 
params=None): args = _convert_params(sql, params) @@ -705,13 +779,6 @@ def has_table(self, name): def get_table(self, table_name): return self.meta.tables.get(table_name) - def read_table(self, table_name, index_col=None, coerce_float=True, - parse_dates=None, columns=None): - - table = PandasSQLTable(table_name, self, index=index_col) - return table.read(coerce_float=coerce_float, - parse_dates=parse_dates, columns=columns) - def drop_table(self, table_name): if self.engine.has_table(table_name): self.get_table(table_name).drop() diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index f05f6fe3c1d14..83978a0e0b8f7 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -306,7 +306,7 @@ def setUp(self): self._load_raw_sql() def test_read_sql_iris(self): - iris_frame = sql.read_sql( + iris_frame = sql.read_sql_query( "SELECT * FROM iris", self.conn, flavor='sqlite') self._check_iris_loaded_frame(iris_frame) @@ -364,8 +364,8 @@ def test_to_sql_append(self): def test_to_sql_series(self): s = Series(np.arange(5, dtype='int64'), name='series') sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False) - s2 = sql.read_sql("SELECT * FROM test_series", self.conn, - flavor='sqlite') + s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn, + flavor='sqlite') tm.assert_frame_equal(s.to_frame(), s2) def test_to_sql_panel(self): @@ -384,7 +384,7 @@ def test_legacy_write_frame(self): def test_roundtrip(self): sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn, flavor='sqlite') - result = sql.read_sql( + result = sql.read_sql_query( 'SELECT * FROM test_frame_roundtrip', con=self.conn, flavor='sqlite') @@ -412,35 +412,33 @@ def test_tquery(self): def test_date_parsing(self): """ Test date parsing in read_sql """ # No Parsing - df = sql.read_sql( - "SELECT * FROM types_test_data", self.conn, flavor='sqlite') + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + flavor='sqlite') 
self.assertFalse( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") - df = sql.read_sql("SELECT * FROM types_test_data", - self.conn, flavor='sqlite', parse_dates=['DateCol']) + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + flavor='sqlite', parse_dates=['DateCol']) self.assertTrue( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") - df = sql.read_sql("SELECT * FROM types_test_data", self.conn, - flavor='sqlite', - parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + flavor='sqlite', + parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) self.assertTrue( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") - df = sql.read_sql("SELECT * FROM types_test_data", - self.conn, flavor='sqlite', - parse_dates=['IntDateCol']) + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + flavor='sqlite', parse_dates=['IntDateCol']) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") - df = sql.read_sql("SELECT * FROM types_test_data", - self.conn, flavor='sqlite', - parse_dates={'IntDateCol': 's'}) + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + flavor='sqlite', parse_dates={'IntDateCol': 's'}) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") @@ -448,10 +446,10 @@ def test_date_parsing(self): def test_date_and_index(self): """ Test case where same column appears in parse_date and index_col""" - df = sql.read_sql("SELECT * FROM types_test_data", - self.conn, flavor='sqlite', - parse_dates=['DateCol', 'IntDateCol'], - index_col='DateCol') + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + flavor='sqlite', index_col='DateCol', + parse_dates=['DateCol', 'IntDateCol']) + self.assertTrue( issubclass(df.index.dtype.type, 
np.datetime64), "DateCol loaded with incorrect type") @@ -465,13 +463,13 @@ def test_to_sql_index_label(self): # no index name, defaults to 'index' sql.to_sql(temp_frame, 'test_index_label', self.conn) - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'index') # specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label='other_label') - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'other_label', "Specified index_label not written to database") @@ -479,14 +477,14 @@ def test_to_sql_index_label(self): temp_frame.index.name = 'index_name' sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace') - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'index_name', "Index name not written to database") # has index name, but specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label='other_label') - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'other_label', "Specified index_label not written to database") @@ -496,14 +494,14 @@ def test_to_sql_index_label_multiindex(self): # no index name, defaults to 'level_0' and 'level_1' sql.to_sql(temp_frame, 'test_index_label', self.conn) - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[0], 'level_0') self.assertEqual(frame.columns[1], 'level_1') # specifying index_label sql.to_sql(temp_frame, 
'test_index_label', self.conn, if_exists='replace', index_label=['A', 'B']) - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'], "Specified index_labels not written to database") @@ -511,14 +509,14 @@ def test_to_sql_index_label_multiindex(self): temp_frame.index.names = ['A', 'B'] sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace') - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'], "Index names not written to database") # has index name, but specifying index_label sql.to_sql(temp_frame, 'test_index_label', self.conn, if_exists='replace', index_label=['C', 'D']) - frame = sql.read_sql('SELECT * FROM test_index_label', self.conn) + frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn) self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'], "Specified index_labels not written to database") @@ -545,7 +543,7 @@ def test_read_table_columns(self): sql.to_sql(self.test_frame1, 'test_frame', self.conn) cols = ['A', 'B'] - result = sql.read_table('test_frame', self.conn, columns=cols) + result = sql.read_sql_table('test_frame', self.conn, columns=cols) self.assertEqual(result.columns.tolist(), cols, "Columns not correctly selected") @@ -553,21 +551,34 @@ def test_read_table_index_col(self): # test columns argument in read_table sql.to_sql(self.test_frame1, 'test_frame', self.conn) - result = sql.read_table('test_frame', self.conn, index_col="index") + result = sql.read_sql_table('test_frame', self.conn, index_col="index") self.assertEqual(result.index.names, ["index"], "index_col not correctly set") - result = sql.read_table('test_frame', self.conn, index_col=["A", "B"]) + result = sql.read_sql_table('test_frame', self.conn, index_col=["A", 
"B"]) self.assertEqual(result.index.names, ["A", "B"], "index_col not correctly set") - result = sql.read_table('test_frame', self.conn, index_col=["A", "B"], + result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"], columns=["C", "D"]) self.assertEqual(result.index.names, ["A", "B"], "index_col not correctly set") self.assertEqual(result.columns.tolist(), ["C", "D"], "columns not set correctly whith index_col") + def test_read_sql_delegate(self): + iris_frame1 = sql.read_sql_query( + "SELECT * FROM iris", self.conn) + iris_frame2 = sql.read_sql( + "SELECT * FROM iris", self.conn) + tm.assert_frame_equal(iris_frame1, iris_frame2, + "read_sql and read_sql_query have not the same" + " result with a query") + + iris_frame1 = sql.read_sql_table('iris', self.conn) + iris_frame2 = sql.read_sql('iris', self.conn) + tm.assert_frame_equal(iris_frame1, iris_frame2) + class TestSQLLegacyApi(_TestSQLApi): @@ -612,7 +623,7 @@ def test_sql_open_close(self): conn.close() conn = self.connect(name) - result = sql.read_sql( + result = sql.read_sql_query( "SELECT * FROM test_frame2_legacy;", conn, flavor="sqlite", @@ -622,6 +633,18 @@ def test_sql_open_close(self): tm.assert_frame_equal(self.test_frame2, result) + def test_read_sql_delegate(self): + iris_frame1 = sql.read_sql_query( + "SELECT * FROM iris", self.conn, flavor=self.flavor) + iris_frame2 = sql.read_sql( + "SELECT * FROM iris", self.conn, flavor=self.flavor) + tm.assert_frame_equal(iris_frame1, iris_frame2, + "read_sql and read_sql_query have not the same" + " result with a query") + + self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn, + flavor=self.flavor) + class _TestSQLAlchemy(PandasSQLTest): """ @@ -686,21 +709,21 @@ def test_execute_sql(self): self._execute_sql() def test_read_table(self): - iris_frame = sql.read_table("iris", con=self.conn) + iris_frame = sql.read_sql_table("iris", con=self.conn) self._check_iris_loaded_frame(iris_frame) def test_read_table_columns(self): - iris_frame 
= sql.read_table( + iris_frame = sql.read_sql_table( "iris", con=self.conn, columns=['SepalLength', 'SepalLength']) tm.equalContents( iris_frame.columns.values, ['SepalLength', 'SepalLength']) def test_read_table_absent(self): self.assertRaises( - ValueError, sql.read_table, "this_doesnt_exist", con=self.conn) + ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn) def test_default_type_conversion(self): - df = sql.read_table("types_test_data", self.conn) + df = sql.read_sql_table("types_test_data", self.conn) self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), "FloatCol loaded with incorrect type") @@ -717,7 +740,7 @@ def test_default_type_conversion(self): "BoolColWithNull loaded with incorrect type") def test_default_date_load(self): - df = sql.read_table("types_test_data", self.conn) + df = sql.read_sql_table("types_test_data", self.conn) # IMPORTANT - sqlite has no native date type, so shouldn't parse, but # MySQL SHOULD be converted. @@ -726,34 +749,34 @@ def test_default_date_load(self): def test_date_parsing(self): # No Parsing - df = sql.read_table("types_test_data", self.conn) + df = sql.read_sql_table("types_test_data", self.conn) - df = sql.read_table( + df = sql.read_sql_table( "types_test_data", self.conn, parse_dates=['DateCol']) self.assertTrue( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") - df = sql.read_table( + df = sql.read_sql_table( "types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) self.assertTrue( issubclass(df.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type") - df = sql.read_table("types_test_data", self.conn, parse_dates={ + df = sql.read_sql_table("types_test_data", self.conn, parse_dates={ 'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}}) self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") - df = sql.read_table( + df = sql.read_sql_table( "types_test_data", self.conn, 
parse_dates=['IntDateCol']) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") - df = sql.read_table( + df = sql.read_sql_table( "types_test_data", self.conn, parse_dates={'IntDateCol': 's'}) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") - df = sql.read_table( + df = sql.read_sql_table( "types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}}) self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64), "IntDateCol loaded with incorrect type") @@ -766,7 +789,7 @@ def test_mixed_dtype_insert(self): # write and read again df.to_sql("test_read_write", self.conn, index=False) - df2 = sql.read_table("test_read_write", self.conn) + df2 = sql.read_sql_table("test_read_write", self.conn) tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True) @@ -794,7 +817,7 @@ def setUp(self): self._load_test1_data() def test_default_type_conversion(self): - df = sql.read_table("types_test_data", self.conn) + df = sql.read_sql_table("types_test_data", self.conn) self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), "FloatCol loaded with incorrect type") @@ -812,7 +835,7 @@ def test_default_type_conversion(self): "BoolColWithNull loaded with incorrect type") def test_default_date_load(self): - df = sql.read_table("types_test_data", self.conn) + df = sql.read_sql_table("types_test_data", self.conn) # IMPORTANT - sqlite has no native date type, so shouldn't parse, but self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64), @@ -971,7 +994,7 @@ def tearDown(self): self.conn.execute('DROP TABLE %s' % table[0]) def test_default_type_conversion(self): - df = sql.read_table("types_test_data", self.conn) + df = sql.read_sql_table("types_test_data", self.conn) self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating), "FloatCol loaded with incorrect type")
First draft for the functional api part of #6300.
https://api.github.com/repos/pandas-dev/pandas/pulls/6867
2014-04-10T22:31:08Z
2014-04-18T11:59:45Z
2014-04-18T11:59:45Z
2014-06-18T18:18:25Z
Custom Business Month
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 899bc2232f161..e3070ff1507a2 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -432,6 +432,8 @@ frequency increment. Specific offset logic like "month", "business day", or MonthBegin, "calendar month begin" BMonthEnd, "business month end" BMonthBegin, "business month begin" + CBMonthEnd, "custom business month end" + CBMonthBegin, "custom business month begin" QuarterEnd, "calendar quarter end" QuarterBegin, "calendar quarter begin" BQuarterEnd, "business quarter end" @@ -558,6 +560,20 @@ As of v0.14 holiday calendars can be used to provide the list of holidays. See # Tuesday after MLK Day (Monday is skipped because it's a holiday) dt + bday_us +Monthly offsets that respect a certain holiday calendar can be defined +in the usual way. + +.. ipython:: python + + from pandas.tseries.offsets import CustomBusinessMonthBegin + bmth_us = CustomBusinessMonthBegin(calendar=USFederalHolidayCalendar()) + # Skip new years + dt = datetime(2013, 12, 17) + dt + bmth_us + + # Define date index with custom offset + from pandas import DatetimeIndex + DatetimeIndex(start='20100101',end='20120101',freq=bmth_us) .. note:: @@ -601,8 +617,10 @@ frequencies. 
We will refer to these aliases as *offset aliases* "W", "weekly frequency" "M", "month end frequency" "BM", "business month end frequency" + "CBM", "custom business month end frequency" "MS", "month start frequency" "BMS", "business month start frequency" + "CBMS", "custom business month start frequency" "Q", "quarter end frequency" "BQ", "business quarter endfrequency" "QS", "quarter start frequency" diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 49507f2b6dd8f..e4b356e37a6e1 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -477,6 +477,7 @@ Enhancements - Implemented ``Panel.pct_change`` (:issue:`6904`) - Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) +- ``CustomBuisnessMonthBegin`` and ``CustomBusinessMonthEnd`` are now available (:issue:`6866`) Performance ~~~~~~~~~~~ diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py index 1fb6ae4225f25..6678baac1dae5 100644 --- a/pandas/core/datetools.py +++ b/pandas/core/datetools.py @@ -10,14 +10,20 @@ try: cday = CDay() customBusinessDay = CustomBusinessDay() + customBusinessMonthEnd = CBMonthEnd() + customBusinessMonthBegin = CBMonthBegin() except NotImplementedError: cday = None customBusinessDay = None + customBusinessMonthEnd = None + customBusinessMonthBegin = None monthEnd = MonthEnd() yearEnd = YearEnd() yearBegin = YearBegin() bmonthEnd = BMonthEnd() -businessMonthEnd = bmonthEnd +bmonthBegin = BMonthBegin() +cbmonthEnd = customBusinessMonthEnd +cbmonthBegin = customBusinessMonthBegin bquarterEnd = BQuarterEnd() quarterEnd = QuarterEnd() byearEnd = BYearEnd() diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index d5eaad61b2fda..0c8d3c7160fc9 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1,3 +1,4 @@ +import sys from datetime import date, datetime, timedelta 
from pandas.compat import range from pandas import compat @@ -16,6 +17,7 @@ import functools __all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay', + 'CBMonthEnd','CBMonthBegin', 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd', 'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd', 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd', @@ -703,6 +705,132 @@ def onOffset(cls, dt): _prefix = 'BMS' + +class CustomBusinessMonthEnd(MonthOffset): + """ + **EXPERIMENTAL** DateOffset of one custom business month + + .. warning:: EXPERIMENTAL + + This class is not officially supported and the API is likely to change + in future versions. Use this at your own risk. + + Parameters + ---------- + n : int, default 1 + offset : timedelta, default timedelta(0) + normalize : bool, default False + Normalize start/end dates to midnight before generating date range + weekmask : str, Default 'Mon Tue Wed Thu Fri' + weekmask of valid business days, passed to ``numpy.busdaycalendar`` + holidays : list + list/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar`` + """ + + _cacheable = False + _prefix = 'CBM' + def __init__(self, n=1, **kwds): + self.n = int(n) + self.kwds = kwds + self.offset = kwds.get('offset', timedelta(0)) + self.normalize = kwds.get('normalize', False) + self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') + holidays = kwds.get('holidays', []) + self.cbday = CustomBusinessDay(n=self.n,**kwds) + self.m_offset = MonthEnd() + + @apply_nat + def apply(self,other): + n = self.n + dt_in = other + # First move to month offset + cur_mend = self.m_offset.rollforward(dt_in) + # Find this custom month offset + cur_cmend = self.cbday.rollback(cur_mend) + + # handle zero case. 
arbitrarily rollforward + if n == 0 and dt_in != cur_cmend: + n += 1 + + if dt_in < cur_cmend and n >= 1: + n -= 1 + elif dt_in > cur_cmend and n <= -1: + n += 1 + + new = cur_mend + n * MonthEnd() + result = self.cbday.rollback(new) + return as_timestamp(result) + + def __repr__(self): + if sys.version_info.major < 3: + return BusinessDay.__repr__.__func__(self) + else: + return BusinessDay.__repr__(self) + +class CustomBusinessMonthBegin(MonthOffset): + """ + **EXPERIMENTAL** DateOffset of one custom business month + + .. warning:: EXPERIMENTAL + + This class is not officially supported and the API is likely to change + in future versions. Use this at your own risk. + + Parameters + ---------- + n : int, default 1 + offset : timedelta, default timedelta(0) + normalize : bool, default False + Normalize start/end dates to midnight before generating date range + weekmask : str, Default 'Mon Tue Wed Thu Fri' + weekmask of valid business days, passed to ``numpy.busdaycalendar`` + holidays : list + list/array of dates to exclude from the set of valid business days, + passed to ``numpy.busdaycalendar`` + """ + + _cacheable = False + _prefix = 'CBMS' + def __init__(self, n=1, **kwds): + self.n = int(n) + self.kwds = kwds + self.offset = kwds.get('offset', timedelta(0)) + self.normalize = kwds.get('normalize', False) + self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') + holidays = kwds.get('holidays', []) + self.cbday = CustomBusinessDay(n=self.n,**kwds) + self.m_offset = MonthBegin() + + @apply_nat + def apply(self,other): + n = self.n + dt_in = other + # First move to month offset + cur_mbegin = self.m_offset.rollback(dt_in) + # Find this custom month offset + cur_cmbegin = self.cbday.rollforward(cur_mbegin) + + # handle zero case. 
arbitrarily rollforward + if n == 0 and dt_in != cur_cmbegin: + n += 1 + + if dt_in > cur_cmbegin and n <= -1: + n += 1 + elif dt_in < cur_cmbegin and n >= 1: + n -= 1 + + new = cur_mbegin + n * MonthBegin() + result = self.cbday.rollforward(new) + return as_timestamp(result) + + + def __repr__(self): + if sys.version_info.major < 3: + return BusinessDay.__repr__.__func__(self) + else: + return BusinessDay.__repr__(self) + class Week(DateOffset): """ Weekly offset @@ -1906,6 +2034,8 @@ class Nano(Tick): BDay = BusinessDay BMonthEnd = BusinessMonthEnd BMonthBegin = BusinessMonthBegin +CBMonthEnd = CustomBusinessMonthEnd +CBMonthBegin = CustomBusinessMonthBegin CDay = CustomBusinessDay @@ -1988,28 +2118,30 @@ def generate_range(start=None, end=None, periods=None, cur = next_date prefix_mapping = dict((offset._prefix, offset) for offset in [ - YearBegin, # 'AS' - YearEnd, # 'A' - BYearBegin, # 'BAS' - BYearEnd, # 'BA' - BusinessDay, # 'B' - BusinessMonthBegin, # 'BMS' - BusinessMonthEnd, # 'BM' - BQuarterEnd, # 'BQ' - BQuarterBegin, # 'BQS' - CustomBusinessDay, # 'C' - MonthEnd, # 'M' - MonthBegin, # 'MS' - Week, # 'W' - Second, # 'S' - Minute, # 'T' - Micro, # 'U' - QuarterEnd, # 'Q' - QuarterBegin, # 'QS' - Milli, # 'L' - Hour, # 'H' - Day, # 'D' - WeekOfMonth, # 'WOM' + YearBegin, # 'AS' + YearEnd, # 'A' + BYearBegin, # 'BAS' + BYearEnd, # 'BA' + BusinessDay, # 'B' + BusinessMonthBegin, # 'BMS' + BusinessMonthEnd, # 'BM' + BQuarterEnd, # 'BQ' + BQuarterBegin, # 'BQS' + CustomBusinessDay, # 'C' + CustomBusinessMonthEnd, # 'CBM' + CustomBusinessMonthBegin, # 'CBMS' + MonthEnd, # 'M' + MonthBegin, # 'MS' + Week, # 'W' + Second, # 'S' + Minute, # 'T' + Micro, # 'U' + QuarterEnd, # 'Q' + QuarterBegin, # 'QS' + Milli, # 'L' + Hour, # 'H' + Day, # 'D' + WeekOfMonth, # 'WOM' FY5253, FY5253Quarter, ]) diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 45ac1b3ac15a6..c67af9f503214 100644 --- a/pandas/tseries/tests/test_offsets.py 
+++ b/pandas/tseries/tests/test_offsets.py @@ -9,10 +9,11 @@ import numpy as np from pandas.core.datetools import ( - bday, BDay, cday, CDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd, - MonthBegin, BYearBegin, QuarterBegin, BQuarterBegin, BMonthBegin, - DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second, Day, Micro, - Milli, Nano, Easter, + bday, BDay, cday, CDay, BQuarterEnd, BMonthEnd, + CBMonthEnd, CBMonthBegin, + BYearEnd, MonthEnd, MonthBegin, BYearBegin, + QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week, + YearBegin, YearEnd, Hour, Minute, Second, Day, Micro, Milli, Nano, Easter, WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date, get_offset, get_offset_name, get_standard_freq) @@ -100,7 +101,7 @@ class TestBase(tm.TestCase): _offset = None offset_types = [getattr(offsets, o) for o in offsets.__all__] - skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.Nano] + skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.CustomBusinessMonthBegin, offsets.CustomBusinessMonthEnd, offsets.Nano] def _get_offset(self, klass, value=1): # create instance from offset class @@ -594,6 +595,328 @@ def test_calendar(self): dt = datetime(2014, 1, 17) assertEq(CDay(calendar=calendar), dt, datetime(2014, 1, 21)) +class TestCustomBusinessMonthEnd(TestBase): + _multiprocess_can_split_ = True + + def setUp(self): + self.d = datetime(2008, 1, 1) + + _skip_if_no_cday() + self.offset = CBMonthEnd() + self.offset2 = CBMonthEnd(2) + + def test_different_normalize_equals(self): + # equivalent in this special case + offset = CBMonthEnd() + offset2 = CBMonthEnd() + offset2.normalize = True + self.assertEqual(offset, offset2) + + def test_repr(self): + assert repr(self.offset) == '<CustomBusinessMonthEnd>' + assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>' + + def testEQ(self): + self.assertEqual(self.offset2, self.offset2) + + def test_mul(self): + pass + + def test_hash(self): + 
self.assertEqual(hash(self.offset2), hash(self.offset2)) + + def testCall(self): + self.assertEqual(self.offset2(self.d), datetime(2008, 2, 29)) + + def testRAdd(self): + self.assertEqual(self.d + self.offset2, self.offset2 + self.d) + + def testSub(self): + off = self.offset2 + self.assertRaises(Exception, off.__sub__, self.d) + self.assertEqual(2 * off - off, off) + + self.assertEqual(self.d - self.offset2, + self.d + CBMonthEnd(-2)) + + def testRSub(self): + self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d)) + + def testMult1(self): + self.assertEqual(self.d + 10 * self.offset, + self.d + CBMonthEnd(10)) + + def testMult2(self): + self.assertEqual(self.d + (-5 * CBMonthEnd(-10)), + self.d + CBMonthEnd(50)) + + def testRollback1(self): + self.assertEqual( + CDay(10).rollback(datetime(2007, 12, 31)), datetime(2007, 12, 31)) + + def testRollback2(self): + self.assertEqual(CBMonthEnd(10).rollback(self.d), + datetime(2007,12,31)) + + def testRollforward1(self): + self.assertEqual(CBMonthEnd(10).rollforward(self.d), datetime(2008,1,31)) + + def test_roll_date_object(self): + offset = CBMonthEnd() + + dt = date(2012, 9, 15) + + result = offset.rollback(dt) + self.assertEqual(result, datetime(2012, 8, 31)) + + result = offset.rollforward(dt) + self.assertEqual(result, datetime(2012, 9, 28)) + + offset = offsets.Day() + result = offset.rollback(dt) + self.assertEqual(result, datetime(2012, 9, 15)) + + result = offset.rollforward(dt) + self.assertEqual(result, datetime(2012, 9, 15)) + + def test_onOffset(self): + tests = [(CBMonthEnd(), datetime(2008, 1, 31), True), + (CBMonthEnd(), datetime(2008, 1, 1), False)] + + for offset, date, expected in tests: + assertOnOffset(offset, date, expected) + + + def test_apply(self): + cbm = CBMonthEnd() + tests = [] + + tests.append((cbm, + {datetime(2008, 1, 1): datetime(2008, 1, 31), + datetime(2008, 2, 7): datetime(2008, 2, 29)})) + + tests.append((2 * cbm, + {datetime(2008, 1, 1): datetime(2008, 2, 29), + 
datetime(2008, 2, 7): datetime(2008, 3, 31)})) + + tests.append((-cbm, + {datetime(2008, 1, 1): datetime(2007, 12, 31), + datetime(2008, 2, 8): datetime(2008, 1, 31)})) + + tests.append((-2 * cbm, + {datetime(2008, 1, 1): datetime(2007, 11, 30), + datetime(2008, 2, 9): datetime(2007, 12, 31)})) + + tests.append((CBMonthEnd(0), + {datetime(2008, 1, 1): datetime(2008, 1, 31), + datetime(2008, 2, 7): datetime(2008, 2, 29)})) + + for offset, cases in tests: + for base, expected in compat.iteritems(cases): + assertEq(offset, base, expected) + + def test_apply_large_n(self): + dt = datetime(2012, 10, 23) + + result = dt + CBMonthEnd(10) + self.assertEqual(result, datetime(2013, 7, 31)) + + result = dt + CDay(100) - CDay(100) + self.assertEqual(result, dt) + + off = CBMonthEnd() * 6 + rs = datetime(2012, 1, 1) - off + xp = datetime(2011, 7, 29) + self.assertEqual(rs, xp) + + st = datetime(2011, 12, 18) + rs = st + off + xp = datetime(2012, 5, 31) + self.assertEqual(rs, xp) + + def test_offsets_compare_equal(self): + offset1 = CBMonthEnd() + offset2 = CBMonthEnd() + self.assertFalse(offset1 != offset2) + + def test_holidays(self): + # Define a TradingDay offset + holidays = ['2012-01-31', datetime(2012, 2, 28), + np.datetime64('2012-02-29')] + bm_offset = CBMonthEnd(holidays=holidays) + dt = datetime(2012,1,1) + self.assertEqual(dt + bm_offset,datetime(2012,1,30)) + self.assertEqual(dt + 2*bm_offset,datetime(2012,2,27)) + + def test_datetimindex(self): + from pandas.tseries.holiday import USFederalHolidayCalendar + self.assertEqual(DatetimeIndex(start='2012',end='2013',freq='CBM').tolist()[0], + datetime(2012,4,30)) + self.assertEqual(DatetimeIndex(start='20120101',end='20130101',freq=CBMonthEnd(calendar=USFederalHolidayCalendar())).tolist()[0], + datetime(2012,1,31)) + + +class TestCustomBusinessMonthBegin(TestBase): + _multiprocess_can_split_ = True + + def setUp(self): + self.d = datetime(2008, 1, 1) + + _skip_if_no_cday() + self.offset = CBMonthBegin() + self.offset2 = 
CBMonthBegin(2) + + def test_different_normalize_equals(self): + # equivalent in this special case + offset = CBMonthBegin() + offset2 = CBMonthBegin() + offset2.normalize = True + self.assertEqual(offset, offset2) + + def test_repr(self): + assert repr(self.offset) == '<CustomBusinessMonthBegin>' + assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>' + + def testEQ(self): + self.assertEqual(self.offset2, self.offset2) + + def test_mul(self): + pass + + def test_hash(self): + self.assertEqual(hash(self.offset2), hash(self.offset2)) + + def testCall(self): + self.assertEqual(self.offset2(self.d), datetime(2008, 3, 3)) + + def testRAdd(self): + self.assertEqual(self.d + self.offset2, self.offset2 + self.d) + + def testSub(self): + off = self.offset2 + self.assertRaises(Exception, off.__sub__, self.d) + self.assertEqual(2 * off - off, off) + + self.assertEqual(self.d - self.offset2, + self.d + CBMonthBegin(-2)) + + def testRSub(self): + self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d)) + + def testMult1(self): + self.assertEqual(self.d + 10 * self.offset, + self.d + CBMonthBegin(10)) + + def testMult2(self): + self.assertEqual(self.d + (-5 * CBMonthBegin(-10)), + self.d + CBMonthBegin(50)) + + def testRollback1(self): + self.assertEqual( + CDay(10).rollback(datetime(2007, 12, 31)), datetime(2007, 12, 31)) + + def testRollback2(self): + self.assertEqual(CBMonthBegin(10).rollback(self.d), + datetime(2008,1,1)) + + def testRollforward1(self): + self.assertEqual(CBMonthBegin(10).rollforward(self.d), datetime(2008,1,1)) + + def test_roll_date_object(self): + offset = CBMonthBegin() + + dt = date(2012, 9, 15) + + result = offset.rollback(dt) + self.assertEqual(result, datetime(2012, 9, 3)) + + result = offset.rollforward(dt) + self.assertEqual(result, datetime(2012, 10, 1)) + + offset = offsets.Day() + result = offset.rollback(dt) + self.assertEqual(result, datetime(2012, 9, 15)) + + result = offset.rollforward(dt) + self.assertEqual(result, 
datetime(2012, 9, 15)) + + def test_onOffset(self): + tests = [(CBMonthBegin(), datetime(2008, 1, 1), True), + (CBMonthBegin(), datetime(2008, 1, 31), False)] + + for offset, date, expected in tests: + assertOnOffset(offset, date, expected) + + + def test_apply(self): + cbm = CBMonthBegin() + tests = [] + + tests.append((cbm, + {datetime(2008, 1, 1): datetime(2008, 2, 1), + datetime(2008, 2, 7): datetime(2008, 3, 3)})) + + tests.append((2 * cbm, + {datetime(2008, 1, 1): datetime(2008, 3, 3), + datetime(2008, 2, 7): datetime(2008, 4, 1)})) + + tests.append((-cbm, + {datetime(2008, 1, 1): datetime(2007, 12, 3), + datetime(2008, 2, 8): datetime(2008, 2, 1)})) + + tests.append((-2 * cbm, + {datetime(2008, 1, 1): datetime(2007, 11, 1), + datetime(2008, 2, 9): datetime(2008, 1, 1)})) + + tests.append((CBMonthBegin(0), + {datetime(2008, 1, 1): datetime(2008, 1, 1), + datetime(2008, 1, 7): datetime(2008, 2, 1)})) + + for offset, cases in tests: + for base, expected in compat.iteritems(cases): + assertEq(offset, base, expected) + + def test_apply_large_n(self): + dt = datetime(2012, 10, 23) + + result = dt + CBMonthBegin(10) + self.assertEqual(result, datetime(2013, 8, 1)) + + result = dt + CDay(100) - CDay(100) + self.assertEqual(result, dt) + + off = CBMonthBegin() * 6 + rs = datetime(2012, 1, 1) - off + xp = datetime(2011, 7, 1) + self.assertEqual(rs, xp) + + st = datetime(2011, 12, 18) + rs = st + off + xp = datetime(2012, 6, 1) + self.assertEqual(rs, xp) + + def test_offsets_compare_equal(self): + offset1 = CBMonthBegin() + offset2 = CBMonthBegin() + self.assertFalse(offset1 != offset2) + + def test_holidays(self): + # Define a TradingDay offset + holidays = ['2012-02-01', datetime(2012, 2, 2), + np.datetime64('2012-03-01')] + bm_offset = CBMonthBegin(holidays=holidays) + dt = datetime(2012,1,1) + self.assertEqual(dt + bm_offset,datetime(2012,1,2)) + self.assertEqual(dt + 2*bm_offset,datetime(2012,2,3)) + + def test_datetimindex(self): + 
self.assertEqual(DatetimeIndex(start='2012',end='2013',freq='CBMS').tolist()[0], + datetime(2012,5,1)) + self.assertEqual(DatetimeIndex(start='20120101',end='20130101',freq=CBMonthBegin(calendar=USFederalHolidayCalendar())).tolist()[0], + datetime(2012,1,3)) + + + def assertOnOffset(offset, date, expected): actual = offset.onOffset(date) assert actual == expected, ("\nExpected: %s\nActual: %s\nFor Offset: %s)" diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index 3ea970baeff7a..06ef99442b574 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -288,6 +288,7 @@ def date_range(start=None, end=None, periods=None, freq=None): date = dt.datetime(2011,1,1) cday = pd.offsets.CustomBusinessDay() +cme = pd.offsets.CustomBusinessMonthEnd() """ timeseries_custom_bday_incr = \ Benchmark("date + cday",setup) @@ -295,3 +296,10 @@ def date_range(start=None, end=None, periods=None, freq=None): # Increment by n timeseries_custom_bday_incr_n = \ Benchmark("date + 10 * cday",setup) + +# Increment custom business month +timeseries_custom_bmonthend_incr = \ + Benchmark("date + cme",setup) + +timeseries_custom_bmonthend_incr_n = \ + Benchmark("date + 10 * cme",setup)
This extends the `offsets.py` module with a `CBMonthEnd` based on `CustomBusinessDay`. Tests are found in `test_offsets.py`. A few small points still open: - Implement CBMonthBegin - Improve speed. Main speed drag is `DateOffset.onOffset`. Also some of the tests are not passing, but the errors seem unrelated. e.g. ``` ====================================================================== ERROR: test_pickle (pandas.tseries.tests.test_timeseries.TestTimeSeries) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/benjamin/workspace/pandas/pandas/tseries/tests/test_timeseries.py", line 1827, in test_pickle p = pick.loads(pick.dumps(NaT)) File "/usr/lib/python2.7/pickle.py", line 1382, in loads return Unpickler(file).load() File "/usr/lib/python2.7/pickle.py", line 858, in load dispatch[key](self) File "/usr/lib/python2.7/pickle.py", line 1133, in load_reduce value = func(*args) TypeError: __new__() takes exactly one argument (2 given) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6866
2014-04-10T21:50:56Z
2014-04-22T19:26:07Z
2014-04-22T19:26:07Z
2014-06-19T01:12:42Z
BUG: _tidy_repr should not be called when max_rows is None
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2ac6d96c5a36b..e37a7c7eab861 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -364,6 +364,7 @@ Bug Fixes - Bug in ``DataFrame.replace()`` where changing a dtype through replacement would only replace the first occurrence of a value (:issue:`6689`) - Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) +- Bug in `Series.__unicode__` when `max_rows` is `None` and the Series has more than 1000 rows. (:issue:`6863`) pandas 0.13.1 ------------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 3b1c7a6af5069..70b73c56772aa 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -828,7 +828,7 @@ def __unicode__(self): width, height = get_terminal_size() max_rows = (height if get_option("display.max_rows") == 0 else get_option("display.max_rows")) - if len(self.index) > (max_rows or 1000): + if max_rows and len(self.index) > max_rows: result = self._tidy_repr(min(30, max_rows - 4)) elif len(self.index) > 0: result = self._get_repr(print_header=True, diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 73dd47ee3d3e4..5b088598dfcec 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1770,6 +1770,11 @@ def test_repr_should_return_str(self): df = Series(data, index=index1) self.assertTrue(type(df.__repr__() == str)) # both py2 / 3 + def test_repr_max_rows(self): + # GH 6863 + with pd.option_context('max_rows', None): + str(Series(range(1001))) # should not raise exception + def test_unicode_string_with_unicode(self): df = Series([u("\u05d0")], name=u("\u05d1")) if compat.PY3:
This issue was raised in http://stackoverflow.com/q/22824104/190597: ``` import pandas as pd pd.options.display.max_rows = None result = pd.Series(range(1001)) print(result) ``` raises `TypeError: unsupported operand type(s) for -: 'NoneType' and 'int'`. The problem occurs in series.py (line 832) when `max_rows` is `None`: ``` if len(self.index) > (max_rows or 1000): result = self._tidy_repr(min(30, max_rows - 4)) ``` Since the doc string for `get_options` says ``` display.max_rows: [default: 60] [currently: 60] ... 'None' value means unlimited. ``` I think `_tidy_repr` should not be called when `max_rows` is None. This PR seems simple enough but my main concern is that this PR stomps on GH1467 which explicitly changed `> max_rows` to `> (max_rows or 1000)` and I don't know what the purpose of this was.
https://api.github.com/repos/pandas-dev/pandas/pulls/6863
2014-04-10T15:41:18Z
2014-04-10T21:01:17Z
2014-04-10T21:01:17Z
2014-06-12T16:16:48Z
API: add inplace keyword to Series.order/sort to make them inverses (GH6859)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2ac6d96c5a36b..df547c3f0a09b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -170,6 +170,7 @@ API Changes add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) - default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` (and numpy defaults) +- add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 05569dbdba702..d4f912baaa4dc 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -212,6 +212,7 @@ API changes add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) - default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` (and numpy defaults) +- add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) .. _whatsnew_0140.sql: diff --git a/pandas/core/series.py b/pandas/core/series.py index 3b1c7a6af5069..c4901e78779b1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1563,39 +1563,6 @@ def update(self, other): #---------------------------------------------------------------------- # Reindexing, sorting - def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last'): - """ - Sort values and index labels by value, in place. For compatibility with - ndarray API. No return value - - Parameters - ---------- - axis : int (can only be zero) - ascending : boolean, default True - Sort ascending. Passing False sorts descending - kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' - Choice of sorting algorithm. See np.sort for more - information. 
'mergesort' is the only stable algorithm - na_position : {'first', 'last'} (optional, default='last') - 'first' puts NaNs at the beginning - 'last' puts NaNs at the end - - See Also - -------- - Series.order - """ - - # GH 5856/5863 - if self._is_cached: - raise ValueError("This Series is a view of some other array, to " - "sort in-place you must create a copy") - - result = self.order(ascending=ascending, - kind=kind, - na_position=na_position) - - self._update_inplace(result) - def sort_index(self, ascending=True): """ Sort object by labels (along an axis) @@ -1692,9 +1659,38 @@ def rank(self, method='average', na_option='keep', ascending=True, ascending=ascending, pct=pct) return self._constructor(ranks, index=self.index).__finalize__(self) - def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last'): + def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True): """ - Sorts Series object, by value, maintaining index-value link + Sort values and index labels by value. This is an inplace sort by default. + Series.order is the equivalent but returns a new Series. + + Parameters + ---------- + axis : int (can only be zero) + ascending : boolean, default True + Sort ascending. Passing False sorts descending + kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' + Choice of sorting algorithm. See np.sort for more + information. 'mergesort' is the only stable algorithm + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + inplace : boolean, default True + Do operation in place. + + See Also + -------- + Series.order + """ + return self.order(ascending=ascending, + kind=kind, + na_position=na_position, + inplace=inplace) + + def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False): + """ + Sorts Series object, by value, maintaining index-value link. 
+ This will return a new Series by default. Series.sort is the equivalent but as an inplace method. Parameters ---------- @@ -1708,6 +1704,8 @@ def order(self, na_last=None, ascending=True, kind='quicksort', na_position='las na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end + inplace : boolean, default False + Do operation in place. Returns ------- @@ -1717,6 +1715,12 @@ def order(self, na_last=None, ascending=True, kind='quicksort', na_position='las -------- Series.sort """ + + # GH 5856/5853 + if inplace and self._is_cached: + raise ValueError("This Series is a view of some other array, to " + "sort in-place you must create a copy") + if na_last is not None: warnings.warn(("na_last is deprecated. Please use na_position instead"), FutureWarning) @@ -1755,8 +1759,13 @@ def _try_kind_sort(arr): sortedIdx[:n] = idx[bad] else: raise ValueError('invalid na_position: {!r}'.format(na_position)) - return self._constructor(arr[sortedIdx], index=self.index[sortedIdx])\ - .__finalize__(self) + + result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx]) + + if inplace: + self._update_inplace(result) + else: + return result.__finalize__(self) def sortlevel(self, level=0, ascending=True): """ diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index ea1e07dbf6acc..277523855da67 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -2848,14 +2848,6 @@ def f(): zed['eyes']['right'].fillna(value=555, inplace=True) self.assertRaises(com.SettingWithCopyError, f) - # GH 5856/5863 - # Series.sort operating on a view - df = DataFrame(np.random.randn(10,4)) - s = df.iloc[:,0] - def f(): - s.sort() - self.assertRaises(ValueError, f) - df = DataFrame(np.random.randn(10,4)) s = df.iloc[:,0] s = s.order() diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 73dd47ee3d3e4..f4a8d4eed2c41 100644 --- a/pandas/tests/test_series.py +++ 
b/pandas/tests/test_series.py @@ -3825,7 +3825,7 @@ def test_dot(self): self.assertRaises(ValueError, a.dot, b.T) def test_value_counts_nunique(self): - + # basics.rst doc example series = Series(np.random.randn(500)) series[20:500] = np.nan @@ -3911,6 +3911,28 @@ def test_sort(self): self.assert_numpy_array_equal(ts.index, self.ts.order(ascending=False).index) + # GH 5856/5853 + # Series.sort operating on a view + df = DataFrame(np.random.randn(10,4)) + s = df.iloc[:,0] + def f(): + s.sort() + self.assertRaises(ValueError, f) + + # test order/sort inplace + # GH6859 + ts1 = self.ts.copy() + ts1.sort(ascending=False) + ts2 = self.ts.copy() + ts2.order(ascending=False,inplace=True) + assert_series_equal(ts1,ts2) + + ts1 = self.ts.copy() + ts1 = ts1.sort(ascending=False,inplace=False) + ts2 = self.ts.copy() + ts2 = ts.order(ascending=False) + assert_series_equal(ts1,ts2) + def test_sort_index(self): import random
closes #6859
https://api.github.com/repos/pandas-dev/pandas/pulls/6861
2014-04-10T13:54:47Z
2014-04-12T00:27:51Z
2014-04-12T00:27:51Z
2014-06-22T23:19:02Z
API: allow Series comparison ops to align before comparison (GH1134)
diff --git a/doc/source/release.rst b/doc/source/release.rst index a23936ae154c0..6838eb9c90581 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -171,6 +171,7 @@ API Changes - default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` (and numpy defaults) - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) +- align on Series comparison operations (e.g. ``x == y``), (:issue:`1134`) Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ded10fd75e8d4..9d54043085b4f 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -213,6 +213,25 @@ API changes - default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` (and numpy defaults) - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) +- align on Series comparison operations (e.g. ``x == y``), (:issue:`1134`) + + This is a reordered comparison + + .. ipython:: python + + s1 = Series(index=["A", "B", "C"], data=[1,2,3]) + s1 + s2 = Series(index=["C", "B", "A"], data=[3,2,1]) + s2 + s1 == s2 + + In the following example, 'A' is missing so it will always compare False (as it has a ``nan`` value) + + .. ipython:: python + + s3 = Series(index=["C", "B"], data=[3,2]) + s3 + s1 == s3 .. 
_whatsnew_0140.sql: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index b8e92fb25cec5..8762d5edddd82 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -537,13 +537,18 @@ def na_op(x, y): def wrapper(self, other): if isinstance(other, pd.Series): name = _maybe_match_name(self, other) - if len(self) != len(other): - raise ValueError('Series lengths must match to compare') - return self._constructor(na_op(self.values, other.values), - index=self.index, name=name) + if self.index.equals(other): + s1, s2 = self, other + index = self.index + else: + index = self.index + other.index + s1 = self.reindex(index) + s2 = other.reindex(index) + return self._constructor(na_op(s1.values, s2.values), + index=index, name=name) elif isinstance(other, pd.DataFrame): # pragma: no cover return NotImplemented - elif isinstance(other, (pa.Array, pd.Series)): + elif isinstance(other, pa.Array): if len(self) != len(other): raise ValueError('Lengths must match to compare') return self._constructor(na_op(self.values, np.asarray(other)), diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py index 36963d193e5ae..6af45a81fe3eb 100644 --- a/pandas/io/tests/test_json/test_ujson.py +++ b/pandas/io/tests/test_json/test_ujson.py @@ -1241,48 +1241,51 @@ def testDataFrameNumpyLabelled(self): assert_array_equal(df.index, outp.index) def testSeries(self): + s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15]) s.sort() + def check(x,y): + tm.assert_series_equal(x,y,check_index_type=False) + y.index = Index(outp.astype('int64')) + tm.assert_series_equal(x,y) + # column indexed outp = Series(ujson.decode(ujson.encode(s))) outp.sort() - self.assertTrue((s == outp).values.all()) outp = Series(ujson.decode(ujson.encode(s), numpy=True)) outp.sort() - self.assertTrue((s == outp).values.all()) + check(s,outp) dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"))) outp = Series(**dec) - self.assertTrue((s == 
outp).values.all()) - self.assertTrue(s.name == outp.name) + check(s,outp) dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"), numpy=True)) outp = Series(**dec) - self.assertTrue((s == outp).values.all()) - self.assertTrue(s.name == outp.name) + check(s,outp) outp = Series(ujson.decode(ujson.encode(s, orient="records"), numpy=True)) - self.assertTrue((s == outp).values.all()) + check(s,outp) outp = Series(ujson.decode(ujson.encode(s, orient="records"))) - self.assertTrue((s == outp).values.all()) + check(s,outp) outp = Series(ujson.decode(ujson.encode(s, orient="values"), numpy=True)) - self.assertTrue((s == outp).values.all()) + check(s,outp) outp = Series(ujson.decode(ujson.encode(s, orient="values"))) - self.assertTrue((s == outp).values.all()) + check(s,outp) outp = Series(ujson.decode(ujson.encode(s, orient="index"))) outp.sort() - self.assertTrue((s == outp).values.all()) + check(s,outp) outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True)) outp.sort() - self.assertTrue((s == outp).values.all()) + check(s,outp) def testSeriesNested(self): s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15]) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 744a020347af9..b54c832d56943 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2354,6 +2354,37 @@ def check_comparators(series, other): check_comparators(self.ts, 5) check_comparators(self.ts, self.ts + 1) + def test_align_eq(self): + + # GH 1134 + # eq should align! 
+ + # needs alignment + s1 = Series([1,2], ['a','b']) + s2 = Series([2,3], ['b','c']) + result1 = s1 == s2 + result2 = s2 == s1 + index = s1.index+s2.index + expected = s1.reindex(index) == s2.reindex(index) + assert_series_equal(result1,expected) + assert_series_equal(result2,expected) + + # differs in order + s1 = Series(index=["A", "B", "C"], data=[1,2,3]) + s2 = Series(index=["C", "B", "A"], data=[3,2,1]) + result1 = s1 == s2 + result2 = s2 == s1 + index = s1.index+s2.index + expected = s1.reindex(index) == s2.reindex(index) + assert_series_equal(result1,expected) + assert_series_equal(result2,expected) + + s1 = Series([10,20,30,40,50,60],index=[6,7,8,9,10,15],name='series') + s2 = Series([10,20,30,40,50,60],index=[6,7,8,9,10,15]) + result = s1 == s2 + expected = Series(True,index=[6,7,8,9,10,15]) + assert_series_equal(result,expected) + def test_operators_empty_int_corner(self): s1 = Series([], [], dtype=np.int32) s2 = Series({'x': 0.}) @@ -3214,15 +3245,6 @@ def test_more_na_comparisons(self): expected = Series([True, True, True]) assert_series_equal(result, expected) - def test_comparison_different_length(self): - a = Series(['a', 'b', 'c']) - b = Series(['b', 'a']) - self.assertRaises(ValueError, a.__lt__, b) - - a = Series([1, 2]) - b = Series([2, 3, 4]) - self.assertRaises(ValueError, a.__eq__, b) - def test_comparison_label_based(self): # GH 4947 diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 8abbb37646b49..07484c60d54a8 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -511,15 +511,15 @@ def assert_series_equal(left, right, check_dtype=True, right.values)) else: assert_almost_equal(left.values, right.values, check_less_precise) - if check_less_precise: - assert_almost_equal( - left.index.values, right.index.values, check_less_precise) - else: - assert_index_equal(left.index, right.index) if check_index_type: - assert_isinstance(left.index, type(right.index)) - assert_attr_equal('dtype', left.index, right.index) - 
assert_attr_equal('inferred_type', left.index, right.index) + if check_less_precise: + assert_almost_equal( + left.index.values, right.index.values, check_less_precise) + else: + assert_index_equal(left.index, right.index) + assert_isinstance(left.index, type(right.index)) + assert_attr_equal('dtype', left.index, right.index) + assert_attr_equal('inferred_type', left.index, right.index) # This could be refactored to use the NDFrame.equals method def assert_frame_equal(left, right, check_dtype=True,
closes #1134 reordered comparisons ``` In [1]: s1 = Series(index=["A", "B", "C"], data=[1,2,3]) In [2]: s1 Out[2]: A 1 B 2 C 3 dtype: int64 In [3]: s2 = Series(index=["C", "B", "A"], data=[3,2,1]) In [4]: s2 Out[4]: C 3 B 2 A 1 dtype: int64 In [5]: s1 == s2 Out[5]: A True B True C True dtype: bool ``` Here we have a missing value, so it's `nan` in the comparisons ``` In [6]: s3 = Series(index=["C", "B"], data=[3,2]) In [7]: s3 Out[7]: C 3 B 2 dtype: int64 In [8]: s1 == s3 Out[8]: A False B True C True dtype: bool In [9]: s1>s3 Out[9]: A False B False C False dtype: bool In [10]: s1<s3 Out[10]: A False B False C False dtype: bool ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6860
2014-04-10T13:22:02Z
2014-08-05T15:43:04Z
null
2014-09-12T20:34:29Z
BUG: to_timedelta not properly converting some units (GH6855)
diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 628c278aff2e1..57d8bf5623a78 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -199,6 +199,17 @@ def conv(v): expected = Series([ np.timedelta64(1,'D') ]*5) tm.assert_series_equal(result, expected) + # validate all units + # GH 6855 + for unit in ['Y','M','W','D','y','w','d']: + result = to_timedelta(np.arange(5),unit=unit) + expected = Series([ np.timedelta64(i,unit.upper()) for i in np.arange(5).tolist() ]) + tm.assert_series_equal(result, expected) + for unit in ['h','m','s','ms','us','ns','H','S','MS','US','NS']: + result = to_timedelta(np.arange(5),unit=unit) + expected = Series([ np.timedelta64(i,unit.lower()) for i in np.arange(5).tolist() ]) + tm.assert_series_equal(result, expected) + # these will error self.assertRaises(ValueError, lambda : to_timedelta(['1h'])) self.assertRaises(ValueError, lambda : to_timedelta(['1m'])) diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index cc01c26f78b70..0a5693cc55466 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -40,6 +40,8 @@ def _convert_listlike(arg, box, unit): if is_timedelta64_dtype(arg): value = arg.astype('timedelta64[ns]') elif is_integer_dtype(arg): + unit = _validate_timedelta_unit(unit) + # these are shortcutable value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]') else: @@ -65,6 +67,15 @@ def _convert_listlike(arg, box, unit): # ...so it must be a scalar value. Return scalar. 
return _coerce_scalar_to_timedelta_type(arg, unit=unit) +def _validate_timedelta_unit(arg): + """ provide validation / translation for timedelta short units """ + + if re.search("Y|W|D",arg,re.IGNORECASE) or arg == 'M': + return arg.upper() + elif re.search("h|m|s|ms|us|ns",arg,re.IGNORECASE): + return arg.lower() + raise ValueError("invalid timedelta unit {0} provided".format(arg)) + _short_search = re.compile( "^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE) _full_search = re.compile(
closes #6855
https://api.github.com/repos/pandas-dev/pandas/pulls/6858
2014-04-10T11:49:28Z
2014-04-10T12:08:34Z
2014-04-10T12:08:34Z
2014-06-29T14:30:56Z
DEPR: Indexers will warn FutureWarning when used with a scalar indexer this is floating-point (GH4892)
diff --git a/doc/source/release.rst b/doc/source/release.rst index e37a7c7eab861..aaa91f30f5013 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -189,6 +189,9 @@ Deprecations ``FutureWarning`` is raised to alert that the old ``cols`` arguments will not be supported in a future release (:issue:`6645`) +- Indexers will warn ``FutureWarning`` when used with a scalar indexer and + a non-floating point Index (:issue:`4892`) + Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 05569dbdba702..8f33269fd406b 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -390,6 +390,27 @@ Deprecations ``FutureWarning`` is raised to alert that the old ``cols`` arguments will not be supported in a future release (:issue:`6645`) +- Indexers will warn ``FutureWarning`` when used with a scalar indexer and + a non-floating point Index (:issue:`4892`) + + .. code-block:: python + + # non-floating point indexes can only be indexed by integers / labels + In [1]: Series(1,np.arange(5))[3.0] + pandas/core/index.py:469: FutureWarning: scalar indexers for index type Int64Index should be integers and not floating point + Out[1]: 1 + + In [5]: Series(1,np.arange(5)).iloc[3.0] + pandas/core/index.py:469: FutureWarning: scalar indexers for index type Int64Index should be integers and not floating point + Out[5]: 1 + + # these are Float64Indexes, so integer or floating point is acceptable + In [3]: Series(1,np.arange(5.))[3] + Out[3]: 1 + + In [4]: Series(1,np.arange(5.))[3.0] + Out[4]: 1 + .. 
_whatsnew_0140.enhancements: Enhancements diff --git a/pandas/core/index.py b/pandas/core/index.py index e8403bfe8b4f8..a581a8753ae51 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,6 +1,7 @@ # pylint: disable=E1101,E1103,W0232 import datetime from functools import partial +import warnings from pandas.compat import range, zip, lrange, lzip, u, reduce from pandas import compat import numpy as np @@ -468,11 +469,19 @@ def to_int(): return ikey if typ == 'iloc': - if not (is_integer(key) or is_float(key)): - self._convert_indexer_error(key, 'label') - return to_int() + if is_integer(key): + return key + elif is_float(key): + if not self.is_floating(): + warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format( + type(self).__name__),FutureWarning) + return to_int() + return self._convert_indexer_error(key, 'label') if is_float(key): + if not self.is_floating(): + warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format( + type(self).__name__),FutureWarning) return to_int() return key diff --git a/pandas/index.pyx b/pandas/index.pyx index 8aa4f69a1ec8e..e5cfa3f7c6f16 100644 --- a/pandas/index.pyx +++ b/pandas/index.pyx @@ -355,6 +355,8 @@ cdef class Int64Engine(IndexEngine): hash(val) if util.is_bool_object(val): raise KeyError(val) + elif util.is_float_object(val): + raise KeyError(val) cdef _maybe_get_bool_indexer(self, object val): cdef: diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index ea1e07dbf6acc..f730d415a6acc 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -94,6 +94,7 @@ class TestIndexing(tm.TestCase): _typs = set(['ints','labels','mixed','ts','floats','empty']) def setUp(self): + import warnings warnings.filterwarnings(action='ignore', category=FutureWarning) @@ -3220,6 +3221,64 @@ def test_ix_empty_list_indexer_is_ok(self): assert_frame_equal(df.ix[:,[]], df.iloc[:, :0]) # vertical empty 
assert_frame_equal(df.ix[[],:], df.iloc[:0, :]) # horizontal empty + def test_deprecate_float_indexers(self): + + # GH 4892 + # deprecate allowing float indexers that are equal to ints to be used + # as indexers in non-float indices + + import warnings + warnings.filterwarnings(action='error', category=FutureWarning) + + for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, + tm.makeDateIndex, tm.makePeriodIndex ]: + + i = index(5) + s = Series(np.arange(len(i)),index=i) + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0]) + self.assertRaises(FutureWarning, lambda : + s[3.0]) + + # this is ok! + s[3] + + # ints + i = index(5) + s = Series(np.arange(len(i))) + self.assertRaises(FutureWarning, lambda : + s.iloc[3.0]) + + # on some arch's this doesn't provide a warning (and thus raise) + # and some it does + try: + s[3.0] + except: + pass + + # floats: these are all ok! + i = np.arange(5.) + s = Series(np.arange(len(i)),index=i) + with tm.assert_produces_warning(False): + s[3.0] + + with tm.assert_produces_warning(False): + s[3] + + with tm.assert_produces_warning(False): + s.iloc[3.0] + + with tm.assert_produces_warning(False): + s.iloc[3] + + with tm.assert_produces_warning(False): + s.loc[3.0] + + with tm.assert_produces_warning(False): + s.loc[3] + + warnings.filterwarnings(action='ignore', category=FutureWarning) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
closes #4892 numpy is doing this i 1.9 IIRC, and Scipy already did it. in 0.13.3 Indexers will warn `FutureWarning` when used with a scalar indexer and a non-floating point Index ``` # non-floating point indexes can only be indexed by integers / labels In [1]: Series(1,np.arange(5))[3.0] pandas/core/index.py:469: FutureWarning: scalar indexers should not be floating point unless a floating index Out[1]: 1 In [5]: Series(1,np.arange(5)).iloc[3.0] pandas/core/index.py:463: FutureWarning: scalar indexers should not be floating point unless a floating index Out[5]: 1 # these are Float64Indexes, so integer or floating point is acceptable In [3]: Series(1,np.arange(5.))[3] Out[3]: 1 In [4]: Series(1,np.arange(5.))[3.0] Out[4]: 1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6853
2014-04-09T20:38:47Z
2014-04-11T22:33:35Z
2014-04-11T22:33:35Z
2014-07-05T04:40:12Z
add bins argument to Histogram function
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index fceec8cf00e92..a9425400bedb3 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -110,6 +110,7 @@ def test_hist(self): _check_plot_works(self.ts.hist, grid=False) _check_plot_works(self.ts.hist, figsize=(8, 10)) _check_plot_works(self.ts.hist, by=self.ts.index.month) + _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5) import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 1) @@ -909,6 +910,9 @@ def test_hist(self): # handle figsize arg _check_plot_works(df.hist, figsize=(8, 10)) + # check bins argument + _check_plot_works(df.hist, bins=5) + # make sure xlabelsize and xrot are handled ser = df[0] xf, yf = 20, 20 diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 7e67c48572f51..158cfeb5e3a6f 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2263,7 +2263,7 @@ def plot_group(group, ax): def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, - sharey=False, figsize=None, layout=None, **kwds): + sharey=False, figsize=None, layout=None, bins=10, **kwds): """ Draw histogram of the DataFrame's series using matplotlib / pylab. 
@@ -2290,6 +2290,8 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, figsize : tuple The size of the figure to create in inches by default layout: (optional) a tuple (rows, columns) for the layout of the histograms + bins: integer, default 10 + Number of histogram bins to be used kwds : other plotting keyword arguments To be passed to hist function """ @@ -2302,7 +2304,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, if by is not None: axes = grouped_hist(data, by=by, ax=ax, grid=grid, figsize=figsize, - sharex=sharex, sharey=sharey, layout=layout, + sharex=sharex, sharey=sharey, layout=layout, bins=bins, **kwds) for ax in axes.ravel(): @@ -2363,7 +2365,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, - xrot=None, ylabelsize=None, yrot=None, figsize=None, **kwds): + xrot=None, ylabelsize=None, yrot=None, figsize=None, bins=10, **kwds): """ Draw histogram of the input series using matplotlib @@ -2385,6 +2387,8 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, rotation of y axis labels figsize : tuple, default None figure size in inches by default + bins: integer, default 10 + Number of histogram bins to be used kwds : keywords To be passed to the actual plotting function @@ -2411,7 +2415,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, raise AssertionError('passed axis not bound to passed figure') values = self.dropna().values - ax.hist(values, **kwds) + ax.hist(values, bins=bins, **kwds) ax.grid(grid) axes = np.array([ax]) else: @@ -2420,7 +2424,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, "'by' argument, since a new 'Figure' instance " "will be created") axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, - **kwds) + bins=bins, **kwds) for ax in axes.ravel(): if xlabelsize is not None:
The primary argument used when plotting histograms is `bins`. This argument is not in the pandas documentation so you do not see it when auto-tabbing for documentation. I have added `bins=10` as an argument to the dataframe and series histogram function while also adding a line of documentation. `10` is the matplotlib default bin value so this is only making `bins` visible, not adding or changing any plotting functionality.
https://api.github.com/repos/pandas-dev/pandas/pulls/6850
2014-04-09T15:59:23Z
2014-04-23T02:21:47Z
2014-04-23T02:21:47Z
2014-07-16T09:01:15Z
API/BUG Raise ValueError when stacking nonunique levels
diff --git a/doc/source/release.rst b/doc/source/release.rst index 13aef1a5d8fdb..21c30d68a29d9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -149,6 +149,8 @@ API Changes - Define and document the order of column vs index names in query/eval (:issue:`6676`) - ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) +- ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers + to a non-unique item in the ``Index`` (previously raised a ``KeyError``). (:issue:`6738`) - all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index b0ea3cb770a64..ad538b3c01dae 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -206,7 +206,8 @@ API changes - ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) - +- ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers + to a non-unique item in the ``Index`` (previously raised a ``KeyError``). .. _whatsnew_0140.sql: diff --git a/pandas/core/index.py b/pandas/core/index.py index 7edd2c6646535..72465040077b2 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2317,6 +2317,13 @@ def _set_names(self, values, validate=True): names = property( fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex") + def _reference_duplicate_name(self, name): + """ + Returns True if the name refered to in self.names is duplicated. + """ + # count the times name equals an element in self.names. 
+ return np.sum(name == np.asarray(self.names)) > 1 + def _format_native_types(self, **kwargs): return self.tolist() diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 0d06e9253ce1f..7dc266617c5fd 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -1,6 +1,5 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 - from pandas.compat import range, zip from pandas import compat import itertools @@ -69,6 +68,13 @@ def __init__(self, values, index, level=-1, value_columns=None): raise ValueError('must pass column labels for multi-column data') self.index = index + + if isinstance(self.index, MultiIndex): + if index._reference_duplicate_name(level): + msg = ("Ambiguous reference to {0}. The index " + "names are not unique.".format(level)) + raise ValueError(msg) + self.level = self.index._get_level_number(level) levels = index.levels @@ -497,6 +503,12 @@ def stack(frame, level=-1, dropna=True): stacked : Series """ N, K = frame.shape + if isinstance(frame.columns, MultiIndex): + if frame.columns._reference_duplicate_name(level): + msg = ("Ambiguous reference to {0}. 
The column " + "names are not unique.".format(level)) + raise ValueError(msg) + if isinstance(level, int) and level < 0: level += frame.columns.nlevels diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index aa8350dfdfe78..5c8ae497f9117 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11387,6 +11387,16 @@ def test_unstack_dtypes(self): expected = Series({'float64' : 2, 'object' : 2}) assert_series_equal(result, expected) + def test_unstack_non_unique_index_names(self): + idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], + names=['c1', 'c1']) + df = DataFrame([1, 2], index=idx) + with tm.assertRaises(ValueError): + df.unstack('c1') + + with tm.assertRaises(ValueError): + df.T.stack('c1') + def test_reset_index(self): stacked = self.frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index c6c405306afb8..74ca5d0fe9276 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1534,6 +1534,13 @@ def test_names(self): level_names = [level.name for level in index.levels] self.assertEqual(ind_names, level_names) + def test_reference_duplicate_name(self): + idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x']) + self.assertTrue(idx._reference_duplicate_name('x')) + + idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y']) + self.assertFalse(idx._reference_duplicate_name('x')) + def test_astype(self): expected = self.index.copy() actual = self.index.astype('O')
Should raise a ValueError when (un)stacking a DataFrame on a nonunique level. Previous behavior was to raise a KeyError (not deliberately). Closes #6729.
https://api.github.com/repos/pandas-dev/pandas/pulls/6849
2014-04-09T15:38:28Z
2014-04-09T21:07:29Z
2014-04-09T21:07:29Z
2017-04-05T02:08:53Z
API: make Series.sort/order arguments in the same order
diff --git a/doc/source/release.rst b/doc/source/release.rst index 21c30d68a29d9..2459123626f10 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -83,7 +83,7 @@ API Changes for a Series with a ``DatetimeIndex`` or a ``PeriodIndex``; trying this on a non-supported Index type will now raise a ``TypeError``. (:issue:`4551`, :issue:`4056`, :issue:`5519`) - The following affected: + The following are affected: - ``date,time,year,month,day`` - ``hour,minute,second,weekofyear`` @@ -132,26 +132,20 @@ API Changes the name of the inserted column containing the pivoted data. - Allow specification of a more complex groupby, via ``pd.Grouper`` (:issue:`3794`) - - A tuple passed to ``DataFame.sort_index`` will be interpreted as the levels of the index, rather than requiring a list of tuple (:issue:`4370`) - - Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`) - - Following keywords are now acceptable for :meth:`DataFrame.plot` with ``kind='bar'`` and ``kind='barh'``: - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) - - - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - + - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to + matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center). 
(:issue:`6604`) - Define and document the order of column vs index names in query/eval (:issue:`6676`) - - ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) - ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers to a non-unique item in the ``Index`` (previously raised a ``KeyError``). (:issue:`6738`) - - all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) - ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) @@ -160,19 +154,19 @@ API Changes representation) (:issue:`6782`) - Arithmetic ops are now disallowed when passed two bool dtype Series or DataFrames (:issue:`6762`). - - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) - ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``. The ``table`` kewyword can receive the following values. - ``False``: Do nothing (default). - - ``True``: Draw a table using the ``DataFrame`` or ``Series`` called ``plot`` method. Data will be transposed to meet matplotlib's default layout. - - ``DataFrame`` or ``Series``: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). - Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. 
+- drop unused order argument from ``Series.sort``; args now in the same orders as ``Series.order``; + add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) +- default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` + (and numpy defaults) Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ad538b3c01dae..3b65d6e24eb2d 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -65,7 +65,7 @@ API changes for a Series with a ``DatetimeIndex`` or a ``PeriodIndex``; trying this on a non-supported Index type will now raise a ``TypeError``. (:issue:`4551`, :issue:`4056`, :issue:`5519`) - The following affected: + The following are affected: - ``date,time,year,month,day`` - ``hour,minute,second,weekofyear`` @@ -208,6 +208,10 @@ API changes - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) - ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers to a non-unique item in the ``Index`` (previously raised a ``KeyError``). +- drop unused order argument from ``Series.sort``; args now in the same orders as ``Series.order``; + add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) +- default sorting algorithm for ``Series.order`` is not ``quicksort``, to conform with ``Series.sort`` + (and numpy defaults) .. _whatsnew_0140.sql: @@ -329,20 +333,15 @@ Plotting ~~~~~~~~ - Hexagonal bin plots from ``DataFrame.plot`` with ``kind='hexbin'`` (:issue:`5478`), See :ref:`the docs<visualization.hexbin>`. - - Plotting with Error Bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`), See :ref:`the docs<visualization.errorbars>`. - - ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``, See :ref:`the docs<visualization.table>`. 
- - ``plot(legend='reverse')`` will now reverse the order of legend labels for most plot kinds. (:issue:`6014`) - Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) - - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coodinates as line plot. However, bar plot may differs unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. In this cases, please modify your script to meet with new coordinates. @@ -355,28 +354,19 @@ Prior Version Deprecations/Changes There are prior version deprecations that are taking effect as of 0.14.0. 
- Remove :class:`DateRange` in favor of :class:`DatetimeIndex` (:issue:`6816`) - - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) - - Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`395`) - - Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these function encode in unicode by default (:issue:`2224`, :issue:`2225`) - - Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and :meth:`DataFrame.to_string` (:issue:`275`) - - Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`3256`) - - Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`391`) - - Remove ``name`` keyword from :func:`get_data_yahoo` and :func:`get_data_google` ( `commit b921d1a <https://github.com/pydata/pandas/commit/b921d1a2>`__ ) - - Remove ``offset`` keyword from :class:`DatetimeIndex` constructor ( `commit 3136390 <https://github.com/pydata/pandas/commit/3136390>`__ ) - - Remove ``time_rule`` from several rolling-moment statistical functions, such as :func:`rolling_sum` (:issue:`1042`) diff --git a/pandas/core/series.py b/pandas/core/series.py index d115723c34943..05ce7d8ed7148 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1563,7 +1563,7 @@ def update(self, other): #---------------------------------------------------------------------- # Reindexing, sorting - def sort(self, axis=0, kind='quicksort', order=None, ascending=True): + def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last'): """ Sort values and index labels by value, in place. For compatibility with ndarray API. No return value @@ -1571,12 +1571,14 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True): Parameters ---------- axis : int (can only be zero) + ascending : boolean, default True + Sort ascending. 
Passing False sorts descending kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm - order : ignored - ascending : boolean, default True - Sort ascending. Passing False sorts descending + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end See Also -------- @@ -1588,9 +1590,9 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True): raise ValueError("This Series is a view of some other array, to " "sort in-place you must create a copy") - result = self.order(kind=kind, - ascending=ascending, - na_position='last') + result = self.order(ascending=ascending, + kind=kind, + na_position=na_position) self._update_inplace(result) @@ -1690,7 +1692,7 @@ def rank(self, method='average', na_option='keep', ascending=True, ascending=ascending, pct=pct) return self._constructor(ranks, index=self.index).__finalize__(self) - def order(self, na_last=None, ascending=True, kind='mergesort', na_position='last'): + def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last'): """ Sorts Series object, by value, maintaining index-value link @@ -1700,7 +1702,7 @@ def order(self, na_last=None, ascending=True, kind='mergesort', na_position='las Put NaN's at beginning or end ascending : boolean, default True Sort ascending. Passing False sorts descending - kind : {'mergesort', 'quicksort', 'heapsort'}, default 'mergesort' + kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm na_position : {'first', 'last'} (optional, default='last')
- drop unused `Series.sort` order argument - added `Series.sort na_position` argument to conform with `Series.order` - default kind for `Series.order` is now `quicksort`, same as `Series.sort` and numpy default closes #6847
https://api.github.com/repos/pandas-dev/pandas/pulls/6848
2014-04-09T14:53:16Z
2014-04-09T23:40:11Z
2014-04-09T23:40:11Z
2014-06-22T23:19:41Z
BUG/ENH: Add how kwarg to rolling_* functions [fix #6297]
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index ebda0cde9fb5c..79d85ae9586ed 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -240,7 +240,11 @@ accept the following arguments: or :ref:`DateOffset <timeseries.offsets>` to pre-conform the data to. Note that prior to pandas v0.8.0, a keyword argument ``time_rule`` was used instead of ``freq`` that referred to the legacy time rule constants - + - ``how``: optionally specify method for down or re-sampling. Default is + is min for ``rolling_min``, max for ``rolling_max``, median for + ``rolling_median``, and mean for all other rolling functions. See + :meth:`DataFrame.resample`'s how argument for more information. + These functions can be applied to ndarrays or Series objects: .. ipython:: python diff --git a/doc/source/release.rst b/doc/source/release.rst index 03b89f9077994..caf0afd830217 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -265,6 +265,8 @@ Improvements to existing features - ``Float64Index`` is now backed by a ``float64`` dtype ndarray instead of an ``object`` dtype array (:issue:`6471`). - Add option to turn off escaping in ``DataFrame.to_latex`` (:issue:`6472`) +- Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, + :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) .. _release.bug_fixes-0.14.0: diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 187a757f53d45..9e776201a5cc9 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -471,6 +471,8 @@ Enhancements - ``Float64Index`` is now backed by a ``float64`` dtype ndarray instead of an ``object`` dtype array (:issue:`6471`). 
- Implemented ``Panel.pct_change`` (:issue:`6904`) +- Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, + :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) Performance ~~~~~~~~~~~ diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 3f6352d5cbfbf..246037c7d7009 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -57,6 +57,8 @@ as a frequency string or DateOffset object. center : boolean, default False Set the labels at the center of the window. +how : string, default '%s' + Method for down- or re-sampling """ _roll_notes = r""" @@ -85,6 +87,8 @@ adjust : boolean, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average) +how : string, default 'mean' + Method for down- or re-sampling """ _ewm_notes = r""" @@ -148,7 +152,7 @@ """ -def rolling_count(arg, window, freq=None, center=False): +def rolling_count(arg, window, freq=None, center=False, how=None): """ Rolling count of number of non-NaN observations inside provided window. @@ -163,6 +167,8 @@ def rolling_count(arg, window, freq=None, center=False): as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window + how : string, default 'mean' + Method for down- or re-sampling Returns ------- @@ -174,7 +180,7 @@ def rolling_count(arg, window, freq=None, center=False): frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). 
""" - arg = _conv_timerule(arg, freq) + arg = _conv_timerule(arg, freq, how) window = min(window, len(arg)) return_hook, values = _process_data_structure(arg, kill_inf=False) @@ -190,10 +196,10 @@ def rolling_count(arg, window, freq=None, center=False): @Substitution("Unbiased moving covariance.", _binary_arg_flex, - _roll_kw+_pairwise_kw, _flex_retval, _roll_notes) + _roll_kw%'None'+_pairwise_kw, _flex_retval, _roll_notes) @Appender(_doc_template) def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, - center=False, pairwise=None): + center=False, pairwise=None, how=None): if window is None and isinstance(arg2, (int, float)): window = arg2 arg2 = arg1 @@ -201,8 +207,8 @@ def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, elif arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise # only default unset - arg1 = _conv_timerule(arg1, freq) - arg2 = _conv_timerule(arg2, freq) + arg1 = _conv_timerule(arg1, freq, how) + arg2 = _conv_timerule(arg2, freq, how) window = min(window, len(arg1), len(arg2)) def _get_cov(X, Y): @@ -215,10 +221,10 @@ def _get_cov(X, Y): @Substitution("Moving sample correlation.", _binary_arg_flex, - _roll_kw+_pairwise_kw, _flex_retval, _roll_notes) + _roll_kw%'None'+_pairwise_kw, _flex_retval, _roll_notes) @Appender(_doc_template) def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, - center=False, pairwise=None): + center=False, pairwise=None, how=None): if window is None and isinstance(arg2, (int, float)): window = arg2 arg2 = arg1 @@ -226,8 +232,8 @@ def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, elif arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise # only default unset - arg1 = _conv_timerule(arg1, freq) - arg2 = _conv_timerule(arg2, freq) + arg1 = _conv_timerule(arg1, freq, how) + arg2 = _conv_timerule(arg2, freq, how) window = min(window, len(arg1), len(arg2)) def _get_corr(a, b): @@ -289,7 +295,7 
@@ def _flex_binary_moment(arg1, arg2, f, pairwise=False): @Substitution("Deprecated. Use rolling_corr(..., pairwise=True) instead.\n\n" "Pairwise moving sample correlation", _pairwise_arg, - _roll_kw, _pairwise_retval, _roll_notes) + _roll_kw%'None', _pairwise_retval, _roll_notes) @Appender(_doc_template) def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None, freq=None, center=False): @@ -301,7 +307,7 @@ def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None, def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, - args=(), kwargs={}, **kwds): + how=None, args=(), kwargs={}, **kwds): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. @@ -318,6 +324,8 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window + how : string, default 'mean' + Method for down- or re-sampling args : tuple Passed on to func kwargs : dict @@ -327,7 +335,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, ------- y : type of input """ - arg = _conv_timerule(arg, freq) + arg = _conv_timerule(arg, freq, how) calc = lambda x: func(x, window, minp=minp, args=args, kwargs=kwargs, **kwds) return_hook, values = _process_data_structure(arg) @@ -413,9 +421,9 @@ def _get_center_of_mass(com, span, halflife): _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None, - adjust=True): + adjust=True, how=None): com = _get_center_of_mass(com, span, halflife) - arg = _conv_timerule(arg, freq) + arg = _conv_timerule(arg, freq, how) def _ewma(v): result = algos.ewma(v, com, int(adjust)) @@ -437,9 +445,9 @@ def _first_valid_index(arr): _ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def 
ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, - freq=None): + freq=None, how=None): com = _get_center_of_mass(com, span, halflife) - arg = _conv_timerule(arg, freq) + arg = _conv_timerule(arg, freq, how) moment2nd = ewma(arg * arg, com=com, min_periods=min_periods) moment1st = ewma(arg, com=com, min_periods=min_periods) @@ -465,7 +473,7 @@ def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False): _ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, - bias=False, freq=None, pairwise=None): + bias=False, freq=None, pairwise=None, how=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -473,8 +481,8 @@ def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, com = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - arg1 = _conv_timerule(arg1, freq) - arg2 = _conv_timerule(arg2, freq) + arg1 = _conv_timerule(arg1, freq, how) + arg2 = _conv_timerule(arg2, freq, how) def _get_ewmcov(X, Y): mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods) @@ -492,7 +500,7 @@ def _get_ewmcov(X, Y): _ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, - freq=None, pairwise=None): + freq=None, pairwise=None, how=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -500,8 +508,8 @@ def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, com = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - arg1 = _conv_timerule(arg1, freq) - arg2 = _conv_timerule(arg2, freq) + arg1 = _conv_timerule(arg1, freq, how) + arg2 = _conv_timerule(arg2, freq, how) def _get_ewmcorr(X, Y): mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, 
min_periods=min_periods) @@ -541,12 +549,12 @@ def _prep_binary(arg1, arg2): # Python interface to Cython functions -def _conv_timerule(arg, freq): +def _conv_timerule(arg, freq, how): types = (DataFrame, Series) if freq is not None and isinstance(arg, types): # Conform to whatever frequency needed. - arg = arg.resample(freq) + arg = arg.resample(freq, how=how) return arg @@ -567,25 +575,32 @@ def _use_window(minp, window): return minp -def _rolling_func(func, desc, check_minp=_use_window): - @Substitution(desc, _unary_arg, _roll_kw, _type_of_input_retval, _roll_notes) +def _rolling_func(func, desc, check_minp=_use_window, how=None): + if how is None: + how_arg_str = 'None' + else: + how_arg_str = "'%s"%how + + @Substitution(desc, _unary_arg, _roll_kw%how_arg_str, _type_of_input_retval, + _roll_notes) @Appender(_doc_template) @wraps(func) - def f(arg, window, min_periods=None, freq=None, center=False, + def f(arg, window, min_periods=None, freq=None, center=False, how=how, **kwargs): def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): minp = check_minp(minp, window) return func(arg, window, minp, **kwds) return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, - center=center, **kwargs) + center=center, how=how, **kwargs) return f -rolling_max = _rolling_func(algos.roll_max2, 'Moving maximum.') -rolling_min = _rolling_func(algos.roll_min2, 'Moving minimum.') +rolling_max = _rolling_func(algos.roll_max2, 'Moving maximum.', how='max') +rolling_min = _rolling_func(algos.roll_min2, 'Moving minimum.', how='min') rolling_sum = _rolling_func(algos.roll_sum, 'Moving sum.') rolling_mean = _rolling_func(algos.roll_mean, 'Moving mean.') -rolling_median = _rolling_func(algos.roll_median_cython, 'Moving median.') +rolling_median = _rolling_func(algos.roll_median_cython, 'Moving median.', + how='median') _ts_std = lambda *a, **kw: _zsqrt(algos.roll_var(*a, **kw)) rolling_std = _rolling_func(_ts_std, 'Unbiased moving standard deviation.', @@ -687,7 
+702,7 @@ def call_cython(arg, window, minp, args, kwargs): def rolling_window(arg, window=None, win_type=None, min_periods=None, freq=None, center=False, mean=True, - axis=0, **kwargs): + axis=0, how=None, **kwargs): """ Applies a moving window of type ``window_type`` and size ``window`` on the data. @@ -711,6 +726,8 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, mean : boolean, default True If True computes weighted mean, else weighted sum axis : {0, 1}, default 0 + how : string, default 'mean' + Method for down- or re-sampling Returns ------- @@ -761,7 +778,7 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, minp = _use_window(min_periods, len(window)) - arg = _conv_timerule(arg, freq) + arg = _conv_timerule(arg, freq, how) return_hook, values = _process_data_structure(arg) f = lambda x: algos.roll_window(x, window, minp, avg=mean) diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 22661ea7cacda..8c9eb080cfc61 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -848,6 +848,97 @@ def _check_expanding(self, func, static_comp, has_min_periods=True, preserve_nan=preserve_nan) self._check_expanding_structures(func) + def test_rolling_max_gh6297(self): + """Replicate result expected in GH #6297""" + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 2 datapoints on one of the days + indices.append(datetime(1975, 1, 3, 6, 0)) + series = Series(range(1, 7), index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + expected = Series([1.0, 2.0, 6.0, 4.0, 5.0], + index=[datetime(1975, 1, i, 0) + for i in range(1, 6)]) + x = mom.rolling_max(series, window=1, freq='D') + assert_series_equal(expected, x) + + def test_rolling_max_how_resample(self): + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we 
can have 3 datapoints on last day (4, 10, and 20) + indices.append(datetime(1975, 1, 5, 1)) + indices.append(datetime(1975, 1, 5, 2)) + series = Series(list(range(0, 5)) + [10, 20], index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + # Default how should be max + expected = Series([0.0, 1.0, 2.0, 3.0, 20.0], + index=[datetime(1975, 1, i, 0) + for i in range(1, 6)]) + x = mom.rolling_max(series, window=1, freq='D') + assert_series_equal(expected, x) + + # Now specify median (10.0) + expected = Series([0.0, 1.0, 2.0, 3.0, 10.0], + index=[datetime(1975, 1, i, 0) + for i in range(1, 6)]) + x = mom.rolling_max(series, window=1, freq='D', how='median') + assert_series_equal(expected, x) + + # Now specify mean (4+10+20)/3 + v = (4.0+10.0+20.0)/3.0 + expected = Series([0.0, 1.0, 2.0, 3.0, v], + index=[datetime(1975, 1, i, 0) + for i in range(1, 6)]) + x = mom.rolling_max(series, window=1, freq='D', how='mean') + assert_series_equal(expected, x) + + + def test_rolling_min_how_resample(self): + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 3 datapoints on last day (4, 10, and 20) + indices.append(datetime(1975, 1, 5, 1)) + indices.append(datetime(1975, 1, 5, 2)) + series = Series(list(range(0, 5)) + [10, 20], index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + # Default how should be min + expected = Series([0.0, 1.0, 2.0, 3.0, 4.0], + index=[datetime(1975, 1, i, 0) + for i in range(1, 6)]) + x = mom.rolling_min(series, window=1, freq='D') + assert_series_equal(expected, x) + + def test_rolling_median_how_resample(self): + + indices = [datetime(1975, 1, i) for i in range(1, 6)] + # So that we can have 3 datapoints on last day (4, 10, and 20) + indices.append(datetime(1975, 1, 5, 1)) + indices.append(datetime(1975, 1, 5, 2)) + series 
= Series(list(range(0, 5)) + [10, 20], index=indices) + # Use floats instead of ints as values + series = series.map(lambda x: float(x)) + # Sort chronologically + series = series.sort_index() + + # Default how should be median + expected = Series([0.0, 1.0, 2.0, 3.0, 10], + index=[datetime(1975, 1, i, 0) + for i in range(1, 6)]) + x = mom.rolling_median(series, window=1, freq='D') + assert_series_equal(expected, x) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
Fixes #6297 Figured while I was in the area, would submit a PR to do this too. Let me know if you want to see more unit tests than the one.
https://api.github.com/repos/pandas-dev/pandas/pulls/6845
2014-04-09T01:47:22Z
2014-04-22T13:09:37Z
2014-04-22T13:09:37Z
2014-06-27T21:48:09Z
DOC: Use correct git hub numbers for deprecations [#6641]
diff --git a/doc/source/release.rst b/doc/source/release.rst index 1c1ecde5b95b8..d0de16524b035 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -195,27 +195,29 @@ Prior Version Deprecations/Changes - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) -- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`6641`) +- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`395`) - Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these function - encode in unicode by default (:issue:`6641`) + encode in unicode by default (:issue:`2224`, :issue:`2225`) - Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and - :meth:`DataFrame.to_string` (:issue:`6641`) + :meth:`DataFrame.to_string` (:issue:`275`) -- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`6641`) +- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`3256`) -- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`6641`) +- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`391`) - Remove ``name`` keyword from :func:`get_data_yahoo` and - :func:`get_data_google` (:issue:`6641`) + :func:`get_data_google` (`commit b921d1a +<https://github.com/pydata/pandas/commit/b921d1a2>`__) - Remove ``offset`` keyword from :class:`DatetimeIndex` constructor - (:issue:`6641`) + (`commit 3136390 +<https://github.com/pydata/pandas/commit/3136390>`__) - Remove ``time_rule`` from several rolling-moment statistical functions, such - as :func:`rolling_sum` (:issue:`6641`) + as :func:`rolling_sum` (:issue:`1042`) Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 42b50a7b21674..6eb47b3e9d753 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -355,27 +355,29 @@ There are prior version deprecations that are taking effect as of 0.14.0. 
- Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) -- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`6641`) +- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`395`) - Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these function - encode in unicode by default (:issue:`6641`) + encode in unicode by default (:issue:`2224`, :issue:`2225`) - Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and - :meth:`DataFrame.to_string` (:issue:`6641`) + :meth:`DataFrame.to_string` (:issue:`275`) -- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`6641`) +- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`3256`) -- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`6641`) +- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`391`) - Remove ``name`` keyword from :func:`get_data_yahoo` and - :func:`get_data_google` (:issue:`6641`) + :func:`get_data_google` (`commit b921d1a +<https://github.com/pydata/pandas/commit/b921d1a2>`__) - Remove ``offset`` keyword from :class:`DatetimeIndex` constructor - (:issue:`6641`) + (`commit 3136390 +<https://github.com/pydata/pandas/commit/3136390>`__) - Remove ``time_rule`` from several rolling-moment statistical functions, such - as :func:`rolling_sum` (:issue:`6641`) + as :func:`rolling_sum` (:issue:`1042`) .. _whatsnew_0140.deprecations:
https://api.github.com/repos/pandas-dev/pandas/pulls/6844
2014-04-09T01:20:28Z
2014-04-09T01:58:22Z
2014-04-09T01:58:22Z
2014-06-23T03:13:56Z
DOC: shrink bounding box on table plot
diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 09decc5ed1e25..a625015eb3148 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -429,7 +429,16 @@ Plotting with matplotlib table is now supported in the ``DataFrame.plot`` and `` ax.get_xaxis().set_visible(False) # Hide Ticks @savefig line_plot_table_true.png + + msgs = [] df.plot(table=True, ax=ax) + msgs.append("<<<DEBUGGING TABLE>>>") + msgs.append("<<< fig size: {}".format(fig.get_size_inches())) + msgs.append("<<< fig extent: {}".format(fig.get_window_extent())) + msgs.append("<<< ax position: {}".format(ax.get_position())) + msgs.append("<<< ax extent: {}".format(ax.get_window_extent())) + msgs.append("<<< tab clip: {}".format(ax.tables[0].get_clip_box())) + raise Exception('\n'.join(msgs)) Also, you can pass different ``DataFrame`` or ``Series`` for ``table`` keyword. The data will be drawn as displayed in print method (not transposed automatically). If required, it should be transposed manually as below example. @@ -437,6 +446,7 @@ Also, you can pass different ``DataFrame`` or ``Series`` for ``table`` keyword. fig, ax = plt.subplots(1, 1) ax.get_xaxis().set_visible(False) # Hide Ticks + ax.set_position(pos=[.125, .4, .5, .5]) @savefig line_plot_table_data.png df.plot(table=np.round(df.T, 2), ax=ax)
Doc formatting issue brought up in https://github.com/pydata/pandas/pull/6661#issuecomment-39748330 @jreback any way to view the doc build before merging into master? I was looking into `pandas/ci/build_docs.sh` but it looks likes I'd need to set up another travis job and a new GH pages site.
https://api.github.com/repos/pandas-dev/pandas/pulls/6841
2014-04-08T14:14:06Z
2014-04-10T19:23:13Z
null
2017-05-15T21:15:59Z
Additional documentation for holiday calendars
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 5e1025b2d24dd..899bc2232f161 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -541,10 +541,10 @@ calendars which account for local holidays and local weekend conventions. holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')] bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt) dt = datetime(2013, 4, 30) - print(dt + 2 * bday_egypt) + dt + 2 * bday_egypt dts = date_range(dt, periods=5, freq=bday_egypt).to_series() - print(dts) - print(Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split()))) + dts + Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split())) As of v0.14 holiday calendars can be used to provide the list of holidays. See the :ref:`holiday calendar<timeseries.holiday>` section for more information. @@ -553,8 +553,10 @@ As of v0.14 holiday calendars can be used to provide the list of holidays. See from pandas.tseries.holiday import USFederalHolidayCalendar bday_us = CustomBusinessDay(calendar=USFederalHolidayCalendar()) - dt = datetime(2014, 1, 17) #Friday before MLK Day - print(dt + bday_us) #Tuesday after MLK Day + # Friday before MLK Day + dt = datetime(2014, 1, 17) + # Tuesday after MLK Day (Monday is skipped because it's a holiday) + dt + bday_us .. 
note:: @@ -767,12 +769,36 @@ An example of how holidays and holiday calendars are defined: offset=DateOffset(weekday=MO(2))), #same as 2*Week(weekday=2) ] cal = ExampleCalendar() - datetime(2012, 5, 25) + CustomBusinessDay(calendar=cal) - cal.holidays(datetime(2012, 1, 1), datetime(2012, 12, 31))#holiday list - AbstractHolidayCalendar.start_date #default start date of range - AbstractHolidayCalendar.end_date #default end date of range - AbstractHolidayCalendar.start_date = datetime(2012, 1, 1)#or Timestamp - AbstractHolidayCalendar.end_date = datetime(2012, 12, 31)#or Timestamp + cal.holidays(datetime(2012, 1, 1), datetime(2012, 12, 31)) + +Using this calendar, creating an index or doing offset arithmetic skips weekends +and holidays (i.e., Memorial Day/July 4th). + +.. ipython:: python + + DatetimeIndex(start='7/1/2012', end='7/10/2012', + freq=CDay(calendar=cal)).to_pydatetime() + offset = CustomBusinessDay(calendar=cal) + datetime(2012, 5, 25) + offset + datetime(2012, 7, 3) + offset + datetime(2012, 7, 3) + 2 * offset + datetime(2012, 7, 6) + offset + +Ranges are defined by the ``start_date`` and ``end_date`` class attributes +of ``AbstractHolidayCalendar``. The defaults are below. + +.. ipython:: python + + AbstractHolidayCalendar.start_date + AbstractHolidayCalendar.end_date + +These dates can be overwritten by setting the attributes as +datetime/Timestamp/string. + +.. ipython:: python + + AbstractHolidayCalendar.start_date = datetime(2012, 1, 1) + AbstractHolidayCalendar.end_date = datetime(2012, 12, 31) cal.holidays() Every calendar class is accessible by name using the ``get_calendar`` function diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index febcdc11d30cf..6291be340d651 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -225,13 +225,6 @@ def holidays(self, start=None, end=None, return_name=False): ------- DatetimeIndex of holidays """ - #FIXME: Where should the default limits exist? 
- if start is None: - start = datetime(1970, 1, 1) - - if end is None: - end = datetime(2030, 12, 31) - if self.rules is None: raise Exception('Holiday Calendar %s does not have any '\ 'rules specified' % self.name)
@jreback here are the additional holiday examples. Let me know if you want something else.
https://api.github.com/repos/pandas-dev/pandas/pulls/6840
2014-04-08T13:36:40Z
2014-04-09T00:35:49Z
2014-04-09T00:35:49Z
2014-07-16T09:00:59Z
ENH: Scatter plot now supports errorbar
diff --git a/doc/source/release.rst b/doc/source/release.rst index a0a96ebbd5c70..8853ce79f3d04 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -63,7 +63,7 @@ New features Date is used primarily in astronomy and represents the number of days from noon, January 1, 4713 BC. Because nanoseconds are used to define the time in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) -- Added error bar support to the ``.plot`` method of ``DataFrame`` and ``Series`` (:issue:`3796`) +- Added error bar support to the ``.plot`` method of ``DataFrame`` and ``Series`` (:issue:`3796`, :issue:`6834`) - Implemented ``Panel.pct_change`` (:issue:`6904`) API Changes diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index a6caa075f6358..c70e32fd18694 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -365,7 +365,7 @@ Plotting - Hexagonal bin plots from ``DataFrame.plot`` with ``kind='hexbin'`` (:issue:`5478`), See :ref:`the docs<visualization.hexbin>`. - ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`) -- Plotting with Error Bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`), See :ref:`the docs<visualization.errorbars>`. +- Plotting with Error Bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`, :issue:`6834`), See :ref:`the docs<visualization.errorbars>`. - ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``, See :ref:`the docs<visualization.table>`. - ``plot(legend='reverse')`` will now reverse the order of legend labels for most plot kinds. 
(:issue:`6014`) diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 5255ddf3c33e7..8906e82eb937b 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -394,10 +394,13 @@ x and y errorbars are supported and be supplied using the ``xerr`` and ``yerr`` - As a ``DataFrame`` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting ``DataFrame`` or matching the ``name`` attribute of the ``Series`` - As a ``str`` indicating which of the columns of plotting ``DataFrame`` contain the error values -- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting ``DataFrame``/``Series`` +- As list-like raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting ``DataFrame``/``Series`` +- As float. The error value will be applied to all data. Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length ``Series``, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` ``DataFrame``, asymmetrical errors should be in a ``Mx2xN`` array. +**Note**: Plotting ``xerr`` is not supported in time series. + Here is an example of one way to easily plot group means with standard deviations from the raw data. .. 
ipython:: python diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 8b79c9e9d1307..0186ac4c2b74b 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1,3 +1,6 @@ +#!/usr/bin/env python +# coding: utf-8 + import nose import os import string @@ -27,7 +30,6 @@ def _skip_if_no_scipy(): except ImportError: raise nose.SkipTest("no scipy") - @tm.mplskip class TestSeriesPlots(tm.TestCase): def setUp(self): @@ -315,24 +317,36 @@ def test_dup_datetime_index_plot(self): @slow def test_errorbar_plot(self): - s = Series(np.arange(10)) + s = Series(np.arange(10), name='x') s_err = np.random.randn(10) - + d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y']) # test line and bar plots kinds = ['line', 'bar'] for kind in kinds: - _check_plot_works(s.plot, yerr=Series(s_err), kind=kind) - _check_plot_works(s.plot, yerr=s_err, kind=kind) - _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind) - - _check_plot_works(s.plot, xerr=s_err) + ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(s.plot, yerr=s_err, kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(s.plot, yerr=d_err, kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind) + _check_has_errorbars(self, ax, xerr=1, yerr=1) + + ax = _check_plot_works(s.plot, xerr=s_err) + _check_has_errorbars(self, ax, xerr=1, yerr=0) # test time series plotting ix = date_range('1/1/2000', '1/1/2001', freq='M') - ts = Series(np.arange(12), index=ix) + ts = Series(np.arange(12), index=ix, name='x') ts_err = Series(np.random.randn(12), index=ix) + td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y']) - _check_plot_works(ts.plot, yerr=ts_err) + ax = 
_check_plot_works(ts.plot, yerr=ts_err) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(ts.plot, yerr=td_err) + _check_has_errorbars(self, ax, xerr=0, yerr=1) # check incorrect lengths and types with tm.assertRaises(ValueError): @@ -1505,27 +1519,51 @@ def test_errorbar_plot(self): df_err = DataFrame(d_err) # check line plots - _check_plot_works(df.plot, yerr=df_err, logy=True) - _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True) - _check_plot_works(df.plot, yerr=df_err, loglog=True) + ax = _check_plot_works(df.plot, yerr=df_err, logy=True) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, yerr=df_err, loglog=True) + _check_has_errorbars(self, ax, xerr=0, yerr=2) kinds = ['line', 'bar', 'barh'] for kind in kinds: - _check_plot_works(df.plot, yerr=df_err['x'], kind=kind) - _check_plot_works(df.plot, yerr=d_err, kind=kind) - _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind) - _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind) - _check_plot_works(df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind) + ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, yerr=d_err, kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind) + _check_has_errorbars(self, ax, xerr=2, yerr=2) + ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind) + _check_has_errorbars(self, ax, xerr=2, yerr=2) + ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind) + _check_has_errorbars(self, ax, xerr=2, yerr=2) + axes = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind) + for ax in axes: + _check_has_errorbars(self, ax, xerr=1, yerr=1) - 
_check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True) + ax = _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True) + _check_has_errorbars(self, ax, xerr=2, yerr=2) # yerr is raw error values - _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4) - _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4) + ax = _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + + # yerr is iterator + import itertools + ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df))) + _check_has_errorbars(self, ax, xerr=0, yerr=2) # yerr is column name - df['yerr'] = np.ones(12)*0.2 - _check_plot_works(df.plot, y='y', x='x', yerr='yerr') + for yerr in ['yerr', u('誤差')]: + s_df = df.copy() + s_df[yerr] = np.ones(12)*0.2 + ax = _check_plot_works(s_df.plot, yerr=yerr) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr) + _check_has_errorbars(self, ax, xerr=0, yerr=1) with tm.assertRaises(ValueError): df.plot(yerr=np.random.randn(11)) @@ -1539,8 +1577,10 @@ def test_errorbar_with_integer_column_names(self): # test with integer column names df = DataFrame(np.random.randn(10, 2)) df_err = DataFrame(np.random.randn(10, 2)) - _check_plot_works(df.plot, yerr=df_err) - _check_plot_works(df.plot, y=0, yerr=1) + ax = _check_plot_works(df.plot, yerr=df_err) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, y=0, yerr=1) + _check_has_errorbars(self, ax, xerr=0, yerr=1) @slow def test_errorbar_with_partial_columns(self): @@ -1548,12 +1588,22 @@ def test_errorbar_with_partial_columns(self): df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2]) kinds = ['line', 'bar'] for kind in kinds: - _check_plot_works(df.plot, yerr=df_err, kind=kind) + ax = _check_plot_works(df.plot, yerr=df_err, 
kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=2) ix = date_range('1/1/2000', periods=10, freq='M') df.set_index(ix, inplace=True) df_err.set_index(ix, inplace=True) - _check_plot_works(df.plot, yerr=df_err, kind='line') + ax = _check_plot_works(df.plot, yerr=df_err, kind='line') + _check_has_errorbars(self, ax, xerr=0, yerr=2) + + d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {'x': np.ones(12)*0.2, 'z': np.ones(12)*0.4} + df_err = DataFrame(d_err) + for err in [d_err, df_err]: + ax = _check_plot_works(df.plot, yerr=err) + _check_has_errorbars(self, ax, xerr=0, yerr=1) @slow def test_errorbar_timeseries(self): @@ -1568,13 +1618,19 @@ def test_errorbar_timeseries(self): kinds = ['line', 'bar', 'barh'] for kind in kinds: - _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) - _check_plot_works(tdf.plot, yerr=d_err, kind=kind) - _check_plot_works(tdf.plot, y='y', kind=kind) - _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind) - _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) - _check_plot_works(tdf.plot, kind=kind, subplots=True) - + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'], kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(self, ax, xerr=0, yerr=2) + axes = _check_plot_works(tdf.plot, kind=kind, yerr=tdf_err, subplots=True) + for ax in axes: + _check_has_errorbars(self, ax, xerr=0, yerr=1) def test_errorbar_asymmetrical(self): @@ -1608,6 +1664,21 @@ def test_table(self): plotting.table(ax, df.T) self.assert_(len(ax.tables) == 1) + def test_errorbar_scatter(self): + df = 
DataFrame(np.random.randn(5, 2), index=range(5), columns=['x', 'y']) + df_err = DataFrame(np.random.randn(5, 2) / 5, + index=range(5), columns=['x', 'y']) + + ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y') + _check_has_errorbars(self, ax, xerr=0, yerr=0) + ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', xerr=df_err) + _check_has_errorbars(self, ax, xerr=1, yerr=0) + ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', yerr=df_err) + _check_has_errorbars(self, ax, xerr=0, yerr=1) + ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', + xerr=df_err, yerr=df_err) + _check_has_errorbars(self, ax, xerr=1, yerr=1) + @tm.mplskip class TestDataFrameGroupByPlots(tm.TestCase): @@ -1803,8 +1874,24 @@ def assert_is_valid_plot_return_object(objs): ''.format(objs.__class__.__name__)) +def _check_has_errorbars(t, ax, xerr=0, yerr=0): + containers = ax.containers + xerr_count = 0 + yerr_count = 0 + for c in containers: + has_xerr = getattr(c, 'has_xerr', False) + has_yerr = getattr(c, 'has_yerr', False) + if has_xerr: + xerr_count += 1 + if has_yerr: + yerr_count += 1 + t.assertEqual(xerr, xerr_count) + t.assertEqual(yerr, yerr_count) + + def _check_plot_works(f, *args, **kwargs): import matplotlib.pyplot as plt + ret = None try: try: diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index ab3717d52e4f2..55aa01fd2e265 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -16,7 +16,7 @@ from pandas.tseries.period import PeriodIndex, Period from pandas.tseries.frequencies import get_period_alias, get_base_alias from pandas.tseries.offsets import DateOffset -from pandas.compat import range, lrange, lmap, map, zip +from pandas.compat import range, lrange, lmap, map, zip, string_types import pandas.compat as compat try: # mpl optional @@ -837,9 +837,11 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=True, self.axes = None # parse errorbar input if given - for err_dim in 'xy': - if 
err_dim+'err' in kwds: - kwds[err_dim+'err'] = self._parse_errorbars(error_dim=err_dim, **kwds) + xerr = kwds.pop('xerr', None) + yerr = kwds.pop('yerr', None) + self.errors = {} + for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]): + self.errors[kw] = self._parse_errorbars(kw, err) if not isinstance(secondary_y, (bool, tuple, list, np.ndarray)): secondary_y = [secondary_y] @@ -1185,8 +1187,7 @@ def _get_plot_function(self): the presence of errorbar keywords. ''' - if ('xerr' not in self.kwds) and \ - ('yerr' not in self.kwds): + if all(e is None for e in self.errors.values()): plotf = self.plt.Axes.plot else: plotf = self.plt.Axes.errorbar @@ -1266,7 +1267,7 @@ def _maybe_add_color(self, colors, kwds, style, i): if has_color and (style is None or re.match('[a-z]+', style) is None): kwds['color'] = colors[i % len(colors)] - def _parse_errorbars(self, error_dim='y', **kwds): + def _parse_errorbars(self, label, err): ''' Look for error keyword arguments and return the actual errorbar data or return the error DataFrame/dict @@ -1280,47 +1281,48 @@ def _parse_errorbars(self, error_dim='y', **kwds): str: the name of the column within the plotted DataFrame ''' - err_kwd = kwds.pop(error_dim+'err', None) - if err_kwd is None: + if err is None: return None from pandas import DataFrame, Series - def match_labels(data, err): - err = err.reindex_axis(data.index) - return err + def match_labels(data, e): + e = e.reindex_axis(data.index) + return e # key-matched DataFrame - if isinstance(err_kwd, DataFrame): - err = err_kwd - err = match_labels(self.data, err) + if isinstance(err, DataFrame): + err = match_labels(self.data, err) # key-matched dict - elif isinstance(err_kwd, dict): - err = err_kwd + elif isinstance(err, dict): + pass # Series of error values - elif isinstance(err_kwd, Series): + elif isinstance(err, Series): # broadcast error series across data - err = match_labels(self.data, err_kwd) + err = match_labels(self.data, err) err = np.atleast_2d(err) err = 
np.tile(err, (self.nseries, 1)) # errors are a column in the dataframe - elif isinstance(err_kwd, str): - err = np.atleast_2d(self.data[err_kwd].values) - self.data = self.data[self.data.columns.drop(err_kwd)] + elif isinstance(err, string_types): + evalues = self.data[err].values + self.data = self.data[self.data.columns.drop(err)] + err = np.atleast_2d(evalues) err = np.tile(err, (self.nseries, 1)) - elif isinstance(err_kwd, (tuple, list, np.ndarray)): - - # raw error values - err = np.atleast_2d(err_kwd) + elif com.is_list_like(err): + if com.is_iterator(err): + err = np.atleast_2d(list(err)) + else: + # raw error values + err = np.atleast_2d(err) err_shape = err.shape # asymmetrical error bars - if err.ndim==3: + if err.ndim == 3: if (err_shape[0] != self.nseries) or \ (err_shape[1] != 2) or \ (err_shape[2] != len(self.data)): @@ -1330,15 +1332,39 @@ def match_labels(data, err): raise ValueError(msg) # broadcast errors to each data series - if len(err)==1: + if len(err) == 1: err = np.tile(err, (self.nseries, 1)) + elif com.is_number(err): + err = np.tile([err], (self.nseries, len(self.data))) + else: - msg = "No valid %serr detected" % error_dim + msg = "No valid %s detected" % label raise ValueError(msg) return err + def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): + from pandas import DataFrame + errors = {} + + for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]): + if flag: + err = self.errors[kw] + # user provided label-matched dataframe of errors + if isinstance(err, (DataFrame, dict)): + if label is not None and label in err.keys(): + err = err[label] + else: + err = None + elif index is not None and err is not None: + err = err[index] + + if err is not None: + errors[kw] = err + return errors + + class KdePlot(MPLPlot): def __init__(self, data, bw_method=None, ind=None, **kwargs): MPLPlot.__init__(self, data, **kwargs) @@ -1418,6 +1444,14 @@ def _make_plot(self): self._add_legend_handle(scatter, label) + errors_x = 
self._get_errorbars(label=x, index=0, yerr=False) + errors_y = self._get_errorbars(label=y, index=1, xerr=False) + if len(errors_x) > 0 or len(errors_y) > 0: + err_kwds = dict(errors_x, **errors_y) + if 'color' in self.kwds: + err_kwds['color'] = self.kwds['color'] + ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds) + def _post_plot_logic(self): ax = self.axes[0] x, y = self.x, self.y @@ -1558,16 +1592,9 @@ def _make_plot(self): kwds = self.kwds.copy() self._maybe_add_color(colors, kwds, style, i) - for err_kw in ['xerr', 'yerr']: - # user provided label-matched dataframe of errors - if err_kw in kwds: - if isinstance(kwds[err_kw], (DataFrame, dict)): - if label in kwds[err_kw].keys(): - kwds[err_kw] = kwds[err_kw][label] - else: del kwds[err_kw] - elif kwds[err_kw] is not None: - kwds[err_kw] = kwds[err_kw][i] - + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + label = com.pprint_thing(label) # .encode('utf-8') kwds['label'] = label @@ -1629,7 +1656,6 @@ def _plot(data, ax, label, style, **kwds): return _plot def _make_ts_plot(self, data, **kwargs): - from pandas.core.frame import DataFrame colors = self._get_colors() plotf = self._get_ts_plot_function() @@ -1641,15 +1667,8 @@ def _make_ts_plot(self, data, **kwargs): self._maybe_add_color(colors, kwds, style, i) - # key-matched DataFrame of errors - if 'yerr' in kwds: - yerr = kwds['yerr'] - if isinstance(yerr, (DataFrame, dict)): - if label in yerr.keys(): - kwds['yerr'] = yerr[label] - else: del kwds['yerr'] - else: - kwds['yerr'] = yerr[i] + errors = self._get_errorbars(label=label, index=i, xerr=False) + kwds = dict(kwds, **errors) label = com.pprint_thing(label) @@ -1833,8 +1852,6 @@ def f(ax, x, y, w, start=None, log=self.log, **kwds): def _make_plot(self): import matplotlib as mpl - from pandas import DataFrame, Series - # mpl decided to make their version string unicode across all Python # versions for mpl >= 1.3 so we have to call str here for 
python 2 mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1') @@ -1853,15 +1870,8 @@ def _make_plot(self): kwds = self.kwds.copy() kwds['color'] = colors[i % ncolors] - for err_kw in ['xerr', 'yerr']: - if err_kw in kwds: - # user provided label-matched dataframe of errors - if isinstance(kwds[err_kw], (DataFrame, dict)): - if label in kwds[err_kw].keys(): - kwds[err_kw] = kwds[err_kw][label] - else: del kwds[err_kw] - elif kwds[err_kw] is not None: - kwds[err_kw] = kwds[err_kw][i] + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) label = com.pprint_thing(label) @@ -2074,7 +2084,7 @@ def plot_frame(frame=None, x=None, y=None, subplots=False, sharex=True, for kw in ['xerr', 'yerr']: if (kw in kwds) and \ - (isinstance(kwds[kw], str) or com.is_integer(kwds[kw])): + (isinstance(kwds[kw], string_types) or com.is_integer(kwds[kw])): try: kwds[kw] = frame[kwds[kw]] except (IndexError, KeyError, TypeError):
This is enhancement for #5638. I sometimes want to plot scatter with errorbars, thus I've refactored to support it. ![figure_1](https://cloud.githubusercontent.com/assets/1696302/2632517/21edcfd6-be65-11e3-9cd9-97bad65c8129.png) Also, this includes following fixes. I've also modified tests to check the number of errorbars actually drawn. - There are some types which expected to work, but results in `ValueError`. - `float` (MPL can accept it) - `unicode` (in Python 2.x) - Other list-like, such as `iterator` - A bug related to time series. ``` >>> s = Series(rand(6), index=range(6), name='x') >>> err_df = DataFrame(rand(6, 3) / 10, index=range(6), columns=['x', 'y', 'z']) >>> s.plot(yerr=err_df) # This works, errorbar appears using err_df x column >>> s.index = tm.makeDateIndex(k=6) >>> s.plot(yerr=err_df) # But this doesn't, errorbar doesn't appear ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6834
2014-04-07T15:07:27Z
2014-05-02T20:40:47Z
2014-05-02T20:40:47Z
2014-06-16T12:08:19Z
DOC: Fixed repetition
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 008b5560a4645..fb616c5267e3c 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -97,8 +97,7 @@ in order to have a valid result. Correlation ~~~~~~~~~~~ -Several methods for computing correlations are provided. Several kinds of -correlation methods are provided: +Several methods for computing correlations are provided: .. csv-table:: :header: "Method name", "Description"
Small fix, avoiding repeating a sentence.
https://api.github.com/repos/pandas-dev/pandas/pulls/6832
2014-04-07T09:08:24Z
2014-04-07T10:02:06Z
2014-04-07T10:02:06Z
2014-07-16T09:00:53Z
CLN apply pep257 via docformatter
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index bff6eb1f95abc..6eb26fc478bb7 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -1,6 +1,4 @@ -""" -compat -====== +"""compat. Cross-compatible functions for Python 2 and 3. @@ -26,6 +24,7 @@ Other items: * OrderedDefaultDict + """ # pylint disable=W0611 import functools @@ -119,10 +118,11 @@ def bytes_to_str(b, encoding='ascii'): def iteritems(obj, **kwargs): - """replacement for six's iteritems for Python2/3 compat - uses 'iteritems' if available and otherwise uses 'items'. + """replacement for six's iteritems for Python2/3 compat uses 'iteritems' if + available and otherwise uses 'items'. + + Passes kwargs to method. - Passes kwargs to method. """ func = getattr(obj, "iteritems", None) if not func: @@ -161,6 +161,7 @@ class to receive bound method Returns ------- None + """ # only python 2 has bound/unbound method issue if not PY3: @@ -248,7 +249,7 @@ def wrapper(cls): class _OrderedDict(dict): - """Dictionary that remembers insertion order""" + """Dictionary that remembers insertion order.""" # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. @@ -262,9 +263,12 @@ class _OrderedDict(dict): # KEY]. def __init__(self, *args, **kwds): - """Initialize an ordered dictionary. Signature is the same as for - regular dictionaries, but keyword arguments are not recommended - because their insertion order is arbitrary. + """Initialize an ordered dictionary. + + Signature is the same as for regular dictionaries, but keyword + arguments are not recommended because their insertion order is + arbitrary. 
+ """ if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) @@ -446,7 +450,7 @@ def __repr__(self, _repr_running={}): del _repr_running[call_key] def __reduce__(self): - """Return state information for pickling""" + """Return state information for pickling.""" items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): @@ -509,9 +513,9 @@ def viewitems(self): class _Counter(dict): - """Dict subclass for counting hashable objects. Sometimes called a bag - or multiset. Elements are stored as dictionary keys and their counts - are stored as dictionary values. + """Dict subclass for counting hashable objects. Sometimes called a bag or + multiset. Elements are stored as dictionary keys and their counts are + stored as dictionary values. >>> Counter('zyzygy') Counter({'y': 3, 'z': 2, 'g': 1}) @@ -598,14 +602,13 @@ def update(self, iterable=None, **kwds): self.update(kwds) def copy(self): - """Like dict.copy() but returns a Counter instance instead of a dict. - """ + """Like dict.copy() but returns a Counter instance instead of a + dict.""" return Counter(self) def __delitem__(self, elem): """Like dict.__delitem__() but does not raise KeyError for missing - values. - """ + values.""" if elem in self: dict.__delitem__(self, elem) diff --git a/pandas/compat/chainmap_impl.py b/pandas/compat/chainmap_impl.py index 92d2424057f83..824c3e94090cc 100644 --- a/pandas/compat/chainmap_impl.py +++ b/pandas/compat/chainmap_impl.py @@ -7,7 +7,8 @@ def recursive_repr(fillvalue='...'): - 'Decorator to make a repr function return fillvalue for a recursive call' + """Decorator to make a repr function return fillvalue for a recursive + call.""" def decorating_function(user_function): repr_running = set() @@ -33,8 +34,8 @@ def wrapper(self): class ChainMap(MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together - to create a single, updateable view. 
+ """A ChainMap groups multiple dicts (or other mappings) together to create + a single, updateable view. The underlying mappings are stored in a list. That list is public and can accessed or updated using the *maps* attribute. There is no other state. @@ -43,7 +44,7 @@ class ChainMap(MutableMapping): In contrast, writes, updates, and deletions only operate on the first mapping. - ''' + """ def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. @@ -85,7 +86,7 @@ def __repr__(self): @classmethod def fromkeys(cls, iterable, *args): - 'Create a ChainMap with a single dict created from the iterable.' + """Create a ChainMap with a single dict created from the iterable.""" return cls(dict.fromkeys(iterable, *args)) def copy(self): @@ -95,10 +96,11 @@ def copy(self): __copy__ = copy def new_child(self, m=None): # like Django's Context.push() - ''' - New ChainMap with a new map followed by all previous maps. If no - map is provided, an empty dict is used. - ''' + """New ChainMap with a new map followed by all previous maps. + + If no map is provided, an empty dict is used. + + """ if m is None: m = {} return self.__class__(m, *self.maps) @@ -118,7 +120,11 @@ def __delitem__(self, key): raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): - 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + """Remove and return an item pair from maps[0]. + + Raise KeyError is maps[0] is empty. + + """ try: return self.maps[0].popitem() except KeyError: diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 03b45336833d3..232c2ee092d3a 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -59,7 +59,7 @@ class Unpickler(pkl.Unpickler): def load(fh, encoding=None, compat=False, is_verbose=False): - """load a pickle, with a provided encoding + """load a pickle, with a provided encoding. 
if compat is True: fake the old class hierarchy @@ -71,6 +71,7 @@ def load(fh, encoding=None, compat=False, is_verbose=False): encoding: an optional encoding compat: provide Series compatibility mode, boolean, default False is_verbose: show exception output + """ try: diff --git a/pandas/compat/scipy.py b/pandas/compat/scipy.py index 81601ffe25609..069e03bab6db4 100644 --- a/pandas/compat/scipy.py +++ b/pandas/compat/scipy.py @@ -1,6 +1,5 @@ -""" -Shipping functions from SciPy to reduce dependency on having SciPy installed -""" +"""Shipping functions from SciPy to reduce dependency on having SciPy +installed.""" from pandas.compat import range, lrange import numpy as np @@ -89,8 +88,7 @@ def _interpolate(a, b, fraction): def rankdata(a): - """ - Ranks the data, dealing with ties appropriately. + """Ranks the data, dealing with ties appropriately. Equal values are assigned a rank that is the average of the ranks that would have been otherwise assigned to all of the values within that set. @@ -131,8 +129,7 @@ def rankdata(a): def fastsort(a): - """ - Sort an array and provide the argsort. + """Sort an array and provide the argsort. Parameters ---------- diff --git a/pandas/computation/align.py b/pandas/computation/align.py index 2e0845bddf7e2..ae0f4ddd165fa 100644 --- a/pandas/computation/align.py +++ b/pandas/computation/align.py @@ -1,5 +1,4 @@ -"""Core eval alignment algorithms -""" +"""Core eval alignment algorithms.""" import warnings from functools import partial, wraps @@ -117,7 +116,7 @@ def _align_core(terms): def _align(terms): - """Align a set of terms""" + """Align a set of terms.""" try: # flatten the parse tree (a nested list, really) terms = list(com.flatten(terms)) @@ -138,7 +137,8 @@ def _align(terms): def _reconstruct_object(typ, obj, axes, dtype): - """Reconstruct an object given its type, raw value, and possibly empty + """Reconstruct an object given its type, raw value, and possibly empty. + (None) axes. 
Parameters @@ -155,6 +155,7 @@ def _reconstruct_object(typ, obj, axes, dtype): ret : typ An object of type ``typ`` with the value `obj` and possible axes `axes`. + """ try: typ = typ.type diff --git a/pandas/computation/common.py b/pandas/computation/common.py index 105cc497a4207..84c0c5ae6a083 100644 --- a/pandas/computation/common.py +++ b/pandas/computation/common.py @@ -4,15 +4,15 @@ def _ensure_decoded(s): - """ if we have bytes, decode them to unicode """ + """if we have bytes, decode them to unicode.""" if isinstance(s, (np.bytes_, bytes)): s = s.decode(pd.get_option('display.encoding')) return s def _result_type_many(*arrays_and_dtypes): - """ wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) - argument limit """ + """wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) + argument limit.""" try: return np.result_type(*arrays_and_dtypes) except ValueError: diff --git a/pandas/computation/engines.py b/pandas/computation/engines.py index 58b822af546c8..5d4cc725015be 100644 --- a/pandas/computation/engines.py +++ b/pandas/computation/engines.py @@ -51,11 +51,12 @@ def convert(self): """Convert an expression for evaluation. Defaults to return the expression as a string. + """ return com.pprint_thing(self.expr) def evaluate(self): - """Run the engine on the expression + """Run the engine on the expression. This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. @@ -64,6 +65,7 @@ def evaluate(self): ------- obj : object The result of the passed expression. + """ if not self._is_aligned: self.result_type, self.aligned_axes = _align(self.expr.terms) @@ -90,13 +92,14 @@ def _evaluate(self): Notes ----- Must be implemented by subclasses. 
+ """ pass class NumExprEngine(AbstractEngine): - """NumExpr engine class""" + """NumExpr engine class.""" has_neg_frac = True def __init__(self, expr): @@ -131,6 +134,7 @@ class PythonEngine(AbstractEngine): """Evaluate an expression in Python space. Mostly for testing purposes. + """ has_neg_frac = False diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py index 82c68fb10e7d6..d423c3963ec5e 100644 --- a/pandas/computation/eval.py +++ b/pandas/computation/eval.py @@ -1,7 +1,6 @@ #!/usr/bin/env python -"""Top level ``eval`` module. -""" +"""Top level ``eval`` module.""" import tokenize from pandas.core import common as com @@ -25,6 +24,7 @@ def _check_engine(engine): * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist + """ if engine not in _engines: raise KeyError('Invalid engine {0!r} passed, valid engines are' @@ -58,6 +58,7 @@ def _check_parser(parser): ------ KeyError * If an invalid parser is passed + """ if parser not in _parsers: raise KeyError('Invalid parser {0!r} passed, valid parsers are' @@ -74,7 +75,7 @@ def _check_resolvers(resolvers): def _check_expression(expr): - """Make sure an expression is not an empty string + """Make sure an expression is not an empty string. Parameters ---------- @@ -85,6 +86,7 @@ def _check_expression(expr): ------ ValueError * If expr is an empty string + """ if not expr: raise ValueError("expr cannot be an empty string") @@ -112,6 +114,7 @@ def _convert_expression(expr): ------ ValueError * If the expression is empty. 
+ """ s = com.pprint_thing(expr) _check_expression(s) @@ -213,6 +216,7 @@ def eval(expr, parser='pandas', engine='numexpr', truediv=True, -------- pandas.DataFrame.query pandas.DataFrame.eval + """ expr = _convert_expression(expr) _check_engine(engine) diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py index 353c58c23febd..13beaa90851f4 100644 --- a/pandas/computation/expr.py +++ b/pandas/computation/expr.py @@ -31,6 +31,7 @@ def tokenize_string(source): ---------- source : str A Python source code string + """ line_reader = StringIO(source).readline for toknum, tokval, _, _, _ in tokenize.generate_tokens(line_reader): @@ -68,6 +69,7 @@ def _replace_booleans(tok): ------- t : tuple of int, str Either the input or token or the replacement values + """ toknum, tokval = tok if toknum == tokenize.OP: @@ -97,6 +99,7 @@ def _replace_locals(tok): This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. + """ toknum, tokval = tok if toknum == tokenize.OP and tokval == '@': @@ -106,7 +109,7 @@ def _replace_locals(tok): def _preparse(source, f=compose(_replace_locals, _replace_booleans, _rewrite_assign)): - """Compose a collection of tokenization functions + """Compose a collection of tokenization functions. Parameters ---------- @@ -128,6 +131,7 @@ def _preparse(source, f=compose(_replace_locals, _replace_booleans, The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string. 
+ """ assert callable(f), 'f must be callable' return tokenize.untokenize(lmap(f, tokenize_string(source))) @@ -197,8 +201,7 @@ def _filter_nodes(superclass, all_nodes=_all_nodes): def _node_not_implemented(node_name, cls): """Return a function that raises a NotImplementedError with a passed node - name. - """ + name.""" def f(self, *args, **kwargs): raise NotImplementedError("{0!r} nodes are not " @@ -213,6 +216,7 @@ def disallow(nodes): Returns ------- disallowed : callable + """ def disallowed(cls): cls.unsupported_nodes = () @@ -231,6 +235,7 @@ def _op_maker(op_class, op_symbol): Returns ------- f : callable + """ def f(self, node, *args, **kwargs): @@ -240,6 +245,7 @@ def f(self, node, *args, **kwargs): Returns ------- f : callable + """ return partial(op_class, op_symbol, *args, **kwargs) return f @@ -276,6 +282,7 @@ class BaseExprVisitor(ast.NodeVisitor): engine : str parser : str preparser : callable + """ const_type = Constant term_type = Term @@ -435,7 +442,7 @@ def visit_List(self, node, **kwargs): visit_Tuple = visit_List def visit_Index(self, node, **kwargs): - """ df.index[4] """ + """df.index[4]""" return self.visit(node.value) def visit_Subscript(self, node, **kwargs): @@ -455,7 +462,7 @@ def visit_Subscript(self, node, **kwargs): return self.term_type(name, env=self.env) def visit_Slice(self, node, **kwargs): - """ df.index[slice(4,6)] """ + """df.index[slice(4,6)]""" lower = node.lower if lower is not None: lower = self.visit(lower).value @@ -469,8 +476,7 @@ def visit_Slice(self, node, **kwargs): return slice(lower, upper, step) def visit_Assign(self, node, **kwargs): - """ - support a single assignment node, like + """support a single assignment node, like. 
c = a + b @@ -624,6 +630,7 @@ class Expr(StringMixin): env : Scope, optional, default None truediv : bool, optional, default True level : int, optional, default 2 + """ def __init__(self, expr, engine='numexpr', parser='pandas', env=None, @@ -650,12 +657,12 @@ def __len__(self): return len(self.expr) def parse(self): - """Parse an expression""" + """Parse an expression.""" return self._visitor.visit(self.expr) @property def names(self): - """Get the names in an expression""" + """Get the names in an expression.""" if is_term(self.terms): return frozenset([self.terms.name]) return frozenset(term.name for term in com.flatten(self.terms)) diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py index 128aa5bf2b511..ab0c425e62030 100644 --- a/pandas/computation/expressions.py +++ b/pandas/computation/expressions.py @@ -1,6 +1,4 @@ -""" -Expressions ------------ +"""Expressions. Offer fast expression evaluation through numexpr @@ -58,14 +56,14 @@ def set_numexpr_threads(n=None): def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs): - """ standard evaluation """ + """standard evaluation.""" if _TEST_MODE: _store_test_result(False) return op(a, b) def _can_use_numexpr(op, op_str, a, b, dtype_check): - """ return a boolean if we WILL be using numexpr """ + """return a boolean if we WILL be using numexpr.""" if op_str is not None: # required min elements (otherwise we are adding overhead) @@ -170,20 +168,21 @@ def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/', def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, **eval_kwargs): - """ evaluate and return the expression of the op on a and b - - Parameters - ---------- - - op : the actual operand - op_str: the string version of the op - a : left operand - b : right operand - raise_on_error : pass the error to the higher level if indicated - (default is False), otherwise evaluate the op with and - return the results - use_numexpr : 
whether to try to use numexpr (default True) - """ + """evaluate and return the expression of the op on a and b. + + Parameters + ---------- + + op : the actual operand + op_str: the string version of the op + a : left operand + b : right operand + raise_on_error : pass the error to the higher level if indicated + (default is False), otherwise evaluate the op with and + return the results + use_numexpr : whether to try to use numexpr (default True) + + """ _bool_arith_check(op_str, a, b) if use_numexpr: return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, @@ -192,19 +191,20 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, def where(cond, a, b, raise_on_error=False, use_numexpr=True): - """ evaluate the where condition cond on a and b + """evaluate the where condition cond on a and b. - Parameters - ---------- + Parameters + ---------- - cond : a boolean array - a : return if cond is True - b : return if cond is False - raise_on_error : pass the error to the higher level if indicated - (default is False), otherwise evaluate the op with and - return the results - use_numexpr : whether to try to use numexpr (default True) - """ + cond : a boolean array + a : return if cond is True + b : return if cond is False + raise_on_error : pass the error to the higher level if indicated + (default is False), otherwise evaluate the op with and + return the results + use_numexpr : whether to try to use numexpr (default True) + + """ if use_numexpr: return _where(cond, a, b, raise_on_error=raise_on_error) @@ -212,10 +212,11 @@ def where(cond, a, b, raise_on_error=False, use_numexpr=True): def set_test_mode(v=True): - """ - Keeps track of whether numexpr was used. Stores an additional ``True`` - for every successful use of evaluate with numexpr since the last - ``get_test_result`` + """Keeps track of whether numexpr was used. 
+ + Stores an additional ``True`` for every successful use of evaluate + with numexpr since the last ``get_test_result`` + """ global _TEST_MODE, _TEST_RESULT _TEST_MODE = v @@ -229,7 +230,7 @@ def _store_test_result(used_numexpr): def get_test_result(): - """get test result and reset test_results""" + """get test result and reset test_results.""" global _TEST_RESULT res = _TEST_RESULT _TEST_RESULT = [] diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py index 1f57c459149ad..9cd514c5cc35a 100644 --- a/pandas/computation/ops.py +++ b/pandas/computation/ops.py @@ -1,5 +1,4 @@ -"""Operator classes for eval. -""" +"""Operator classes for eval.""" import re import operator as op @@ -169,8 +168,7 @@ def name(self): class Op(StringMixin): - """Hold an operator of arbitrary arity - """ + """Hold an operator of arbitrary arity.""" def __init__(self, op, operands, *args, **kwargs): self.op = _bool_op_map.get(op, op) @@ -221,8 +219,7 @@ def is_datetime(self): def _in(x, y): """Compute the vectorized membership of ``x in y`` if possible, otherwise - use Python. - """ + use Python.""" try: return x.isin(y) except AttributeError: @@ -236,8 +233,7 @@ def _in(x, y): def _not_in(x, y): """Compute the vectorized membership of ``x not in y`` if possible, - otherwise use Python. - """ + otherwise use Python.""" try: return ~x.isin(y) except AttributeError: @@ -282,6 +278,7 @@ def _cast_inplace(terms, dtype): The expression that should cast. dtype : str or numpy.dtype The dtype to cast to. + """ dt = np.dtype(dtype) for term in terms: @@ -298,13 +295,14 @@ def is_term(obj): class BinOp(Op): - """Hold a binary operator and its operands + """Hold a binary operator and its operands. Parameters ---------- op : str left : Term or Op right : Term or Op + """ def __init__(self, op, lhs, rhs, **kwargs): @@ -335,6 +333,7 @@ def __call__(self, env): ------- object The result of an evaluated expression. 
+ """ # handle truediv if self.op == '/' and env.scope['truediv']: @@ -384,8 +383,7 @@ def evaluate(self, env, engine, parser, term_type, eval_in_python): return term_type(name, env=env) def convert_values(self): - """Convert datetimes to a comparable value in an expression. - """ + """Convert datetimes to a comparable value in an expression.""" def stringify(value): if self.encoding is not None: encoder = partial(com.pprint_thing_encoded, @@ -437,6 +435,7 @@ class Div(BinOp): truediv : bool Whether or not to use true division. With Python 3 this happens regardless of the value of ``truediv``. + """ def __init__(self, lhs, rhs, truediv, *args, **kwargs): @@ -459,7 +458,7 @@ def __init__(self, lhs, rhs, truediv, *args, **kwargs): class UnaryOp(Op): - """Hold a unary operator and its operands + """Hold a unary operator and its operands. Parameters ---------- @@ -472,6 +471,7 @@ class UnaryOp(Op): ------ ValueError * If no function associated with the passed operator token is found. + """ def __init__(self, op, operand): diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py index 8fc842d958075..ed2c877794428 100644 --- a/pandas/computation/pytables.py +++ b/pandas/computation/pytables.py @@ -1,4 +1,4 @@ -""" manage PyTables query interface via Expressions """ +"""manage PyTables query interface via Expressions.""" import ast import time @@ -85,7 +85,7 @@ def _disallow_scalar_only_bool_ops(self): def prune(self, klass): def pr(left, right): - """ create and return a new specialized BinOp from myself """ + """create and return a new specialized BinOp from myself.""" if left is None: return right @@ -128,7 +128,7 @@ def pr(left, right): return res def conform(self, rhs): - """ inplace conform rhs """ + """inplace conform rhs.""" if not com.is_list_like(rhs): rhs = [rhs] if hasattr(self.rhs, 'ravel'): @@ -137,28 +137,28 @@ def conform(self, rhs): @property def is_valid(self): - """ return True if this is a valid field """ + """return True if this is 
a valid field.""" return self.lhs in self.queryables @property def is_in_table(self): - """ return True if this is a valid column name for generation (e.g. an - actual column in the table) """ + """return True if this is a valid column name for generation (e.g. an + actual column in the table)""" return self.queryables.get(self.lhs) is not None @property def kind(self): - """ the kind of my field """ + """the kind of my field.""" return self.queryables.get(self.lhs) def generate(self, v): - """ create and return the op string for this TermValue """ + """create and return the op string for this TermValue.""" val = v.tostring(self.encoding) return "(%s %s %s)" % (self.lhs, self.op, val) def convert_value(self, v): - """ convert the expression that is in the term to something that is - accepted by pytables """ + """convert the expression that is in the term to something that is + accepted by pytables.""" def stringify(value): if self.encoding is not None: @@ -216,7 +216,7 @@ def __unicode__(self): "[{1}]".format(self.filter[0], self.filter[1])) def invert(self): - """ invert the filter """ + """invert the filter.""" if self.filter is not None: f = list(self.filter) f[1] = self.generate_filter_op(invert=True) @@ -224,7 +224,7 @@ def invert(self): return self def format(self): - """ return the actual filter format """ + """return the actual filter format.""" return [self.filter] def evaluate(self): @@ -287,7 +287,7 @@ def __unicode__(self): return com.pprint_thing("[Condition : [{0}]]".format(self.condition)) def invert(self): - """ invert the condition """ + """invert the condition.""" # if self.condition is not None: # self.condition = "~(%s)" % self.condition # return self @@ -295,7 +295,7 @@ def invert(self): "passing to numexpr") def format(self): - """ return the actual ne format """ + """return the actual ne format.""" return self.condition def evaluate(self): @@ -438,7 +438,7 @@ def _rewrite_membership_op(self, node, left, right): class Expr(expr.Expr): - """ 
hold a pytables like expression, comprised of possibly multiple 'terms' + """hold a pytables like expression, comprised of possibly multiple 'terms'. Parameters ---------- @@ -463,6 +463,7 @@ class Expr(expr.Expr): '(index>df.index[3] & index<=df.index[6]) | string="bar"' "ts>=Timestamp('2012-02-01')" "major_axis>=20130101" + """ def __init__(self, where, op=None, value=None, queryables=None, @@ -504,7 +505,7 @@ def __init__(self, where, op=None, value=None, queryables=None, self.terms = self.parse() def parse_back_compat(self, w, op=None, value=None): - """ allow backward compatibility for passed arguments """ + """allow backward compatibility for passed arguments.""" if isinstance(w, dict): w, op, value = w.get('field'), w.get('op'), w.get('value') @@ -561,7 +562,7 @@ def __unicode__(self): return com.pprint_thing(self.expr) def evaluate(self): - """ create and return the numexpr condition and filter """ + """create and return the numexpr condition and filter.""" try: self.condition = self.terms.prune(ConditionBinOp) @@ -579,7 +580,7 @@ def evaluate(self): class TermValue(object): - """ hold a term value the we use to construct a condition/filter """ + """hold a term value the we use to construct a condition/filter.""" def __init__(self, value, converted, kind): self.value = value @@ -587,8 +588,7 @@ def __init__(self, value, converted, kind): self.kind = kind def tostring(self, encoding): - """ quote the string if not encoded - else encode and return """ + """quote the string if not encoded else encode and return.""" if self.kind == u('string'): if encoding is not None: return self.converted diff --git a/pandas/computation/scope.py b/pandas/computation/scope.py index 004d8d39d5e82..c3ac7dc83f7fd 100644 --- a/pandas/computation/scope.py +++ b/pandas/computation/scope.py @@ -1,5 +1,4 @@ -"""Module for scope operations -""" +"""Module for scope operations.""" import sys import operator @@ -24,8 +23,10 @@ def _ensure_scope(level, global_dict=None, local_dict=None, 
resolvers=(), def _replacer(x): - """Replace a number with its hexadecimal representation. Used to tag - temporary variables with their calling scope's id. + """Replace a number with its hexadecimal representation. + + Used to tag temporary variables with their calling scope's id. + """ # get the hex repr of the binary char and remove 0x and pad by pad_size # zeros @@ -57,7 +58,7 @@ def _raw_hex_id(obj): def _get_pretty_string(obj): - """Return a prettier version of obj + """Return a prettier version of obj. Parameters ---------- @@ -68,6 +69,7 @@ def _get_pretty_string(obj): ------- s : str Pretty print object repr + """ sio = StringIO() pprint.pprint(obj, stream=sio) @@ -93,6 +95,7 @@ class Scope(StringMixin): scope : DeepChainMap target : object temps : dict + """ __slots__ = 'level', 'scope', 'target', 'temps' @@ -147,11 +150,12 @@ def has_resolvers(self): Returns ------- hr : bool + """ return bool(len(self.resolvers)) def resolve(self, key, is_local): - """Resolve a variable name in a possibly local context + """Resolve a variable name in a possibly local context. Parameters ---------- @@ -165,6 +169,7 @@ def resolve(self, key, is_local): ------- value : object The value of a particular variable + """ try: # only look for locals in outer scope @@ -199,6 +204,7 @@ def swapkey(self, old_key, new_key, new_value=None): New variable name to replace `old_key` with new_value : object Value to be replaced along with the possible renaming + """ if self.has_resolvers: maps = self.resolvers.maps + self.scope.maps @@ -222,6 +228,7 @@ def _get_vars(self, stack, scopes): scopes : sequence of strings A sequence containing valid stack frame attribute names that evaluate to a dictionary. 
For example, ('locals', 'globals') + """ variables = itertools.product(scopes, stack) for scope, (frame, _, _, _, _, _) in variables: @@ -240,6 +247,7 @@ def update(self, level): Parameters ---------- level : int or None, optional, default None + """ sl = level + 1 @@ -265,6 +273,7 @@ def add_tmp(self, value): ------- name : basestring The name of the temporary variable created. + """ name = '{0}_{1}_{2}'.format(type(value).__name__, self.ntemps, _raw_hex_id(self)) @@ -278,18 +287,19 @@ def add_tmp(self, value): return name def remove_tmp(self, name): - """Remove a temporary variable from this scope + """Remove a temporary variable from this scope. Parameters ---------- name : str The name of a temporary to be removed + """ del self.temps[name] @property def ntemps(self): - """The number of temporary variables in this scope""" + """The number of temporary variables in this scope.""" return len(self.temps) @property @@ -301,6 +311,7 @@ def full_scope(self): ------- vars : DeepChainMap All variables in this scope. + """ maps = [self.temps] + self.resolvers.maps + self.scope.maps return DeepChainMap(*maps) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e2ef178c62e71..f7457f2dbb3b1 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1,6 +1,8 @@ -""" -Generic data algorithms. This module is experimental at the moment and not -intended for public consumption +"""Generic data algorithms. + +This module is experimental at the moment and not intended for public +consumption + """ from __future__ import division from warnings import warn @@ -13,8 +15,7 @@ from pandas.compat import filter, string_types def match(to_match, values, na_sentinel=-1): - """ - Compute locations of to_match into values + """Compute locations of to_match into values. 
Parameters ---------- @@ -31,6 +32,7 @@ def match(to_match, values, na_sentinel=-1): Returns ------- match : ndarray of integers + """ values = com._asarray_tuplesafe(values) if issubclass(values.dtype.type, string_types): @@ -50,9 +52,8 @@ def match(to_match, values, na_sentinel=-1): def unique(values): - """ - Compute unique values (not necessarily sorted) efficiently from input array - of values + """Compute unique values (not necessarily sorted) efficiently from input + array of values. Parameters ---------- @@ -61,6 +62,7 @@ def unique(values): Returns ------- uniques + """ values = com._asarray_tuplesafe(values) f = lambda htype, caster: _unique_generic(values, htype, caster) @@ -95,8 +97,7 @@ def _unique_generic(values, table_type, type_caster): def factorize(values, sort=False, order=None, na_sentinel=-1): - """ - Encode input values as an enumerated type or categorical variable + """Encode input values as an enumerated type or categorical variable. Parameters ---------- @@ -114,6 +115,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): uniques : the unique values note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex + """ from pandas.tseries.period import PeriodIndex vals = np.asarray(values) @@ -267,9 +269,7 @@ def mode(values): def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): - """ - - """ + """""" if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, @@ -390,9 +390,7 @@ def _get_data_algo(values, func_map): def group_position(*args): - """ - Get group position - """ + """Get group position.""" from collections import defaultdict table = defaultdict(int) diff --git a/pandas/core/array.py b/pandas/core/array.py index 495f231921a19..56dcaacec4d2b 100644 --- a/pandas/core/array.py +++ b/pandas/core/array.py @@ -1,6 +1,4 @@ -""" -Isolate pandas's exposure to NumPy -""" +"""Isolate pandas's 
exposure to NumPy.""" import numpy as np diff --git a/pandas/core/base.py b/pandas/core/base.py index ec6a4ffbcefbb..ab807ea801eb7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1,6 +1,4 @@ -""" -Base and utility classes for pandas objects. -""" +"""Base and utility classes for pandas objects.""" from pandas import compat import numpy as np from pandas.core import common as com @@ -11,6 +9,7 @@ class StringMixin(object): method. Handles Python2/3 compatibility transparently. + """ # side note - this could be made into a metaclass if more than one # object needs @@ -22,11 +21,11 @@ def __unicode__(self): raise NotImplementedError def __str__(self): - """ - Return a string representation for a particular Object + """Return a string representation for a particular Object. Invoked by str(df) in both py2/py3. Yields Bytestring in Py2, Unicode String in py3. + """ if compat.PY3: @@ -34,11 +33,11 @@ def __str__(self): return self.__bytes__() def __bytes__(self): - """ - Return a string representation for a particular object. + """Return a string representation for a particular object. Invoked by bytes(obj) in py3 only. Yields a bytestring in both py2/py3. + """ from pandas.core.config import get_option @@ -46,17 +45,17 @@ def __bytes__(self): return self.__unicode__().encode(encoding, 'replace') def __repr__(self): - """ - Return a string representation for a particular object. + """Return a string representation for a particular object. Yields Bytestring in Py2, Unicode String in py3. + """ return str(self) class PandasObject(StringMixin): - """baseclass for various pandas objects""" + """baseclass for various pandas objects.""" @property def _constructor(self): @@ -64,29 +63,29 @@ def _constructor(self): return self.__class__ def __unicode__(self): - """ - Return a string representation for a particular object. + """Return a string representation for a particular object. Invoked by unicode(obj) in py2 only. Yields a Unicode String in both py2/py3. 
+ """ # Should be overwritten by base classes return object.__repr__(self) def _local_dir(self): - """ provide addtional __dir__ for this object """ + """provide addtional __dir__ for this object.""" return [] def __dir__(self): - """ - Provide method name lookup and completion - Only provide 'public' methods - """ + """Provide method name lookup and completion Only provide 'public' + methods.""" return list(sorted(list(set(dir(type(self)) + self._local_dir())))) def _reset_cache(self, key=None): - """ - Reset cached properties. If ``key`` is passed, only clears that key. + """Reset cached properties. + + If ``key`` is passed, only clears that key. + """ if getattr(self, '_cache', None) is None: return @@ -190,11 +189,11 @@ def values(self): return arr def __unicode__(self): - """ - Return a string representation for this object. + """Return a string representation for this object. Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + """ prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'), quote_strings=True) @@ -212,7 +211,8 @@ def f(self): return property(f) class IndexOpsMixin(object): - """ common ops mixin to support a unified inteface / docs for Series / Index """ + """common ops mixin to support a unified inteface / docs for Series / + Index.""" def _is_allowed_index_op(self, name): if not self._allow_index_ops: @@ -260,12 +260,12 @@ def _wrap_access_object(self, obj): return obj def max(self): - """ The maximum value of the object """ + """The maximum value of the object.""" self._is_allowed_index_op('max') return self.values.max() def min(self): - """ The minimum value of the object """ + """The minimum value of the object.""" self._is_allowed_index_op('min') return self.values.min() @@ -298,24 +298,24 @@ def value_counts(self, normalize=False, sort=True, ascending=False, normalize=normalize, bins=bins) def unique(self): - """ - Return array of unique values in the object. Significantly faster than - numpy.unique. 
Includes NA values. + """Return array of unique values in the object. Significantly faster + than numpy.unique. Includes NA values. Returns ------- uniques : ndarray + """ from pandas.core.nanops import unique1d return unique1d(self.values) def nunique(self): - """ - Return count of unique elements in the object. Excludes NA values. + """Return count of unique elements in the object. Excludes NA values. Returns ------- nunique : int + """ return len(self.value_counts()) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 23fccc3719278..5c6a72b0f0391 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -203,8 +203,7 @@ def __getitem__(self, key): return Categorical(self.labels[key], self.levels) def equals(self, other): - """ - Returns True if categorical arrays are equal + """Returns True if categorical arrays are equal. Parameters ---------- @@ -213,6 +212,7 @@ def equals(self, other): Returns ------- are_equal : boolean + """ if not isinstance(other, Categorical): return False @@ -221,9 +221,7 @@ def equals(self, other): np.array_equal(self.labels, other.labels)) def describe(self): - """ - Returns a dataframe with frequency and counts by level. - """ + """Returns a dataframe with frequency and counts by level.""" # Hack? 
from pandas.core.frame import DataFrame grouped = DataFrame(self.labels).groupby(0) diff --git a/pandas/core/common.py b/pandas/core/common.py index 18a3dba1a44a4..030eef1c502c1 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1,6 +1,4 @@ -""" -Misc tools for implementing data structures -""" +"""Misc tools for implementing data structures.""" import re import collections @@ -100,6 +98,7 @@ class to receive bound method Returns ------- None + """ # only python 2 has bound/unbound method issue if not compat.PY3: @@ -125,6 +124,7 @@ def isnull(obj): See also -------- pandas.notnull: boolean inverse of pandas.isnull + """ return _isnull(obj) @@ -340,6 +340,7 @@ def flatten(l): Returns ------- flattened : generator + """ for el in l: if _iterable_not_string(el): @@ -350,10 +351,8 @@ def flatten(l): def mask_missing(arr, values_to_mask): - """ - Return a masking array of same size/shape as arr - with entries equaling any member of values_to_mask set to True - """ + """Return a masking array of same size/shape as arr with entries equaling + any member of values_to_mask set to True.""" if not isinstance(values_to_mask, (list, np.ndarray)): values_to_mask = [values_to_mask] @@ -609,8 +608,7 @@ def func(arr, indexer, out, fill_value=np.nan): def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): - """ - Specialized Cython take which sets NaN values in one pass + """Specialized Cython take which sets NaN values in one pass. Parameters ---------- @@ -635,6 +633,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. 
+ """ if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.int64) @@ -691,9 +690,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): - """ - Specialized Cython take which sets NaN values in one pass - """ + """Specialized Cython take which sets NaN values in one pass.""" if indexer is None or (indexer[0] is None and indexer[1] is None): row_idx = np.arange(arr.shape[0], dtype=np.int64) col_idx = np.arange(arr.shape[1], dtype=np.int64) @@ -823,9 +820,8 @@ def diff(arr, n, axis=0): def _coerce_to_dtypes(result, dtypes): - """ given a dtypes and a result set, coerce the result elements to the - dtypes - """ + """given a dtypes and a result set, coerce the result elements to the + dtypes.""" if len(result) != len(dtypes): raise AssertionError("_coerce_to_dtypes requires equal len arrays") @@ -857,8 +853,8 @@ def conv(r, dtype): def _infer_dtype_from_scalar(val): - """ interpret the dtype from a scalar, upcast floats and ints - return the new value and the dtype """ + """interpret the dtype from a scalar, upcast floats and ints return the new + value and the dtype.""" dtype = np.object_ @@ -977,11 +973,9 @@ def _maybe_promote(dtype, fill_value=np.nan): def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None): - """ a safe version of put mask that (potentially upcasts the result - return the result - if change is not None, then MUTATE the change (and change the dtype) - return a changed flag - """ + """a safe version of put mask that (potentially upcasts the result return + the result if change is not None, then MUTATE the change (and change the + dtype) return a changed flag.""" if mask.any(): @@ -1052,7 +1046,7 @@ def changeit(): def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False): - """ provide explict type promotion and coercion + """provide explict type promotion and coercion. 
Parameters ---------- @@ -1060,6 +1054,7 @@ def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False): fill_value : what we want to fill with dtype : if None, then use the dtype of the values, else coerce to this type copy : if True always make a copy even if no upcast is required + """ if dtype is None: @@ -1168,7 +1163,7 @@ def _possibly_downcast_to_dtype(result, dtype): def _lcd_dtypes(a_dtype, b_dtype): - """ return the lcd dtype to hold these types """ + """return the lcd dtype to hold these types.""" if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype): return _NS_DTYPE @@ -1196,14 +1191,14 @@ def _lcd_dtypes(a_dtype, b_dtype): def _fill_zeros(result, x, y, name, fill): - """ - if this is a reversed op, then flip x,y + """if this is a reversed op, then flip x,y. if we have an integer value (or array in y) and we have 0's, fill them with the fill, return the result mask the nan's from x + """ if fill is not None: @@ -1406,7 +1401,7 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None, method = 'values' def _interp_limit(invalid, limit): - """mask off values that won't be filled since they exceed the limit""" + """mask off values that won't be filled since they exceed the limit.""" all_nans = np.where(invalid)[0] violate = [invalid[x:x + limit + 1] for x in all_nans] violate = np.array([x.all() & (x.size > limit) for x in violate]) @@ -1469,10 +1464,11 @@ def _interp_limit(invalid, limit): def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs): - """ - passed off to scipy.interpolate.interp1d. method is scipy's kind. - Returns an array interpolated at new_x. Add any new methods to - the list in _clean_interp_method + """passed off to scipy.interpolate.interp1d. + + method is scipy's kind. Returns an array interpolated at new_x. 
Add + any new methods to the list in _clean_interp_method + """ try: from scipy import interpolate @@ -1594,7 +1590,7 @@ def _maybe_box_datetimelike(value): def _possibly_convert_objects(values, convert_dates=True, convert_numeric=True, convert_timedeltas=True): - """ if we have an object dtype, try to coerce dates and/or numbers """ + """if we have an object dtype, try to coerce dates and/or numbers.""" # if we have passed in a list or scalar if isinstance(values, (list, tuple)): @@ -1668,7 +1664,7 @@ def _possibly_castable(arr): def _possibly_convert_platform(values): - """ try to do platform conversion, allow ndarray or list here """ + """try to do platform conversion, allow ndarray or list here.""" if isinstance(values, (list, tuple)): values = lib.list_to_object_array(values) @@ -1681,9 +1677,8 @@ def _possibly_convert_platform(values): def _possibly_cast_to_datetime(value, dtype, coerce=False): - """ try to cast the array/value to a datetimelike dtype, converting float - nan to iNaT - """ + """try to cast the array/value to a datetimelike dtype, converting float + nan to iNaT.""" if dtype is not None: if isinstance(dtype, compat.string_types): @@ -1866,9 +1861,10 @@ def rands(n): def adjoin(space, *lists): - """ - Glues together two sets of strings using the amount of space requested. + """Glues together two sets of strings using the amount of space requested. + The idea is to prettify. + """ out_lines = [] newLists = [] @@ -1898,10 +1894,9 @@ def _join_unicode(lines, sep=''): def iterpairs(seq): - """ - Parameters - ---------- - seq: sequence + """Parameters. + +seq: sequence Returns ------- @@ -1911,6 +1906,7 @@ def iterpairs(seq): -------- >>> iterpairs([1, 2, 3, 4]) [(1, 2), (2, 3), (3, 4) + """ # input may not be sliceable seq_it = iter(seq) @@ -1921,10 +1917,11 @@ def iterpairs(seq): def split_ranges(mask): - """ Generates tuples of ranges which cover all True value in mask + """Generates tuples of ranges which cover all True value in mask. 
>>> list(split_ranges([1,0,0,1,0])) [(0, 1), (3, 4)] + """ ranges = [(0, len(mask))] @@ -1961,11 +1958,11 @@ def _long_prod(vals): class groupby(dict): - """ - A simple groupby different from the one in itertools. + """A simple groupby different from the one in itertools. + + Does not require the sequence elements to be sorted by keys, however + it is slower. - Does not require the sequence elements to be sorted by keys, - however it is slower. """ def __init__(self, seq, key=lambda x: x): @@ -1981,10 +1978,8 @@ def __iter__(self): def map_indices_py(arr): - """ - Returns a dictionary with (element, index) pairs for each element in the - given array/list - """ + """Returns a dictionary with (element, index) pairs for each element in the + given array/list.""" return dict([(x, i) for i, x in enumerate(arr)]) @@ -2210,8 +2205,8 @@ def _is_sequence(x): def _astype_nansafe(arr, dtype, copy=True): - """ return a view if copy is False, but - need to be very careful as the result shape could change! """ + """return a view if copy is False, but need to be very careful as the + result shape could change!""" if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) @@ -2310,9 +2305,11 @@ def next(self): def _get_handle(path, mode, encoding=None, compression=None): """Gets file handle for given path and mode. + NOTE: Under Python 3.2, getting a compressed file handle means reading in the entire file, decompressing it and decoding it to ``str`` all at once and then wrapping it in a StringIO. + """ if compression is not None: if encoding is not None and not compat.PY3: @@ -2359,12 +2356,12 @@ def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds): else: class UnicodeReader: - """ - A CSV reader which will iterate over lines in the CSV file "f", + """A CSV reader which will iterate over lines in the CSV file "f", which is encoded in the given encoding. - On Python 3, this is replaced (below) by csv.reader, which handles - unicode. 
+ On Python 3, this is replaced (below) by csv.reader, which + handles unicode. + """ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): @@ -2383,10 +2380,8 @@ def __iter__(self): # pragma: no cover class UnicodeWriter: - """ - A CSV writer which will write rows to CSV file "f", - which is encoded in the given encoding. - """ + """A CSV writer which will write rows to CSV file "f", which is encoded + in the given encoding.""" def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): # Redirect output to a queue @@ -2490,9 +2485,10 @@ class Sentinel(object): def in_interactive_session(): - """ check if we're running in an interactive shell + """check if we're running in an interactive shell. returns True if running under python/ipython interactive shell + """ def check_main(): import __main__ as main @@ -2506,9 +2502,7 @@ def check_main(): def in_qtconsole(): - """ - check if we're inside an IPython qtconsole - """ + """check if we're inside an IPython qtconsole.""" try: ip = get_ipython() front_end = ( @@ -2523,9 +2517,7 @@ def in_qtconsole(): def in_ipnb(): - """ - check if we're inside an IPython Notebook - """ + """check if we're inside an IPython Notebook.""" try: ip = get_ipython() front_end = ( @@ -2540,9 +2532,7 @@ def in_ipnb(): def in_ipython_frontend(): - """ - check if we're inside an an IPython zmq frontend - """ + """check if we're inside an an IPython zmq frontend.""" try: ip = get_ipython() return 'zmq' in str(type(ip)).lower() @@ -2581,11 +2571,11 @@ def in_ipython_frontend(): def _pprint_seq(seq, _nest_lvl=0, **kwds): - """ - internal. pprinter for iterables. you should probably use pprint_thing() + """internal. pprinter for iterables. you should probably use pprint_thing() rather then calling this directly. 
bounds length of printed sequence, depending on options + """ if isinstance(seq, set): fmt = u("set([%s])") @@ -2609,9 +2599,11 @@ def _pprint_seq(seq, _nest_lvl=0, **kwds): def _pprint_dict(seq, _nest_lvl=0, **kwds): - """ - internal. pprinter for iterables. you should probably use pprint_thing() + """internal. + + pprinter for iterables. you should probably use pprint_thing() rather then calling this directly. + """ fmt = u("{%s}") pairs = [] @@ -2632,9 +2624,8 @@ def _pprint_dict(seq, _nest_lvl=0, **kwds): def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False, quote_strings=False): - """ - This function is the sanctioned way of converting objects - to a unicode representation. + """This function is the sanctioned way of converting objects to a unicode + representation. properly handles nested sequences containing unicode strings (unicode(object) does not) @@ -2723,9 +2714,8 @@ def console_encode(object, **kwds): def load(path): # TODO remove in 0.13 - """ - Load pickled pandas object (or any other pickled object) from the specified - file path + """Load pickled pandas object (or any other pickled object) from the + specified file path. Warning: Loading pickled data received from untrusted sources can be unsafe. See: http://docs.python.org/2.7/library/pickle.html @@ -2738,6 +2728,7 @@ def load(path): # TODO remove in 0.13 Returns ------- unpickled : type of object stored in file + """ import warnings warnings.warn("load is deprecated, use read_pickle", FutureWarning) @@ -2746,14 +2737,14 @@ def load(path): # TODO remove in 0.13 def save(obj, path): # TODO remove in 0.13 - """ - Pickle (serialize) object to input file path + """Pickle (serialize) object to input file path. 
Parameters ---------- obj : any object path : string File path + """ import warnings warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning) diff --git a/pandas/core/config.py b/pandas/core/config.py index f2f932e39759a..a98a39393c53c 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -68,7 +68,7 @@ class OptionError(AttributeError, KeyError): """Exception for pandas.options, backwards compatible with KeyError - checks""" + checks.""" # @@ -485,6 +485,7 @@ def _select_options(pat): """returns a list of keys matching `pat` if pat=="all", returns all registered options + """ # short-circuit for exact key @@ -508,19 +509,19 @@ def _get_root(key): def _is_deprecated(key): - """ Returns True if the given option has been deprecated """ + """Returns True if the given option has been deprecated.""" key = key.lower() return key in _deprecated_options def _get_deprecated_option(key): - """ - Retrieves the metadata for a deprecated option, if `key` is deprecated. + """Retrieves the metadata for a deprecated option, if `key` is deprecated. Returns ------- DeprecatedOption (namedtuple) if key is deprecated, None otherwise + """ try: @@ -532,20 +533,21 @@ def _get_deprecated_option(key): def _get_registered_option(key): - """ - Retrieves the option metadata if `key` is a registered option. + """Retrieves the option metadata if `key` is a registered option. Returns ------- RegisteredOption (namedtuple) if key is deprecated, None otherwise + """ return _registered_options.get(key) def _translate_key(key): - """ - if key id deprecated and a replacement key defined, will return the + """if key id deprecated and a replacement key defined, will return the. + replacement key, otherwise returns `key` as - is + """ d = _get_deprecated_option(key) @@ -556,12 +558,12 @@ def _translate_key(key): def _warn_if_deprecated(key): - """ - Checks if `key` is a deprecated option and if so, prints a warning. 
+ """Checks if `key` is a deprecated option and if so, prints a warning. Returns ------- bool - True if `key` is deprecated, False otherwise. + """ d = _get_deprecated_option(key) @@ -583,7 +585,7 @@ def _warn_if_deprecated(key): def _build_option_description(k): - """ Builds a formatted description of a registered option and prints it """ + """Builds a formatted description of a registered option and prints it.""" o = _get_registered_option(k) d = _get_deprecated_option(k) @@ -608,7 +610,7 @@ def _build_option_description(k): def pp_options_list(keys, width=80, _print=False): - """ Builds a concise listing of available options, grouped by prefix """ + """Builds a concise listing of available options, grouped by prefix.""" from textwrap import wrap from itertools import groupby @@ -645,7 +647,7 @@ def pp(name, ks): @contextmanager def config_prefix(prefix): - """contextmanager for multiple invocations of API with a common prefix + """contextmanager for multiple invocations of API with a common prefix. supported API functions: (register / get / set )__option @@ -666,6 +668,7 @@ def config_prefix(prefix): will register options "display.font.color", "display.font.size", set the value of "display.font.size"... and so on. + """ # Note: reset_option relies on set_option, and on key directly @@ -697,16 +700,14 @@ def inner(key, *args, **kwds): # arg in register_option def is_type_factory(_type): - """ + """Parameters. - Parameters - ---------- `_type` - a type to be compared against (e.g. type(x) == `_type`) - Returns - ------- - validator - a function of a single argument x , which returns the - True if type(x) is equal to `_type` + Returns + ------- + validator - a function of a single argument x , which returns the + True if type(x) is equal to `_type` """ @@ -718,16 +719,14 @@ def inner(x): def is_instance_factory(_type): - """ + """Parameters. 
- Parameters - ---------- `_type` - the type to be checked against - Returns - ------- - validator - a function of a single argument x , which returns the - True if x is an instance of `_type` + Returns + ------- + validator - a function of a single argument x , which returns the + True if x is an instance of `_type` """ if isinstance(_type, (tuple, list)): diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 9533c0921e1e3..9589b6a4d492d 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -1,9 +1,8 @@ -""" -This module is imported from the pandas package __init__.py file -in order to ensure that the core.config options registered here will -be available as soon as the user loads the package. if register_option -is invoked inside specific modules, they will not be registered until that -module is imported, which may or may not be a problem. +"""This module is imported from the pandas package __init__.py file in order to +ensure that the core.config options registered here will be available as soon +as the user loads the package. if register_option is invoked inside specific +modules, they will not be registered until that module is imported, which may +or may not be a problem. If you need to make sure options are available even before a certain module is imported, register them here rather then in the module. 
diff --git a/pandas/core/daterange.py b/pandas/core/daterange.py index bdaf546789c39..b59f063ddf3f6 100644 --- a/pandas/core/daterange.py +++ b/pandas/core/daterange.py @@ -10,8 +10,7 @@ class DateRange(Index): - """Deprecated - """ + """Deprecated.""" offset = tzinfo = None @@ -33,7 +32,7 @@ def __new__(cls, start=None, end=None, periods=None, tzinfo=tzinfo, name=name, **kwds) def __setstate__(self, aug_state): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" index_state = aug_state[:1] offset = aug_state[1] diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py index 1fb6ae4225f25..6f18143be43f2 100644 --- a/pandas/core/datetools.py +++ b/pandas/core/datetools.py @@ -1,4 +1,4 @@ -"""A collection of random tools for dealing with dates in Python""" +"""A collection of random tools for dealing with dates in Python.""" from pandas.tseries.tools import * from pandas.tseries.offsets import * diff --git a/pandas/core/format.py b/pandas/core/format.py index 636b3f452a20c..8b3c757834506 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -251,8 +251,7 @@ def _get_formatter(self, i): class DataFrameFormatter(TableFormatter): - """ - Render a DataFrame + """Render a DataFrame. self.to_string() : console-friendly tabular output self.to_html() : html table @@ -304,9 +303,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None, self.columns = frame.columns def _to_str_columns(self): - """ - Render a DataFrame to a list of columns (as lists of strings). - """ + """Render a DataFrame to a list of columns (as lists of strings).""" # may include levels names also str_index = self._get_formatted_index() @@ -425,9 +422,8 @@ def _join_multiline(self, *strcols): def to_latex(self, force_unicode=None, column_format=None, longtable=False): - """ - Render a DataFrame to a LaTeX tabular/longtable environment output. 
- """ + """Render a DataFrame to a LaTeX tabular/longtable environment + output.""" #TODO: column_format is not settable in df.to_latex def get_col_type(dtype): if issubclass(dtype.type, np.number): @@ -520,9 +516,7 @@ def _format_col(self, i): ) def to_html(self, classes=None): - """ - Render a DataFrame to a html table. - """ + """Render a DataFrame to a html table.""" html_renderer = HTMLFormatter(self, classes=classes, max_rows=self.max_rows, max_cols=self.max_cols) @@ -1347,8 +1341,7 @@ def __init__(self, row, col, val, class ExcelFormatter(object): - """ - Class for formatting a DataFrame to a list of ExcelCells, + """Class for formatting a DataFrame to a list of ExcelCells, Parameters ---------- @@ -1372,6 +1365,7 @@ class ExcelFormatter(object): inf_rep : string, default `'inf'` representation for np.inf values (which aren't representable in Excel) A `'-'` sign will be added in front of -inf. + """ def __init__(self, df, na_rep='', float_format=None, cols=None, @@ -1713,9 +1707,7 @@ def _format(x): class FloatArrayFormatter(GenericArrayFormatter): - """ - - """ + """""" def __init__(self, *args, **kwargs): GenericArrayFormatter.__init__(self, *args, **kwargs) @@ -1924,9 +1916,7 @@ def just(x): def _trim_zeros(str_floats, na_rep='NaN'): - """ - Trims zeros and decimal points. - """ + """Trims zeros and decimal points.""" trimmed = str_floats def _cond(values): @@ -1976,9 +1966,10 @@ def _has_names(index): def detect_console_encoding(): - """ - Try to find the most capable encoding supported by the console. + """Try to find the most capable encoding supported by the console. + slighly modified from the way IPython handles the same issue. + """ import locale global _initial_defencoding @@ -2048,10 +2039,10 @@ def get_console_size(): class EngFormatter(object): - """ - Formats float values according to engineering format. + """Formats float values according to engineering format. 
Based on matplotlib.ticker.EngFormatter + """ # The SI engineering prefixes @@ -2140,12 +2131,12 @@ def __call__(self, num): def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False): - """ - Alter default behavior on how float is formatted in DataFrame. - Format float in engineering format. By accuracy, we mean the number of - decimal digits after the floating point. + """Alter default behavior on how float is formatted in DataFrame. Format + float in engineering format. By accuracy, we mean the number of decimal + digits after the floating point. See also EngFormatter. + """ if precision is not None: # pragma: no cover import warnings diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d4185ab6b22a5..806223e65cff7 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1,12 +1,12 @@ -""" -DataFrame ---------- +"""DataFrame. + An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information + """ from __future__ import division # pylint: disable=E1101,E1103 @@ -277,9 +277,10 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, NDFrame.__init__(self, mgr, fastpath=True) def _init_dict(self, data, index, columns, dtype=None): - """ - Segregate Series based on type and coerce into matrices. + """Segregate Series based on type and coerce into matrices. + Needs to handle a lot of exceptional cases. + """ if columns is not None: columns = _ensure_index(columns) @@ -373,9 +374,7 @@ def shape(self): return (len(self.index), len(self.columns)) def _repr_fits_vertical_(self): - """ - Check length against max_rows. 
- """ + """Check length against max_rows.""" max_rows = get_option("display.max_rows") return len(self) <= max_rows @@ -440,11 +439,11 @@ def _info_repr(self): ) def __unicode__(self): - """ - Return a string representation for a particular DataFrame + """Return a string representation for a particular DataFrame. Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + """ buf = StringIO(u("")) if self._info_repr(): @@ -464,9 +463,10 @@ def __unicode__(self): return buf.getvalue() def _repr_html_(self): - """ - Return a html representation for a particular DataFrame. + """Return a html representation for a particular DataFrame. + Mainly for IPython notebook. + """ # ipnb in html repr mode allows scrolling # users strongly prefer to h-scroll a wide HTML table in the browser @@ -498,7 +498,7 @@ def _repr_html_(self): return None def iteritems(self): - """Iterator over (column, series) pairs""" + """Iterator over (column, series) pairs.""" if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) @@ -507,8 +507,7 @@ def iteritems(self): yield k, self.icol(i) def iterrows(self): - """ - Iterate over rows of DataFrame as (index, Series) pairs. + """Iterate over rows of DataFrame as (index, Series) pairs. Notes ----- @@ -527,6 +526,7 @@ def iterrows(self): ------- it : generator A generator that iterates over the rows of the frame. 
+ """ columns = self.columns for k, v in zip(self.index, self.values): @@ -534,10 +534,8 @@ def iterrows(self): yield k, s def itertuples(self, index=True): - """ - Iterate over rows of DataFrame as tuples, with index value - as first element of the tuple - """ + """Iterate over rows of DataFrame as tuples, with index value as first + element of the tuple.""" arrays = [] if index: arrays.append(self.index) @@ -550,12 +548,11 @@ def itertuples(self, index=True): items = iteritems def __len__(self): - """Returns length of info axis, but here we use the index """ + """Returns length of info axis, but here we use the index.""" return len(self.index) def dot(self, other): - """ - Matrix multiplication with DataFrame or Series objects + """Matrix multiplication with DataFrame or Series objects. Parameters ---------- @@ -564,6 +561,7 @@ def dot(self, other): Returns ------- dot_product : DataFrame or Series + """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) @@ -634,8 +632,7 @@ def from_dict(cls, data, orient='columns', dtype=None): return cls(data, index=index, columns=columns, dtype=dtype) def to_dict(self, outtype='dict'): - """ - Convert DataFrame to dictionary. + """Convert DataFrame to dictionary. 
Parameters ---------- @@ -650,6 +647,7 @@ def to_dict(self, outtype='dict'): Returns ------- result : dict like {column -> {index -> value}} + """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " @@ -702,6 +700,7 @@ def to_gbq(self, destination_table, schema=None, col_order=None, InvalidSchema : Raised if the 'schema' parameter does not match the provided DataFrame + """ from pandas.io import gbq @@ -711,8 +710,7 @@ def to_gbq(self, destination_table, schema=None, col_order=None, @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): - """ - Convert structured or record ndarray to DataFrame + """Convert structured or record ndarray to DataFrame. Parameters ---------- @@ -735,6 +733,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, Returns ------- df : DataFrame + """ # Make a copy of the input columns so we can modify it if columns is not None: @@ -838,9 +837,8 @@ def from_records(cls, data, index=None, exclude=None, columns=None, return cls(mgr) def to_records(self, index=True, convert_datetime64=True): - """ - Convert DataFrame to record array. Index will be put in the - 'index' field of the record array if requested + """Convert DataFrame to record array. Index will be put in the 'index' + field of the record array if requested. Parameters ---------- @@ -853,6 +851,7 @@ def to_records(self, index=True, convert_datetime64=True): Returns ------- y : recarray + """ if index: if com.is_datetime64_dtype(self.index) and convert_datetime64: @@ -885,10 +884,9 @@ def to_records(self, index=True, convert_datetime64=True): @classmethod def from_items(cls, items, columns=None, orient='columns'): - """ - Convert (key, value) pairs to DataFrame. The keys will be the axis - index (usually the columns, but depends on the specified - orientation). The values should be arrays or Series. + """Convert (key, value) pairs to DataFrame. 
The keys will be the axis + index (usually the columns, but depends on the specified orientation). + The values should be arrays or Series. Parameters ---------- @@ -905,6 +903,7 @@ def from_items(cls, items, columns=None, orient='columns'): Returns ------- frame : DataFrame + """ keys, values = lzip(*items) @@ -946,8 +945,7 @@ def _from_arrays(cls, arrays, columns, index, dtype=None): def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=False, infer_datetime_format=False): - """ - Read delimited file into DataFrame + """Read delimited file into DataFrame. Parameters ---------- @@ -978,6 +976,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, Returns ------- y : DataFrame + """ from pandas.io.parsers import read_table return read_table(path, header=header, sep=sep, @@ -986,8 +985,7 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): - """ - Convert to SparseDataFrame + """Convert to SparseDataFrame. Parameters ---------- @@ -997,6 +995,7 @@ def to_sparse(self, fill_value=None, kind='block'): Returns ------- y : SparseDataFrame + """ from pandas.core.sparse import SparseDataFrame return SparseDataFrame(self._series, index=self.index, @@ -1004,8 +1003,7 @@ def to_sparse(self, fill_value=None, kind='block'): default_fill_value=fill_value) def to_panel(self): - """ - Transform long (stacked) format (DataFrame) into wide (3D, Panel) + """Transform long (stacked) format (DataFrame) into wide (3D, Panel) format. Currently the index of the DataFrame must be a 2-level MultiIndex. 
This @@ -1014,6 +1012,7 @@ def to_panel(self): Returns ------- panel : Panel + """ from pandas.core.panel import Panel from pandas.core.reshape import block2d_to_blocknd @@ -1150,8 +1149,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf'): - """ - Write DataFrame to a excel sheet + """Write DataFrame to a excel sheet. Parameters ---------- @@ -1202,6 +1200,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', >>> df1.to_excel(writer,'Sheet1') >>> df2.to_excel(writer,'Sheet2') >>> writer.save() + """ from pandas.io.excel import ExcelWriter @@ -1315,8 +1314,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, justify=None, force_unicode=None, bold_rows=True, classes=None, escape=True, max_rows=None, max_cols=None, show_dimensions=False): - """ - Render a DataFrame as an HTML table. + """Render a DataFrame as an HTML table. `to_html`-specific options: @@ -1367,8 +1365,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=True, force_unicode=None, longtable=False): - """ - Render a DataFrame to a tabular environment table. You can splice + """Render a DataFrame to a tabular environment table. You can splice this into a LaTeX document. Requires \\usepackage(booktabs}. `to_latex`-specific options: @@ -1404,8 +1401,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, return formatter.buf.getvalue() def info(self, verbose=True, buf=None, max_cols=None): - """ - Concise summary of a DataFrame. + """Concise summary of a DataFrame. 
Parameters ---------- @@ -1414,6 +1410,7 @@ def info(self, verbose=True, buf=None, max_cols=None): buf : writable buffer, defaults to sys.stdout max_cols : int, default None Determines whether full summary or short summary is printed + """ from pandas.core.format import _put_lines @@ -1475,7 +1472,7 @@ def info(self, verbose=True, buf=None, max_cols=None): _put_lines(buf, lines) def transpose(self): - """Transpose index and columns""" + """Transpose index and columns.""" return super(DataFrame, self).transpose(1, 0) T = property(transpose) @@ -1520,8 +1517,7 @@ def _unpickle_matrix_compat(self, state): # pragma: no cover # Getting and setting elements def get_value(self, index, col, takeable=False): - """ - Quickly retrieve single value at passed column and index + """Quickly retrieve single value at passed column and index. Parameters ---------- @@ -1532,6 +1528,7 @@ def get_value(self, index, col, takeable=False): Returns ------- value : scalar value + """ if takeable is True: @@ -1543,8 +1540,7 @@ def get_value(self, index, col, takeable=False): return engine.get_value(series.values, index) def set_value(self, index, col, value, takeable=False): - """ - Put single value at passed column and index + """Put single value at passed column and index. Parameters ---------- @@ -1558,6 +1554,7 @@ def set_value(self, index, col, value, takeable=False): frame : DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object + """ try: if takeable is True: @@ -1591,10 +1588,10 @@ def _ixs(self, i, axis=0, copy=False): # irow if axis == 0: - """ - Notes - ----- + """Notes. + If slice passed, the resulting data will be a view + """ if isinstance(i, slice): @@ -1615,10 +1612,10 @@ def _ixs(self, i, axis=0, copy=False): # icol else: - """ - Notes - ----- + """Notes. 
+ If slice passed, the resulting data will be a view + """ label = self.columns[i] @@ -1676,7 +1673,7 @@ def __getitem__(self, key): return self._getitem_column(key) def _getitem_column(self, key): - """ return the actual column """ + """return the actual column.""" # get column if self.columns.is_unique: @@ -1809,6 +1806,7 @@ def query(self, expr, **kwargs): >>> df = DataFrame(randn(10, 2), columns=list('ab')) >>> df.query('a > b') >>> df[df.a > df.b] # same result as the previous expression + """ kwargs['level'] = kwargs.pop('level', 0) + 1 res = self.eval(expr, **kwargs) @@ -1855,6 +1853,7 @@ def eval(self, expr, **kwargs): >>> df = DataFrame(randn(10, 2), columns=list('ab')) >>> df.eval('a + b') >>> df.eval('c = a + b') + """ resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 @@ -1874,7 +1873,7 @@ def _box_item_values(self, key, values): return self._box_col_values(values, items) def _box_col_values(self, values, items): - """ provide boxed values for a column """ + """provide boxed values for a column.""" return self._constructor_sliced.from_array(values, index=self.index, name=items, fastpath=True) @@ -1933,10 +1932,8 @@ def _setitem_frame(self, key, value): self.where(-key, value, inplace=True) def _ensure_valid_index(self, value): - """ - ensure that if we don't have an index, that we can create one from the - passed value - """ + """ensure that if we don't have an index, that we can create one from + the passed value.""" if not len(self.index): # GH5632, make sure that we are a Series convertible @@ -1959,14 +1956,14 @@ def _ensure_valid_index(self, value): pass def _set_item(self, key, value): - """ - Add series to DataFrame in specified column. + """Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrame's index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrame's index to ensure homogeneity. 
+ """ is_existing = key in self.columns @@ -1981,8 +1978,7 @@ def _set_item(self, key, value): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): - """ - Insert column into DataFrame at specified location. + """Insert column into DataFrame at specified location. If `allow_duplicates` is False, raises Exception if column is already contained in the DataFrame. @@ -1993,6 +1989,7 @@ def insert(self, loc, column, value, allow_duplicates=False): Must have 0 <= loc <= len(columns) column : object value : int, Series, or array-like + """ self._ensure_valid_index(value) value = self._sanitize_column(column, value) @@ -2182,8 +2179,7 @@ def rename(self, index=None, columns=None, **kwargs): def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): - """ - Set the DataFrame index (row labels) using one or more existing + """Set the DataFrame index (row labels) using one or more existing columns. By default yields a new object. Parameters @@ -2209,6 +2205,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, Returns ------- dataframe : DataFrame + """ if not isinstance(keys, list): keys = [keys] @@ -2385,9 +2382,8 @@ def _maybe_cast(values, labels=None): def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): - """ - Return object with labels on given axis omitted where alternately any - or all of the data are missing + """Return object with labels on given axis omitted where alternately + any or all of the data are missing. 
Parameters ---------- @@ -2407,6 +2403,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, Returns ------- dropped : DataFrame + """ if isinstance(axis, (tuple, list)): result = self @@ -2445,9 +2442,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset') def drop_duplicates(self, subset=None, take_last=False, inplace=False): - """ - Return DataFrame with duplicate rows removed, optionally only - considering certain columns + """Return DataFrame with duplicate rows removed, optionally only + considering certain columns. Parameters ---------- @@ -2463,6 +2459,7 @@ def drop_duplicates(self, subset=None, take_last=False, inplace=False): Returns ------- deduplicated : DataFrame + """ duplicated = self.duplicated(subset, take_last=take_last) @@ -2475,9 +2472,8 @@ def drop_duplicates(self, subset=None, take_last=False, inplace=False): @deprecate_kwarg(old_arg_name='cols', new_arg_name='subset') def duplicated(self, subset=None, take_last=False): - """ - Return boolean Series denoting duplicate rows, optionally only - considering certain columns + """Return boolean Series denoting duplicate rows, optionally only + considering certain columns. 
Parameters ---------- @@ -2491,6 +2487,7 @@ def duplicated(self, subset=None, take_last=False): Returns ------- duplicated : Series + """ # kludge for #1833 def _m8_to_i8(x): @@ -2521,9 +2518,8 @@ def _m8_to_i8(x): def sort(self, columns=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): - """ - Sort DataFrame either by labels (along either axis) or by the values in - column(s) + """Sort DataFrame either by labels (along either axis) or by the values + in column(s) Parameters ---------- @@ -2551,15 +2547,15 @@ def sort(self, columns=None, axis=0, ascending=True, Returns ------- sorted : DataFrame + """ return self.sort_index(by=columns, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position) def sort_index(self, axis=0, by=None, ascending=True, inplace=False, kind='quicksort', na_position='last'): - """ - Sort DataFrame either by labels (along either axis) or by the values in - a column + """Sort DataFrame either by labels (along either axis) or by the values + in a column. Parameters ---------- @@ -2587,6 +2583,7 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, Returns ------- sorted : DataFrame + """ from pandas.core.groupby import _lexsort_indexer, _nargsort @@ -2657,8 +2654,7 @@ def trans(v): return self.take(indexer, axis=axis, convert=False, is_copy=False) def sortlevel(self, level=0, axis=0, ascending=True, inplace=False): - """ - Sort multilevel index by chosen axis and primary level. Data will be + """Sort multilevel index by chosen axis and primary level. 
Data will be lexicographically sorted by the chosen level followed by the other levels (in order) @@ -2673,6 +2669,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False): Returns ------- sorted : DataFrame + """ axis = self._get_axis_number(axis) the_axis = self._get_axis(axis) @@ -2701,8 +2698,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False): return self.take(indexer, axis=axis, convert=False, is_copy=False) def swaplevel(self, i, j, axis=0): - """ - Swap levels i and j in a MultiIndex on a particular axis + """Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- @@ -2712,6 +2708,7 @@ def swaplevel(self, i, j, axis=0): Returns ------- swapped : type of caller (new object) + """ result = self.copy() @@ -2723,9 +2720,8 @@ def swaplevel(self, i, j, axis=0): return result def reorder_levels(self, order, axis=0): - """ - Rearrange index levels using input order. - May not drop or duplicate levels + """Rearrange index levels using input order. May not drop or duplicate + levels. Parameters ---------- @@ -2738,6 +2734,7 @@ def reorder_levels(self, order, axis=0): Returns ------- type of caller (new object) + """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), @@ -2895,8 +2892,9 @@ def _flex_compare_frame(self, other, func, str_rep, level): return self._compare_frame_evaluate(other, func, str_rep) def combine(self, other, func, fill_value=None, overwrite=True): - """ - Add two DataFrame objects and do not propagate NaN values, so if for a + """Add two DataFrame objects and do not propagate NaN values, so if for + a. 
+ (column, time) one frame is missing a value, it will default to the other frame's value (which might be NaN as well) @@ -2911,6 +2909,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): Returns ------- result : DataFrame + """ other_idxlen = len(other.index) # save for compare @@ -3093,8 +3092,7 @@ def last_valid_index(self): # Data reshaping def pivot(self, index=None, columns=None, values=None): - """ - Reshape data (produce a "pivot" table) based on column values. Uses + """Reshape data (produce a "pivot" table) based on column values. Uses unique values from index / columns to form axes and return either DataFrame or Panel, depending on whether you request a single value column (DataFrame) or all columns (Panel) @@ -3139,6 +3137,7 @@ def pivot(self, index=None, columns=None, values=None): pivoted : DataFrame If no values column specified, will have hierarchically indexed columns + """ from pandas.core.reshape import pivot return pivot(self, index=index, columns=columns, values=values) @@ -3244,8 +3243,7 @@ def unstack(self, level=-1): # Time series-related def diff(self, periods=1): - """ - 1st discrete difference of object + """1st discrete difference of object. Parameters ---------- @@ -3255,6 +3253,7 @@ def diff(self, periods=1): Returns ------- diffed : DataFrame + """ new_data = self._data.diff(n=periods) return self._constructor(new_data) @@ -3264,8 +3263,7 @@ def diff(self, periods=1): def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): - """ - Applies function along input axis of DataFrame. + """Applies function along input axis of DataFrame. Objects passed to functions are Series objects having index either the DataFrame's index (axis=0) or the columns (axis=1). 
@@ -3321,6 +3319,7 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, Returns ------- applied : Series or DataFrame + """ axis = self._get_axis_number(axis) if kwds or args and not isinstance(func, np.ufunc): @@ -3486,10 +3485,9 @@ def _apply_broadcast(self, func, axis): return result def applymap(self, func): - """ - Apply a function to a DataFrame that is intended to operate + """Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the - DataFrame + DataFrame. Parameters ---------- @@ -3517,8 +3515,7 @@ def infer(x): # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False): - """ - Append columns of other to end of this frame's columns and index, + """Append columns of other to end of this frame's columns and index, returning a new object. Columns not in this frame are added as new columns. @@ -3540,6 +3537,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): Returns ------- appended : DataFrame + """ if isinstance(other, (Series, dict)): if isinstance(other, dict): @@ -3570,8 +3568,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): - """ - Join columns with other DataFrame either on index or on a key + """Join columns with other DataFrame either on index or on a key column. Efficiently Join multiple DataFrame objects by index at once by passing a list. @@ -3610,6 +3607,7 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='', Returns ------- joined : DataFrame + """ # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, @@ -3671,8 +3669,7 @@ def merge(self, right, how='inner', on=None, left_on=None, right_on=None, # Statistical methods, etc. 
def corr(self, method='pearson', min_periods=1): - """ - Compute pairwise correlation of columns, excluding NA/null values + """Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- @@ -3688,6 +3685,7 @@ def corr(self, method='pearson', min_periods=1): Returns ------- y : DataFrame + """ numeric_df = self._get_numeric_data() cols = numeric_df.columns @@ -3722,8 +3720,7 @@ def corr(self, method='pearson', min_periods=1): return self._constructor(correl, index=cols, columns=cols) def cov(self, min_periods=None): - """ - Compute pairwise covariance of columns, excluding NA/null values + """Compute pairwise covariance of columns, excluding NA/null values. Parameters ---------- @@ -3739,6 +3736,7 @@ def cov(self, min_periods=None): ----- `y` contains the covariance matrix of the DataFrame's time series. The covariance is normalized by N-1 (unbiased estimator). + """ numeric_df = self._get_numeric_data() cols = numeric_df.columns @@ -3758,9 +3756,8 @@ def cov(self, min_periods=None): return self._constructor(baseCov, index=cols, columns=cols) def corrwith(self, other, axis=0, drop=False): - """ - Compute pairwise correlation between rows or columns of two DataFrame - objects. + """Compute pairwise correlation between rows or columns of two + DataFrame objects. Parameters ---------- @@ -3773,6 +3770,7 @@ def corrwith(self, other, axis=0, drop=False): Returns ------- correls : Series + """ axis = self._get_axis_number(axis) if isinstance(other, Series): @@ -3932,8 +3930,8 @@ def _count_level(self, level, axis=0, numeric_only=False): def any(self, axis=None, bool_only=None, skipna=True, level=None, **kwargs): - """ - Return whether any element is True over requested axis. + """Return whether any element is True over requested axis. 
+ %(na_action)s Parameters @@ -3952,6 +3950,7 @@ def any(self, axis=None, bool_only=None, skipna=True, level=None, Returns ------- any : Series (or DataFrame if level specified) + """ if axis is None: axis = self._stat_axis_number @@ -3963,8 +3962,8 @@ def any(self, axis=None, bool_only=None, skipna=True, level=None, def all(self, axis=None, bool_only=None, skipna=True, level=None, **kwargs): - """ - Return whether all elements are True over requested axis. + """Return whether all elements are True over requested axis. + %(na_action)s Parameters @@ -3983,6 +3982,7 @@ def all(self, axis=None, bool_only=None, skipna=True, level=None, Returns ------- any : Series (or DataFrame if level specified) + """ if axis is None: axis = self._stat_axis_number @@ -4064,8 +4064,7 @@ def _reduce(self, op, axis=0, skipna=True, numeric_only=None, return Series(result, index=labels) def idxmin(self, axis=0, skipna=True): - """ - Return index of first occurrence of minimum over requested axis. + """Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters @@ -4087,6 +4086,7 @@ def idxmin(self, axis=0, skipna=True): See Also -------- Series.idxmin + """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) @@ -4095,8 +4095,7 @@ def idxmin(self, axis=0, skipna=True): return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): - """ - Return index of first occurrence of maximum over requested axis. + """Return index of first occurrence of maximum over requested axis. NA/null values are excluded. 
Parameters @@ -4118,6 +4117,7 @@ def idxmax(self, axis=0, skipna=True): See Also -------- Series.idxmax + """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) @@ -4126,7 +4126,7 @@ def idxmax(self, axis=0, skipna=True): return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): - """ let's be explict about this """ + """let's be explict about this.""" if axis_num == 0: return self.columns elif axis_num == 1: @@ -4135,10 +4135,9 @@ def _get_agg_axis(self, axis_num): raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False): - """ - Gets the mode of each element along the axis selected. Empty if nothing - has 2+ occurrences. Adds a row for each mode per label, fills in gaps - with nan. + """Gets the mode of each element along the axis selected. Empty if + nothing has 2+ occurrences. Adds a row for each mode per label, fills + in gaps with nan. Parameters ---------- @@ -4151,15 +4150,15 @@ def mode(self, axis=0, numeric_only=False): Returns ------- modes : DataFrame (sorted) + """ data = self if not numeric_only else self._get_numeric_data() f = lambda s: s.mode() return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True): - """ - Return values at the given quantile over requested axis, a la - scoreatpercentile in scipy.stats + """Return values at the given quantile over requested axis, a la + scoreatpercentile in scipy.stats. Parameters ---------- @@ -4171,6 +4170,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True): Returns ------- quantiles : Series + """ per = q * 100 @@ -4189,9 +4189,8 @@ def f(arr): def rank(self, axis=0, numeric_only=None, method='average', na_option='keep', ascending=True, pct=False): - """ - Compute numerical data ranks (1 through n) along axis. Equal values are - assigned a rank that is the average of the ranks of those values + """Compute numerical data ranks (1 through n) along axis. 
Equal values + are assigned a rank that is the average of the ranks of those values. Parameters ---------- @@ -4217,6 +4216,7 @@ def rank(self, axis=0, numeric_only=None, method='average', Returns ------- ranks : DataFrame + """ axis = self._get_axis_number(axis) if numeric_only is None: @@ -4271,8 +4271,7 @@ def to_timestamp(self, freq=None, how='start', axis=0, copy=True): return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): - """ - Convert DataFrame from DatetimeIndex to PeriodIndex with desired + """Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed) Parameters @@ -4286,6 +4285,7 @@ def to_period(self, freq=None, axis=0, copy=True): Returns ------- ts : TimeSeries with PeriodIndex + """ new_data = self._data if copy: @@ -4306,8 +4306,7 @@ def to_period(self, freq=None, axis=0, copy=True): return self._constructor(new_data) def isin(self, values): - """ - Return boolean DataFrame showing whether each element in the + """Return boolean DataFrame showing whether each element in the DataFrame is contained in values. Parameters @@ -4353,6 +4352,7 @@ def isin(self, values): 0 True False 1 False False # Column A in `other` has a 3, but not at index 1. 2 True True + """ if isinstance(values, dict): from collections import defaultdict @@ -4385,11 +4385,9 @@ def isin(self, values): # Deprecated stuff def combineAdd(self, other): - """ - Add two DataFrame objects and do not propagate - NaN values, so if for a (column, time) one frame is missing a - value, it will default to the other frame's value (which might - be NaN as well) + """Add two DataFrame objects and do not propagate NaN values, so if for + a (column, time) one frame is missing a value, it will default to the + other frame's value (which might be NaN as well) Parameters ---------- @@ -4398,14 +4396,14 @@ def combineAdd(self, other): Returns ------- DataFrame + """ return self.add(other, fill_value=0.) 
def combineMult(self, other): - """ - Multiply two DataFrame objects and do not propagate NaN values, so if - for a (column, time) one frame is missing a value, it will default to - the other frame's value (which might be NaN as well) + """Multiply two DataFrame objects and do not propagate NaN values, so + if for a (column, time) one frame is missing a value, it will default + to the other frame's value (which might be NaN as well) Parameters ---------- @@ -4414,6 +4412,7 @@ def combineMult(self, other): Returns ------- DataFrame + """ return self.mul(other, fill_value=1.) @@ -4462,8 +4461,7 @@ def group_agg(values, bounds, f): def factor_agg(factor, vec, func): - """ - Aggregate array based on Categorical + """Aggregate array based on Categorical. Parameters ---------- @@ -4481,6 +4479,7 @@ def factor_agg(factor, vec, func): See Also -------- pandas.Categorical + """ indexer = np.argsort(factor.labels) unique_labels = np.arange(len(factor.levels)) @@ -4493,9 +4492,10 @@ def factor_agg(factor, vec, func): def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): - """ - Segregate Series based on type and coerce into matrices. + """Segregate Series based on type and coerce into matrices. + Needs to handle a lot of exceptional cases. 
+ """ # figure out the index, if necessary if index is None: @@ -4598,9 +4598,7 @@ def convert(v): def _to_arrays(data, columns, coerce_float=False, dtype=None): - """ - Return list of arrays, columns - """ + """Return list of arrays, columns.""" if isinstance(data, DataFrame): if columns is not None: arrays = [data.icol(i).values for i, col in enumerate(data.columns) @@ -4643,7 +4641,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): - """ extract from a masked rec array and create the manager """ + """extract from a masked rec array and create the manager.""" # essentially process a record array then fill it fill_value = data.fill_value @@ -4845,8 +4843,8 @@ def _put_str(s, space): def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, **kwds): - """ - Make a box plot from DataFrame column/columns optionally grouped + """Make a box plot from DataFrame column/columns optionally grouped. 
+ (stratified) by one or more columns Parameters @@ -4866,6 +4864,7 @@ def boxplot(self, column=None, by=None, ax=None, fontsize=None, Returns ------- ax : matplotlib.axes.AxesSubplot + """ import pandas.tools.plotting as plots import matplotlib.pyplot as plt diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8ec4655c0a309..bd406c364db50 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -103,7 +103,7 @@ def __init__(self, data, axes=None, copy=False, dtype=None, object.__setattr__(self, '_item_cache', {}) def _validate_dtype(self, dtype): - """ validate the passed dtype """ + """validate the passed dtype.""" if dtype is not None: dtype = np.dtype(dtype) @@ -116,7 +116,7 @@ def _validate_dtype(self, dtype): return dtype def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): - """ passed a manager and a axes dict """ + """passed a manager and a axes dict.""" for a, axe in axes.items(): if axe is not None: mgr = mgr.reindex_axis( @@ -160,17 +160,18 @@ def _constructor_sliced(self): def _setup_axes( cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None, axes_are_reversed=False, build_axes=True, ns=None): - """ provide axes setup for the major PandasObjects - - axes : the names of the axes in order (lowest to highest) - info_axis_num : the axis of the selector dimension (int) - stat_axis_num : the number of axis for the default stats (int) - aliases : other names for a single axis (dict) - slicers : how axes slice to others (dict) - axes_are_reversed : boolean whether to treat passed axes as - reversed (DataFrame) - build_axes : setup the axis properties (default True) - """ + """provide axes setup for the major PandasObjects. 
+ + axes : the names of the axes in order (lowest to highest) + info_axis_num : the axis of the selector dimension (int) + stat_axis_num : the number of axis for the default stats (int) + aliases : other names for a single axis (dict) + slicers : how axes slice to others (dict) + axes_are_reversed : boolean whether to treat passed axes as + reversed (DataFrame) + build_axes : setup the axis properties (default True) + + """ cls._AXIS_ORDERS = axes cls._AXIS_NUMBERS = dict((a, i) for i, a in enumerate(axes)) @@ -217,29 +218,29 @@ def set_axis(a, i): setattr(cls, k, v) def _construct_axes_dict(self, axes=None, **kwargs): - """ return an axes dictionary for myself """ + """return an axes dictionary for myself.""" d = dict([(a, self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)]) d.update(kwargs) return d @staticmethod def _construct_axes_dict_from(self, axes, **kwargs): - """ return an axes dictionary for the passed axes """ + """return an axes dictionary for the passed axes.""" d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)]) d.update(kwargs) return d def _construct_axes_dict_for_slice(self, axes=None, **kwargs): - """ return an axes dictionary for myself """ + """return an axes dictionary for myself.""" d = dict([(self._AXIS_SLICEMAP[a], self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)]) d.update(kwargs) return d def _construct_axes_from_arguments(self, args, kwargs, require_all=False): - """ construct and returns axes if supplied in args/kwargs - if require_all, raise if all axis arguments are not supplied - return a tuple of (axes, kwargs) """ + """construct and returns axes if supplied in args/kwargs if + require_all, raise if all axis arguments are not supplied return a + tuple of (axes, kwargs)""" # construct the args args = list(args) @@ -313,7 +314,7 @@ def _get_axis(self, axis): return getattr(self, name) def _get_block_manager_axis(self, axis): - """ map the axis to the block_manager axis """ + """map the axis to the block_manager 
axis.""" axis = self._get_axis_number(axis) if self._AXIS_REVERSED: m = self._AXIS_LEN - 1 @@ -366,19 +367,19 @@ def _stat_axis(self): @property def shape(self): - "tuple of axis dimensions" + """tuple of axis dimensions.""" return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property def axes(self): - "index(es) of the NDFrame" + """index(es) of the NDFrame.""" # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] @property def ndim(self): - "Number of axes / array dimensions" + """Number of axes / array dimensions.""" return self._data.ndim def _expand_axes(self, key): @@ -394,7 +395,7 @@ def _expand_axes(self, key): return new_axes def set_axis(self, axis, labels): - """ public verson of axis assignment """ + """public verson of axis assignment.""" setattr(self,self._get_axis_name(axis),labels) def _set_axis(self, axis, labels): @@ -444,12 +445,12 @@ def transpose(self, *args, **kwargs): return self._constructor(new_values, **new_axes).__finalize__(self) def swapaxes(self, axis1, axis2, copy=True): - """ - Interchange axes and swap values axes appropriately + """Interchange axes and swap values axes appropriately. Returns ------- y : same as input + """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) @@ -470,15 +471,17 @@ def swapaxes(self, axis1, axis2, copy=True): return self._constructor(new_values, *new_axes).__finalize__(self) def pop(self, item): - """ - Return item and drop from frame. Raise KeyError if not found. + """Return item and drop from frame. + + Raise KeyError if not found. 
+ """ result = self[item] del self[item] return result def squeeze(self): - """ squeeze length 1 dimensions """ + """squeeze length 1 dimensions.""" try: return self.ix[tuple([slice(None) if len(a) > 1 else a[0] for a in self.axes])] @@ -486,8 +489,7 @@ def squeeze(self): return self def swaplevel(self, i, j, axis=0): - """ - Swap levels i and j in a MultiIndex on a particular axis + """Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- @@ -497,6 +499,7 @@ def swaplevel(self, i, j, axis=0): Returns ------- swapped : type of caller (new object) + """ axis = self._get_axis_number(axis) result = self.copy() @@ -614,9 +617,10 @@ def __invert__(self): return self._wrap_array(arr, self.axes, copy=False) def equals(self, other): - """ - Determines if two NDFrame objects contain the same elements. NaNs in the - same location are considered equal. + """Determines if two NDFrame objects contain the same elements. + + NaNs in the same location are considered equal. + """ if not isinstance(other, self._constructor): return False @@ -630,24 +634,25 @@ def __hash__(self): ' hashed'.format(self.__class__.__name__)) def __iter__(self): - """ - Iterate over infor axis - """ + """Iterate over infor axis.""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self): """Get the 'info axis' (see Indexing for more) - This is index for Series, columns for DataFrame and major_axis for - Panel.""" + This is index for Series, columns for DataFrame and major_axis + for Panel. + + """ return self._info_axis def iteritems(self): - """Iterate over (label, values) on info axis + """Iterate over (label, values) on info axis. + + This is index for Series, columns for DataFrame, major_axis for + Panel, and so on. - This is index for Series, columns for DataFrame, major_axis for Panel, - and so on. 
""" for h in self._info_axis: yield h, self[h] @@ -663,16 +668,16 @@ def iterkv(self, *args, **kwargs): return self.iteritems(*args, **kwargs) def __len__(self): - """Returns length of info axis """ + """Returns length of info axis.""" return len(self._info_axis) def __contains__(self, key): - """True if the key is in the info axis """ + """True if the key is in the info axis.""" return key in self._info_axis @property def empty(self): - "True if NDFrame is entirely empty [no items]" + """True if NDFrame is entirely empty [no items]""" return not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS) def __nonzero__(self): @@ -683,11 +688,13 @@ def __nonzero__(self): __bool__ = __nonzero__ def bool(self): - """ Return the bool of a single element PandasObject - This must be a boolean scalar value, either True or False + """Return the bool of a single element PandasObject This must be a + boolean scalar value, either True or False. - Raise a ValueError if the PandasObject does not have exactly - 1 element, or that element is not boolean """ + Raise a ValueError if the PandasObject does not have exactly 1 + element, or that element is not boolean + + """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) @@ -723,7 +730,7 @@ def __array_wrap__(self, result): # return dict(typestr=values.dtype.str,shape=values.shape,data=values) def to_dense(self): - "Return dense representation of NDFrame (as opposed to sparse)" + """Return dense representation of NDFrame (as opposed to sparse)""" # compat return self @@ -781,8 +788,7 @@ def __setstate__(self, state): def to_json(self, path_or_buf=None, orient=None, date_format='epoch', double_precision=10, force_ascii=True, date_unit='ms', default_handler=None): - """ - Convert the object to a JSON string. + """Convert the object to a JSON string. Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. 
@@ -846,7 +852,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', default_handler=default_handler) def to_hdf(self, path_or_buf, key, **kwargs): - """ activate the HDFStore + """activate the HDFStore. Parameters ---------- @@ -889,8 +895,7 @@ def to_hdf(self, path_or_buf, key, **kwargs): return pytables.to_hdf(path_or_buf, key, self, **kwargs) def to_msgpack(self, path_or_buf=None, **kwargs): - """ - msgpack (serialize) object to input file path + """msgpack (serialize) object to input file path. THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. @@ -903,6 +908,7 @@ def to_msgpack(self, path_or_buf=None, **kwargs): (default is False) compress : type of compressor (zlib or blosc), default to None (no compression) + """ from pandas.io import packers @@ -910,8 +916,7 @@ def to_msgpack(self, path_or_buf=None, **kwargs): def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=None): - """ - Write records stored in a DataFrame to a SQL database. + """Write records stored in a DataFrame to a SQL database. Parameters ---------- @@ -942,35 +947,42 @@ def to_sql(self, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=index_label) def to_pickle(self, path): - """ - Pickle (serialize) object to input file path + """Pickle (serialize) object to input file path. Parameters ---------- path : string File path + """ from pandas.io.pickle import to_pickle return to_pickle(self, path) def save(self, path): # TODO remove in 0.14 - "Deprecated. Use to_pickle instead" + """Deprecated. + + Use to_pickle instead + + """ import warnings from pandas.io.pickle import to_pickle warnings.warn("save is deprecated, use to_pickle", FutureWarning) return to_pickle(self, path) def load(self, path): # TODO remove in 0.14 - "Deprecated. Use read_pickle instead." + """Deprecated. + + Use read_pickle instead. 
+ + """ import warnings from pandas.io.pickle import read_pickle warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning) return read_pickle(path) def to_clipboard(self, excel=None, sep=None, **kwargs): - """ - Attempt to write text representation of object to the system clipboard - This can be pasted into Excel, for example. + """Attempt to write text representation of object to the system + clipboard This can be pasted into Excel, for example. Parameters ---------- @@ -988,6 +1000,7 @@ def to_clipboard(self, excel=None, sep=None, **kwargs): - Linux: xclip, or xsel (with gtk or PyQt4 modules) - Windows: none - OS X: none + """ from pandas.io import clipboard clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs) @@ -997,7 +1010,7 @@ def to_clipboard(self, excel=None, sep=None, **kwargs): @classmethod def _create_indexer(cls, name, indexer): - """ create an indexer like _name in the class """ + """create an indexer like _name in the class.""" if getattr(cls, name, None) is None: iname = '_%s' % name @@ -1016,9 +1029,8 @@ def _indexer(self): cls._internal_names_set.add(iname) def get(self, key, default=None): - """ - Get item from object for given key (DataFrame column, Panel slice, - etc.). Returns default value if not found + """Get item from object for given key (DataFrame column, Panel slice, + etc.). Returns default value if not found. 
Parameters ---------- @@ -1027,6 +1039,7 @@ def get(self, key, default=None): Returns ------- value : type of items contained in object + """ try: return self[key] @@ -1037,7 +1050,7 @@ def __getitem__(self, item): return self._get_item_cache(item) def _get_item_cache(self, item): - """ return the cached item, item represents a label indexer """ + """return the cached item, item represents a label indexer.""" cache = self._item_cache res = cache.get(item) if res is None: @@ -1051,12 +1064,12 @@ def _get_item_cache(self, item): return res def _set_as_cached(self, item, cacher): - """ set the _cacher attribute on the calling object with - a weakref to cacher """ + """set the _cacher attribute on the calling object with a weakref to + cacher.""" self._cacher = (item, weakref.ref(cacher)) def _iget_item_cache(self, item): - """ return the cached item, item represents a positional indexer """ + """return the cached item, item represents a positional indexer.""" ax = self._info_axis if ax.is_unique: lower = self._get_item_cache(ax[item]) @@ -1068,12 +1081,11 @@ def _box_item_values(self, key, values): raise NotImplementedError def _maybe_cache_changed(self, item, value): - """ - the object has called back to us saying - maybe it has changed + """the object has called back to us saying maybe it has changed. numpy < 1.8 has an issue with object arrays and aliasing GH6026 + """ self._data.set(item, value, check=pd._np_version_under1p8) @@ -1084,8 +1096,8 @@ def _is_cached(self): return cacher is not None def _maybe_update_cacher(self, clear=False): - """ see if we need to update our parent cacher - if clear, then clear our cache """ + """see if we need to update our parent cacher if clear, then clear our + cache.""" cacher = getattr(self, '_cacher', None) if cacher is not None: ref = cacher[1]() @@ -1113,10 +1125,10 @@ def _clear_item_cache(self, i=None): self._item_cache.clear() def _slice(self, slobj, axis=0, typ=None): - """ - Construct a slice of this container. 
+ """Construct a slice of this container. - typ parameter is maintained for compatibility with Series slicing. + typ parameter is maintained for compatibility with Series + slicing. """ axis = self._get_block_manager_axis(axis) @@ -1136,10 +1148,12 @@ def _set_is_copy(self, ref=None, copy=True): self.is_copy = None def _check_setitem_copy(self, stacklevel=4, t='setting'): - """ validate if we are doing a settitem on a chained copy. + """validate if we are doing a settitem on a chained copy. If you call this function, be sure to set the stacklevel such that the - user will see the error *at the level of setting*""" + user will see the error *at the level of setting* + + """ if self.is_copy: value = config.get_option('mode.chained_assignment') @@ -1169,9 +1183,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting'): warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel) def __delitem__(self, key): - """ - Delete item - """ + """Delete item.""" deleted = False maybe_shortcut = False @@ -1202,8 +1214,7 @@ def __delitem__(self, key): pass def take(self, indices, axis=0, convert=True, is_copy=True): - """ - Analogous to ndarray.take + """Analogous to ndarray.take. Parameters ---------- @@ -1215,6 +1226,7 @@ def take(self, indices, axis=0, convert=True, is_copy=True): Returns ------- taken : type of caller + """ # check/convert indicies here @@ -1386,8 +1398,7 @@ def xs(self, key, axis=0, level=None, copy=True, drop_level=True): # TODO: Check if this was clearer in 0.12 def select(self, crit, axis=0): - """ - Return data corresponding to axis labels matching criteria + """Return data corresponding to axis labels matching criteria. 
Parameters ---------- @@ -1398,6 +1409,7 @@ def select(self, crit, axis=0): Returns ------- selection : type of caller + """ axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) @@ -1412,7 +1424,7 @@ def select(self, crit, axis=0): return self.reindex(**{axis_name: new_axis}) def reindex_like(self, other, method=None, copy=True, limit=None): - """ return an object with matching indicies to myself + """return an object with matching indicies to myself. Parameters ---------- @@ -1430,13 +1442,13 @@ def reindex_like(self, other, method=None, copy=True, limit=None): Returns ------- reindexed : same as input + """ d = other._construct_axes_dict(method=method, copy=copy, limit=limit) return self.reindex(**d) def drop(self, labels, axis=0, level=None, inplace=False, **kwargs): - """ - Return new object with labels in requested axis removed + """Return new object with labels in requested axis removed. Parameters ---------- @@ -1450,6 +1462,7 @@ def drop(self, labels, axis=0, level=None, inplace=False, **kwargs): Returns ------- dropped : type of caller + """ axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) @@ -1490,7 +1503,7 @@ def drop(self, labels, axis=0, level=None, inplace=False, **kwargs): return result def _update_inplace(self, result): - "replace self internals with result." + """replace self internals with result.""" # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() @@ -1499,8 +1512,7 @@ def _update_inplace(self, result): self._maybe_update_cacher() def add_prefix(self, prefix): - """ - Concatenate prefix string with panel items names. + """Concatenate prefix string with panel items names. 
Parameters ---------- @@ -1509,13 +1521,13 @@ def add_prefix(self, prefix): Returns ------- with_prefix : type of caller + """ new_data = self._data.add_prefix(prefix) return self._constructor(new_data).__finalize__(self) def add_suffix(self, suffix): - """ - Concatenate suffix string with panel items names + """Concatenate suffix string with panel items names. Parameters ---------- @@ -1524,13 +1536,13 @@ def add_suffix(self, suffix): Returns ------- with_suffix : type of caller + """ new_data = self._data.add_suffix(suffix) return self._constructor(new_data).__finalize__(self) def sort_index(self, axis=0, ascending=True): - """ - Sort object by labels (along an axis) + """Sort object by labels (along an axis) Parameters ---------- @@ -1542,6 +1554,7 @@ def sort_index(self, axis=0, ascending=True): Returns ------- sorted_obj : type of caller + """ axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) @@ -1624,7 +1637,7 @@ def reindex(self, *args, **kwargs): method, fill_value, copy).__finalize__(self) def _reindex_axes(self, axes, level, limit, method, fill_value, copy): - """ perform the reinxed for all the axes """ + """perform the reinxed for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] @@ -1648,7 +1661,7 @@ def _reindex_axes(self, axes, level, limit, method, fill_value, copy): return obj def _needs_reindex_multi(self, axes, method, level): - """ check if we do need a multi reindex """ + """check if we do need a multi reindex.""" return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type) @@ -1709,7 +1722,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, def _reindex_with_indexers(self, reindexers, method=None, fill_value=np.nan, limit=None, copy=False, allow_dups=False): - """ allow_dups indicates an internal call here """ + """allow_dups indicates an internal call here.""" # reindex doing multiple operations on 
different axes if indiciated new_data = self._data @@ -1759,8 +1772,7 @@ def _reindex_axis(self, new_index, fill_method, axis, copy): return self._constructor(new_data).__finalize__(self) def filter(self, items=None, like=None, regex=None, axis=None): - """ - Restrict the info axis to set of items or wildcard + """Restrict the info axis to set of items or wildcard. Parameters ---------- @@ -1798,18 +1810,14 @@ def filter(self, items=None, like=None, regex=None, axis=None): raise TypeError('Must pass either `items`, `like`, or `regex`') def head(self, n=5): - """ - Returns first n rows - """ + """Returns first n rows.""" l = len(self) if l == 0 or n==0: return self return self.iloc[:n] def tail(self, n=5): - """ - Returns last n rows - """ + """Returns last n rows.""" l = len(self) if l == 0 or n == 0: return self @@ -1819,8 +1827,7 @@ def tail(self, n=5): # Attribute access def __finalize__(self, other, method=None, **kwargs): - """ - propagate metadata from other to self + """propagate metadata from other to self. Parameters ---------- @@ -1839,6 +1846,7 @@ def __getattr__(self, name): info. This allows simpler access to columns for interactive use. + """ if name in self._internal_names_set: return object.__getattribute__(self, name) @@ -1880,10 +1888,9 @@ def _consolidate_inplace(self): self._data = self._protect_consolidate(f) def consolidate(self, inplace=False): - """ - Compute NDFrame with "consolidated" internals (data of each dtype + """Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Mainly an internal API function, - but available here to the savvy user + but available here to the savvy user. 
Parameters ---------- @@ -1893,6 +1900,7 @@ def consolidate(self, inplace=False): Returns ------- consolidated : type of caller + """ if inplace: self._consolidate_inplace() @@ -1964,7 +1972,7 @@ def as_matrix(self, columns=None): @property def values(self): - "Numpy representation of NDFrame" + """Numpy representation of NDFrame.""" return self.as_matrix() @property @@ -1973,32 +1981,30 @@ def _get_values(self): return self.as_matrix() def get_values(self): - """ same as values (but handles sparseness conversions) """ + """same as values (but handles sparseness conversions)""" return self.as_matrix() def get_dtype_counts(self): - """ Return the counts of dtypes in this object """ + """Return the counts of dtypes in this object.""" from pandas import Series return Series(self._data.get_dtype_counts()) def get_ftype_counts(self): - """ Return the counts of ftypes in this object """ + """Return the counts of ftypes in this object.""" from pandas import Series return Series(self._data.get_ftype_counts()) @property def dtypes(self): - """ Return the dtypes in this object """ + """Return the dtypes in this object.""" from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_) @property def ftypes(self): - """ - Return the ftypes (indication of sparse/dense and dtype) - in this object. 
- """ + """Return the ftypes (indication of sparse/dense and dtype) in this + object.""" from pandas import Series return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_) @@ -2040,7 +2046,7 @@ def as_blocks(self): @property def blocks(self): - "Internal property, property synonym for as_blocks()" + """Internal property, property synonym for as_blocks()""" return self.as_blocks() def astype(self, dtype, copy=True, raise_on_error=True): @@ -2063,8 +2069,7 @@ def astype(self, dtype, copy=True, raise_on_error=True): return self._constructor(mgr).__finalize__(self) def copy(self, deep=True): - """ - Make a copy of this object + """Make a copy of this object. Parameters ---------- @@ -2074,6 +2079,7 @@ def copy(self, deep=True): Returns ------- copy : type of caller + """ data = self._data if deep: @@ -2082,8 +2088,7 @@ def copy(self, deep=True): def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): - """ - Attempt to infer better dtype for object columns + """Attempt to infer better dtype for object columns. Parameters ---------- @@ -2093,13 +2098,14 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, strings), non-convertibles get NaN convert_timedeltas : if True, attempt to soft convert timedeltas, if 'coerce', force conversion (and non-convertibles get NaT) - copy : Boolean, if True, return copy even if no copy is necessary + copy : Boolean, if True, return copy even if no copy is necessary (e.g. no conversion was done), default is True. It is meant for internal use, not to be confused with `inplace` kw. 
Returns ------- converted : asm as input object + """ return self._constructor( self._data.convert(convert_dates=convert_dates, @@ -2112,8 +2118,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, def fillna(self, value=None, method=None, axis=0, inplace=False, limit=None, downcast=None): - """ - Fill NA/NaN values using the specified method + """Fill NA/NaN values using the specified method. Parameters ---------- @@ -2147,6 +2152,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, Returns ------- filled : same type as caller + """ if isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' @@ -2244,8 +2250,7 @@ def bfill(self, axis=0, inplace=False, limit=None, downcast=None): def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad', axis=None): - """ - Replace values given in 'to_replace' with 'value'. + """Replace values given in 'to_replace' with 'value'. Parameters ---------- @@ -2490,8 +2495,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, def interpolate(self, method='linear', axis=0, limit=None, inplace=False, downcast=None, **kwargs): - """ - Interpolate values according to different methods. + """Interpolate values according to different methods. 
Parameters ---------- @@ -2623,8 +2627,7 @@ def notnull(self): return notnull(self).__finalize__(self) def clip(self, lower=None, upper=None, out=None): - """ - Trim values at input threshold(s) + """Trim values at input threshold(s) Parameters ---------- @@ -2634,6 +2637,7 @@ def clip(self, lower=None, upper=None, out=None): Returns ------- clipped : Series + """ if out is not None: # pragma: no cover raise Exception('out argument is not supported yet') @@ -2651,8 +2655,7 @@ def clip(self, lower=None, upper=None, out=None): return result def clip_upper(self, threshold): - """ - Return copy of input with values above given value truncated + """Return copy of input with values above given value truncated. See also -------- @@ -2661,6 +2664,7 @@ def clip_upper(self, threshold): Returns ------- clipped : same type as input + """ if isnull(threshold): raise ValueError("Cannot use an NA value as a clip threshold") @@ -2668,8 +2672,7 @@ def clip_upper(self, threshold): return self.where((self <= threshold) | isnull(self), threshold) def clip_lower(self, threshold): - """ - Return copy of the input with values below given value truncated + """Return copy of the input with values below given value truncated. See also -------- @@ -2678,6 +2681,7 @@ def clip_lower(self, threshold): Returns ------- clipped : same type as input + """ if isnull(threshold): raise ValueError("Cannot use an NA value as a clip threshold") @@ -2686,9 +2690,8 @@ def clip_lower(self, threshold): def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False): - """ - Group series using mapper (dict or key function, apply given function - to group, return result as series) or by a series of columns + """Group series using mapper (dict or key function, apply given + function to group, return result as series) or by a series of columns. 
Parameters ---------- @@ -2736,9 +2739,9 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, sort=sort, group_keys=group_keys, squeeze=squeeze) def asfreq(self, freq, method=None, how=None, normalize=False): - """ - Convert all TimeSeries inside to specified frequency using DateOffset - objects. Optionally provide fill method to pad/backfill missing values. + """Convert all TimeSeries inside to specified frequency using + DateOffset objects. Optionally provide fill method to pad/backfill + missing values. Parameters ---------- @@ -2755,6 +2758,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False): Returns ------- converted : type of caller + """ from pandas.tseries.resample import asfreq return asfreq(self, freq, method=method, how=how, @@ -2843,9 +2847,8 @@ def resample(self, rule, how=None, axis=0, fill_method=None, return sampler.resample(self).__finalize__(self) def first(self, offset): - """ - Convenience method for subsetting initial periods of time series data - based on a date offset + """Convenience method for subsetting initial periods of time series + data based on a date offset. Parameters ---------- @@ -2858,6 +2861,7 @@ def first(self, offset): Returns ------- subset : type of caller + """ from pandas.tseries.frequencies import to_offset if not isinstance(self.index, DatetimeIndex): @@ -2877,9 +2881,8 @@ def first(self, offset): return self.ix[:end] def last(self, offset): - """ - Convenience method for subsetting final periods of time series data - based on a date offset + """Convenience method for subsetting final periods of time series data + based on a date offset. 
Parameters ---------- @@ -2892,6 +2895,7 @@ Returns ------- subset : type of caller + """ from pandas.tseries.frequencies import to_offset if not isinstance(self.index, DatetimeIndex): @@ -2908,9 +2912,8 @@ def last(self, offset): def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0): - """ - Align two object on their axes with the - specified join method for each axis Index + """Align two objects on their axes with the specified join method for + each axis Index. Parameters ---------- @@ -2936,6 +2939,7 @@ def align(self, other, join='outer', axis=None, level=None, copy=True, ------- (left, right) : (type of input, type of other) Aligned objects + """ from pandas import DataFrame, Series method = com._clean_fill_method(method) @@ -3064,8 +3068,7 @@ def _align_series(self, other, join='outer', axis=None, level=None, def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, try_cast=False, raise_on_error=True): - """ - Return an object of same shape as self and whose corresponding + """Return an object of same shape as self and whose corresponding entries are from self where cond is True and otherwise are from other. Parameters @@ -3085,6 +3088,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, Returns ------- wh : same type as caller + """ if isinstance(cond, NDFrame): cond = cond.reindex(**self._construct_axes_dict()) @@ -3207,9 +3211,8 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, return self._constructor(new_data).__finalize__(self) def mask(self, cond): - """ - Returns copy whose values are replaced with nan if the - inverted condition is True + """Returns copy whose values are replaced with nan if the inverted + condition is True.
Parameters ---------- @@ -3218,12 +3221,12 @@ def mask(self, cond): Returns ------- wh: same as input + """ return self.where(~cond, np.nan) def shift(self, periods=1, freq=None, axis=0, **kwds): - """ - Shift index by desired number of periods with an optional time freq + """Shift index by desired number of periods with an optional time freq. Parameters ---------- @@ -3242,6 +3245,7 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): Returns ------- shifted : same type as caller + """ if periods == 0: return self @@ -3255,8 +3259,7 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): return self._constructor(new_data).__finalize__(self) def tshift(self, periods=1, freq=None, axis=0, **kwds): - """ - Shift the time index, using the index's frequency if available + """Shift the time index, using the index's frequency if available. Parameters ---------- @@ -3276,6 +3279,7 @@ def tshift(self, periods=1, freq=None, axis=0, **kwds): Returns ------- shifted : NDFrame + """ from pandas.core.datetools import _resolve_offset @@ -3331,6 +3335,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): Returns ------- truncated : type of caller + """ if axis is None: @@ -3364,8 +3369,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): return result def tz_convert(self, tz, axis=0, copy=True): - """ - Convert the axis to target time zone. If it is time zone naive, it + """Convert the axis to target time zone. If it is time zone naive, it will be localized to the passed time zone. Parameters @@ -3376,6 +3380,7 @@ def tz_convert(self, tz, axis=0, copy=True): Returns ------- + """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) @@ -3429,13 +3434,13 @@ def tz_localize(self, tz, axis=0, copy=True, infer_dst=False): #---------------------------------------------------------------------- # Numeric Methods def abs(self): - """ - Return an object with absolute value taken. 
Only applicable to objects - that are all numeric + """Return an object with absolute value taken. Only applicable to + objects that are all numeric. Returns ------- abs: type of caller + """ # suprimo numpy 1.6 hacking @@ -3456,8 +3461,7 @@ def _convert_timedeltas(x): def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, **kwds): - """ - Percent change over given number of periods + """Percent change over given number of periods. Parameters ---------- @@ -3473,6 +3477,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, Returns ------- chg : same type as caller + """ # TODO: Not sure if above is correct - need someone to confirm. if fill_method is None: @@ -3496,7 +3501,7 @@ def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwds): @classmethod def _add_numeric_operations(cls): - """ add the operations to the cls; evaluate the doc strings again """ + """add the operations to the cls; evaluate the doc strings again.""" axis_descr = "{%s}" % ', '.join([ "{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 8fd49bd2fe5bd..966e1786641ca 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -140,8 +140,8 @@ def _last(x): return _last(x) class Grouper(object): - """ - A Grouper allows the user to specify a groupby instruction for a target object + """A Grouper allows the user to specify a groupby instruction for a target + object. This specification will select a column via the key parameter, or if the level and/or axis parameters are given, a level of the index of the target object. @@ -207,22 +207,22 @@ def ax(self): def _get_grouper(self, obj): - """ - Parameters - ---------- + """Parameters. 
+ obj : the subject object - Returns - ------- - a tuple of binner, grouper, obj (possibly sorted) + Returns + ------- + a tuple of binner, grouper, obj (possibly sorted) + """ self._set_grouper(obj) return self.binner, self.grouper, self.obj def _set_grouper(self, obj, sort=False): - """ - given an object and the specifcations, setup the internal grouper for this particular specification + """given an object and the specifications, set up the internal grouper + for this particular specification. Parameters ---------- @@ -280,8 +280,7 @@ def groups(self): class GroupBy(PandasObject): - """ - Class for grouping and aggregating relational data. See aggregate, + """Class for grouping and aggregating relational data. See aggregate, transform, and apply functions on this object. It's easiest to use obj.groupby(...) to use GroupBy, but you can also do: @@ -344,6 +343,7 @@ class GroupBy(PandasObject): {group name -> group labels} len(grouped) : int Number of groups + """ _apply_whitelist = _common_apply_whitelist _internal_names = ['_cache'] @@ -402,7 +402,7 @@ def indices(self): return self.grouper.indices def _get_index(self, name): - """ safe get index """ + """safe get index.""" try: return self.indices[name] except: @@ -487,8 +487,7 @@ def curried(x): return wrapper def get_group(self, name, obj=None): - """ - Constructs NDFrame from group with provided name + """Constructs NDFrame from group with provided name. Parameters ---------- @@ -502,6 +501,7 @@ def get_group(self, name, obj=None): Returns ------- group : type of obj + """ if obj is None: obj = self._selected_obj @@ -510,13 +510,13 @@ def get_group(self, name, obj=None): return obj.take(inds, axis=self.axis, convert=False) def __iter__(self): - """ - Groupby iterator + """Groupby iterator.
Returns ------- Generator yielding sequence of (name, subsetted object) for each group + """ return self.grouper.get_iterator(self.obj, axis=self.axis) @@ -593,10 +593,10 @@ def transform(self, func, *args, **kwargs): raise NotImplementedError def mean(self): - """ - Compute mean of groups, excluding missing values + """Compute mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex + """ try: return self._cython_agg_general('mean') @@ -607,10 +607,10 @@ def mean(self): return self._python_agg_general(f) def median(self): - """ - Compute median of groups, excluding missing values + """Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex + """ try: return self._cython_agg_general('median') @@ -625,10 +625,10 @@ def f(x): return self._python_agg_general(f) def std(self, ddof=1): - """ - Compute standard deviation of groups, excluding missing values + """Compute standard deviation of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex + """ # todo, implement at cython level? if ddof == 1: @@ -638,10 +638,10 @@ def std(self, ddof=1): return self._python_agg_general(f) def var(self, ddof=1): - """ - Compute variance of groups, excluding missing values + """Compute variance of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex + """ if ddof == 1: return self._cython_agg_general('var') @@ -650,9 +650,7 @@ def var(self, ddof=1): return self._python_agg_general(f) def size(self): - """ - Compute group sizes - """ + """Compute group sizes.""" return self.grouper.size() sum = _groupby_function('sum', 'add', np.sum) @@ -665,8 +663,7 @@ def size(self): _convert=True) def ohlc(self): - """ - Compute sum of values, excluding missing values + """Compute sum of values, excluding missing values. 
For multiple groupings, the result index will be a MultiIndex @@ -674,8 +671,7 @@ return self._cython_agg_general('ohlc') def nth(self, n, dropna=None): - """ - Take the nth row from each group. + """Take the nth row from each group. If dropna, will not show nth non-null row, dropna is either Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent @@ -793,8 +789,7 @@ def cumcount(self, **kwargs): return Series(cumcounts, index) def head(self, n=5): - """ - Returns first n rows of each group. + """Returns first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. @@ -820,8 +815,7 @@ def head(self, n=5): return head def tail(self, n=5): - """ - Returns last n rows of each group + """Returns last n rows of each group. Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. @@ -848,9 +842,7 @@ def tail(self, n=5): return tail def _cumcount_array(self, arr=None, **kwargs): - """ - arr is where cumcount gets it's values from - """ + """arr is where cumcount gets its values from.""" ascending = kwargs.pop('ascending', True) if arr is None: @@ -1028,9 +1020,8 @@ def _is_indexed_like(obj, axes): class BaseGrouper(object): - """ - This is an internal Grouper class, which actually holds the generated groups - """ + """This is an internal Grouper class, which actually holds the generated + groups.""" def __init__(self, axis, groupings, sort=True, group_keys=True): self.axis = axis @@ -1051,13 +1042,13 @@ def nkeys(self): return len(self.groupings) def get_iterator(self, data, axis=0): - """ - Groupby iterator + """Groupby iterator.
Returns ------- Generator yielding sequence of (name, subsetted object) for each group + """ splitter = self._get_splitter(data, axis=axis) keys = self._get_group_keys() @@ -1132,10 +1123,7 @@ def names(self): return [ping.name for ping in self.groupings] def size(self): - """ - Compute group sizes - - """ + """Compute group sizes.""" # TODO: better impl labels, _, ngroups = self.group_info bin_counts = algos.value_counts(labels, sort=False) @@ -1145,10 +1133,7 @@ def size(self): @cache_readonly def _max_groupsize(self): - ''' - Compute size of largest group - - ''' + """Compute size of largest group.""" # For many items in each group this is much faster than # self.size().max(), in worst case marginally slower if self.indices: @@ -1425,9 +1410,8 @@ def _aggregate_series_pure_python(self, obj, func): def generate_bins_generic(values, binner, closed): - """ - Generate bin edge offsets and bin labels for one array using another array - which has bin edge values. Both arrays must be sorted. + """Generate bin edge offsets and bin labels for one array using another + array which has bin edge values. Both arrays must be sorted. Parameters ---------- @@ -1442,6 +1426,7 @@ def generate_bins_generic(values, binner, closed): bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:] + """ lenidx = len(values) lenbin = len(binner) @@ -1495,13 +1480,13 @@ def nkeys(self): return 1 def get_iterator(self, data, axis=0): - """ - Groupby iterator + """Groupby iterator. Returns ------- Generator yielding sequence of (name, subsetted object) for each group + """ if isinstance(data, NDFrame): slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis) @@ -1602,8 +1587,7 @@ def agg_series(self, obj, func): class Grouping(object): - """ - Holds the grouping information for a single key + """Holds the grouping information for a single key. 
Parameters ---------- @@ -1622,6 +1606,7 @@ class Grouping(object): * counts : array of group counts * group_index : unique groups * groups : dict of {group -> label_list} + """ def __init__(self, index, grouper=None, obj=None, name=None, level=None, @@ -1783,11 +1768,9 @@ def groups(self): def _get_grouper(obj, key=None, axis=0, level=None, sort=True): - """ - create and return a BaseGrouper, which is an internal - mapping of how to create the grouper indexers. - This may be composed of multiple Grouping objects, indicating - multiple groupers + """create and return a BaseGrouper, which is an internal mapping of how to + create the grouper indexers. This may be composed of multiple Grouping + objects, indicating multiple groupers. Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers @@ -1912,10 +1895,9 @@ class SeriesGroupBy(GroupBy): _apply_whitelist = _series_apply_whitelist def aggregate(self, func_or_funcs, *args, **kwargs): - """ - Apply aggregation function or functions to groups, yielding most likely - Series but in some cases DataFrame depending on the output of the - aggregation function + """Apply aggregation function or functions to groups, yielding most + likely Series but in some cases DataFrame depending on the output of + the aggregation function. Parameters ---------- @@ -1961,6 +1943,7 @@ def aggregate(self, func_or_funcs, *args, **kwargs): Returns ------- Series or DataFrame + """ if isinstance(func_or_funcs, compat.string_types): return getattr(self, func_or_funcs)(*args, **kwargs) @@ -2116,9 +2099,8 @@ def transform(self, func, *args, **kwargs): name=self._selected_obj.name) def filter(self, func, dropna=True, *args, **kwargs): - """ - Return a copy of a Series excluding elements from groups that - do not satisfy the boolean criterion specified by func. + """Return a copy of a Series excluding elements from groups that do not + satisfy the boolean criterion specified by func. 
Parameters ---------- @@ -2134,6 +2116,7 @@ def filter(self, func, dropna=True, *args, **kwargs): Returns ------- filtered : Series + """ if isinstance(func, compat.string_types): wrapper = lambda x: getattr(x, func)(*args, **kwargs) @@ -2685,9 +2668,8 @@ def _transform_item_by_item(self, obj, wrapper): return DataFrame(output, index=obj.index, columns=columns) def filter(self, func, dropna=True, *args, **kwargs): - """ - Return a copy of a DataFrame excluding elements from groups that - do not satisfy the boolean criterion specified by func. + """Return a copy of a DataFrame excluding elements from groups that do + not satisfy the boolean criterion specified by func. Parameters ---------- @@ -2705,6 +2687,7 @@ def filter(self, func, dropna=True, *args, **kwargs): -------- >>> grouped = df.groupby(lambda x: mapping[x]) >>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0) + """ from pandas.tools.merge import concat @@ -2873,10 +2856,10 @@ def _apply_to_column_groupbys(self, func): keys=self._selected_obj.columns, axis=1) def ohlc(self): - """ - Compute sum of values, excluding missing values + """Compute sum of values, excluding missing values. For multiple groupings, the result index will be a MultiIndex + """ return self._apply_to_column_groupbys( lambda x: x._cython_agg_general('ohlc')) @@ -3094,11 +3077,9 @@ def get_splitter(data, *args, **kwargs): def get_group_index(label_list, shape): - """ - For the particular label_list, gets the offsets into the hypothetical list - representing the totally ordered cartesian product of all possible label - combinations. - """ + """For the particular label_list, gets the offsets into the hypothetical + list representing the totally ordered cartesian product of all possible + label combinations.""" if len(label_list) == 1: return label_list[0] @@ -3232,8 +3213,10 @@ def _nargsort(items, kind='quicksort', ascending=True, na_position='last'): class _KeyMapper(object): - """ - Ease my suffering. 
Map compressed group id -> key tuple + """Ease my suffering. + + Map compressed group id -> key tuple + """ def __init__(self, comp_ids, ngroups, labels, levels): @@ -3275,10 +3258,12 @@ def _get_indices_dict(label_list, keys): def _compress_group_index(group_index, sort=True): - """ - Group_index is offsets into cartesian product of all possible labels. This - space can be huge, so this function compresses it, by computing offsets + """Group_index is offsets into cartesian product of all possible labels. + This space can be huge, so this function compresses it, by computing + offsets. + (comp_ids) into the list of unique labels (obs_group_ids). + """ table = _hash.Int64HashTable(min(1000000, len(group_index))) diff --git a/pandas/core/index.py b/pandas/core/index.py index b2b0764b81d43..750bd1ca3ece5 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -27,10 +27,8 @@ def _indexOp(opname): - """ - Wrapper function for index comparison operations, to avoid - code duplication. - """ + """Wrapper function for index comparison operations, to avoid code + duplication.""" def wrapper(self, other): func = getattr(self.view(np.ndarray), opname) @@ -59,9 +57,8 @@ def _shouldbe_timestamp(obj): class Index(IndexOpsMixin, FrozenNDArray): - """ - Immutable ndarray implementing an ordered, sliceable set. The basic object - storing axis labels for all pandas objects + """Immutable ndarray implementing an ordered, sliceable set. The basic + object storing axis labels for all pandas objects. 
Parameters ---------- @@ -75,6 +72,7 @@ class Index(IndexOpsMixin, FrozenNDArray): Notes ----- An Index instance can **only** contain hashable objects + """ # To hand over control to subclasses _join_precedence = 1 @@ -164,8 +162,8 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, return subarr def is_(self, other): - """ - More flexible, faster check like ``is`` but that works through views + """More flexible, faster check like ``is`` but that works through + views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. @@ -178,12 +176,13 @@ def is_(self, other): Returns ------- True if both have same underlying data, False otherwise : bool + """ # use something other than None to be clearer return self._id is getattr(other, '_id', Ellipsis) def _reset_identity(self): - """Initializes or resets ``_id`` attribute with new object""" + """Initializes or resets ``_id`` attribute with new object.""" self._id = _Identity() def view(self, *args, **kwargs): @@ -207,8 +206,12 @@ def _string_data_error(cls, data): @classmethod def _coerce_to_ndarray(cls, data): - """coerces data to ndarray, raises on scalar data. Converts other - iterables to list first and then to array. Does not touch ndarrays.""" + """coerces data to ndarray, raises on scalar data. + + Converts other iterables to list first and then to array. Does + not touch ndarrays. + + """ if not isinstance(data, np.ndarray): if np.isscalar(data): @@ -232,8 +235,7 @@ def _shallow_copy(self): return self.view() def copy(self, names=None, name=None, dtype=None, deep=False): - """ - Make a copy of this object. Name and dtype sets those attributes on + """Make a copy of this object. Name and dtype sets those attributes on the new object. 
Parameters @@ -249,6 +251,7 @@ def copy(self, names=None, name=None, dtype=None, deep=False): ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. + """ if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") @@ -267,9 +270,8 @@ def copy(self, names=None, name=None, dtype=None, deep=False): return new_index def to_series(self, keep_tz=False): - """ - Create a Series with both index and values equal to the index keys - useful with map for returning an indexer based on an index + """Create a Series with both index and values equal to the index keys + useful with map for returning an indexer based on an index. Parameters ---------- @@ -279,6 +281,7 @@ def to_series(self, keep_tz=False): Returns ------- Series : dtype will be based on the type of the Index values. + """ import pandas as pd @@ -286,7 +289,8 @@ def to_series(self, keep_tz=False): return pd.Series(values, index=self, name=self.name) def _to_embed(self, keep_tz=False): - """ return an array repr of this object, potentially casting to object """ + """return an array repr of this object, potentially casting to + object.""" return self.values def astype(self, dtype): @@ -294,10 +298,8 @@ def astype(self, dtype): dtype=dtype) def to_datetime(self, dayfirst=False): - """ - For an Index containing strings or datetime.datetime objects, attempt - conversion to DatetimeIndex - """ + """For an Index containing strings or datetime.datetime objects, + attempt conversion to DatetimeIndex.""" from pandas.tseries.index import DatetimeIndex if self.inferred_type == 'string': from dateutil.parser import parse @@ -311,9 +313,7 @@ def _assert_can_do_setop(self, other): return True def tolist(self): - """ - Overridden version of ndarray.tolist - """ + """Overridden version of ndarray.tolist.""" return list(self.values) @cache_readonly @@ -338,8 +338,7 @@ def _set_names(self, values): names = 
property(fset=_set_names, fget=_get_names) def set_names(self, names, inplace=False): - """ - Set new names on index. Defaults to returning new index. + """Set new names on index. Defaults to returning new index. Parameters ---------- @@ -351,6 +350,7 @@ def set_names(self, names, inplace=False): Returns ------- new index (of same type and class...etc) [if inplace, returns None] + """ if not com.is_list_like(names): raise TypeError("Must pass list-like as `names`.") @@ -363,8 +363,7 @@ def set_names(self, names, inplace=False): return idx def rename(self, name, inplace=False): - """ - Set new names on index. Defaults to returning new index. + """Set new names on index. Defaults to returning new index. Parameters ---------- @@ -376,6 +375,7 @@ def rename(self, name, inplace=False): Returns ------- new index (of same type and class...etc) [if inplace, returns None] + """ return self.set_names([name], inplace=inplace) @@ -465,8 +465,8 @@ def to_int(): return key def _validate_slicer(self, key, f): - """ validate and raise if needed on a slice indexers according to the - passed in function """ + """validate and raise if needed on a slice indexers according to the + passed in function.""" if not f(key.start): self._convert_indexer_error(key.start, 'slice start value') @@ -476,19 +476,23 @@ def _validate_slicer(self, key, f): self._convert_indexer_error(key.step, 'slice step value') def _convert_slice_indexer_iloc(self, key): - """ convert a slice indexer for iloc only """ + """convert a slice indexer for iloc only.""" self._validate_slicer(key, lambda v: v is None or is_integer(v)) return key def _convert_slice_indexer_getitem(self, key, is_index_slice=False): - """ called from the getitem slicers, determine how to treat the key - whether positional or not """ + """called from the getitem slicers, determine how to treat the key + whether positional or not.""" if self.is_integer() or is_index_slice: return key return self._convert_slice_indexer(key) def 
_convert_slice_indexer(self, key, typ=None): - """ convert a slice indexer. disallow floats in the start/stop/step """ + """convert a slice indexer. + + disallow floats in the start/stop/step + + """ # validate slicers def validate(v): @@ -552,13 +556,19 @@ def is_int(v): return indexer def _convert_list_indexer(self, key, typ=None): - """ convert a list indexer. these should be locations """ + """convert a list indexer. + + these should be locations + + """ return key def _convert_list_indexer_for_mixed(self, keyarr, typ=None): - """ passed a key that is tuplesafe that is integer based - and we have a mixed index (e.g. number/labels). figure out - the indexer. return None if we can't help + """passed a key that is tuplesafe that is integer based and we have a + mixed index (e.g. number/labels). + + figure out the indexer. return None if we can't help + """ if com.is_integer_dtype(keyarr) and not self.is_floating(): if self.inferred_type != 'integer': @@ -624,14 +634,14 @@ def __iter__(self): return iter(self.values) def __reduce__(self): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" object_state = list(np.ndarray.__reduce__(self)) subclass_state = self.name, object_state[2] = (object_state[2], subclass_state) return tuple(object_state) def __setstate__(self, state): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" if len(state) == 2: nd_state, own_state = state np.ndarray.__setstate__(self, nd_state) @@ -654,8 +664,7 @@ def __hash__(self): raise TypeError("unhashable type: %r" % type(self).__name__) def __getitem__(self, key): - """ - Override numpy.ndarray's __getitem__ method to work as desired. + """Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). 
@@ -685,8 +694,7 @@ def __getitem__(self, key): return result def append(self, other): - """ - Append a collection of Index options together + """Append a collection of Index options together. Parameters ---------- @@ -695,6 +703,7 @@ def append(self, other): Returns ------- appended : Index + """ name = self.name to_concat = [self] @@ -728,17 +737,13 @@ def _ensure_compat_concat(indexes): return indexes def take(self, indexer, axis=0): - """ - Analogous to ndarray.take - """ + """Analogous to ndarray.take.""" indexer = com._ensure_platform_int(indexer) taken = self.view(np.ndarray).take(indexer) return self._constructor(taken, name=self.name) def format(self, name=False, formatter=None, **kwargs): - """ - Render a string representation of the Index - """ + """Render a string representation of the Index.""" header = [] if name: header.append(com.pprint_thing(self.name, @@ -774,23 +779,21 @@ def _format_with_header(self, header, na_rep='NaN', **kwargs): return header + result def to_native_types(self, slicer=None, **kwargs): - """ slice and dice then format """ + """slice and dice then format.""" values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs) def _format_native_types(self, na_rep='', **kwargs): - """ actually format my specific types """ + """actually format my specific types.""" mask = isnull(self) values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values.tolist() def equals(self, other): - """ - Determines if two Index objects contain the same elements. 
- """ + """Determines if two Index objects contain the same elements.""" if self.is_(other): return True @@ -804,16 +807,17 @@ def equals(self, other): def identical(self, other): """Similar to equals, but check that other comparable attributes are - also equal - """ + also equal.""" return (self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) for c in self._comparables))) def asof(self, label): - """ - For a sorted index, return the most recent label up to and including - the passed label. Return NaN if not found + """For a sorted index, return the most recent label up to and including + the passed label. + + Return NaN if not found + """ if isinstance(label, (Index, ABCSeries, np.ndarray)): raise TypeError('%s' % type(label)) @@ -846,9 +850,7 @@ def asof_locs(self, where, mask): return result def order(self, return_indexer=False, ascending=True): - """ - Return sorted copy of Index - """ + """Return sorted copy of Index.""" _as = self.argsort() if not ascending: _as = _as[::-1] @@ -864,13 +866,13 @@ def sort(self, *args, **kwargs): raise TypeError('Cannot sort an %r object' % self.__class__.__name__) def shift(self, periods=1, freq=None): - """ - Shift Index containing datetime objects by input number of periods and - DateOffset + """Shift Index containing datetime objects by input number of periods + and DateOffset. Returns ------- shifted : Index + """ if periods == 0: # OK because immutable @@ -880,9 +882,7 @@ def shift(self, periods=1, freq=None): return Index([idx + offset for idx in self], name=self.name) def argsort(self, *args, **kwargs): - """ - See docstring for ndarray.argsort - """ + """See docstring for ndarray.argsort.""" result = self.asi8 if result is None: result = self.view(np.ndarray) @@ -915,8 +915,7 @@ def __xor__(self, other): return self.sym_diff(other) def union(self, other): - """ - Form the union of two Index objects and sorts if possible + """Form the union of two Index objects and sorts if possible. 
Parameters ---------- @@ -925,6 +924,7 @@ def union(self, other): Returns ------- union : Index + """ if not hasattr(other, '__iter__'): raise TypeError('Input must be iterable.') @@ -979,9 +979,8 @@ def _wrap_union_result(self, other, result): return type(self)(data=result, name=name) def intersection(self, other): - """ - Form the intersection of two Index objects. Sortedness of the result is - not guaranteed + """Form the intersection of two Index objects. Sortedness of the result + is not guaranteed. Parameters ---------- @@ -990,6 +989,7 @@ def intersection(self, other): Returns ------- intersection : Index + """ if not hasattr(other, '__iter__'): raise TypeError('Input must be iterable!') @@ -1023,8 +1023,7 @@ def intersection(self, other): return self.take(indexer) def diff(self, other): - """ - Compute sorted set difference of two Index objects + """Compute sorted set difference of two Index objects. Parameters ---------- @@ -1040,6 +1039,7 @@ def diff(self, other): >>> index - index2 >>> index.diff(index2) + """ if not hasattr(other, '__iter__'): @@ -1058,8 +1058,7 @@ def diff(self, other): return Index(theDiff, name=result_name) def sym_diff(self, other, result_name=None): - """ - Compute the sorted symmetric difference of two Index objects. + """Compute the sorted symmetric difference of two Index objects. Parameters ---------- @@ -1091,6 +1090,7 @@ def sym_diff(self, other, result_name=None): >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64') + """ if not hasattr(other, '__iter__'): raise TypeError('Input must be iterable!') @@ -1103,12 +1103,12 @@ def sym_diff(self, other, result_name=None): return Index(the_diff, name=result_name) def get_loc(self, key): - """ - Get integer location for requested label + """Get integer location for requested label. 
Returns ------- loc : int if unique index, possibly slice or mask if not + """ return self._engine.get_loc(_values_from_object(key)) @@ -1157,9 +1157,8 @@ def set_value(self, arr, key, value): _values_from_object(arr), _values_from_object(key), value) def get_level_values(self, level): - """ - Return vector of label values for requested level, equal to the length - of the index + """Return vector of label values for requested level, equal to the + length of the index. Parameters ---------- @@ -1168,17 +1167,17 @@ def get_level_values(self, level): Returns ------- values : ndarray + """ # checks that level number is actually just 1 self._get_level_number(level) return self def get_indexer(self, target, method=None, limit=None): - """ - Compute indexer and mask for new index given the current index. The + """Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. The mask determines whether labels are - found or not in the current index + found or not in the current index. Parameters ---------- @@ -1199,6 +1198,7 @@ def get_indexer(self, target, method=None, limit=None): Returns ------- indexer : ndarray + """ method = self._get_method(method) target = _ensure_index(target) @@ -1270,9 +1270,8 @@ def map(self, mapper): return self._arrmap(self.values, mapper) def isin(self, values): - """ - Compute boolean array of whether each index value is found in the - passed set of values + """Compute boolean array of whether each index value is found in the + passed set of values. 
Parameters ---------- @@ -1281,6 +1280,7 @@ def isin(self, values): Returns ------- is_contained : ndarray (boolean dtype) + """ value_set = set(values) return lib.ismember(self._array_values(), value_set) @@ -1300,14 +1300,14 @@ def _get_method(self, method): def reindex(self, target, method=None, level=None, limit=None, copy_if_needed=False): - """ - For Index, simply returns the new index and the results of + """For Index, simply returns the new index and the results of get_indexer. Provided here to enable an interface that is amenable for subclasses of Index whose internals are different (like MultiIndex) Returns ------- (new_index, indexer, mask) : tuple + """ target = _ensure_index(target) if level is not None: @@ -1339,8 +1339,7 @@ def reindex(self, target, method=None, level=None, limit=None, return target, indexer def join(self, other, how='left', level=None, return_indexers=False): - """ - Internal API method. Compute join_index and indexers to conform data + """Internal API method. Compute join_index and indexers to conform data structures to the new index. Parameters @@ -1353,6 +1352,7 @@ def join(self, other, how='left', level=None, return_indexers=False): Returns ------- join_index, (left_indexer, right_indexer) + """ self_is_mi = isinstance(self, MultiIndex) other_is_mi = isinstance(other, MultiIndex) @@ -1615,9 +1615,8 @@ def _wrap_joined_index(self, joined, other): return Index(joined, name=name) def slice_indexer(self, start=None, end=None, step=None): - """ - For an ordered Index, compute the slice indexer for input labels and - step + """For an ordered Index, compute the slice indexer for input labels and + step. 
Parameters ---------- @@ -1634,6 +1633,7 @@ def slice_indexer(self, start=None, end=None, step=None): Notes ----- This function assumes that the data is sorted, so use at your own peril + """ start_slice, end_slice = self.slice_locs(start, end) @@ -1650,8 +1650,7 @@ def slice_indexer(self, start=None, end=None, step=None): return Index(start_slice) & Index(end_slice) def slice_locs(self, start=None, end=None): - """ - For an ordered Index, compute the slice locations for input labels + """For an ordered Index, compute the slice locations for input labels. Parameters ---------- @@ -1667,6 +1666,7 @@ def slice_locs(self, start=None, end=None): Notes ----- This function assumes that the data is sorted, so use at your own peril + """ is_unique = self.is_unique @@ -1720,19 +1720,18 @@ def slice_locs(self, start=None, end=None): return start_slice, end_slice def delete(self, loc): - """ - Make new Index with passed location deleted + """Make new Index with passed location deleted. Returns ------- new_index : Index + """ arr = np.delete(self.values, loc) return Index(arr) def insert(self, loc, item): - """ - Make new Index inserting new item at location + """Make new Index inserting new item at location. Parameters ---------- @@ -1742,6 +1741,7 @@ def insert(self, loc, item): Returns ------- new_index : Index + """ index = np.asarray(self) # because numpy is fussy with tuples @@ -1750,8 +1750,7 @@ def insert(self, loc, item): return Index(new_index, name=self.name) def drop(self, labels): - """ - Make new Index with passed list of labels deleted + """Make new Index with passed list of labels deleted. Parameters ---------- @@ -1760,6 +1759,7 @@ def drop(self, labels): Returns ------- dropped : Index + """ labels = com._index_labels_to_array(labels) indexer = self.get_indexer(labels) @@ -1771,12 +1771,11 @@ def drop(self, labels): class Int64Index(Index): - """ - Immutable ndarray implementing an ordered, sliceable set. 
The basic object - storing axis labels for all pandas objects. Int64Index is a special case - of `Index` with purely integer labels. This is the default index type used - by the DataFrame and Series ctors when no explicit index is provided by the - user. + """Immutable ndarray implementing an ordered, sliceable set. The basic + object storing axis labels for all pandas objects. Int64Index is a special + case of `Index` with purely integer labels. This is the default index type + used by the DataFrame and Series ctors when no explicit index is provided + by the user. Parameters ---------- @@ -1790,6 +1789,7 @@ class Int64Index(Index): Notes ----- An Index instance can **only** contain hashable objects + """ _groupby = _algos.groupby_int64 @@ -1844,15 +1844,11 @@ def asi8(self): @property def is_all_dates(self): - """ - Checks that all the labels are datetime objects - """ + """Checks that all the labels are datetime objects.""" return False def equals(self, other): - """ - Determines if two Index objects contain the same elements. - """ + """Determines if two Index objects contain the same elements.""" if self.is_(other): return True @@ -1872,10 +1868,9 @@ def _wrap_joined_index(self, joined, other): class Float64Index(Index): - """ - Immutable ndarray implementing an ordered, sliceable set. The basic object - storing axis labels for all pandas objects. Float64Index is a special case - of `Index` with purely floating point labels. + """Immutable ndarray implementing an ordered, sliceable set. The basic + object storing axis labels for all pandas objects. Float64Index is a + special case of `Index` with purely floating point labels. 
Parameters ---------- @@ -1889,6 +1884,7 @@ class Float64Index(Index): Notes ----- An Index instance can **only** contain hashable objects + """ # when this is not longer object dtype this can be changed @@ -1941,8 +1937,8 @@ def _convert_scalar_indexer(self, key, typ=None): return key def _convert_slice_indexer(self, key, typ=None): - """ convert a slice indexer, by definition these are labels - unless we are iloc """ + """convert a slice indexer, by definition these are labels unless we + are iloc.""" if typ == 'iloc': return self._convert_slice_indexer_iloc(key) elif typ == 'getitem': @@ -1956,7 +1952,7 @@ def _convert_slice_indexer(self, key, typ=None): return self.slice_indexer(key.start, key.stop, key.step) def get_value(self, series, key): - """ we always want to get an index value, never a value """ + """we always want to get an index value, never a value.""" if not np.isscalar(key): raise InvalidIndexError @@ -1974,9 +1970,7 @@ def get_value(self, series, key): return Series(new_values, index=new_index, name=series.name) def equals(self, other): - """ - Determines if two Index objects contain the same elements. - """ + """Determines if two Index objects contain the same elements.""" if self is other: return True @@ -2056,7 +2050,7 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None, def _verify_integrity(self): """Raises ValueError if length of levels and labels don't match or any - label would exceed level bounds""" + label would exceed level bounds.""" # NOTE: Currently does not check, among other things, that cached # nlevels matches nor that sortorder matches actually sortorder. labels, levels = self.labels, self.levels @@ -2100,9 +2094,7 @@ def _set_levels(self, levels, copy=False, validate=True, self._verify_integrity() def set_levels(self, levels, inplace=False, verify_integrity=True): - """ - Set new levels on MultiIndex. Defaults to returning - new index. + """Set new levels on MultiIndex. Defaults to returning new index. 
Parameters ---------- @@ -2116,6 +2108,7 @@ def set_levels(self, levels, inplace=False, verify_integrity=True): Returns ------- new index (of same type and class...etc) + """ if not com.is_list_like(levels) or not com.is_list_like(levels[0]): raise TypeError("Levels must be list of lists-like") @@ -2152,9 +2145,7 @@ def _set_labels(self, labels, copy=False, validate=True, self._verify_integrity() def set_labels(self, labels, inplace=False, verify_integrity=True): - """ - Set new labels on MultiIndex. Defaults to returning - new index. + """Set new labels on MultiIndex. Defaults to returning new index. Parameters ---------- @@ -2168,6 +2159,7 @@ def set_labels(self, labels, inplace=False, verify_integrity=True): Returns ------- new index (of same type and class...etc) + """ if not com.is_list_like(labels) or not com.is_list_like(labels[0]): raise TypeError("Labels must be list of lists-like") @@ -2189,8 +2181,7 @@ def set_labels(self, labels, inplace=False, verify_integrity=True): def copy(self, names=None, dtype=None, levels=None, labels=None, deep=False): - """ - Make a copy of this object. Names, dtype, levels and labels can be + """Make a copy of this object. Names, dtype, levels and labels can be passed and will be set on new copy. Parameters @@ -2209,6 +2200,7 @@ def copy(self, names=None, dtype=None, levels=None, labels=None, In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. + """ new_index = np.ndarray.copy(self) if deep: @@ -2227,10 +2219,8 @@ def copy(self, names=None, dtype=None, levels=None, labels=None, return new_index def __array_finalize__(self, obj): - """ - Update custom MultiIndex attributes when a new array is created by - numpy, e.g. when calling ndarray.view() - """ + """Update custom MultiIndex attributes when a new array is created by + numpy, e.g. 
when calling ndarray.view()""" # overriden if a view self._reset_identity() if not isinstance(obj, type(self)): @@ -2272,11 +2262,11 @@ def __repr__(self): return res def __unicode__(self): - """ - Return a string representation for a particular Index + """Return a string representation for a particular Index. Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + """ rows = self.format(names=True) max_rows = get_option('display.max_rows') @@ -2291,7 +2281,11 @@ def __len__(self): return len(self.labels[0]) def _convert_slice_indexer(self, key, typ=None): - """ convert a slice indexer. disallow floats in the start/stop/step """ + """convert a slice indexer. + + disallow floats in the start/stop/step + + """ if typ == 'iloc': return self._convert_slice_indexer_iloc(key) @@ -2395,9 +2389,7 @@ def _has_complex_internals(self): @property def has_duplicates(self): - """ - Return True if there are no unique groups - """ + """Return True if there are no unique groups.""" # has duplicates shape = [len(lev) for lev in self.levels] group_index = np.zeros(len(self), dtype='i8') @@ -2470,9 +2462,8 @@ def _try_mi(k): raise InvalidIndexError(key) def get_level_values(self, level): - """ - Return vector of label values for requested level, equal to the length - of the index + """Return vector of label values for requested level, equal to the + length of the index. Parameters ---------- @@ -2481,6 +2472,7 @@ def get_level_values(self, level): Returns ------- values : ndarray + """ num = self._get_level_number(level) unique_vals = self.levels[num] # .values @@ -2548,9 +2540,8 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False, return result_levels def to_hierarchical(self, n_repeat, n_shuffle=1): - """ - Return a MultiIndex reshaped to conform to the - shapes given by n_repeat and n_shuffle. + """Return a MultiIndex reshaped to conform to the shapes given by + n_repeat and n_shuffle. 
Useful to replicate and rearrange a MultiIndex for combination with another Index with n_repeat items. @@ -2577,6 +2568,7 @@ def to_hierarchical(self, n_repeat, n_shuffle=1): MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) + """ levels = self.levels labels = [np.repeat(x, n_repeat) for x in self.labels] @@ -2590,15 +2582,11 @@ def is_all_dates(self): return False def is_lexsorted(self): - """ - Return True if the labels are lexicographically sorted - """ + """Return True if the labels are lexicographically sorted.""" return self.lexsort_depth == self.nlevels def is_lexsorted_for_tuple(self, tup): - """ - Return True if we are correctly lexsorted given the passed tuple - """ + """Return True if we are correctly lexsorted given the passed tuple.""" return len(tup) <= self.lexsort_depth @cache_readonly @@ -2618,8 +2606,7 @@ def lexsort_depth(self): @classmethod def from_arrays(cls, arrays, sortorder=None, names=None): - """ - Convert arrays to MultiIndex + """Convert arrays to MultiIndex. Parameters ---------- @@ -2644,6 +2631,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None): MultiIndex.from_tuples : Convert list of tuples to MultiIndex MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables + """ from pandas.core.categorical import Categorical @@ -2663,8 +2651,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None): @classmethod def from_tuples(cls, tuples, sortorder=None, names=None): - """ - Convert list of tuples to MultiIndex + """Convert list of tuples to MultiIndex. Parameters ---------- @@ -2689,6 +2676,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None): MultiIndex.from_arrays : Convert list of arrays to MultiIndex MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables + """ if len(tuples) == 0: # I think this is right? Not quite sure... 
@@ -2709,8 +2697,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None): @classmethod def from_product(cls, iterables, sortorder=None, names=None): - """ - Make a MultiIndex from the cartesian product of multiple iterables + """Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- @@ -2740,6 +2727,7 @@ def from_product(cls, iterables, sortorder=None, names=None): -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex MultiIndex.from_tuples : Convert list of tuples to MultiIndex + """ from pandas.tools.util import cartesian_product product = cartesian_product(iterables) @@ -2764,7 +2752,7 @@ def __contains__(self, key): return False def __reduce__(self): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" object_state = list(np.ndarray.__reduce__(self)) subclass_state = ([lev.view(np.ndarray) for lev in self.levels], [label.view(np.ndarray) for label in self.labels], @@ -2773,7 +2761,7 @@ def __reduce__(self): return tuple(object_state) def __setstate__(self, state): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" nd_state, own_state = state np.ndarray.__setstate__(self, nd_state) levels, labels, sortorder, names = own_state @@ -2814,17 +2802,14 @@ def __getitem__(self, key): return result def take(self, indexer, axis=None): - """ - Analogous to ndarray.take - """ + """Analogous to ndarray.take.""" indexer = com._ensure_platform_int(indexer) new_labels = [lab.take(indexer) for lab in self.labels] return MultiIndex(levels=self.levels, labels=new_labels, names=self.names, verify_integrity=False) def append(self, other): - """ - Append a collection of Index options together + """Append a collection of Index options together. 
Parameters ---------- @@ -2833,6 +2818,7 @@ def append(self, other): Returns ------- appended : Index + """ if not isinstance(other, (list, tuple)): other = [other] @@ -2850,8 +2836,7 @@ def argsort(self, *args, **kwargs): return self.values.argsort() def drop(self, labels, level=None): - """ - Make new MultiIndex with passed list of labels deleted + """Make new MultiIndex with passed list of labels deleted. Parameters ---------- @@ -2862,6 +2847,7 @@ def drop(self, labels, level=None): Returns ------- dropped : MultiIndex + """ if level is not None: return self._drop_from_level(labels, level) @@ -2899,8 +2885,7 @@ def _drop_from_level(self, labels, level): return self[mask] def droplevel(self, level=0): - """ - Return Index with requested level removed. If MultiIndex has only 2 + """Return Index with requested level removed. If MultiIndex has only 2 levels, the result will be of Index type not MultiIndex. Parameters @@ -2914,6 +2899,7 @@ def droplevel(self, level=0): Returns ------- index : Index or MultiIndex + """ levels = level if not isinstance(levels, (tuple, list)): @@ -2945,8 +2931,7 @@ def droplevel(self, level=0): names=new_names, verify_integrity=False) def swaplevel(self, i, j): - """ - Swap level i with level j. Do not change the ordering of anything + """Swap level i with level j. Do not change the ordering of anything. Parameters ---------- @@ -2956,6 +2941,7 @@ def swaplevel(self, i, j): Returns ------- swapped : MultiIndex + """ new_levels = list(self.levels) new_labels = list(self.labels) @@ -2972,11 +2958,12 @@ def swaplevel(self, i, j): names=new_names, verify_integrity=False) def reorder_levels(self, order): - """ - Rearrange levels using input order. May not drop or duplicate levels + """Rearrange levels using input order. May not drop or duplicate + levels. 
Parameters ---------- + """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: @@ -2994,8 +2981,7 @@ def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) def sortlevel(self, level=0, ascending=True): - """ - Sort MultiIndex at the requested level. The result will respect the + """Sort MultiIndex at the requested level. The result will respect the original ordering of the associated factor at that level. Parameters @@ -3008,6 +2994,7 @@ def sortlevel(self, level=0, ascending=True): Returns ------- sorted_index : MultiIndex + """ from pandas.core.groupby import _indexer_from_factorized @@ -3035,11 +3022,10 @@ def sortlevel(self, level=0, ascending=True): return new_index, indexer def get_indexer(self, target, method=None, limit=None): - """ - Compute indexer and mask for new index given the current index. The + """Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. The mask determines whether labels are - found or not in the current index + found or not in the current index. Parameters ---------- @@ -3061,6 +3047,7 @@ def get_indexer(self, target, method=None, limit=None): Returns ------- (indexer, mask) : (ndarray, ndarray) + """ method = self._get_method(method) @@ -3138,12 +3125,12 @@ def reindex(self, target, method=None, level=None, limit=None, @cache_readonly def _tuple_index(self): - """ - Convert MultiIndex to an Index of tuples + """Convert MultiIndex to an Index of tuples. Returns ------- index : Index + """ return Index(self.values) @@ -3216,8 +3203,7 @@ def _partial_tup_index(self, tup, side='left'): return start + section.searchsorted(idx, side=side) def get_loc(self, key): - """ - Get integer location slice for requested label or tuple + """Get integer location slice for requested label or tuple. 
Parameters ---------- @@ -3226,6 +3212,7 @@ def get_loc(self, key): Returns ------- loc : int or slice object + """ if isinstance(key, tuple): if len(key) == self.nlevels: @@ -3243,8 +3230,7 @@ def get_loc(self, key): return self._get_level_indexer(key, level=0) def get_loc_level(self, key, level=0, drop_level=True): - """ - Get integer location slice for requested label or tuple + """Get integer location slice for requested label or tuple. Parameters ---------- @@ -3254,6 +3240,7 @@ def get_loc_level(self, key, level=0, drop_level=True): Returns ------- loc : int or slice object + """ def _maybe_drop_levels(indexer, levels, drop_level): if not drop_level: @@ -3479,8 +3466,7 @@ def _convert_indexer(r): return reduce(np.logical_and,[ _convert_indexer(r) for r in ranges ]) def truncate(self, before=None, after=None): - """ - Slice index between two labels / tuples, return new MultiIndex + """Slice index between two labels / tuples, return new MultiIndex. Parameters ---------- @@ -3492,6 +3478,7 @@ def truncate(self, before=None, after=None): Returns ------- truncated : MultiIndex + """ if after and before and after < before: raise ValueError('after < before') @@ -3509,13 +3496,15 @@ def truncate(self, before=None, after=None): verify_integrity=False) def equals(self, other): - """ - Determines if two MultiIndex objects have the same labeling information + """Determines if two MultiIndex objects have the same labeling + information. 
+ (the levels themselves do not necessarily have to be the same) See also -------- equal_levels + """ if self.is_(other): return True @@ -3540,10 +3529,8 @@ def equals(self, other): return True def equal_levels(self, other): - """ - Return True if the levels of both MultiIndex objects are the same - - """ + """Return True if the levels of both MultiIndex objects are the + same.""" if self.nlevels != other.nlevels: return False @@ -3553,8 +3540,7 @@ def equal_levels(self, other): return True def union(self, other): - """ - Form the union of two MultiIndex objects, sorting if possible + """Form the union of two MultiIndex objects, sorting if possible. Parameters ---------- @@ -3563,6 +3549,7 @@ def union(self, other): Returns ------- Index + """ self._assert_can_do_setop(other) @@ -3576,8 +3563,8 @@ def union(self, other): names=result_names) def intersection(self, other): - """ - Form the intersection of two MultiIndex objects, sorting if possible + """Form the intersection of two MultiIndex objects, sorting if + possible. Parameters ---------- @@ -3586,6 +3573,7 @@ def intersection(self, other): Returns ------- Index + """ self._assert_can_do_setop(other) @@ -3606,12 +3594,12 @@ def intersection(self, other): names=result_names) def diff(self, other): - """ - Compute sorted set difference of two MultiIndex objects + """Compute sorted set difference of two MultiIndex objects. Returns ------- diff : MultiIndex + """ self._assert_can_do_setop(other) @@ -3652,8 +3640,7 @@ def astype(self, dtype): return self._shallow_copy() def insert(self, loc, item): - """ - Make new MultiIndex inserting new item at location + """Make new MultiIndex inserting new item at location. 
Parameters ---------- @@ -3664,6 +3651,7 @@ def insert(self, loc, item): Returns ------- new_index : Index + """ # Pad the key with empty strings if lower levels of the key # aren't specified: @@ -3692,12 +3680,12 @@ def insert(self, loc, item): names=self.names, verify_integrity=False) def delete(self, loc): - """ - Make new index with passed location deleted + """Make new index with passed location deleted. Returns ------- new_index : MultiIndex + """ new_labels = [np.delete(lab, loc) for lab in self.labels] return MultiIndex(levels=self.levels, labels=new_labels, @@ -3709,10 +3697,8 @@ def delete(self, loc): @property def _bounds(self): - """ - Return or compute and return slice points for level 0, assuming - sortedness - """ + """Return or compute and return slice points for level 0, assuming + sortedness.""" if self.__bounds is None: inds = np.arange(len(self.levels[0])) self.__bounds = self.labels[0].searchsorted(inds) @@ -3849,9 +3835,7 @@ def _union_indexes(indexes): def _trim_front(strings): - """ - Trims zeros and decimal points - """ + """Trims zeros and decimal points.""" trimmed = strings while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]): trimmed = [x[1:] for x in trimmed] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e0c5fa573ff69..4ff2702ba84b2 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -125,7 +125,7 @@ def _has_valid_type(self, k, axis): raise NotImplementedError() def _has_valid_tuple(self, key): - """ check the key for valid keys across my indexer """ + """check the key for valid keys across my indexer.""" for i, k in enumerate(key): if i >= self.obj.ndim: raise IndexingError('Too many indexers') @@ -168,8 +168,8 @@ def _has_valid_setitem_indexer(self, indexer): return True def _has_valid_positional_setitem_indexer(self, indexer): - """ validate that an positional indexer cannot enlarge its target - will raise if needed, does not modify the indexer externally """ + """validate that an 
positional indexer cannot enlarge its target will + raise if needed, does not modify the indexer externally.""" if isinstance(indexer, dict): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) @@ -391,7 +391,7 @@ def setter(item, v): self.obj[item] = s def can_do_equal_len(): - """ return True if we have an equal len settable """ + """return True if we have an equal len settable.""" if not len(labels) == 1: return False @@ -694,9 +694,8 @@ def _multi_take_opportunity(self, tup): return True def _multi_take(self, tup): - """ create the reindex map for our objects, raise the _exception if we - can't create the indexer - """ + """create the reindex map for our objects, raise the _exception if we + can't create the indexer.""" try: o = self.obj d = dict([ @@ -962,9 +961,8 @@ def _reindex(keys, level=None): return result def _convert_to_indexer(self, obj, axis=0, is_setter=False): - """ - Convert indexing key into something we can use to do actual fancy - indexing on an ndarray + """Convert indexing key into something we can use to do actual fancy + indexing on an ndarray. Examples ix[:5] -> slice(0, 5) @@ -975,6 +973,7 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): "In the face of ambiguity, refuse the temptation to guess." raise AmbiguousIndexError with integer labels? 
- No, prefer label-based indexing + """ labels = self.obj._get_axis(axis) @@ -1097,7 +1096,7 @@ def _get_slice_axis(self, slice_obj, axis=0): class _IXIndexer(_NDFrameIndexer): - """ A primarily location based indexer, with integer fallback """ + """A primarily location based indexer, with integer fallback.""" def _has_valid_type(self, key, axis): ax = self.obj._get_axis(axis) @@ -1140,7 +1139,7 @@ def _getbool_axis(self, key, axis=0): raise self._exception(detail) def _get_slice_axis(self, slice_obj, axis=0): - """ this is pretty simple as we just have to deal with labels """ + """this is pretty simple as we just have to deal with labels.""" obj = self.obj if not _need_slice(slice_obj): return obj @@ -1157,7 +1156,7 @@ def _get_slice_axis(self, slice_obj, axis=0): class _LocIndexer(_LocationIndexer): - """ purely label based location based indexing """ + """purely label based location based indexing.""" _valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! 
Can be slices of integers if the " "index is integers), listlike of labels, boolean") @@ -1263,7 +1262,7 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False): class _iLocIndexer(_LocationIndexer): - """ purely integer based location based indexing """ + """purely integer based location based indexing.""" _valid_types = ("integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array") _exception = IndexError @@ -1365,7 +1364,7 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False): return self._get_loc(key, axis=axis) def _convert_to_indexer(self, obj, axis=0, is_setter=False): - """ much simpler as we only have to deal with our valid types """ + """much simpler as we only have to deal with our valid types.""" if self._has_valid_type(obj, axis): return obj @@ -1375,7 +1374,7 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): class _ScalarAccessIndexer(_NDFrameIndexer): - """ access scalars quickly """ + """access scalars quickly.""" def _convert_key(self, key): return list(key) @@ -1405,20 +1404,20 @@ def __setitem__(self, key, value): class _AtIndexer(_ScalarAccessIndexer): - """ label based scalar accessor """ + """label based scalar accessor.""" _takeable = False class _iAtIndexer(_ScalarAccessIndexer): - """ integer based scalar accessor """ + """integer based scalar accessor.""" _takeable = True def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) def _convert_key(self, key): - """ require integer args (and convert to label arguments) """ + """require integer args (and convert to label arguments)""" for a, i in zip(self.obj.axes, key): if not com.is_integer(i): raise ValueError("iAt based indexing can only have integer " @@ -1458,8 +1457,8 @@ def _length_of_indexer(indexer, target=None): def _convert_to_index_sliceable(obj, key): - """if we are index sliceable, then return my slicer, otherwise return None - """ + """if we are index 
sliceable, then return my slicer, otherwise return + None.""" idx = obj.index if isinstance(key, slice): return idx._convert_slice_indexer(key, typ='getitem') @@ -1517,8 +1516,8 @@ def _check_bool_indexer(ax, key): def _convert_missing_indexer(indexer): - """ reverse convert a missing indexer, which is a dict - return the scalar indexer and a boolean indicating if we converted """ + """reverse convert a missing indexer, which is a dict return the scalar + indexer and a boolean indicating if we converted.""" if isinstance(indexer, dict): @@ -1533,7 +1532,7 @@ def _convert_missing_indexer(indexer): def _convert_from_missing_indexer_tuple(indexer, axes): - """ create a filtered indexer that doesn't have any missing indexers """ + """create a filtered indexer that doesn't have any missing indexers.""" def get_indexer(_i, _idx): return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else _idx) @@ -1541,8 +1540,8 @@ def get_indexer(_i, _idx): def _safe_append_to_index(index, key): - """ a safe append to an index, if incorrect type, then catch and recreate - """ + """a safe append to an index, if incorrect type, then catch and + recreate.""" try: return index.insert(len(index), key) except: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 25afaeaf62c18..6d6a8dc3362bb 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -117,11 +117,11 @@ def ref_locs(self): return self._ref_locs def take_ref_locs(self, indexer): - """ - need to preserve the ref_locs and just shift them - return None if ref_locs is None + """need to preserve the ref_locs and just shift them return None if + ref_locs is None. 
see GH6509 + """ ref_locs = self._ref_locs @@ -142,13 +142,12 @@ def take_ref_locs(self, indexer): return ref_locs def reset_ref_locs(self): - """ reset the block ref_locs """ + """reset the block ref_locs.""" self._ref_locs = np.empty(len(self.items), dtype='int64') def set_ref_locs(self, placement): - """ explicity set the ref_locs indexer, only necessary for duplicate - indicies - """ + """explicity set the ref_locs indexer, only necessary for duplicate + indicies.""" if placement is None: self._ref_locs = None else: @@ -202,7 +201,7 @@ def __setstate__(self, state): self.ndim = values.ndim def _slice(self, slicer): - """ return a slice of my values """ + """return a slice of my values.""" return self.values[slicer] @property @@ -222,7 +221,8 @@ def ftype(self): return "%s:%s" % (self.dtype, self._ftype) def as_block(self, result): - """ if we are not a block, then wrap as a block, must have compatible shape """ + """if we are not a block, then wrap as a block, must have compatible + shape.""" if not isinstance(result, Block): result = make_block(result, self.items, @@ -256,8 +256,7 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, def reindex_items_from(self, new_ref_items, indexer=None, method=None, fill_value=None, limit=None, copy=True): - """ - Reindex to only those items contained in the input set of items + """Reindex to only those items contained in the input set of items. E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'], then the resulting items will be ['b'] @@ -265,6 +264,7 @@ def reindex_items_from(self, new_ref_items, indexer=None, method=None, Returns ------- reindexed : Block + """ if indexer is None: new_ref_items, indexer = self.items.reindex(new_ref_items, @@ -326,10 +326,10 @@ def set(self, item, value, check=False): self.values[loc] = value def delete(self, item): - """ - Returns - ------- + """Returns. 
+ y : Block (new object) + """ loc = self.items.get_loc(item) new_items = self.items.delete(loc) @@ -338,14 +338,14 @@ def delete(self, item): ndim=self.ndim, klass=self.__class__, fastpath=True) def split_block_at(self, item): - """ - Split block into zero or more blocks around columns with given label, - for "deleting" a column without having to copy data by returning views - on the original array. + """Split block into zero or more blocks around columns with given + label, for "deleting" a column without having to copy data by returning + views on the original array. Returns ------- generator of Block + """ loc = self.items.get_loc(item) @@ -364,7 +364,8 @@ def split_block_at(self, item): fastpath=True) def apply(self, func, **kwargs): - """ apply the function to my values; return a block if we are not one """ + """apply the function to my values; return a block if we are not + one.""" return self.as_block(func(self.values)) def fillna(self, value, limit=None, inplace=False, downcast=None): @@ -400,7 +401,7 @@ def _maybe_downcast(self, blocks, downcast=None): return result_blocks def downcast(self, dtypes=None): - """ try to downcast each item to the dict of dtypes if present """ + """try to downcast each item to the dict of dtypes if present.""" # turn it off completely if dtypes is False: @@ -493,7 +494,7 @@ def convert(self, copy=True, **kwargs): return [self.copy()] if copy else [self] def prepare_for_merge(self, **kwargs): - """ a regular block is ok to merge as is """ + """a regular block is ok to merge as is.""" return self def post_merge(self, items, **kwargs): @@ -556,22 +557,22 @@ def _try_cast_result(self, result, dtype=None): return _possibly_downcast_to_dtype(result, dtype) def _try_operate(self, values): - """ return a version to operate on as the input """ + """return a version to operate on as the input.""" return values def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ + """provide coercion to our input 
arguments.""" return values, other def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ + """reverse of try_coerce_args.""" return result def _try_fill(self, value): return value def to_native_types(self, slicer=None, na_rep='', **kwargs): - """ convert to our native types format, slicing if desired """ + """convert to our native types format, slicing if desired.""" values = self.values if slicer is not None: @@ -594,10 +595,13 @@ def copy(self, deep=True, ref_items=None): def replace(self, to_replace, value, inplace=False, filter=None, regex=False): - """ replace the to_replace value with value, possible to create new - blocks here this is just a call to putmask. regex is not used here. - It is used in ObjectBlocks. It is here for API - compatibility.""" + """replace the to_replace value with value, possible to create new + blocks here this is just a call to putmask. + + regex is not used here. It is used in ObjectBlocks. It is here + for API compatibility. + + """ mask = com.mask_missing(self.values, to_replace) if filter is not None: for i, item in enumerate(self.items): @@ -611,11 +615,12 @@ def replace(self, to_replace, value, inplace=False, filter=None, return self.putmask(mask, value, inplace=inplace) def setitem(self, indexer, value): - """ set the value inplace; return a new block (of a possibly different + """set the value inplace; return a new block (of a possibly different dtype) indexer is a direct slice/positional indexer; value must be a compatible shape + """ # coerce args @@ -684,8 +689,8 @@ def setitem(self, indexer, value): return [self] def putmask(self, mask, new, align=True, inplace=False): - """ putmask the data to the block; it is possible that we may create a - new dtype of block + """putmask the data to the block; it is possible that we may create a + new dtype of block. 
return the resulting block(s) @@ -699,6 +704,7 @@ def putmask(self, mask, new, align=True, inplace=False): Returns ------- a new block(s), the result of the putmask + """ new_values = self.values if inplace else self.values.copy() @@ -741,7 +747,7 @@ def putmask(self, mask, new, align=True, inplace=False): new_blocks = [] def create_block(v, m, n, item, reshape=True): - """ return a new block, try to preserve dtype if possible """ + """return a new block, try to preserve dtype if possible.""" # n should be the length of the mask or a scalar here if not is_list_like(n): @@ -870,7 +876,7 @@ def check_int_bool(self, inplace): def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, downcast=None): - """ fillna but using the interpolate machinery """ + """fillna but using the interpolate machinery.""" # if we are coercing, then don't force the conversion # if the block can't hold the type @@ -900,7 +906,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, def _interpolate(self, method=None, index=None, values=None, fill_value=None, axis=0, limit=None, inplace=False, downcast=None, **kwargs): - """ interpolate using scipy wrappers """ + """interpolate using scipy wrappers.""" data = self.values if inplace else self.values.copy() @@ -957,13 +963,13 @@ def get_merge_length(self): return len(self.values) def diff(self, n): - """ return block for the diff of the values """ + """return block for the diff of the values.""" new_values = com.diff(self.values, n, axis=1) return [make_block(new_values, self.items, self.ref_items, ndim=self.ndim, fastpath=True)] def shift(self, periods, axis=0): - """ shift the block by periods, possibly upcast """ + """shift the block by periods, possibly upcast.""" # convert integer to float if necessary. 
need to do a lot more than # that, handle boolean etc also new_values, fill_value = com._maybe_upcast(self.values) @@ -988,8 +994,7 @@ def shift(self, periods, axis=0): ndim=self.ndim, fastpath=True)] def eval(self, func, other, raise_on_error=True, try_cast=False): - """ - evaluate the block; return result block from the result + """evaluate the block; return result block from the result. Parameters ---------- @@ -1001,6 +1006,7 @@ def eval(self, func, other, raise_on_error=True, try_cast=False): Returns ------- a new block, the result of the func + """ values = self.values @@ -1084,8 +1090,7 @@ def handle_error(): def where(self, other, cond, align=True, raise_on_error=True, try_cast=False): - """ - evaluate the block; return result block(s) from the result + """evaluate the block; return result block(s) from the result. Parameters ---------- @@ -1098,6 +1103,7 @@ def where(self, other, cond, align=True, raise_on_error=True, Returns ------- a new block(s), the result of the func + """ values = self.values @@ -1234,7 +1240,7 @@ def _try_cast(self, element): def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs): - """ convert to our native types format, slicing if desired """ + """convert to our native types format, slicing if desired.""" values = self.values if slicer is not None: @@ -1305,7 +1311,7 @@ def fill_value(self): return tslib.iNaT def _try_fill(self, value): - """ if we are a NaT, return the actual fill value """ + """if we are a NaT, return the actual fill value.""" if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all(): value = tslib.iNaT elif isinstance(value, np.timedelta64): @@ -1319,10 +1325,9 @@ def _try_fill(self, value): return value def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments - we are going to compare vs i8, so coerce to floats - repring NaT with np.nan so nans propagate - values is always ndarray like, other may not be """ + """provide coercion to our input 
arguments we are going to compare vs + i8, so coerce to floats repring NaT with np.nan so nans propagate + values is always ndarray like, other may not be.""" def masker(v): mask = isnull(v) v = v.view('i8').astype('float64') @@ -1343,11 +1348,11 @@ def masker(v): return values, other def _try_operate(self, values): - """ return a version to operate on """ + """return a version to operate on.""" return values.view('i8') def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ + """reverse of try_coerce_args / try_operate.""" if isinstance(result, np.ndarray): mask = isnull(result) if result.dtype.kind in ['i', 'f', 'O']: @@ -1361,7 +1366,7 @@ def should_store(self, value): return issubclass(value.dtype.type, np.timedelta64) def to_native_types(self, slicer=None, na_rep=None, **kwargs): - """ convert to our native types format, slicing if desired """ + """convert to our native types format, slicing if desired.""" values = self.values if slicer is not None: @@ -1422,9 +1427,8 @@ def __init__(self, values, items, ref_items, ndim=2, fastpath=False, @property def is_bool(self): - """ we can be a bool if we have only bool values but are of type - object - """ + """we can be a bool if we have only bool values but are of type + object.""" return lib.is_bool_array(self.values.ravel()) def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=True, @@ -1653,13 +1657,13 @@ def _try_cast(self, element): return element def _try_operate(self, values): - """ return a version to operate on """ + """return a version to operate on.""" return values.view('i8') def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments - we are going to compare vs i8, so coerce to integer - values is always ndarra like, other may not be """ + """provide coercion to our input arguments we are going to compare vs + i8, so coerce to integer values is always ndarra like, other may not + be.""" values = values.view('i8') if 
_is_null_datelike_scalar(other): other = tslib.iNaT @@ -1671,7 +1675,7 @@ def _try_coerce_args(self, values, other): return values, other def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ + """reverse of try_coerce_args.""" if isinstance(result, np.ndarray): if result.dtype == 'i8': result = tslib.array_to_datetime( @@ -1687,7 +1691,7 @@ def fill_value(self): return tslib.iNaT def _try_fill(self, value): - """ if we are a NaT, return the actual fill value """ + """if we are a NaT, return the actual fill value.""" if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all(): value = tslib.iNaT return value @@ -1710,7 +1714,7 @@ def fillna(self, value, limit=None, def to_native_types(self, slicer=None, na_rep=None, date_format=None, **kwargs): - """ convert to our native types format, slicing if desired """ + """convert to our native types format, slicing if desired.""" values = self.values if slicer is not None: @@ -1737,9 +1741,7 @@ def should_store(self, value): return issubclass(value.dtype.type, np.datetime64) def astype(self, dtype, copy=False, raise_on_error=True): - """ - handle convert to object as a special case - """ + """handle convert to object as a special case.""" klass = None if np.dtype(dtype).type == np.object_: klass = ObjectBlock @@ -1771,7 +1773,7 @@ def get_values(self, dtype=None): class SparseBlock(Block): - """ implement as a list of sparse arrays of the same dtype """ + """implement as a list of sparse arrays of the same dtype.""" __slots__ = ['items', 'ref_items', '_ref_locs', 'ndim', 'values'] is_sparse = True is_numeric = True @@ -1853,7 +1855,7 @@ def should_store(self, value): return isinstance(value, SparseArray) def prepare_for_merge(self, **kwargs): - """ create a dense block """ + """create a dense block.""" return make_block(self.get_values(), self.items, self.ref_items) def post_merge(self, items, **kwargs): @@ -1870,11 +1872,11 @@ def get(self, item): return self.values def _slice(self, slicer): 
- """ return a slice of my values (but densify first) """ + """return a slice of my values (but densify first)""" return self.get_values()[slicer] def get_values(self, dtype=None): - """ need to to_dense myself (and always return a ndim sized object) """ + """need to to_dense myself (and always return a ndim sized object)""" values = self.values.to_dense() if values.ndim == self.ndim - 1: values = values.reshape((1,) + values.shape) @@ -1886,7 +1888,7 @@ def get_merge_length(self): def make_block(self, values, items=None, ref_items=None, sparse_index=None, kind=None, dtype=None, fill_value=None, copy=False, fastpath=True): - """ return a new block """ + """return a new block.""" if dtype is None: dtype = self.dtype if fill_value is None: @@ -1919,7 +1921,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): def shift(self, periods, axis=0): - """ shift the block by periods """ + """shift the block by periods.""" N = len(self.values.T) indexer = np.zeros(N, dtype=int) if periods > 0: @@ -1937,8 +1939,7 @@ def shift(self, periods, axis=0): return [self.make_block(new_values)] def take(self, indexer, ref_items, new_axis, axis=1): - """ going to take our items - along the long dimension""" + """going to take our items along the long dimension.""" if axis < 1: raise AssertionError('axis must be at least 1, got %d' % axis) @@ -1960,8 +1961,7 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, def reindex_items_from(self, new_ref_items, indexer=None, method=None, fill_value=None, limit=None, copy=True): - """ - Reindex to only those items contained in the input set of items + """Reindex to only those items contained in the input set of items. E.g. 
if you have ['a', 'b'], and the input items is ['b', 'c', 'd'], then the resulting items will be ['b'] @@ -1969,6 +1969,7 @@ def reindex_items_from(self, new_ref_items, indexer=None, method=None, Returns ------- reindexed : Block + """ # 1-d always @@ -2004,8 +2005,8 @@ def reindex_items_from(self, new_ref_items, indexer=None, method=None, ref_items=new_ref_items, copy=copy) def sparse_reindex(self, new_index): - """ sparse reindex and return a new block - current reindex only works for float64 dtype! """ + """sparse reindex and return a new block current reindex only works for + float64 dtype!""" values = self.values values = values.sp_index.to_int_index().reindex( values.sp_values.astype('float64'), values.fill_value, new_index) @@ -2076,8 +2077,7 @@ def make_block(values, items, ref_items, klass=None, ndim=None, dtype=None, class BlockManager(PandasObject): - """ - Core internal data structure to implement DataFrame + """Core internal data structure to implement DataFrame. Manage a bunch of labeled 2D mixed-type ndarrays. 
Essentially it's a lightweight blocked set of labeled data to be manipulated by the DataFrame @@ -2090,6 +2090,7 @@ class BlockManager(PandasObject): Notes ----- This is *not* a public API class + """ __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', '_is_consolidated', '_has_sparse', '_ref_locs', '_items_map'] @@ -2116,7 +2117,7 @@ def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): self._set_ref_locs(do_refs=True) def make_empty(self, axes=None): - """ return an empty BlockManager with the items axis of len 0 """ + """return an empty BlockManager with the items axis of len 0.""" if axes is None: axes = [_ensure_index([])] + [ _ensure_index(a) for a in self.axes[1:] @@ -2177,9 +2178,8 @@ def set_axis(self, axis, value, maybe_rename=True, check_axis=True): self._set_ref_locs(labels=value, do_refs=True) def _reset_ref_locs(self): - """ take the current _ref_locs and reset ref_locs on the blocks - to correctly map, ignoring Nones; - reset both _items_map and _ref_locs """ + """take the current _ref_locs and reset ref_locs on the blocks to + correctly map, ignoring Nones; reset both _items_map and _ref_locs.""" # let's reset the ref_locs in individual blocks if self.items.is_unique: @@ -2195,8 +2195,7 @@ def _reset_ref_locs(self): def _rebuild_ref_locs(self): """Take _ref_locs and set the individual block ref_locs, skipping Nones - no effect on a unique index - """ + no effect on a unique index.""" if getattr(self, '_ref_locs', None) is not None: item_count = 0 for v in self._ref_locs: @@ -2320,7 +2319,7 @@ def _get_items(self): items = property(fget=_get_items) def _get_counts(self, f): - """ return a dict of the counts of the function in BlockManager """ + """return a dict of the counts of the function in BlockManager.""" self._consolidate_inplace() counts = dict() for b in self.blocks: @@ -2329,7 +2328,7 @@ def _get_counts(self, f): return counts def _get_types(self, f): - """ return a list of the f per item """ + 
"""return a list of the f per item.""" self._consolidate_inplace() # unique @@ -2421,8 +2420,7 @@ def _verify_integrity(self): tot_items)) def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs): - """ - iterate over the blocks, collect and create a new block manager + """iterate over the blocks, collect and create a new block manager. Parameters ---------- @@ -2498,7 +2496,7 @@ def replace(self, **kwargs): return self.apply('replace', **kwargs) def replace_list(self, src_list, dest_list, inplace=False, regex=False): - """ do a list replace """ + """do a list replace.""" # figure out our mask a-priori to avoid repeated replacements values = self.as_matrix() @@ -2551,7 +2549,7 @@ def prepare_for_merge(self, **kwargs): return self def post_merge(self, objs, **kwargs): - """ try to sparsify items that were previously sparse """ + """try to sparsify items that were previously sparse.""" is_sparse = defaultdict(list) for o in objs: for blk in o._data.blocks: @@ -2567,9 +2565,7 @@ def post_merge(self, objs, **kwargs): return self def is_consolidated(self): - """ - Return True if more than one block with the same dtype - """ + """Return True if more than one block with the same dtype.""" if not self._known_consolidated: self._consolidate_check() return self._is_consolidated @@ -2673,11 +2669,11 @@ def get_numeric_data(self, **kwargs): return self.get_data(**kwargs) def get_data(self, copy=False, columns=None, **kwargs): - """ - Parameters - ---------- + """Parameters. 
+ copy : boolean, default False - Whether to copy the blocks + Whether to copy the blocks + """ blocks = self.get_block_map( typ='list', copy=copy, columns=columns, **kwargs) @@ -2687,7 +2683,7 @@ def get_data(self, copy=False, columns=None, **kwargs): return self.combine(blocks, copy=copy) def combine(self, blocks, copy=True): - """ return a new manager with the blocks """ + """return a new manager with the blocks.""" indexer = np.sort(np.concatenate([b.ref_locs for b in blocks])) new_items = self.items.take(indexer) @@ -2766,8 +2762,7 @@ def nblocks(self): return len(self.blocks) def copy(self, deep=True): - """ - Make deep or shallow copy of BlockManager + """Make deep or shallow copy of BlockManager. Parameters ---------- @@ -2777,6 +2772,7 @@ def copy(self, deep=True): Returns ------- copy : BlockManager + """ if deep: new_axes = [ax.view() for ax in self.axes] @@ -2804,10 +2800,8 @@ def as_matrix(self, items=None): return mat def _interleave(self, items): - """ - Return ndarray from blocks with specified item order - Items must be contained in the blocks - """ + """Return ndarray from blocks with specified item order Items must be + contained in the blocks.""" dtype = _interleaved_dtype(self.blocks) items = _ensure_index(items) @@ -2887,11 +2881,11 @@ def xs(self, key, axis=1, copy=True, takeable=False): return self.__class__(new_blocks, new_axes) def fast_xs(self, loc, copy=False): - """ - get a cross sectional for a given location in the - items ; handle dups + """get a cross sectional for a given location in the items ; handle + dups. return the result and a flag if a copy was actually made + """ if len(self.blocks) == 1: result = self.blocks[0].values[:, loc] @@ -2920,12 +2914,12 @@ def fast_xs(self, loc, copy=False): return result, True def consolidate(self): - """ - Join together blocks having same dtype + """Join together blocks having same dtype. 
Returns ------- y : BlockManager + """ if self.is_consolidated(): return self @@ -3013,9 +3007,7 @@ def get_for_nan_indexer(self, indexer): return b.iget(loc) def get_scalar(self, tup): - """ - Retrieve single item - """ + """Retrieve single item.""" item = tup[0] _, blk = self._find_block(item) @@ -3146,13 +3138,12 @@ def set_items_norename(self, value): self._shape = None def set_items_clear(self, value): - """ clear the ref_locs on all blocks """ + """clear the ref_locs on all blocks.""" self.set_axis(0, value, maybe_rename='clear', check_axis=False) def _delete_from_all_blocks(self, loc, item): - """ delete from the items loc the item - the item could be in multiple blocks which could - change each iteration (as we split blocks) """ + """delete from the items loc the item the item could be in multiple + blocks which could change each iteration (as we split blocks)""" # possibily convert to an indexer loc = _possibly_convert_to_indexer(loc) @@ -3168,12 +3159,12 @@ def _delete_from_all_blocks(self, loc, item): self._delete_from_block(i, item) def _delete_from_block(self, i, item): - """ - Delete and maybe remove the whole block + """Delete and maybe remove the whole block. 
+ + Remap the split blocks to there old ranges, so after this + function, _ref_locs and _items_map (if used) are correct for the + items, None fills holes in _ref_locs - Remap the split blocks to there old ranges, - so after this function, _ref_locs and _items_map (if used) - are correct for the items, None fills holes in _ref_locs """ block = self.blocks.pop(i) ref_locs = self._set_ref_locs() @@ -3409,9 +3400,7 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value): def reindex_items(self, new_items, indexer=None, copy=True, fill_value=None): - """ - - """ + """""" new_items = _ensure_index(new_items) data = self if not data.is_consolidated(): @@ -3553,9 +3542,7 @@ def rrenamer(x): return this, other def _is_indexed_like(self, other): - """ - Check all axes except items - """ + """Check all axes except items.""" if self.ndim != other.ndim: raise AssertionError(('Number of dimensions must agree ' 'got %d and %d') % (self.ndim, other.ndim)) @@ -3565,7 +3552,7 @@ def _is_indexed_like(self, other): return True def rename(self, mapper, axis, copy=False): - """ generic rename """ + """generic rename.""" if axis == 0: return self.rename_items(mapper, copy=copy) @@ -3654,7 +3641,7 @@ def equals(self, other): class SingleBlockManager(BlockManager): - """ manage a single block with """ + """manage a single block with.""" ndim = 1 _is_consolidated = True _known_consolidated = True @@ -3724,10 +3711,10 @@ def shape(self): return self._shape def apply(self, f, axes=None, do_integrity_check=False, **kwargs): - """ - fast path for SingleBlock Manager + """fast path for SingleBlock Manager. 
ssee also BlockManager.apply + """ applied = getattr(self._block, f)(**kwargs) bm = self.__class__(applied, axes or self.axes, @@ -3788,7 +3775,7 @@ def set_axis(self, axis, value, maybe_rename=True, check_axis=True): self._block.set_ref_items(self.items, maybe_rename=maybe_rename) def set_ref_items(self, ref_items, maybe_rename=True): - """ we can optimize and our ref_locs are always equal to ref_items """ + """we can optimize and our ref_locs are always equal to ref_items.""" if maybe_rename: self.items = ref_items self.ref_items = ref_items @@ -3798,7 +3785,7 @@ def index(self): return self.axes[0] def convert(self, **kwargs): - """ convert the whole block as one """ + """convert the whole block as one.""" kwargs['by_item'] = False return self.apply('convert', **kwargs) @@ -3839,7 +3826,7 @@ def fast_xs(self, loc, copy=False): return result, False def construction_error(tot_items, block_shape, axes, e=None): - """ raise a helpful message about our construction """ + """raise a helpful message about our construction.""" passed = tuple(map(int, [tot_items] + list(block_shape))) implied = tuple(map(int, [len(ax) for ax in axes])) if passed == implied and e is not None: @@ -3877,7 +3864,7 @@ def create_block_manager_from_arrays(arrays, names, axes): def maybe_create_block_in_items_map(im, block): - """ create/return the block in an items_map """ + """create/return the block in an items_map.""" try: return im[block] except: @@ -3983,9 +3970,8 @@ def form_blocks(arrays, names, axes): def _simple_blockify(tuples, ref_items, dtype, is_unique=True): - """ return a single array of a block that has a single dtype; if dtype is - not None, coerce to this dtype - """ + """return a single array of a block that has a single dtype; if dtype is + not None, coerce to this dtype.""" block_items, values, placement = _stack_arrays(tuples, ref_items, dtype) # CHECK DTYPE? 
@@ -3999,7 +3985,7 @@ def _simple_blockify(tuples, ref_items, dtype, is_unique=True): def _multi_blockify(tuples, ref_items, dtype=None, is_unique=True): - """ return an array of blocks that potentially have different dtypes """ + """return an array of blocks that potentially have different dtypes.""" # group by dtype grouper = itertools.groupby(tuples, lambda x: x[2].dtype) @@ -4018,9 +4004,8 @@ def _multi_blockify(tuples, ref_items, dtype=None, is_unique=True): def _sparse_blockify(tuples, ref_items, dtype=None): - """ return an array of blocks that potentially have different dtypes (and - are sparse) - """ + """return an array of blocks that potentially have different dtypes (and + are sparse)""" new_blocks = [] for i, names, array in tuples: @@ -4099,7 +4084,7 @@ def _interleaved_dtype(blocks): counts[type(x)].append(x) def _lcd_dtype(l): - """ find the lowest dtype that can accomodate the given types """ + """find the lowest dtype that can accomodate the given types.""" m = l[0].dtype for x in l[1:]: if x.dtype.itemsize > m.itemsize: @@ -4210,7 +4195,7 @@ def _merge_blocks(blocks, items, dtype=None, _can_consolidate=True): def _block_shape(values, ndim=1, shape=None): - """ guarantee the shape of the values to be at least 1 d """ + """guarantee the shape of the values to be at least 1 d.""" if values.ndim <= ndim: if shape is None: shape = values.shape diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a47c7f82d9199..03ad4e3d111e0 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -109,7 +109,7 @@ def _has_infs(result): def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): - """ return the correct fill value for the dtype of the values """ + """return the correct fill value for the dtype of the values.""" if fill_value is not None: return fill_value if _na_ok_dtype(dtype): @@ -189,7 +189,7 @@ def _view_if_needed(values): def _wrap_results(result, dtype): - """ wrap our results if needed """ + """wrap our results if 
needed.""" if issubclass(dtype.type, np.datetime64): if not isinstance(result, np.ndarray): diff --git a/pandas/core/ops.py b/pandas/core/ops.py index b8e92fb25cec5..e577511c12c4d 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1,7 +1,7 @@ -""" -Arithmetic operations for PandasObjects +"""Arithmetic operations for PandasObjects. This is not a public API. + """ # necessary to enforce truediv in Python 2.X from __future__ import division @@ -136,8 +136,7 @@ def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None, comp_method=None, bool_method=None, use_numexpr=True, force=False, select=None, exclude=None): - """ - Adds the full suite of special arithmetic methods (``__add__``, + """Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. Parameters @@ -158,6 +157,7 @@ def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None, if passed, only sets functions with names in select exclude : iterable of strings (optional) if passed, will not set functions with names in exclude + """ radd_func = radd_func or operator.add # in frame, special methods have default_axis = None, comp methods use @@ -186,9 +186,8 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, flex_comp_method=None, flex_bool_method=None, use_numexpr=True, force=False, select=None, exclude=None): - """ - Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) - to the class. + """Adds the full suite of flex arithmetic methods (``pow``, ``mul``, + ``add``) to the class. 
Parameters ---------- @@ -209,6 +208,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, if passed, only sets functions with names in select exclude : iterable of strings (optional) if passed, will not set functions with names in exclude + """ radd_func = radd_func or (lambda x, y: operator.add(y, x)) # in frame, default axis is 'columns', doesn't matter for series and panel @@ -231,10 +231,11 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, class _TimeOp(object): - """ - Wrapper around Series datetime/time/timedelta arithmetic operations. - Generally, you should use classmethod ``maybe_convert_for_time_op`` as an - entry point. + """Wrapper around Series datetime/time/timedelta arithmetic operations. + + Generally, you should use classmethod ``maybe_convert_for_time_op`` + as an entry point. + """ fill_value = tslib.iNaT wrap_results = staticmethod(lambda x: x) @@ -307,7 +308,7 @@ def _validate(self): 'or a timedelta') def _convert_to_array(self, values, name=None, other=None): - """converts values to ndarray""" + """converts values to ndarray.""" from pandas.tseries.timedeltas import _possibly_cast_to_timedelta coerce = 'compat' if pd._np_version_under1p7 else True @@ -411,13 +412,15 @@ def f(x): @classmethod def maybe_convert_for_time_op(cls, left, right, name): - """ - if ``left`` and ``right`` are appropriate for datetime arithmetic with - operation ``name``, processes them and returns a ``_TimeOp`` object - that stores all the required values. Otherwise, it will generate - either a ``NotImplementedError`` or ``None``, indicating that the - operation is unsupported for datetimes (e.g., an unsupported r_op) or - that the data is not the right type for time ops. + """if ``left`` and ``right`` are appropriate for datetime arithmetic + with operation ``name``, processes them and returns a ``_TimeOp`` + object that stores all the required values. 
+ + Otherwise, it will generate either a ``NotImplementedError`` or + ``None``, indicating that the operation is unsupported for + datetimes (e.g., an unsupported r_op) or that the data is not + the right type for time ops. + """ # decide if we can do it is_timedelta_lhs = com.is_timedelta64_dtype(left) @@ -433,10 +436,8 @@ def maybe_convert_for_time_op(cls, left, right, name): def _arith_method_SERIES(op, name, str_rep=None, fill_zeros=None, default_axis=None, **eval_kwargs): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. - """ + """Wrapper function for Series arithmetic operations, to avoid code + duplication.""" def na_op(x, y): try: result = expressions.evaluate(op, str_rep, x, y, @@ -507,10 +508,8 @@ def wrapper(left, right, name=name): def _comp_method_SERIES(op, name, str_rep=None, masker=False): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. - """ + """Wrapper function for Series arithmetic operations, to avoid code + duplication.""" def na_op(x, y): if x.dtype == np.object_: if isinstance(y, list): @@ -579,10 +578,8 @@ def wrapper(self, other): def _bool_method_SERIES(op, name, str_rep=None): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. - """ + """Wrapper function for Series arithmetic operations, to avoid code + duplication.""" def na_op(x, y): try: result = op(x, y) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index eeb0e292c01d4..266c91f79df55 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -39,9 +39,7 @@ def _ensure_like_indices(time, panels): - """ - Makes sure that time and panels are conformable - """ + """Makes sure that time and panels are conformable.""" n_time = len(time) n_panel = len(panels) u_panels = np.unique(panels) # this sorts! 
@@ -134,10 +132,8 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, minor_axis=minor_axis, copy=copy, dtype=dtype) def _init_data(self, data, copy, dtype, **kwargs): - """ - Generate ND initialization; axes are passed - as required objects to __init__ - """ + """Generate ND initialization; axes are passed as required objects to + __init__""" if data is None: data = {} if dtype is not None: @@ -210,8 +206,7 @@ def _init_arrays(self, arrays, arr_names, axes): @classmethod def from_dict(cls, data, intersect=False, orient='items', dtype=None): - """ - Construct Panel from dict of DataFrame objects + """Construct Panel from dict of DataFrame objects. Parameters ---------- @@ -230,6 +225,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): Returns ------- Panel + """ orient = orient.lower() if orient == 'minor': @@ -309,11 +305,11 @@ def _compare_constructor(self, other, func): # Magic methods def __unicode__(self): - """ - Return a string representation for a particular Panel + """Return a string representation for a particular Panel. Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + """ class_name = str(self.__class__) @@ -366,8 +362,7 @@ def _get_plane_axes(self, axis): fromDict = from_dict def to_sparse(self, fill_value=None, kind='block'): - """ - Convert to SparsePanel + """Convert to SparsePanel. Parameters ---------- @@ -377,6 +372,7 @@ def to_sparse(self, fill_value=None, kind='block'): Returns ------- y : SparseDataFrame + """ from pandas.core.sparse import SparsePanel frames = dict(compat.iteritems(self)) @@ -387,8 +383,7 @@ def to_sparse(self, fill_value=None, kind='block'): default_fill_value=fill_value) def to_excel(self, path, na_rep='', engine=None, **kwargs): - """ - Write each DataFrame in Panel to a separate excel sheet + """Write each DataFrame in Panel to a separate excel sheet. 
Parameters ---------- @@ -423,6 +418,7 @@ def to_excel(self, path, na_rep='', engine=None, **kwargs): ----- Keyword arguments (and na_rep) are passed to the ``to_excel`` method for each DataFrame written. + """ from pandas.io.excel import ExcelWriter @@ -445,8 +441,7 @@ def as_matrix(self): # Getting and setting elements def get_value(self, *args, **kwargs): - """ - Quickly retrieve single value at (item, major, minor) location + """Quickly retrieve single value at (item, major, minor) location. Parameters ---------- @@ -458,6 +453,7 @@ def get_value(self, *args, **kwargs): Returns ------- value : scalar value + """ nargs = len(args) nreq = self._AXIS_LEN @@ -477,8 +473,7 @@ def get_value(self, *args, **kwargs): return lower.get_value(*args[1:], takeable=takeable) def set_value(self, *args, **kwargs): - """ - Quickly set single value at (item, major, minor) location + """Quickly set single value at (item, major, minor) location. Parameters ---------- @@ -493,6 +488,7 @@ def set_value(self, *args, **kwargs): panel : Panel If label combo is contained, will be reference to calling Panel, otherwise a new object + """ # require an arg for each axis and the value nargs = len(args) @@ -562,7 +558,7 @@ def __setitem__(self, key, value): NDFrame._set_item(self, key, mat) def _unpickle_panel_compat(self, state): # pragma: no cover - "Unpickle the panel" + """Unpickle the panel.""" _unpickle = com._unpickle_array vals, items, major, minor = state @@ -574,8 +570,7 @@ def _unpickle_panel_compat(self, state): # pragma: no cover self._data = wp._data def conform(self, frame, axis='items'): - """ - Conform input DataFrame to align with chosen axis pair. + """Conform input DataFrame to align with chosen axis pair. 
Parameters ---------- @@ -589,6 +584,7 @@ def conform(self, frame, axis='items'): Returns ------- DataFrame + """ axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes)) @@ -600,12 +596,11 @@ def tail(self, n=5): raise NotImplementedError def _needs_reindex_multi(self, axes, method, level): - """ don't allow a multi reindex on Panel or above ndim """ + """don't allow a multi reindex on Panel or above ndim.""" return False def dropna(self, axis=0, how='any', inplace=False, **kwargs): - """ - Drop 2D from panel, holding passed axis constant + """Drop 2D from panel, holding passed axis constant. Parameters ---------- @@ -621,6 +616,7 @@ def dropna(self, axis=0, how='any', inplace=False, **kwargs): Returns ------- dropped : Panel + """ axis = self._get_axis_number(axis) @@ -689,8 +685,7 @@ def _combine_panel(self, other, func): return self._constructor(result_values, items, major, minor) def major_xs(self, key, copy=True): - """ - Return slice of panel along major axis + """Return slice of panel along major axis. Parameters ---------- @@ -703,12 +698,12 @@ def major_xs(self, key, copy=True): ------- y : DataFrame index -> minor axis, columns -> items + """ return self.xs(key, axis=self._AXIS_LEN - 2, copy=copy) def minor_xs(self, key, copy=True): - """ - Return slice of panel along minor axis + """Return slice of panel along minor axis. Parameters ---------- @@ -721,12 +716,12 @@ def minor_xs(self, key, copy=True): ------- y : DataFrame index -> major axis, columns -> items + """ return self.xs(key, axis=self._AXIS_LEN - 1, copy=copy) def xs(self, key, axis=1, copy=True): - """ - Return slice of panel along selected axis + """Return slice of panel along selected axis. 
Parameters ---------- @@ -739,6 +734,7 @@ def xs(self, key, axis=1, copy=True): Returns ------- y : ndim(self)-1 + """ axis = self._get_axis_number(axis) if axis == 0: @@ -778,8 +774,7 @@ def _ixs(self, i, axis=0): return self._construct_return_type(new_data) def groupby(self, function, axis='major'): - """ - Group data on given axis, returning GroupBy object + """Group data on given axis, returning GroupBy object. Parameters ---------- @@ -790,16 +785,16 @@ def groupby(self, function, axis='major'): Returns ------- grouped : PanelGroupBy + """ from pandas.core.groupby import PanelGroupBy axis = self._get_axis_number(axis) return PanelGroupBy(self, function, axis=axis) def to_frame(self, filter_observations=True): - """ - Transform wide format into long (stacked) format as DataFrame whose - columns are the Panel's items and whose index is a MultiIndex formed - of the Panel's major and minor axes. + """Transform wide format into long (stacked) format as DataFrame whose + columns are the Panel's items and whose index is a MultiIndex formed of + the Panel's major and minor axes. Parameters ---------- @@ -810,6 +805,7 @@ def to_frame(self, filter_observations=True): Returns ------- y : DataFrame + """ _, N, K = self.shape @@ -872,8 +868,7 @@ def construct_index_parts(idx, major=True): toLong = deprecate('toLong', to_frame) def apply(self, func, axis='major', **kwargs): - """ - Applies function along input axis of the Panel + """Applies function along input axis of the Panel. 
Parameters ---------- @@ -894,6 +889,7 @@ def apply(self, func, axis='major', **kwargs): Returns ------- result : Pandas Object + """ if kwargs and not isinstance(func, np.ufunc): @@ -1019,7 +1015,7 @@ def _reduce(self, op, axis=0, skipna=True, numeric_only=None, return self._construct_return_type(result, axes) def _construct_return_type(self, result, axes=None, **kwargs): - """ return the type for the ndim of the result """ + """return the type for the ndim of the result.""" ndim = getattr(result,'ndim',None) # need to assume they are the same @@ -1041,7 +1037,7 @@ def _construct_return_type(self, result, axes=None, **kwargs): # same as self elif self.ndim == ndim: - """ return the construction dictionary for these axes """ + """return the construction dictionary for these axes.""" if axes is None: return self._constructor(result) return self._constructor(result, **self._construct_axes_dict()) @@ -1095,8 +1091,7 @@ def transpose(self, *args, **kwargs): return super(Panel, self).transpose(*args, **kwargs) def count(self, axis='major'): - """ - Return number of observations over requested axis. + """Return number of observations over requested axis. Parameters ---------- @@ -1105,6 +1100,7 @@ def count(self, axis='major'): Returns ------- count : DataFrame + """ i = self._get_axis_number(axis) @@ -1115,8 +1111,7 @@ def count(self, axis='major'): return self._wrap_result(result, axis) def shift(self, lags, freq=None, axis='major'): - """ - Shift major or minor axis by specified number of leads/lags. + """Shift major or minor axis by specified number of leads/lags. 
Parameters ---------- @@ -1126,6 +1121,7 @@ def shift(self, lags, freq=None, axis='major'): Returns ------- shifted : Panel + """ if freq: return self.tshift(lags, freq, axis=axis) @@ -1139,8 +1135,7 @@ def tshift(self, periods=1, freq=None, axis='major', **kwds): return super(Panel, self).tshift(periods, freq, axis, **kwds) def join(self, other, how='left', lsuffix='', rsuffix=''): - """ - Join items with other Panel either on major and minor axes column + """Join items with other Panel either on major and minor axes column. Parameters ---------- @@ -1161,6 +1156,7 @@ def join(self, other, how='left', lsuffix='', rsuffix=''): Returns ------- joined : Panel + """ from pandas.tools.merge import concat @@ -1235,13 +1231,13 @@ def _get_join_index(self, other, how): # miscellaneous data creation @staticmethod def _extract_axes(self, data, axes, **kwargs): - """ return a list of the axis indicies """ + """return a list of the axis indicies.""" return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)] @staticmethod def _extract_axes_for_slice(self, axes): - """ return the slice dictionary for these axes """ + """return the slice dictionary for these axes.""" return dict([(self._AXIS_SLICEMAP[i], a) for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)]) @@ -1349,7 +1345,7 @@ def _extract_axis(self, data, axis=0, intersect=False): @classmethod def _add_aggregate_operations(cls, use_numexpr=True): - """ add the operations to the cls; evaluate the doc strings again """ + """add the operations to the cls; evaluate the doc strings again.""" # doc strings substitors _agg_doc = """ diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py index 3eebd51190e3d..2f8eca992c7d0 100644 --- a/pandas/core/panelnd.py +++ b/pandas/core/panelnd.py @@ -57,7 +57,7 @@ def __init__(self, *args, **kwargs): klass.__init__ = __init__ def _get_plane_axes_index(self, axis): - """ return the sliced index for this object """ + """return the sliced index 
for this object.""" axis_name = self._get_axis_name(axis) index = self._AXIS_ORDERS.index(axis) diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 0d06e9253ce1f..cc68937276215 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -246,7 +246,7 @@ def get_new_index(self): def _make_index_array_level(lev, lab): - """ create the combined index array, preserving nans, return an array """ + """create the combined index array, preserving nans, return an array.""" mask = lab == -1 if not mask.any(): return lev @@ -339,9 +339,7 @@ def _unstack_multiple(data, clocs): def pivot(self, index=None, columns=None, values=None): - """ - See DataFrame.pivot - """ + """See DataFrame.pivot.""" if values is None: indexed = self.set_index([index, columns]) return indexed.unstack(columns) @@ -353,9 +351,8 @@ def pivot(self, index=None, columns=None, values=None): def pivot_simple(index, columns, values): - """ - Produce 'pivot' table based on 3 columns of this DataFrame. - Uses unique values from index / columns and fills with values. + """Produce 'pivot' table based on 3 columns of this DataFrame. Uses unique + values from index / columns and fills with values. Parameters ---------- @@ -373,6 +370,7 @@ def pivot_simple(index, columns, values): Returns ------- DataFrame + """ if (len(index) != len(columns)) or (len(columns) != len(values)): raise AssertionError('Length of index, columns, and values must be the' @@ -388,9 +386,8 @@ def pivot_simple(index, columns, values): def _slow_pivot(index, columns, values): - """ - Produce 'pivot' table based on 3 columns of this DataFrame. - Uses unique values from index / columns and fills with values. + """Produce 'pivot' table based on 3 columns of this DataFrame. Uses unique + values from index / columns and fills with values. Parameters ---------- @@ -402,6 +399,7 @@ def _slow_pivot(index, columns, values): Column name to use for populating new frame's values Could benefit from some Cython here. 
+ """ tree = {} for i, (idx, col) in enumerate(zip(index, columns)): @@ -615,9 +613,8 @@ def _stack_multi_columns(frame, level=-1, dropna=True): def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): - """ - "Unpivots" a DataFrame from wide format to long format, optionally leaving - identifier variables set. + """"Unpivots" a DataFrame from wide format to long format, optionally + leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other @@ -909,10 +906,12 @@ def melt_stub(df, stub, i, j): def convert_dummies(data, cat_variables, prefix_sep='_'): - """ - Compute DataFrame with specified columns converted to dummy variables (0 / - 1). Result columns will be prefixed with the column name, then the level - name, e.g. 'A_foo' for column A and level foo + """Compute DataFrame with specified columns converted to dummy variables. + + (0. + + / 1). Result columns will be prefixed with the column name, then the level + name, e.g. 'A_foo' for column A and level foo. Parameters ---------- @@ -925,6 +924,7 @@ def convert_dummies(data, cat_variables, prefix_sep='_'): Returns ------- dummies : DataFrame + """ result = data.drop(cat_variables, axis=1) for variable in cat_variables: @@ -935,8 +935,7 @@ def convert_dummies(data, cat_variables, prefix_sep='_'): def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False): - """ - Convert categorical variable into dummy/indicator variables + """Convert categorical variable into dummy/indicator variables. 
Parameters ---------- @@ -1060,7 +1059,7 @@ def make_axis_dummies(frame, axis='minor', transform=None): def block2d_to_blocknd(values, items, shape, labels, ref_items=None): - """ pivot to the labels shape """ + """pivot to the labels shape.""" from pandas.core.internals import make_block panel_shape = (len(items),) + shape @@ -1090,9 +1089,8 @@ def block2d_to_blocknd(values, items, shape, labels, ref_items=None): def factor_indexer(shape, labels): - """ given a tuple of shape and a list of Categorical labels, return the - expanded label indexer - """ + """given a tuple of shape and a list of Categorical labels, return the + expanded label indexer.""" mult = np.array(shape)[::-1].cumprod()[::-1] return com._ensure_platform_int( np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) diff --git a/pandas/core/series.py b/pandas/core/series.py index 544d327c9a13d..c488450263dc9 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -65,7 +65,7 @@ def _coerce_method(converter): - """ install the scalar coercion methods """ + """install the scalar coercion methods.""" def wrapper(self): if len(self) == 1: @@ -262,7 +262,7 @@ def is_time_series(self): _index = None def _set_axis(self, axis, labels, fastpath=False): - """ override generic, we want to set the _typ here """ + """override generic, we want to set the _typ here.""" if not fastpath: labels = _ensure_index(labels) @@ -315,7 +315,7 @@ def dtype(self): @property def dtypes(self): - """ for compat """ + """for compat.""" return self._data.dtype @property @@ -324,7 +324,7 @@ def ftype(self): @property def ftypes(self): - """ for compat """ + """for compat.""" return self._data.ftype @property @@ -347,13 +347,13 @@ def compress(self, condition, axis=0, out=None, **kwargs): return self[condition] def transpose(self): - """ support for compatiblity """ + """support for compatiblity.""" return self T = property(transpose) def nonzero(self): - """ numpy like, returns same as nonzero """ + """numpy like, 
returns same as nonzero.""" return self.values.nonzero() def put(self, *args, **kwargs): @@ -367,13 +367,11 @@ def view(self, dtype=None): index=self.index).__finalize__(self) def __array__(self, result=None): - """ the array interface, return my values """ + """the array interface, return my values.""" return self.values def __array_wrap__(self, result): - """ - Gets called prior to a ufunc (and after) - """ + """Gets called prior to a ufunc (and after)""" return self._constructor(result, index=self.index, copy=False).__finalize__(self) @@ -699,18 +697,14 @@ def _set_values(self, key, value): _get_val_at = ndarray.__getitem__ def repeat(self, reps): - """ - See ndarray.repeat - """ + """See ndarray.repeat.""" new_index = self.index.repeat(reps) new_values = self.values.repeat(reps) return self._constructor(new_values, index=new_index).__finalize__(self) def reshape(self, *args, **kwargs): - """ - See numpy.ndarray.reshape - """ + """See numpy.ndarray.reshape.""" if len(args) == 1 and hasattr(args[0], '__iter__'): shape = args[0] else: @@ -727,8 +721,7 @@ def reshape(self, *args, **kwargs): irow = _ixs def get_value(self, label, takeable=False): - """ - Quickly retrieve single value at passed index label + """Quickly retrieve single value at passed index label. Parameters ---------- @@ -738,16 +731,16 @@ def get_value(self, label, takeable=False): Returns ------- value : scalar value + """ if takeable is True: return self.values[label] return self.index.get_value(self.values, label) def set_value(self, label, value, takeable=False): - """ - Quickly set single value at passed label. If label is not contained, a - new object is created with the label placed at the end of the result - index + """Quickly set single value at passed label. If label is not contained, + a new object is created with the label placed at the end of the result + index. 
Parameters ---------- @@ -762,6 +755,7 @@ def set_value(self, label, value, takeable=False): series : Series If label is contained, will be reference to calling Series, otherwise a new object + """ try: if takeable: @@ -820,11 +814,11 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): return df.reset_index(level=level, drop=drop) def __unicode__(self): - """ - Return a string representation for a particular DataFrame + """Return a string representation for a particular DataFrame. Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + """ width, height = get_terminal_size() max_rows = (height if get_option("display.max_rows") == 0 @@ -844,10 +838,7 @@ def __unicode__(self): return result def _tidy_repr(self, max_vals=20): - """ - - Internal function, should always return unicode string - """ + """Internal function, should always return unicode string.""" num = max_vals // 2 head = self.iloc[:num]._get_repr(print_header=True, length=False, dtype=False, name=False) @@ -882,8 +873,7 @@ def _repr_footer(self): def to_string(self, buf=None, na_rep='NaN', float_format=None, nanRep=None, length=False, dtype=False, name=False): - """ - Render a string representation of the Series + """Render a string representation of the Series. 
Parameters ---------- @@ -904,6 +894,7 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, Returns ------- formatted : string (if not buffer passed) + """ if nanRep is not None: # pragma: no cover @@ -931,10 +922,7 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, def _get_repr( self, name=False, print_header=False, length=True, dtype=True, na_rep='NaN', float_format=None): - """ - - Internal function, should always return unicode string - """ + """Internal function, should always return unicode string.""" formatter = fmt.SeriesFormatter(self, name=name, header=print_header, length=length, dtype=dtype, @@ -956,9 +944,7 @@ def __iter__(self): return iter(self.values) def iteritems(self): - """ - Lazily iterate over (index, value) tuples - """ + """Lazily iterate over (index, value) tuples.""" return zip(iter(self.index), iter(self)) if compat.PY3: # pragma: no cover @@ -983,26 +969,26 @@ def __invert__(self): # Misc public methods def keys(self): - "Alias for index" + """Alias for index.""" return self.index @property def values(self): - """ - Return Series as ndarray + """Return Series as ndarray. Returns ------- arr : numpy.ndarray + """ return self._data.values def get_values(self): - """ same as values (but handles sparseness conversions); is a view """ + """same as values (but handles sparseness conversions); is a view.""" return self._data.values def tolist(self): - """ Convert Series to a nested list """ + """Convert Series to a nested list.""" return list(self) def to_dict(self): @@ -1016,8 +1002,7 @@ def to_dict(self): return dict(compat.iteritems(self)) def to_frame(self, name=None): - """ - Convert Series to DataFrame + """Convert Series to DataFrame. 
Parameters ---------- @@ -1028,6 +1013,7 @@ def to_frame(self, name=None): Returns ------- data_frame : DataFrame + """ from pandas.core.frame import DataFrame if name is None: @@ -1038,8 +1024,7 @@ def to_frame(self, name=None): return df def to_sparse(self, kind='block', fill_value=None): - """ - Convert Series to SparseSeries + """Convert Series to SparseSeries. Parameters ---------- @@ -1049,6 +1034,7 @@ def to_sparse(self, kind='block', fill_value=None): Returns ------- sp : SparseSeries + """ from pandas.core.sparse import SparseSeries return SparseSeries(self, kind=kind, @@ -1110,14 +1096,14 @@ def mode(self): Returns ------- modes : Series (sorted) + """ # TODO: Add option for bins like value_counts() from pandas.core.algorithms import mode return mode(self) def drop_duplicates(self, take_last=False, inplace=False): - """ - Return Series with duplicate values removed + """Return Series with duplicate values removed. Parameters ---------- @@ -1129,6 +1115,7 @@ def drop_duplicates(self, take_last=False, inplace=False): Returns ------- deduplicated : Series + """ duplicated = self.duplicated(take_last=take_last) result = self[-duplicated] @@ -1138,8 +1125,7 @@ def drop_duplicates(self, take_last=False, inplace=False): return result def duplicated(self, take_last=False): - """ - Return boolean Series denoting duplicate values + """Return boolean Series denoting duplicate values. Parameters ---------- @@ -1149,6 +1135,7 @@ def duplicated(self, take_last=False): Returns ------- duplicated : Series + """ keys = _ensure_object(self.values) duplicated = lib.duplicated(keys, take_last=take_last) @@ -1156,8 +1143,7 @@ def duplicated(self, take_last=False): index=self.index).__finalize__(self) def idxmin(self, axis=None, out=None, skipna=True): - """ - Index of first occurrence of minimum of values. + """Index of first occurrence of minimum of values. 
Parameters ---------- @@ -1175,6 +1161,7 @@ def idxmin(self, axis=None, out=None, skipna=True): See Also -------- DataFrame.idxmin + """ i = nanops.nanargmin(_values_from_object(self), skipna=skipna) if i == -1: @@ -1182,8 +1169,7 @@ def idxmin(self, axis=None, out=None, skipna=True): return self.index[i] def idxmax(self, axis=None, out=None, skipna=True): - """ - Index of first occurrence of maximum of values. + """Index of first occurrence of maximum of values. Parameters ---------- @@ -1201,6 +1187,7 @@ def idxmax(self, axis=None, out=None, skipna=True): See Also -------- DataFrame.idxmax + """ i = nanops.nanargmax(_values_from_object(self), skipna=skipna) if i == -1: @@ -1213,9 +1200,7 @@ def idxmax(self, axis=None, out=None, skipna=True): @Appender(pa.Array.round.__doc__) def round(self, decimals=0, out=None): - """ - - """ + """""" result = _values_from_object(self).round(decimals, out=out) if out is None: result = self._constructor(result, @@ -1224,9 +1209,8 @@ def round(self, decimals=0, out=None): return result def quantile(self, q=0.5): - """ - Return value at the given quantile, a la scoreatpercentile in - scipy.stats + """Return value at the given quantile, a la scoreatpercentile in + scipy.stats. Parameters ---------- @@ -1236,6 +1220,7 @@ def quantile(self, q=0.5): Returns ------- quantile : float + """ valid_values = self.dropna().values if len(valid_values) == 0: @@ -1313,8 +1298,7 @@ def pretty_name(x): def corr(self, other, method='pearson', min_periods=None): - """ - Compute correlation with `other` Series, excluding missing values + """Compute correlation with `other` Series, excluding missing values. 
Parameters ---------- @@ -1330,6 +1314,7 @@ def corr(self, other, method='pearson', Returns ------- correlation : float + """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: @@ -1338,8 +1323,7 @@ def corr(self, other, method='pearson', min_periods=min_periods) def cov(self, other, min_periods=None): - """ - Compute covariance with Series, excluding missing values + """Compute covariance with Series, excluding missing values. Parameters ---------- @@ -1352,6 +1336,7 @@ def cov(self, other, min_periods=None): covariance : float Normalized by N-1 (unbiased estimator). + """ this, other = self.align(other, join='inner') if len(this) == 0: @@ -1360,8 +1345,7 @@ def cov(self, other, min_periods=None): min_periods=min_periods) def diff(self, periods=1): - """ - 1st discrete difference of object + """1st discrete difference of object. Parameters ---------- @@ -1371,6 +1355,7 @@ def diff(self, periods=1): Returns ------- diffed : Series + """ result = com.diff(_values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) @@ -1431,8 +1416,7 @@ def dot(self, other): # Combination def append(self, to_append, verify_integrity=False): - """ - Concatenate two or more Series. The indexes must not overlap + """Concatenate two or more Series. The indexes must not overlap. Parameters ---------- @@ -1443,6 +1427,7 @@ def append(self, to_append, verify_integrity=False): Returns ------- appended : Series + """ from pandas.tools.merge import concat @@ -1454,8 +1439,7 @@ def append(self, to_append, verify_integrity=False): verify_integrity=verify_integrity) def _binop(self, other, func, level=None, fill_value=None): - """ - Perform generic binary operation with optional fill value + """Perform generic binary operation with optional fill value. 
Parameters ---------- @@ -1471,6 +1455,7 @@ def _binop(self, other, func, level=None, fill_value=None): Returns ------- combined : Series + """ if not isinstance(other, Series): raise AssertionError('Other operand must be Series') @@ -1501,10 +1486,9 @@ def _binop(self, other, func, level=None, fill_value=None): return self._constructor(result, index=new_index).__finalize__(self) def combine(self, other, func, fill_value=nan): - """ - Perform elementwise binary operation on two Series using given function - with optional fill value when an index is missing from one Series or - the other + """Perform elementwise binary operation on two Series using given + function with optional fill value when an index is missing from one + Series or the other. Parameters ---------- @@ -1515,6 +1499,7 @@ def combine(self, other, func, fill_value=nan): Returns ------- result : Series + """ if isinstance(other, Series): new_index = self.index + other.index @@ -1531,9 +1516,8 @@ def combine(self, other, func, fill_value=nan): return self._constructor(new_values, index=new_index, name=new_name) def combine_first(self, other): - """ - Combine Series values, choosing the calling Series's values - first. Result index will be the union of the two indexes + """Combine Series values, choosing the calling Series's values first. + Result index will be the union of the two indexes. Parameters ---------- @@ -1542,6 +1526,7 @@ def combine_first(self, other): Returns ------- y : Series + """ new_index = self.index + other.index this = self.reindex(new_index, copy=False) @@ -1569,9 +1554,8 @@ def update(self, other): # Reindexing, sorting def sort(self, axis=0, kind='quicksort', order=None, ascending=True): - """ - Sort values and index labels by value, in place. For compatibility with - ndarray API. No return value + """Sort values and index labels by value, in place. For compatibility + with ndarray API. No return value. 
Parameters ---------- @@ -1586,6 +1570,7 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True): See Also -------- Series.order + """ # GH 5856/5863 @@ -1600,8 +1585,7 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True): self._update_inplace(result) def sort_index(self, ascending=True): - """ - Sort object by labels (along an axis) + """Sort object by labels (along an axis) Parameters ---------- @@ -1617,6 +1601,7 @@ def sort_index(self, ascending=True): Returns ------- sorted_obj : Series + """ index = self.index if isinstance(index, MultiIndex): @@ -1667,9 +1652,8 @@ def argsort(self, axis=0, kind='quicksort', order=None): def rank(self, method='average', na_option='keep', ascending=True, pct=False): - """ - Compute data ranks (1 through n). Equal values are assigned a rank that - is the average of the ranks of those values + """Compute data ranks (1 through n). Equal values are assigned a rank + that is the average of the ranks of those values. Parameters ---------- @@ -1689,6 +1673,7 @@ def rank(self, method='average', na_option='keep', ascending=True, Returns ------- ranks : Series + """ from pandas.core.algorithms import rank ranks = rank(self.values, method=method, na_option=na_option, @@ -1758,8 +1743,7 @@ def _try_kind_sort(arr): .__finalize__(self) def sortlevel(self, level=0, ascending=True): - """ - Sort Series with MultiIndex by chosen level. Data will be + """Sort Series with MultiIndex by chosen level. 
Data will be lexicographically sorted by the chosen level followed by the other levels (in order) @@ -1771,6 +1755,7 @@ def sortlevel(self, level=0, ascending=True): Returns ------- sorted : Series + """ if not isinstance(self.index, MultiIndex): raise TypeError('can only sort by level with a hierarchical index') @@ -1781,8 +1766,7 @@ def sortlevel(self, level=0, ascending=True): index=new_index).__finalize__(self) def swaplevel(self, i, j, copy=True): - """ - Swap levels i and j in a MultiIndex + """Swap levels i and j in a MultiIndex. Parameters ---------- @@ -1792,15 +1776,15 @@ def swaplevel(self, i, j, copy=True): Returns ------- swapped : Series + """ new_index = self.index.swaplevel(i, j) return self._constructor(self.values, index=new_index, copy=copy).__finalize__(self) def reorder_levels(self, order): - """ - Rearrange index levels using input order. May not drop or duplicate - levels + """Rearrange index levels using input order. May not drop or duplicate + levels. Parameters ---------- @@ -1811,6 +1795,7 @@ def reorder_levels(self, order): Returns ------- type of caller (new object) + """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception('Can only reorder levels on a hierarchical axis.') @@ -1820,8 +1805,7 @@ def reorder_levels(self, order): return result def unstack(self, level=-1): - """ - Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame + """Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. 
Parameters ---------- @@ -1849,6 +1833,7 @@ def unstack(self, level=-1): Returns ------- unstacked : DataFrame + """ from pandas.core.reshape import unstack return unstack(self, level) @@ -1857,9 +1842,8 @@ def unstack(self, level=-1): # function application def map(self, arg, na_action=None): - """ - Map values of Series using input correspondence (which can be - a dict, Series, or function) + """Map values of Series using input correspondence (which can be a + dict, Series, or function) Parameters ---------- @@ -1888,6 +1872,7 @@ def map(self, arg, na_action=None): ------- y : Series same index as caller + """ values = self.values if com.is_datetime64_dtype(values.dtype): @@ -1915,10 +1900,9 @@ def map_f(values, f): index=self.index).__finalize__(self) def apply(self, func, convert_dtype=True, args=(), **kwds): - """ - Invoke function on values of Series. Can be ufunc (a NumPy function + """Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works - on single values + on single values. 
Parameters ---------- @@ -1937,6 +1921,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): Returns ------- y : Series or DataFrame if func returns a Series + """ if len(self) == 0: return Series() @@ -1963,7 +1948,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): def _reduce(self, op, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): - """ perform a reduction operation """ + """perform a reduction operation.""" return op(_values_from_object(self), skipna=skipna, **kwds) def _reindex_indexer(self, new_index, indexer, copy): @@ -1977,9 +1962,8 @@ def _reindex_indexer(self, new_index, indexer, copy): return self._constructor(new_values, index=new_index) def _needs_reindex_multi(self, axes, method, level): - """ check if we do need a multi reindex; this is for compat with - higher dims - """ + """check if we do need a multi reindex; this is for compat with higher + dims.""" return False @Appender(generic._shared_docs['rename'] % _shared_doc_kwargs) @@ -1991,15 +1975,14 @@ def reindex(self, index=None, **kwargs): return super(Series, self).reindex(index=index, **kwargs) def reindex_axis(self, labels, axis=0, **kwargs): - """ for compatibility with higher dims """ + """for compatibility with higher dims.""" if axis != 0: raise ValueError("cannot reindex series on non-zero axis!") return self.reindex(index=labels, **kwargs) def take(self, indices, axis=0, convert=True, is_copy=False): - """ - Analogous to ndarray.take, return Series corresponding to requested - indices + """Analogous to ndarray.take, return Series corresponding to requested + indices. 
Parameters ---------- @@ -2009,6 +1992,7 @@ def take(self, indices, axis=0, convert=True, is_copy=False): Returns ------- taken : Series + """ # check/convert indicies here if convert: @@ -2115,8 +2099,7 @@ def between(self, left, right, inclusive=True): @classmethod def from_csv(cls, path, sep=',', parse_dates=True, header=None, index_col=0, encoding=None, infer_datetime_format=False): - """ - Read delimited file into Series + """Read delimited file into Series. Parameters ---------- @@ -2141,6 +2124,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, Returns ------- y : Series + """ from pandas.core.frame import DataFrame df = DataFrame.from_csv(path, header=header, index_col=index_col, @@ -2190,14 +2174,14 @@ def to_csv(self, path, index=True, sep=",", na_rep='', encoding=encoding, date_format=date_format) def dropna(self, axis=0, inplace=False, **kwargs): - """ - Return Series without null values + """Return Series without null values. Returns ------- valid : Series inplace : boolean, default False Do operation in place. + """ axis = self._get_axis_number(axis or 0) result = remove_na(self) @@ -2316,8 +2300,7 @@ def to_timestamp(self, freq=None, how='start', copy=True): index=new_index).__finalize__(self) def to_period(self, freq=None, copy=True): - """ - Convert TimeSeries from DatetimeIndex to PeriodIndex with desired + """Convert TimeSeries from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed) Parameters @@ -2327,6 +2310,7 @@ def to_period(self, freq=None, copy=True): Returns ------- ts : TimeSeries with PeriodIndex + """ new_values = self.values if copy: diff --git a/pandas/core/sparse.py b/pandas/core/sparse.py index 84149e5598f82..39844f80b34e4 100644 --- a/pandas/core/sparse.py +++ b/pandas/core/sparse.py @@ -1,6 +1,7 @@ -""" -Data structures for sparse float data. Life is made simpler by dealing only -with float64 data +"""Data structures for sparse float data. 
+ +Life is made simpler by dealing only with float64 data + """ # pylint: disable=W0611 diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 6add1767a05d6..0990eedcd18d1 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -20,8 +20,7 @@ def _get_array_list(arr, others): def str_cat(arr, others=None, sep=None, na_rep=None): - """ - Concatenate arrays of strings with given separator + """Concatenate arrays of strings with given separator. Parameters ---------- @@ -34,6 +33,7 @@ def str_cat(arr, others=None, sep=None, na_rep=None): Returns ------- concat : array + """ if sep is None: sep = '' @@ -118,19 +118,18 @@ def g(x): def str_title(arr): - """ - Convert strings to titlecased version + """Convert strings to titlecased version. Returns ------- titled : array + """ return _na_map(lambda x: x.title(), arr) def str_count(arr, pat, flags=0): - """ - Count occurrences of pattern in each string + """Count occurrences of pattern in each string. Parameters ---------- @@ -142,6 +141,7 @@ def str_count(arr, pat, flags=0): Returns ------- counts : arrays + """ regex = re.compile(pat, flags=flags) f = lambda x: len(regex.findall(x)) @@ -149,8 +149,7 @@ def str_count(arr, pat, flags=0): def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): - """ - Check whether given pattern is contained in each string in the array + """Check whether given pattern is contained in each string in the array. Parameters ---------- @@ -190,9 +189,8 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): def str_startswith(arr, pat, na=np.nan): - """ - Return boolean array indicating whether each string starts with passed - pattern + """Return boolean array indicating whether each string starts with passed + pattern. 
Parameters ---------- @@ -203,15 +201,15 @@ def str_startswith(arr, pat, na=np.nan): Returns ------- startswith : array (boolean) + """ f = lambda x: x.startswith(pat) return _na_map(f, arr, na) def str_endswith(arr, pat, na=np.nan): - """ - Return boolean array indicating whether each string ends with passed - pattern + """Return boolean array indicating whether each string ends with passed + pattern. Parameters ---------- @@ -222,36 +220,36 @@ def str_endswith(arr, pat, na=np.nan): Returns ------- endswith : array (boolean) + """ f = lambda x: x.endswith(pat) return _na_map(f, arr, na) def str_lower(arr): - """ - Convert strings in array to lowercase + """Convert strings in array to lowercase. Returns ------- lowercase : array + """ return _na_map(lambda x: x.lower(), arr) def str_upper(arr): - """ - Convert strings in array to uppercase + """Convert strings in array to uppercase. Returns ------- uppercase : array + """ return _na_map(lambda x: x.upper(), arr) def str_replace(arr, pat, repl, n=-1, case=True, flags=0): - """ - Replace + """Replace. Parameters ---------- @@ -269,6 +267,7 @@ def str_replace(arr, pat, repl, n=-1, case=True, flags=0): Returns ------- replaced : array + """ use_re = not case or len(pat) > 1 or flags @@ -287,8 +286,7 @@ def f(x): def str_repeat(arr, repeats): - """ - Duplicate each string in the array by indicated number of times + """Duplicate each string in the array by indicated number of times. Parameters ---------- @@ -298,6 +296,7 @@ def str_repeat(arr, repeats): Returns ------- repeated : array + """ if np.isscalar(repeats): def rep(x): @@ -388,8 +387,7 @@ def f(x): def str_extract(arr, pat, flags=0): - """ - Find groups in each string using passed regular expression + """Find groups in each string using passed regular expression. Parameters ---------- @@ -464,8 +462,8 @@ def f(x): def str_get_dummies(arr, sep='|'): - """ - Split each string by sep and return a frame of dummy/indicator variables. 
+ """Split each string by sep and return a frame of dummy/indicator + variables. Examples -------- @@ -505,8 +503,7 @@ def str_get_dummies(arr, sep='|'): def str_join(arr, sep): - """ - Join lists contained as elements in array, a la str.join + """Join lists contained as elements in array, a la str.join. Parameters ---------- @@ -516,24 +513,24 @@ def str_join(arr, sep): Returns ------- joined : array + """ return _na_map(sep.join, arr) def str_len(arr): - """ - Compute length of each string in array. + """Compute length of each string in array. Returns ------- lengths : array + """ return _na_map(len, arr) def str_findall(arr, pat, flags=0): - """ - Find all occurrences of pattern or regular expression + """Find all occurrences of pattern or regular expression. Parameters ---------- @@ -545,14 +542,14 @@ def str_findall(arr, pat, flags=0): Returns ------- matches : array + """ regex = re.compile(pat, flags=flags) return _na_map(regex.findall, arr) def str_pad(arr, width, side='left'): - """ - Pad strings with whitespace + """Pad strings with whitespace. Parameters ---------- @@ -565,6 +562,7 @@ def str_pad(arr, width, side='left'): Returns ------- padded : array + """ if side == 'left': f = lambda x: x.rjust(width) @@ -579,8 +577,8 @@ def str_pad(arr, width, side='left'): def str_center(arr, width): - """ - "Center" strings, filling left and right side with additional whitespace + """"Center" strings, filling left and right side with additional + whitespace. Parameters ---------- @@ -591,14 +589,14 @@ def str_center(arr, width): Returns ------- centered : array + """ return str_pad(arr, width, side='both') def str_split(arr, pat=None, n=None): - """ - Split each string (a la re.split) in array by given pattern, propagating NA - values + """Split each string (a la re.split) in array by given pattern, propagating + NA values. 
Parameters ---------- @@ -613,6 +611,7 @@ def str_split(arr, pat=None, n=None): Returns ------- split : array + """ if pat is None: if n is None or n == 0: @@ -633,8 +632,7 @@ def str_split(arr, pat=None, n=None): def str_slice(arr, start=None, stop=None, step=1): - """ - Slice substrings from each element in array + """Slice substrings from each element in array. Parameters ---------- @@ -644,6 +642,7 @@ def str_slice(arr, start=None, stop=None, step=1): Returns ------- sliced : array + """ obj = slice(start, stop, step) f = lambda x: x[obj] @@ -651,21 +650,18 @@ def str_slice(arr, start=None, stop=None, step=1): def str_slice_replace(arr, start=None, stop=None, repl=None): - """ - - Parameters - ---------- + """Parameters. Returns - ------- - replaced : array + ------- + replaced : array + """ raise NotImplementedError def str_strip(arr, to_strip=None): - """ - Strip whitespace (including newlines) from each string in the array + """Strip whitespace (including newlines) from each string in the array. Parameters ---------- @@ -674,14 +670,14 @@ def str_strip(arr, to_strip=None): Returns ------- stripped : array + """ return _na_map(lambda x: x.strip(to_strip), arr) def str_lstrip(arr, to_strip=None): - """ - Strip whitespace (including newlines) from left side of each string in the - array + """Strip whitespace (including newlines) from left side of each string in + the array. Parameters ---------- @@ -690,14 +686,14 @@ def str_lstrip(arr, to_strip=None): Returns ------- stripped : array + """ return _na_map(lambda x: x.lstrip(to_strip), arr) def str_rstrip(arr, to_strip=None): - """ - Strip whitespace (including newlines) from right side of each string in the - array + """Strip whitespace (including newlines) from right side of each string in + the array. 
Parameters ---------- @@ -706,13 +702,13 @@ def str_rstrip(arr, to_strip=None): Returns ------- stripped : array + """ return _na_map(lambda x: x.rstrip(to_strip), arr) def str_wrap(arr, width=80): - """ - Wrap long strings to be formatted in paragraphs + """Wrap long strings to be formatted in paragraphs. Parameters ---------- @@ -722,13 +718,14 @@ def str_wrap(arr, width=80): Returns ------- wrapped : array + """ raise NotImplementedError def str_get(arr, i): - """ - Extract element from lists, tuples, or strings in each element in the array + """Extract element from lists, tuples, or strings in each element in the + array. Parameters ---------- @@ -738,14 +735,14 @@ def str_get(arr, i): Returns ------- items : array + """ f = lambda x: x[i] if len(x) > i else np.nan return _na_map(f, arr) def str_decode(arr, encoding, errors="strict"): - """ - Decode character string to unicode using indicated encoding + """Decode character string to unicode using indicated encoding. Parameters ---------- @@ -755,14 +752,14 @@ def str_decode(arr, encoding, errors="strict"): Returns ------- decoded : array + """ f = lambda x: x.decode(encoding, errors) return _na_map(f, arr) def str_encode(arr, encoding, errors="strict"): - """ - Encode character string to some other encoding using indicated encoding + """Encode character string to some other encoding using indicated encoding. Parameters ---------- @@ -772,6 +769,7 @@ def str_encode(arr, encoding, errors="strict"): Returns ------- encoded : array + """ f = lambda x: x.encode(encoding, errors) return _na_map(f, arr) @@ -812,7 +810,7 @@ def wrapper3(self, pat, na=np.nan): def copy(source): - "Copy a docstring from another source function (if present)" + """Copy a docstring from another source function (if present)""" def do_copy(target): if source.__doc__: target.__doc__ = source.__doc__ @@ -822,8 +820,7 @@ def do_copy(target): class StringMethods(object): - """ - Vectorized string functions for Series. 
NAs stay NA unless handled + """Vectorized string functions for Series. NAs stay NA unless handled otherwise by a particular method. Patterned after Python's string methods, with some inspiration from R's stringr package. @@ -831,6 +828,7 @@ class StringMethods(object): -------- >>> s.str.split('_') >>> s.str.replace('_', '') + """ def __init__(self, series): diff --git a/pandas/io/api.py b/pandas/io/api.py index cf3615cd822cd..07456aec8c9c4 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -1,6 +1,4 @@ -""" -Data IO api -""" +"""Data IO api.""" from pandas.io.parsers import read_csv, read_table, read_fwf from pandas.io.clipboard import read_clipboard diff --git a/pandas/io/auth.py b/pandas/io/auth.py index 74b6b13000108..b96205cc522b2 100644 --- a/pandas/io/auth.py +++ b/pandas/io/auth.py @@ -64,11 +64,8 @@ def process_flags(flags=[]): def get_flow(secret, scope, redirect): - """ - Retrieve an authentication flow object based on the given - configuration in the secret file name, the authentication scope, - and a redirect URN - """ + """Retrieve an authentication flow object based on the given configuration + in the secret file name, the authentication scope, and a redirect URN.""" key = (secret, scope, redirect) flow = FLOWS.get(key, None) if flow is None: @@ -83,22 +80,22 @@ def get_flow(secret, scope, redirect): def make_token_store(fpath=None): - """create token storage from give file name""" + """create token storage from give file name.""" if fpath is None: fpath = DEFAULT_TOKEN_FILE return auth_file.Storage(fpath) def authenticate(flow, storage=None): - """ - Try to retrieve a valid set of credentials from the token store if possible - Otherwise use the given authentication flow to obtain new credentials - and return an authenticated http object + """Try to retrieve a valid set of credentials from the token store if + possible Otherwise use the given authentication flow to obtain new + credentials and return an authenticated http object. 
Parameters ---------- flow : authentication workflow storage: token storage, default None + """ http = httplib2.Http() @@ -112,9 +109,7 @@ def authenticate(flow, storage=None): def init_service(http): - """ - Use the given http object to build the analytics service object - """ + """Use the given http object to build the analytics service object.""" return gapi.build('analytics', 'v3', http=http) diff --git a/pandas/io/clipboard.py b/pandas/io/clipboard.py index 204eeab74196e..44390eb577ee3 100644 --- a/pandas/io/clipboard.py +++ b/pandas/io/clipboard.py @@ -1,18 +1,18 @@ -""" io on the clipboard """ +"""io on the clipboard.""" from pandas import compat, get_option, DataFrame from pandas.compat import StringIO def read_clipboard(**kwargs): # pragma: no cover - """ - Read text from clipboard and pass to read_table. See read_table for the - full argument list + """Read text from clipboard and pass to read_table. See read_table for the + full argument list. If unspecified, `sep` defaults to '\s+' Returns ------- parsed : DataFrame + """ from pandas.util.clipboard import clipboard_get from pandas.io.parsers import read_table @@ -52,8 +52,7 @@ def read_clipboard(**kwargs): # pragma: no cover def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover - """ - Attempt to write text representation of object to the system clipboard + """Attempt to write text representation of object to the system clipboard The clipboard can be then pasted into Excel for example. 
Parameters @@ -73,6 +72,7 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover - Linux: xclip, or xsel (with gtk or PyQt4 modules) - Windows: - OS X: + """ from pandas.util.clipboard import clipboard_set if excel is None: diff --git a/pandas/io/common.py b/pandas/io/common.py index d6b2827f94d36..22f75e7ecb1f7 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -1,4 +1,4 @@ -"""Common IO api utilities""" +"""Common IO api utilities.""" import sys import zipfile @@ -57,6 +57,7 @@ def _is_url(url): ------- isurl : bool If `url` has a valid protocol return True otherwise False. + """ try: return parse_url(url).scheme in _VALID_URLS @@ -65,7 +66,7 @@ def _is_url(url): def _is_s3_url(url): - """Check for an s3 url""" + """Check for an s3 url.""" try: return parse_url(url).scheme == 's3' except: @@ -74,16 +75,16 @@ def _is_s3_url(url): def maybe_read_encoded_stream(reader, encoding=None): """read an encoded stream from the reader and transform the bytes to - unicode if required based on the encoding + unicode if required based on the encoding. - Parameters - ---------- - reader : a streamable file-like object - encoding : optional, the encoding to attempt to read + Parameters + ---------- + reader : a streamable file-like object + encoding : optional, the encoding to attempt to read - Returns - ------- - a tuple of (a stream of decoded bytes, the encoding which was used) + Returns + ------- + a tuple of (a stream of decoded bytes, the encoding which was used) """ @@ -100,8 +101,7 @@ def maybe_read_encoded_stream(reader, encoding=None): def get_filepath_or_buffer(filepath_or_buffer, encoding=None): - """ - If the filepath_or_buffer is a url, translate and return the buffer + """If the filepath_or_buffer is a url, translate and return the buffer passthru otherwise. 
Parameters @@ -112,6 +112,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None): Returns ------- a filepath_or_buffer, the encoding + """ if _is_url(filepath_or_buffer): @@ -137,8 +138,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None): def file_path_to_url(path): - """ - converts an absolute native path to a FILE URL. + """converts an absolute native path to a FILE URL. Parameters ---------- @@ -147,6 +147,7 @@ def file_path_to_url(path): Returns ------- a valid FILE URL + """ return urljoin('file:', pathname2url(path)) diff --git a/pandas/io/data.py b/pandas/io/data.py index dc5dd2b4b7d80..5e25b5e540a31 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -1,8 +1,4 @@ -""" -Module contains tools for collecting data from various remote sources - - -""" +"""Module contains tools for collecting data from various remote sources.""" import warnings import tempfile import datetime as dt @@ -33,8 +29,7 @@ class RemoteDataError(PandasError, IOError): def DataReader(name, data_source=None, start=None, end=None, retry_count=3, pause=0.001): - """ - Imports data from a number of online sources. + """Imports data from a number of online sources. Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED) and Kenneth French's data library. 
@@ -68,6 +63,7 @@ def DataReader(name, data_source=None, start=None, end=None, ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench") ff = DataReader("6_Portfolios_2x3", "famafrench") ff = DataReader("F-F_ST_Reversal_Factor", "famafrench") + """ start, end = _sanitize_dates(start, end) @@ -97,9 +93,7 @@ def _sanitize_dates(start, end): def _in_chunks(seq, size): - """ - Return sequence in 'chunks' of size defined by size - """ + """Return sequence in 'chunks' of size defined by size.""" return (seq[pos:pos + size] for pos in range(0, len(seq), size)) @@ -111,10 +105,10 @@ def _in_chunks(seq, size): def get_quote_yahoo(symbols): - """ - Get current yahoo quote + """Get current yahoo quote. Returns a DataFrame + """ if isinstance(symbols, compat.string_types): sym_list = symbols @@ -181,11 +175,11 @@ def _retry_read_url(url, retry_count, pause, name): def _get_hist_yahoo(sym, start, end, retry_count, pause): - """ - Get historical data for the given name from yahoo. - Date format is datetime + """Get historical data for the given name from yahoo. Date format is + datetime. Returns a DataFrame. + """ start, end = _sanitize_dates(start, end) url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym + @@ -204,11 +198,11 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause): def _get_hist_google(sym, start, end, retry_count, pause): - """ - Get historical data for the given name from google. - Date format is datetime + """Get historical data for the given name from google. Date format is + datetime. Returns a DataFrame. + """ start, end = _sanitize_dates(start, end) @@ -222,9 +216,11 @@ def _get_hist_google(sym, start, end, retry_count, pause): def _adjust_prices(hist_data, price_list=None): - """ - Return modifed DataFrame or Panel with adjusted prices based on - 'Adj Close' price. Adds 'Adj_Ratio' column. + """Return modifed DataFrame or Panel with adjusted prices based on 'Adj + Close' price. + + Adds 'Adj_Ratio' column. 
+ """ if price_list is None: price_list = 'Open', 'High', 'Low', 'Close' @@ -239,9 +235,10 @@ def _adjust_prices(hist_data, price_list=None): def _calc_return_index(price_df): - """ - Return a returns index from a input price df or series. Initial value + """Return a returns index from a input price df or series. Initial value. + (typically NaN) is set to 1. + """ df = price_df.pct_change().add(1).cumprod() mask = df.ix[1].notnull() & df.ix[0].isnull() @@ -262,9 +259,9 @@ def _calc_return_index(price_df): def get_components_yahoo(idx_sym): - """ - Returns DataFrame containing list of component information for - index represented in idx_sym from yahoo. Includes component symbol + """Returns DataFrame containing list of component information for index + represented in idx_sym from yahoo. Includes component symbol. + (ticker), exchange, and name. Parameters @@ -281,6 +278,7 @@ def get_components_yahoo(idx_sym): Returns ------- idx_df : DataFrame + """ stats = 'snx' # URL of form: @@ -368,10 +366,9 @@ def _get_data_from(symbols, start, end, retry_count, pause, adjust_price, def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, chunksize=25, name=None): - """ - Returns DataFrame/Panel of historical stock prices from symbols, over date - range, start to end. To avoid being penalized by Yahoo! Finance servers, - pauses between downloading 'chunks' of symbols can be specified. + """Returns DataFrame/Panel of historical stock prices from symbols, over + date range, start to end. To avoid being penalized by Yahoo! Finance + servers, pauses between downloading 'chunks' of symbols can be specified. 
Parameters ---------- @@ -400,6 +397,7 @@ def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, Returns ------- hist_data : DataFrame (str) or Panel (array-like object, DataFrame) + """ return _get_data_from(symbols, start, end, retry_count, pause, adjust_price, ret_index, chunksize, 'yahoo', name) @@ -408,10 +406,9 @@ def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, def get_data_google(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, chunksize=25, name=None): - """ - Returns DataFrame/Panel of historical stock prices from symbols, over date - range, start to end. To avoid being penalized by Google Finance servers, - pauses between downloading 'chunks' of symbols can be specified. + """Returns DataFrame/Panel of historical stock prices from symbols, over + date range, start to end. To avoid being penalized by Google Finance + servers, pauses between downloading 'chunks' of symbols can be specified. Parameters ---------- @@ -434,6 +431,7 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3, Returns ------- hist_data : DataFrame (str) or Panel (array-like object, DataFrame) + """ return _get_data_from(symbols, start, end, retry_count, pause, adjust_price, ret_index, chunksize, 'google', name) @@ -444,14 +442,14 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3, def get_data_fred(name, start=dt.datetime(2010, 1, 1), end=dt.datetime.today()): - """ - Get data for the given name from the St. Louis FED (FRED). - Date format is datetime + """Get data for the given name from the St. Louis FED (FRED). Date format + is datetime. Returns a DataFrame. If multiple names are passed for "series" then the index of the DataFrame is the outer join of the indicies of each series. 
+ """ start, end = _sanitize_dates(start, end) @@ -543,8 +541,7 @@ def _two_char_month(s): class Options(object): - """ - This class fetches call/put data for a given stock/expiry month. + """This class fetches call/put data for a given stock/expiry month. It is instantiated with a string representing the ticker symbol. @@ -581,7 +578,7 @@ class Options(object): """ def __init__(self, symbol, data_source=None): - """ Instantiates options_data with a ticker saved as symbol """ + """Instantiates options_data with a ticker saved as symbol.""" self.symbol = symbol.upper() if data_source is None: warnings.warn("Options(symbol) is deprecated, use Options(symbol," @@ -591,9 +588,8 @@ def __init__(self, symbol, data_source=None): raise NotImplementedError("currently only yahoo supported") def get_options_data(self, month=None, year=None, expiry=None): - """ - Gets call/put data for the stock with the expiration data in the - given month and year + """Gets call/put data for the stock with the expiration data in the + given month and year. Parameters ---------- @@ -625,6 +621,7 @@ def get_options_data(self, month=None, year=None, expiry=None): putsMMYY where MM and YY are, repsectively, two digit representations of the month and year for the expiry of the options. + """ return [f(month, year, expiry) for f in (self.get_put_data, self.get_call_data)] @@ -680,9 +677,8 @@ def _get_option_data(self, month, year, expiry, table_loc, name): return option_data def get_call_data(self, month=None, year=None, expiry=None): - """ - Gets call/put data for the stock with the expiration data in the - given month and year + """Gets call/put data for the stock with the expiration data in the + given month and year. Parameters ---------- @@ -709,13 +705,13 @@ def get_call_data(self, month=None, year=None, expiry=None): or year, the ivar will be named callsMMYY where MM and YY are, repsectively, two digit representations of the month and year for the expiry of the options. 
+ """ return self._get_option_data(month, year, expiry, 9, 'calls') def get_put_data(self, month=None, year=None, expiry=None): - """ - Gets put data for the stock with the expiration data in the - given month and year + """Gets put data for the stock with the expiration data in the given + month and year. Parameters ---------- @@ -744,14 +740,14 @@ def get_put_data(self, month=None, year=None, expiry=None): or year, the ivar will be named putsMMYY where MM and YY are, repsectively, two digit representations of the month and year for the expiry of the options. + """ return self._get_option_data(month, year, expiry, 13, 'puts') def get_near_stock_price(self, above_below=2, call=True, put=False, month=None, year=None, expiry=None): - """ - Cuts the data frame opt_df that is passed in to only take - options that are near the current stock price. + """Cuts the data frame opt_df that is passed in to only take options + that are near the current stock price. Parameters ---------- @@ -776,6 +772,7 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, The resultant DataFrame chopped down to be 2 * above_below + 1 rows desired. If there isn't data as far out as the user has asked for then + """ year, month = self._try_parse_dates(year, month, expiry) price = float(get_quote_yahoo([self.symbol])['last']) @@ -817,9 +814,8 @@ def _try_parse_dates(self, year, month, expiry): def get_forward_data(self, months, call=True, put=False, near=False, above_below=2): - """ - Gets either call, put, or both data for months starting in the current - month and going out in the future a specified amount of time. + """Gets either call, put, or both data for months starting in the + current month and going out in the future a specified amount of time. 
Parameters ---------- @@ -844,6 +840,7 @@ def get_forward_data(self, months, call=True, put=False, near=False, Returns ------- data : dict of str, DataFrame + """ warnings.warn("get_forward_data() is deprecated", FutureWarning) in_months = lrange(CUR_MONTH, CUR_MONTH + months + 1) diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index 3ffcef4b21552..813e6a7ff4c08 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -1,4 +1,5 @@ -"""This module is designed for community supported date conversion functions""" +"""This module is designed for community supported date conversion +functions.""" from pandas.compat import range, map import numpy as np import pandas.lib as lib diff --git a/pandas/io/excel.py b/pandas/io/excel.py index fef5a24e6ea20..483191502a4c6 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -1,6 +1,4 @@ -""" -Module parse to/from Excel -""" +"""Module parse to/from Excel.""" #---------------------------------------------------------------------- # ExcelFile class @@ -26,9 +24,13 @@ def register_writer(klass): - """Adds engine to the excel writer registry. You must use this method to - integrate with ``to_excel``. Also adds config options for any new - ``supported_extensions`` defined on the writer.""" + """Adds engine to the excel writer registry. + + You must use this method to integrate with ``to_excel``. Also adds + config options for any new ``supported_extensions`` defined on the + writer. + + """ if not compat.callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine @@ -50,7 +52,7 @@ def get_writer(engine_name): def read_excel(io, sheetname=0, **kwds): - """Read an Excel table into a pandas DataFrame + """Read an Excel table into a pandas DataFrame. 
Parameters ---------- @@ -96,6 +98,7 @@ def read_excel(io, sheetname=0, **kwds): ------- parsed : DataFrame DataFrame from the passed in Excel file + """ if 'kind' in kwds: kwds.pop('kind') @@ -108,9 +111,8 @@ def read_excel(io, sheetname=0, **kwds): class ExcelFile(object): - """ - Class for parsing tabular excel sheets into DataFrame objects. - Uses xlrd. See ExcelFile.parse for more documentation + """Class for parsing tabular excel sheets into DataFrame objects. Uses + xlrd. See ExcelFile.parse for more documentation. Parameters ---------- @@ -119,6 +121,7 @@ class ExcelFile(object): engine: string, default None If io is not a buffer or path, this must be set to identify io. Acceptable values are None or xlrd + """ def __init__(self, io, **kwds): @@ -151,7 +154,7 @@ def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0, index_col=None, parse_cols=None, parse_dates=False, date_parser=None, na_values=None, thousands=None, chunksize=None, convert_float=True, has_index_names=False, **kwds): - """Read an Excel table into DataFrame + """Read an Excel table into DataFrame. Parameters ---------- @@ -195,6 +198,7 @@ def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0, ------- parsed : DataFrame DataFrame parsed from the Excel file + """ skipfooter = kwds.pop('skipfooter', None) if skipfooter is not None: @@ -313,7 +317,7 @@ def sheet_names(self): return self.book.sheet_names() def close(self): - """close io if necessary""" + """close io if necessary.""" if hasattr(self.io, 'close'): self.io.close() @@ -348,8 +352,7 @@ def _conv_value(val): @add_metaclass(abc.ABCMeta) class ExcelWriter(object): - """ - Class for writing DataFrame objects into excel sheets, default is to use + """Class for writing DataFrame objects into excel sheets, default is to use xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage. 
Parameters @@ -365,6 +368,7 @@ class ExcelWriter(object): datetime_format : string, default None Format string for datetime objects written into Excel files (e.g. 'YYYY-MM-DD HH:MM:SS') + """ # Defining an ExcelWriter implementation (see abstract methods for more...) @@ -408,18 +412,17 @@ def __new__(cls, path, engine=None, **kwargs): @abc.abstractproperty def supported_extensions(self): - "extensions that writer engine supports" + """extensions that writer engine supports.""" pass @abc.abstractproperty def engine(self): - "name of engine" + """name of engine.""" pass @abc.abstractmethod def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): - """ - Write given formated cells into Excel an excel sheet + """Write given formated cells into Excel an excel sheet. Parameters ---------- @@ -429,14 +432,13 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): Name of Excel sheet, if None, then use self.cur_sheet startrow: upper left cell row to dump data frame startcol: upper left cell column to dump data frame + """ pass @abc.abstractmethod def save(self): - """ - Save workbook to disk. - """ + """Save workbook to disk.""" pass def __init__(self, path, engine=None, @@ -469,7 +471,11 @@ def _get_sheet_name(self, sheet_name): @classmethod def check_extension(cls, ext): """checks that path's extension against the Writer's supported - extensions. If it isn't supported, raises UnsupportedFiletypeError.""" + extensions. + + If it isn't supported, raises UnsupportedFiletypeError. + + """ if ext.startswith('.'): ext = ext[1:] if not any(ext in extension for extension in cls.supported_extensions): @@ -508,9 +514,7 @@ def __init__(self, path, engine=None, **engine_kwargs): self.book.remove_sheet(self.book.worksheets[0]) def save(self): - """ - Save workbook to disk. 
- """ + """Save workbook to disk.""" return self.book.save(self.path) def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): @@ -572,11 +576,10 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): @classmethod def _convert_to_style(cls, style_dict): - """ - converts a style_dict to an openpyxl style object - Parameters - ---------- + """converts a style_dict to an openpyxl style object Parameters. + style_dict: style dictionary to convert + """ from openpyxl.style import Style @@ -611,9 +614,7 @@ def __init__(self, path, engine=None, encoding=None, **engine_kwargs): self.fm_date = xlwt.easyxf(num_format_str=self.date_format) def save(self): - """ - Save workbook to disk. - """ + """Save workbook to disk.""" return self.book.save(self.path) def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): @@ -695,12 +696,11 @@ def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',', @classmethod def _convert_to_style(cls, style_dict, num_format_str=None): - """ - converts a style_dict to an xlwt style object - Parameters - ---------- + """converts a style_dict to an xlwt style object Parameters. + style_dict: style dictionary to convert - num_format_str: optional number format string + num_format_str: optional number format string + """ import xlwt @@ -733,9 +733,7 @@ def __init__(self, path, engine=None, self.book = xlsxwriter.Workbook(path, **engine_kwargs) def save(self): - """ - Save workbook to disk. - """ + """Save workbook to disk.""" return self.book.close() def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): @@ -780,12 +778,11 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): cell.val, style) def _convert_to_style(self, style_dict, num_format_str=None): - """ - converts a style_dict to an xlsxwriter format object - Parameters - ---------- + """converts a style_dict to an xlsxwriter format object Parameters. 
+ style_dict: style dictionary to convert - num_format_str: optional number format string + num_format_str: optional number format string + """ # Create a XlsxWriter format object. diff --git a/pandas/io/ga.py b/pandas/io/ga.py index f002994888932..7177355abaaea 100644 --- a/pandas/io/ga.py +++ b/pandas/io/ga.py @@ -91,9 +91,7 @@ def reset_token_store(): - """ - Deletes the default token store - """ + """Deletes the default token store.""" auth.reset_default_token_store() @@ -108,29 +106,25 @@ def read_ga(metrics, dimensions, start_date, **kwargs): class OAuthDataReader(object): - """ - Abstract class for handling OAuth2 authentication using the Google - oauth2client library - """ + """Abstract class for handling OAuth2 authentication using the Google + oauth2client library.""" def __init__(self, scope, token_file_name, redirect): - """ - Parameters - ---------- + """Parameters. + scope : str - Designates the authentication scope - token_file_name : str - Location of cache for authenticated tokens - redirect : str - Redirect URL + Designates the authentication scope + token_file_name : str + Location of cache for authenticated tokens + redirect : str + Redirect URL + """ self.scope = scope self.token_store = auth.make_token_store(token_file_name) self.redirect_url = redirect def authenticate(self, secrets): - """ - Run the authentication process and return an authorized - http object + """Run the authentication process and return an authorized http object. Parameters ---------- @@ -141,13 +135,13 @@ def authenticate(self, secrets): ----- See google documention for format of secrets file %s + """ % DOC_URL flow = self._create_flow(secrets) return auth.authenticate(flow, self.token_store) def _create_flow(self, secrets): - """ - Create an authentication flow based on the secrets file + """Create an authentication flow based on the secrets file. 
Parameters ---------- @@ -158,15 +152,14 @@ def _create_flow(self, secrets): ----- See google documentation for format of secrets file %s + """ % DOC_URL return auth.get_flow(secrets, self.scope, self.redirect_url) class GDataReader(OAuthDataReader): - """ - Abstract class for reading data from google APIs using OAuth2 - Subclasses must implement create_query method - """ + """Abstract class for reading data from google APIs using OAuth2 Subclasses + must implement create_query method.""" def __init__(self, scope=auth.DEFAULT_SCOPE, token_file_name=auth.DEFAULT_TOKEN_FILE, redirect=NO_CALLBACK, secrets=auth.DEFAULT_SECRETS): @@ -175,14 +168,12 @@ def __init__(self, scope=auth.DEFAULT_SCOPE, @property def service(self): - """The authenticated request service object""" + """The authenticated request service object.""" return self._service def _init_service(self, secrets): - """ - Build an authenticated google api request service using the given - secrets file - """ + """Build an authenticated google api request service using the given + secrets file.""" http = self.authenticate(secrets) return auth.init_service(http) @@ -199,15 +190,15 @@ def get_account(self, name=None, id=None, **kwargs): return _get_match(accounts, name, id, **kwargs) def get_web_property(self, account_id=None, name=None, id=None, **kwargs): - """ - Retrieve a web property given and account and property name, id, or - custom attribute + """Retrieve a web property given and account and property name, id, or + custom attribute. 
Parameters ---------- account_id : str, optional name : str, optional id : str, optional + """ prop_store = self.service.management().webproperties() kwds = {} @@ -219,8 +210,7 @@ def get_web_property(self, account_id=None, name=None, id=None, **kwargs): def get_profile(self, account_id=None, web_property_id=None, name=None, id=None, **kwargs): - """ - Retrieve the right profile for the given account, web property, and + """Retrieve the right profile for the given account, web property, and profile attribute (name, id, or arbitrary parameter in kwargs) Parameters @@ -229,6 +219,7 @@ def get_profile(self, account_id=None, web_property_id=None, name=None, web_property_id : str, optional name : str, optional id : str, optional + """ profile_store = self.service.management().profiles() kwds = {} diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 60381a2a628c2..fd62398e46920 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -1,6 +1,4 @@ -""" -Pandas module to interface with Google BigQuery. -""" +"""Pandas module to interface with Google BigQuery.""" import os import sys import tempfile @@ -42,59 +40,46 @@ class SchemaMissing(PandasError, IOError): - """ - Raised when attempting to write a DataFrame to - a new table in Google BigQuery without specifying - a schema describing the DataFrame. - """ + """Raised when attempting to write a DataFrame to a new table in Google + BigQuery without specifying a schema describing the DataFrame.""" pass class InvalidSchema(PandasError, IOError): - """ - Raised when attempting to write a DataFrame to - Google BigQuery with an invalid table schema. - """ + """Raised when attempting to write a DataFrame to Google BigQuery with an + invalid table schema.""" pass class TableExistsFail(PandasError, IOError): - """ - Raised when attempting to write a DataFrame to - an existing Google BigQuery table without specifying - that a replace/update action be taken. 
- """ + """Raised when attempting to write a DataFrame to an existing Google + BigQuery table without specifying that a replace/update action be taken.""" pass class InvalidColumnOrder(PandasError, IOError): - """ - Raised when the provided column order for output - results DataFrame does not match the schema - returned by BigQuery. - """ + """Raised when the provided column order for output results DataFrame does + not match the schema returned by BigQuery.""" pass def _authenticate(): - """ - For testing, we abstract the authentication to BigQuery API. - Presently this is implemented using the bq.py Client.Get() - method. Any exceptions raised are considered fatal, so we - do not process them. + """For testing, we abstract the authentication to BigQuery API. Presently + this is implemented using the bq.py Client.Get() method. Any exceptions + raised are considered fatal, so we do not process them. Returns ------- BigqueryClient : Configured connection to Google BigQuery + """ return bq.Client.Get() def _parse_entry(field_value, field_type): - """ - Given a value and the corresponding BigQuery data type, - perform any operations needed and return in a format - appropriate for a numpy record dictionary + """Given a value and the corresponding BigQuery data type, perform any + operations needed and return in a format appropriate for a numpy record + dictionary. Parameters ---------- @@ -106,6 +91,7 @@ def _parse_entry(field_value, field_type): ------- field_value : object or primitive of type corresponding to field_type + """ # Avoid any casting problems @@ -126,10 +112,9 @@ def _parse_entry(field_value, field_type): def _parse_page(raw_page, col_names, col_types, col_dtypes): - """ - Given a list of rows produced by the client.apiclient.tabledata().list(), - build a numpy array with proper dtypes and column names as specified - by the arguments. 
+ """Given a list of rows produced by the + client.apiclient.tabledata().list(), build a numpy array with proper dtypes + and column names as specified by the arguments. Parameters ---------- @@ -145,6 +130,7 @@ def _parse_page(raw_page, col_names, col_types, col_dtypes): ------- page_array : numpy record array corresponding to the page data + """ # Should be at most 100,000 per the API, but this could @@ -168,12 +154,10 @@ def _parse_page(raw_page, col_names, col_types, col_dtypes): def _parse_data(client, job, index_col=None, col_order=None): - """ - Iterate through the query results and piece together the - final DataFrame. Builds a DataFrame for each page of - results, then concatenates them together when finished. - To save memory, we use numpy record arrays to build these - DataFrames. + """Iterate through the query results and piece together the final + DataFrame. Builds a DataFrame for each page of results, then concatenates + them together when finished. To save memory, we use numpy record arrays to + build these DataFrames. Parameters ---------- @@ -204,6 +188,7 @@ def _parse_data(client, job, index_col=None, col_order=None): pagination API. We are using the most flexible iteration method that we could find in the bq.py/bigquery_client.py API's, but these have undergone large amounts of change recently. + """ # dtype Map - @@ -383,6 +368,7 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, parameter is set to 'fail' (the default) InvalidSchema : Raised if the 'schema' parameter does not match the provided DataFrame + """ if not _BQ_INSTALLED: diff --git a/pandas/io/html.py b/pandas/io/html.py index 4375d08abc37c..3f60a8f884f5e 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -66,6 +66,7 @@ def _remove_whitespace(s, regex=_RE_WHITESPACE): ------- subd : str or unicode `s` with all extra whitespace replaced with a single space. 
+ """ return regex.sub(' ', s.strip()) @@ -87,6 +88,7 @@ def _get_skiprows(skiprows): ------- it : iterable A proper iterator to use to skip rows of a DataFrame. + """ if isinstance(skiprows, slice): return lrange(skiprows.start or 0, skiprows.stop, skiprows.step or 1) @@ -108,6 +110,7 @@ def _read(io): Returns ------- raw_text : str + """ if _is_url(io): with urlopen(io) as url: @@ -164,6 +167,7 @@ class _HtmlFrameParser(object): * :func:`_parse_tfoot` See each method's respective documentation for details on their functionality. + """ def __init__(self, io, match, attrs): self.io = io @@ -193,6 +197,7 @@ def _parse_raw_data(self, rows): Returns ------- data : list of list of strings + """ data = [[_remove_whitespace(self._text_getter(col)) for col in self._parse_td(row)] for row in rows] @@ -210,6 +215,7 @@ def _text_getter(self, obj): ------- text : str or unicode The text from an individual DOM node. + """ raise NotImplementedError @@ -224,6 +230,7 @@ def _parse_td(self, obj): ------- columns : list of node-like These are the elements of each row, i.e., the columns. + """ raise NotImplementedError @@ -251,6 +258,7 @@ def _parse_tables(self, doc, match, attrs): ------- tables : list of node-like A list of <table> elements to be parsed into raw data. + """ raise NotImplementedError @@ -266,6 +274,7 @@ def _parse_tr(self, table): ------- rows : list of node-like A list row elements of a table, usually <tr> or <th> elements. + """ raise NotImplementedError @@ -281,6 +290,7 @@ def _parse_thead(self, table): ------- thead : node-like A <thead>...</thead> element. + """ raise NotImplementedError @@ -296,6 +306,7 @@ def _parse_tbody(self, table): ------- tbody : node-like A <tbody>...</tbody> element. + """ raise NotImplementedError @@ -311,6 +322,7 @@ def _parse_tfoot(self, table): ------- tfoot : node-like A <tfoot>...</tfoot> element. 
+ """ raise NotImplementedError @@ -365,6 +377,7 @@ class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): ----- Documentation strings for this class are in the base class :class:`pandas.io.html._HtmlFrameParser`. + """ def __init__(self, *args, **kwargs): super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args, @@ -438,6 +451,7 @@ def _build_xpath_expr(attrs): ------- expr : unicode An XPath expression that checks for the given HTML attributes. + """ # give class attribute as class_ because class is a python keyword if 'class_' in attrs: @@ -467,6 +481,7 @@ class _LxmlFrameParser(_HtmlFrameParser): ----- Documentation strings for this class are in the base class :class:`_HtmlFrameParser`. + """ def __init__(self, *args, **kwargs): super(_LxmlFrameParser, self).__init__(*args, **kwargs) @@ -501,20 +516,20 @@ def _parse_tables(self, doc, match, kwargs): return tables def _build_doc(self): - """ - Raises - ------ + """Raises. + ValueError - * If a URL that lxml cannot parse is passed. + * If a URL that lxml cannot parse is passed. - Exception - * Any other ``Exception`` thrown. For example, trying to parse a - URL that is syntactically correct on a machine with no internet - connection will fail. + Exception + * Any other ``Exception`` thrown. For example, trying to parse a + URL that is syntactically correct on a machine with no internet + connection will fail. + + See Also + -------- + pandas.io.html._HtmlFrameParser._build_doc - See Also - -------- - pandas.io.html._HtmlFrameParser._build_doc """ from lxml.html import parse, fromstring, HTMLParser from lxml.etree import XMLSyntaxError @@ -634,6 +649,7 @@ def _parser_dispatch(flavor): * If `flavor` is not a valid backend. 
ImportError * If you do not have the requested `flavor` + """ valid_parsers = list(_valid_parsers.keys()) if flavor not in valid_parsers: diff --git a/pandas/io/json.py b/pandas/io/json.py index 4ed325df9a747..ceb1741970e6d 100644 --- a/pandas/io/json.py +++ b/pandas/io/json.py @@ -90,7 +90,7 @@ class FrameWriter(Writer): _default_orient = 'columns' def _format_axes(self): - """ try to axes if they are datelike """ + """try to axes if they are datelike.""" if not self.obj.index.is_unique and self.orient in ( 'index', 'columns'): raise ValueError("DataFrame index must be unique for orient=" @@ -104,8 +104,7 @@ def _format_axes(self): def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, convert_axes=True, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None): - """ - Convert a JSON string to pandas object + """Convert a JSON string to pandas object. Parameters ---------- @@ -169,6 +168,7 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, Returns ------- result : Series or DataFrame + """ filepath_or_buffer, _ = get_filepath_or_buffer(path_or_buf) @@ -273,7 +273,7 @@ def parse(self): return self.obj def _convert_axes(self): - """ try to convert axes """ + """try to convert axes.""" for axis in self.obj._AXIS_NUMBERS.keys(): new_axis, result = self._try_convert_data( axis, self.obj._get_axis(axis), use_dtypes=False, @@ -286,7 +286,7 @@ def _try_convert_types(self): def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): - """ try to parse a ndarray like into a column by inferring dtype """ + """try to parse a ndarray like into a column by inferring dtype.""" # don't try to coerce, unless a force conversion if use_dtypes: @@ -359,10 +359,9 @@ def _try_convert_data(self, name, data, use_dtypes=True, return data, result def _try_convert_to_date(self, data): - """ try to parse a ndarray like into a date column - try to coerce object in epoch/iso formats and - integer/float 
in epcoh formats, return a boolean if parsing - was successful """ + """try to parse a ndarray like into a date column try to coerce object + in epoch/iso formats and integer/float in epcoh formats, return a + boolean if parsing was successful.""" # no conversion on empty if not len(data): @@ -496,7 +495,7 @@ def _parse_no_numpy(self): loads(json, precise_float=self.precise_float), dtype=None) def _process_converter(self, f, filt=None): - """ take a conversion function and possibly recreate the frame """ + """take a conversion function and possibly recreate the frame.""" if filt is None: filt = lambda col, c: True @@ -538,7 +537,7 @@ def _try_convert_dates(self): convert_dates = set(convert_dates) def is_ok(col): - """ return if this col is ok to try for a date parse """ + """return if this col is ok to try for a date parse.""" if not isinstance(col, compat.string_types): return False @@ -560,7 +559,7 @@ def is_ok(col): # JSON normalization routines def nested_to_record(ds, prefix="", level=0): - """a simplified json_normalize + """a simplified json_normalize. converts a nested dict into a flat dict ("record"), unlike json_normalize, it does not attempt to extract a subset of the data. 
@@ -583,6 +582,7 @@ def nested_to_record(ds, prefix="", level=0): 'nested.d': 2, 'nested.e.c': 1, 'nested.e.d': 2} + """ singleton = False if isinstance(ds, dict): diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 105bea92124fd..72e8c190df857 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -1,7 +1,5 @@ -""" -Msgpack serializer support for reading and writing pandas data structures -to disk -""" +"""Msgpack serializer support for reading and writing pandas data structures to +disk.""" # portions of msgpack_numpy package, by Lev Givon were incorporated # into this module (and tests_packers.py) @@ -75,8 +73,7 @@ def to_msgpack(path_or_buf, *args, **kwargs): - """ - msgpack (serialize) object to input file path + """msgpack (serialize) object to input file path. THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. @@ -90,6 +87,7 @@ def to_msgpack(path_or_buf, *args, **kwargs): (default is False) compress : type of compressor (zlib or blosc), default to None (no compression) + """ global compressor compressor = kwargs.pop('compress', None) @@ -115,9 +113,7 @@ def writer(fh): def read_msgpack(path_or_buf, iterator=False, **kwargs): - """ - Load msgpack pandas object from the specified - file path + """Load msgpack pandas object from the specified file path. THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. @@ -190,16 +186,14 @@ def dtype_for(t): def c2f(r, i, ctype_name): - """ - Convert strings to complex number instance with specified numpy type. 
- """ + """Convert strings to complex number instance with specified numpy type.""" ftype = c2f_dict[ctype_name] return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i)) def convert(values): - """ convert the numpy values to a list """ + """convert the numpy values to a list.""" dtype = values.dtype if needs_i8_conversion(dtype): @@ -259,9 +253,7 @@ def unconvert(values, dtype, compress=None): def encode(obj): - """ - Data encoder - """ + """Data encoder.""" tobj = type(obj) if isinstance(obj, Index): @@ -435,9 +427,7 @@ def encode(obj): def decode(obj): - """ - Decoder for deserializing numpy data types. - """ + """Decoder for deserializing numpy data types.""" typ = obj.get('typ') if typ is None: @@ -542,9 +532,7 @@ def create_block(b): def pack(o, default=encode, encoding='latin1', unicode_errors='strict', use_single_float=False): - """ - Pack an object and return the packed bytes. - """ + """Pack an object and return the packed bytes.""" return Packer(default=default, encoding=encoding, unicode_errors=unicode_errors, @@ -597,8 +585,7 @@ def __init__(self, file_like=None, read_size=0, use_list=False, class Iterator(object): - """ manage the unpacking iteration, - close the file on completion """ + """manage the unpacking iteration, close the file on completion.""" def __init__(self, path, **kwargs): self.path = path diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index b45b8929e7af3..99181d9965e17 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1,6 +1,5 @@ -""" -Module contains tools for processing files into DataFrames or other objects -""" +"""Module contains tools for processing files into DataFrames or other +objects.""" from __future__ import print_function from pandas.compat import range, lrange, StringIO, lzip, zip, string_types, map from pandas import compat @@ -197,7 +196,7 @@ def _read(filepath_or_buffer, kwds): - "Generic reader of line files." 
+ """Generic reader of line files.""" encoding = kwds.get('encoding', None) skipfooter = kwds.pop('skipfooter', None) if skipfooter is not None: @@ -462,11 +461,7 @@ def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds): class TextFileReader(object): - """ - - Passed dialect overrides any of the related parser options - - """ + """Passed dialect overrides any of the related parser options.""" def __init__(self, f, engine='python', **kwds): @@ -949,9 +944,7 @@ def _do_date_conversions(self, names, data): class CParserWrapper(ParserBase): - """ - - """ + """""" def __init__(self, src, **kwds): self.kwds = kwds @@ -1151,10 +1144,9 @@ def _maybe_parse_dates(self, values, index, try_parse_dates=True): def TextParser(*args, **kwds): - """ - Converts lists of lists/tuples into DataFrames with proper type inference - and optional (e.g. string to datetime) conversion. Also enables iterating - lazily over chunks of large files + """Converts lists of lists/tuples into DataFrames with proper type + inference and optional (e.g. string to datetime) conversion. Also enables + iterating lazily over chunks of large files. Parameters ---------- @@ -1193,6 +1185,7 @@ def TextParser(*args, **kwds): If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. + """ kwds['engine'] = 'python' return TextFileReader(*args, **kwds) @@ -1240,10 +1233,10 @@ def _wrap_compressed(f, compression, encoding=None): class PythonParser(ParserBase): def __init__(self, f, **kwds): - """ - Workhorse function for processing nested list into DataFrame + """Workhorse function for processing nested list into DataFrame. Should be replaced by np.genfromtxt eventually? 
+ """ ParserBase.__init__(self, kwds) @@ -1609,10 +1602,10 @@ def _infer_columns(self): return columns, num_original_columns def _handle_usecols(self, columns, usecols_key): - """ - Sets self._col_indices + """Sets self._col_indices. usecols_key is used if there are string usecols. + """ if self.usecols is not None: if any([isinstance(u, string_types) for u in self.usecols]): @@ -1634,9 +1627,7 @@ def _handle_usecols(self, columns, usecols_key): return columns def _buffered_line(self): - """ - Return a line from buffer, filling buffer if required. - """ + """Return a line from buffer, filling buffer if required.""" if len(self.buf) > 0: return self.buf[0] else: @@ -2089,7 +2080,7 @@ def _floatify_na_values(na_values): def _stringify_na_values(na_values): - """ return a stringified and numeric for these values """ + """return a stringified and numeric for these values.""" result = [] for x in na_values: result.append(str(x)) diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 915c1e9ae1574..4602e145ab3f5 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -2,23 +2,22 @@ def to_pickle(obj, path): - """ - Pickle (serialize) object to input file path + """Pickle (serialize) object to input file path. Parameters ---------- obj : any object path : string File path + """ with open(path, 'wb') as f: pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL) def read_pickle(path): - """ - Load pickled pandas object (or any other pickled object) from the specified - file path + """Load pickled pandas object (or any other pickled object) from the + specified file path. Warning: Loading pickled data received from untrusted sources can be unsafe. 
See: http://docs.python.org/2.7/library/pickle.html @@ -31,6 +30,7 @@ def read_pickle(path): Returns ------- unpickled : type of object stored in file + """ def try_read(path, encoding=None): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index c4a839b4842c5..22c3cbd9ac1f4 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1,7 +1,5 @@ -""" -High level interface to PyTables for reading and writing pandas data structures -to disk -""" +"""High level interface to PyTables for reading and writing pandas data +structures to disk.""" # pylint: disable-msg=E1101,W0613,W0603 from datetime import datetime, date @@ -49,7 +47,7 @@ _default_encoding = 'UTF-8' def _ensure_decoded(s): - """ if we have bytes, decode them to unicde """ + """if we have bytes, decode them to unicde.""" if isinstance(s, np.bytes_): s = s.decode('UTF-8') return s @@ -243,8 +241,8 @@ def _tables(): @contextmanager def get_store(path, **kwargs): - """ - Creates an HDFStore instance. This function can be used in a with statement + """Creates an HDFStore instance. This function can be used in a with + statement. Parameters ---------- @@ -258,6 +256,7 @@ def get_store(path, **kwargs): >>> with get_store('test.h5') as store: ... store['foo'] = bar # write to HDF5 ... bar = store['foo'] # retrieve + """ store = None try: @@ -272,7 +271,7 @@ def get_store(path, **kwargs): def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): - """ store this object, close it if we opened it """ + """store this object, close it if we opened it.""" if append: f = lambda store: store.append(key, value, **kwargs) else: @@ -287,32 +286,32 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, def read_hdf(path_or_buf, key, **kwargs): - """ read from the store, close it if we opened it + """read from the store, close it if we opened it. 
- Retrieve pandas object stored in file, optionally based on where - criteria + Retrieve pandas object stored in file, optionally based on where + criteria - Parameters - ---------- - path_or_buf : path (string), or buffer to read from - key : group identifier in the store - where : list of Term (or convertable) objects, optional - start : optional, integer (defaults to None), row number to start - selection - stop : optional, integer (defaults to None), row number to stop - selection - columns : optional, a list of columns that if not None, will limit the - return columns - iterator : optional, boolean, return an iterator, default False - chunksize : optional, nrows to include in iteration, return an iterator - auto_close : optional, boolean, should automatically close the store - when finished, default is False - - Returns - ------- - The selected object + Parameters + ---------- + path_or_buf : path (string), or buffer to read from + key : group identifier in the store + where : list of Term (or convertable) objects, optional + start : optional, integer (defaults to None), row number to start + selection + stop : optional, integer (defaults to None), row number to stop + selection + columns : optional, a list of columns that if not None, will limit the + return columns + iterator : optional, boolean, return an iterator, default False + chunksize : optional, nrows to include in iteration, return an iterator + auto_close : optional, boolean, should automatically close the store + when finished, default is False + + Returns + ------- + The selected object - """ + """ # grab the scope if 'where' in kwargs: @@ -404,7 +403,7 @@ def __init__(self, path, mode=None, complevel=None, complib=None, @property def root(self): - """ return the root node """ + """return the root node.""" self._check_if_open() return self._handle.root @@ -422,7 +421,7 @@ def __delitem__(self, key): return self.remove(key) def __getattr__(self, name): - """ allow attribute access to get stores 
""" + """allow attribute access to get stores.""" self._check_if_open() try: return self.get(name) @@ -432,9 +431,8 @@ def __getattr__(self, name): (type(self).__name__, name)) def __contains__(self, key): - """ check for existance of this key - can match the exact pathname or the pathnm w/o the leading '/' - """ + """check for existance of this key can match the exact pathname or the + pathnm w/o the leading '/'.""" node = self.get_node(key) if node is not None: name = node._v_pathname @@ -474,10 +472,12 @@ def __unicode__(self): return output def keys(self): - """ - Return a (potentially unordered) list of the keys corresponding to the - objects stored in the HDFStore. These are ABSOLUTE path-names (e.g. + """Return a (potentially unordered) list of the keys corresponding to + the objects stored in the HDFStore. + + These are ABSOLUTE path-names (e.g. have the leading '/' + """ return [n._v_pathname for n in self.groups()] @@ -491,13 +491,13 @@ def items(self): iteritems = items def open(self, mode='a', **kwargs): - """ - Open the file in the specified mode + """Open the file in the specified mode. Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.openFile for info about modes + """ tables = _tables() @@ -562,25 +562,20 @@ def open(self, mode='a', **kwargs): raise def close(self): - """ - Close the PyTables file handle - """ + """Close the PyTables file handle.""" if self._handle is not None: self._handle.close() self._handle = None @property def is_open(self): - """ - return a boolean indicating whether the file is open - """ + """return a boolean indicating whether the file is open.""" if self._handle is None: return False return bool(self._handle.isopen) def flush(self, fsync=False): - """ - Force all buffered modifications to be written to disk. + """Force all buffered modifications to be written to disk. Parameters ---------- @@ -593,6 +588,7 @@ def flush(self, fsync=False): to disk. 
With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. + """ if self._handle is not None: self._handle.flush() @@ -603,8 +599,7 @@ def flush(self, fsync=False): pass def get(self, key): - """ - Retrieve pandas object stored in file + """Retrieve pandas object stored in file. Parameters ---------- @@ -613,6 +608,7 @@ def get(self, key): Returns ------- obj : type of object stored in file + """ group = self.get_node(key) if group is None: @@ -621,9 +617,8 @@ def get(self, key): def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): - """ - Retrieve pandas object stored in file, optionally based on where - criteria + """Retrieve pandas object stored in file, optionally based on where + criteria. Parameters ---------- @@ -670,8 +665,7 @@ def func(_start, _stop): def select_as_coordinates( self, key, where=None, start=None, stop=None, **kwargs): - """ - return the selection as an Index + """return the selection as an Index. Parameters ---------- @@ -679,6 +673,7 @@ def select_as_coordinates( where : list of Term (or convertable) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection + """ where = _ensure_term(where, scope_level=1) return self.get_storer(key).read_coordinates(where=where, start=start, @@ -692,9 +687,8 @@ def unique(self, key, column, **kwargs): **kwargs).unique() def select_column(self, key, column, **kwargs): - """ - return a single column from the table. This is generally only useful to - select an indexable + """return a single column from the table. This is generally only useful + to select an indexable. 
Parameters ---------- @@ -714,7 +708,7 @@ def select_column(self, key, column, **kwargs): def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): - """ Retrieve pandas objects from multiple tables + """Retrieve pandas objects from multiple tables. Parameters ---------- @@ -732,6 +726,7 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS + """ # default to single select @@ -798,8 +793,7 @@ def func(_start, _stop): auto_close=auto_close).get_values() def put(self, key, value, format=None, append=False, **kwargs): - """ - Store object in HDFStore + """Store object in HDFStore. Parameters ---------- @@ -816,6 +810,7 @@ def put(self, key, value, format=None, append=False, **kwargs): This will force Table format, append the input data to the existing. encoding : default None, provide an encoding for strings + """ if format is None: format = get_option("io.hdf.default_format") or 'fixed' @@ -823,8 +818,7 @@ def put(self, key, value, format=None, append=False, **kwargs): self._write_to_group(key, value, append=append, **kwargs) def remove(self, key, where=None, start=None, stop=None): - """ - Remove pandas object partially by specifying the where condition + """Remove pandas object partially by specifying the where condition. Parameters ---------- @@ -874,8 +868,7 @@ def remove(self, key, where=None, start=None, stop=None): def append(self, key, value, format=None, append=True, columns=None, dropna=None, **kwargs): - """ - Append to Table in file. Node must already exist and be Table + """Append to Table in file. Node must already exist and be Table format. 
Parameters @@ -902,6 +895,7 @@ def append(self, key, value, format=None, append=True, columns=None, ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful + """ if columns is not None: raise TypeError("columns is not a supported keyword in append, " @@ -917,8 +911,7 @@ def append(self, key, value, format=None, append=True, columns=None, def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, dropna=True, **kwargs): - """ - Append to multiple tables + """Append to multiple tables. Parameters ---------- @@ -998,14 +991,13 @@ def append_to_multiple(self, d, value, selector, data_columns=None, self.append(k, val, data_columns=dc, **kwargs) def create_table_index(self, key, **kwargs): - """ Create a pytables index on the table - Paramaters - ---------- + """Create a pytables index on the table Paramaters. + key : object (the node to index) - Exceptions - ---------- - raises if the node is not a table + Exceptions + ---------- + raises if the node is not a table """ @@ -1038,7 +1030,7 @@ def groups(self): ] def get_node(self, key): - """ return the node with the key or None if it does not exist """ + """return the node with the key or None if it does not exist.""" self._check_if_open() try: if not key.startswith('/'): @@ -1048,7 +1040,7 @@ def get_node(self, key): return None def get_storer(self, key): - """ return the storer object for a key, raise if not in the file """ + """return the storer object for a key, raise if not in the file.""" group = self.get_node(key) if group is None: return None @@ -1058,19 +1050,19 @@ def get_storer(self, key): def copy(self, file, mode='w', propindexes=True, keys=None, complib=None, complevel=None, fletcher32=False, overwrite=True): - """ copy the existing store to a new file, upgrading in place + """copy the existing store to a new file, upgrading in place. 
- Parameters - ---------- - propindexes: restore indexes in copied file (defaults to True) - keys : list of keys to include in the copy (defaults to all) - overwrite : overwrite (remove and replace) existing nodes in the - new store (default is True) - mode, complib, complevel, fletcher32 same as in HDFStore.__init__ + Parameters + ---------- + propindexes: restore indexes in copied file (defaults to True) + keys : list of keys to include in the copy (defaults to all) + overwrite : overwrite (remove and replace) existing nodes in the + new store (default is True) + mode, complib, complevel, fletcher32 same as in HDFStore.__init__ - Returns - ------- - open file handle of the new store + Returns + ------- + open file handle of the new store """ new_store = HDFStore( @@ -1113,7 +1105,7 @@ def _check_if_open(self): raise ClosedFileError("{0} file is not open!".format(self._path)) def _validate_format(self, format, kwargs): - """ validate / deprecate formats; return the new kwargs """ + """validate / deprecate formats; return the new kwargs.""" kwargs = kwargs.copy() # table arg @@ -1138,7 +1130,7 @@ def _validate_format(self, format, kwargs): def _create_storer(self, group, format=None, value=None, append=False, **kwargs): - """ return a suitable class to operate """ + """return a suitable class to operate.""" def error(t): raise TypeError( @@ -1285,21 +1277,22 @@ def _read_group(self, group, **kwargs): class TableIterator(object): - """ define the iteration interface on a table + """define the iteration interface on a table. 
- Parameters - ---------- + Parameters + ---------- - store : the reference store - func : the function to get results - nrows : the rows to iterate on - start : the passed start value (default is None) - stop : the passed stop value (default is None) - chunksize : the passed chunking valeu (default is 50000) - auto_close : boolean, automatically close the store at the end of - iteration, default is False - kwargs : the passed kwargs - """ + store : the reference store + func : the function to get results + nrows : the rows to iterate on + start : the passed start value (default is None) + stop : the passed stop value (default is None) + chunksize : the passed chunking valeu (default is 50000) + auto_close : boolean, automatically close the store at the end of + iteration, default is False + kwargs : the passed kwargs + + """ def __init__(self, store, func, nrows, start=None, stop=None, chunksize=None, auto_close=False): @@ -1344,18 +1337,18 @@ def get_values(self): class IndexCol(StringMixin): - """ an index column description class + """an index column description class. 
- Parameters - ---------- + Parameters + ---------- - axis : axis which I reference - values : the ndarray like converted values - kind : a string description of this type - typ : the pytables type - pos : the position in the pytables + axis : axis which I reference + values : the ndarray like converted values + kind : a string description of this type + typ : the pytables type + pos : the position in the pytables - """ + """ is_an_indexable = True is_data_indexable = True _info_fields = ['freq', 'tz', 'index_name'] @@ -1383,7 +1376,7 @@ def __init__(self, values=None, kind=None, typ=None, cname=None, self.set_pos(pos) def set_name(self, name, kind_attr=None): - """ set the name of this indexer """ + """set the name of this indexer.""" self.name = name self.kind_attr = kind_attr or "%s_kind" % name if self.cname is None: @@ -1392,13 +1385,13 @@ def set_name(self, name, kind_attr=None): return self def set_axis(self, axis): - """ set the axis over which I index """ + """set the axis over which I index.""" self.axis = axis return self def set_pos(self, pos): - """ set the position of this column in the Table """ + """set the position of this column in the Table.""" self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos @@ -1419,7 +1412,7 @@ def __unicode__(self): return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp def __eq__(self, other): - """ compare 2 col items """ + """compare 2 col items.""" return all([getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'axis', 'pos']]) @@ -1428,7 +1421,7 @@ def __ne__(self, other): @property def is_indexed(self): - """ return whether I am an indexed column """ + """return whether I am an indexed column.""" try: return getattr(self.table.cols, self.cname).is_indexed except: @@ -1478,7 +1471,7 @@ def convert(self, values, nan_rep, encoding): return self def take_data(self): - """ return the values & release the memory """ + """return the values & release the memory.""" 
self.values, values = None, self.values return values @@ -1492,12 +1485,12 @@ def description(self): @property def col(self): - """ return my current col description """ + """return my current col description.""" return getattr(self.description, self.cname, None) @property def cvalues(self): - """ return my cython values """ + """return my cython values.""" return self.values def __iter__(self): @@ -1550,8 +1543,8 @@ def validate_attr(self, append): (existing_kind, self.kind)) def update_info(self, info): - """ set/update the info for this indexable with the key/value - if there is a conflict raise/warn as needed """ + """set/update the info for this indexable with the key/value if there + is a conflict raise/warn as needed.""" for key in self._info_fields: @@ -1582,23 +1575,23 @@ def update_info(self, info): return self def set_info(self, info): - """ set my state from the passed info """ + """set my state from the passed info.""" idx = info.get(self.name) if idx is not None: self.__dict__.update(idx) def get_attr(self): - """ set the kind for this colummn """ + """set the kind for this colummn.""" self.kind = getattr(self.attrs, self.kind_attr, None) def set_attr(self): - """ set the kind for this colummn """ + """set the kind for this colummn.""" setattr(self.attrs, self.kind_attr, self.kind) class GenericIndexCol(IndexCol): - """ an index which is not represented in the data of the table """ + """an index which is not represented in the data of the table.""" @property def is_indexed(self): @@ -1619,15 +1612,16 @@ def set_attr(self): class DataCol(IndexCol): - """ a data holding column, by definition this is not indexable + """a data holding column, by definition this is not indexable. 
- Parameters - ---------- + Parameters + ---------- - data : the actual data - cname : the column name in the table to hold the data (typically - values) - """ + data : the actual data + cname : the column name in the table to hold the data (typically + values) + + """ is_an_indexable = False is_data_indexable = False _info_fields = ['tz'] @@ -1635,7 +1629,7 @@ class DataCol(IndexCol): @classmethod def create_for_block( cls, i=None, name=None, cname=None, version=None, **kwargs): - """ return a new datacol with the block i """ + """return a new datacol with the block i.""" if cname is None: cname = name or 'values_block_%d' % i @@ -1668,7 +1662,7 @@ def __unicode__(self): ) def __eq__(self, other): - """ compare 2 col items """ + """compare 2 col items.""" return all([getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'dtype', 'pos']]) @@ -1683,7 +1677,7 @@ def set_data(self, data, dtype=None): self.set_kind() def take_data(self): - """ return the data & release the memory """ + """return the data & release the memory.""" self.data, data = None, self.data return data @@ -1713,7 +1707,7 @@ def set_kind(self): def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=None, **kwargs): - """ create and setup my atom from the block b """ + """create and setup my atom from the block b.""" self.values = list(block.items) dtype = block.dtype.name @@ -1831,7 +1825,7 @@ def convert_string_data(self, data, itemsize, encoding): return _convert_string_array(data, encoding, itemsize) def get_atom_coltype(self): - """ return the PyTables column class for this column """ + """return the PyTables column class for this column.""" if self.kind.startswith('uint'): col_name = "UInt%sCol" % self.kind[4:] else: @@ -1873,11 +1867,12 @@ def shape(self): @property def cvalues(self): - """ return my cython values """ + """return my cython values.""" return self.data def validate_attr(self, append): - """validate that we have the same order as the 
existing & same dtype""" + """validate that we have the same order as the existing & same + dtype.""" if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if (existing_fields is not None and @@ -1893,8 +1888,7 @@ def validate_attr(self, append): def convert(self, values, nan_rep, encoding): """set the data from this selection (and convert to the correct dtype - if we can) - """ + if we can)""" try: values = values[self.cname] except: @@ -1950,13 +1944,13 @@ def convert(self, values, nan_rep, encoding): return self def get_attr(self): - """ get the data for this colummn """ + """get the data for this colummn.""" self.values = getattr(self.attrs, self.kind_attr, None) self.dtype = getattr(self.attrs, self.dtype_attr, None) self.set_kind() def set_attr(self): - """ set the data for this colummn """ + """set the data for this colummn.""" setattr(self.attrs, self.kind_attr, self.values) if self.dtype is not None: setattr(self.attrs, self.dtype_attr, self.dtype) @@ -1964,7 +1958,7 @@ def set_attr(self): class DataIndexableCol(DataCol): - """ represent a data column that can be indexed """ + """represent a data column that can be indexed.""" is_data_indexable = True def get_atom_string(self, block, itemsize): @@ -1982,7 +1976,7 @@ def get_atom_timedelta64(self, block): class GenericDataIndexableCol(DataIndexableCol): - """ represent a generic pytables data column """ + """represent a generic pytables data column.""" def get_attr(self): pass @@ -1990,16 +1984,16 @@ def get_attr(self): class Fixed(StringMixin): - """ represent an object in my store - facilitate read/write of various types of objects - this is an abstract base class + """represent an object in my store facilitate read/write of various types + of objects this is an abstract base class. 
- Parameters - ---------- + Parameters + ---------- - parent : my parent HDFStore - group : the group node where the table resides - """ + parent : my parent HDFStore + group : the group node where the table resides + + """ pandas_kind = None obj_type = None ndim = None @@ -2017,7 +2011,7 @@ def is_old_version(self): self.version[2] < 1) def set_version(self): - """ compute and set our version """ + """compute and set our version.""" version = _ensure_decoded( getattr(self.group._v_attrs, 'pandas_version', None)) try: @@ -2037,7 +2031,7 @@ def format_type(self): return 'fixed' def __unicode__(self): - """ return a pretty representation of myself """ + """return a pretty representation of myself.""" self.infer_axes() s = self.shape if s is not None: @@ -2047,7 +2041,7 @@ def __unicode__(self): return self.pandas_type def set_object_info(self): - """ set my pandas type & version """ + """set my pandas type & version.""" self.attrs.pandas_type = str(self.pandas_kind) self.attrs.pandas_version = str(_version) self.set_version() @@ -2093,16 +2087,16 @@ def attrs(self): return self.group._v_attrs def set_attrs(self): - """ set our object attributes """ + """set our object attributes.""" pass def get_attrs(self): - """ get our object attributes """ + """get our object attributes.""" pass @property def storable(self): - """ return my storable """ + """return my storable.""" return self.group @property @@ -2114,18 +2108,18 @@ def nrows(self): return getattr(self.storable, 'nrows', None) def validate(self, other): - """ validate against an existing storable """ + """validate against an existing storable.""" if other is None: return return True def validate_version(self, where=None): - """ are we trying to operate on an old version? 
""" + """are we trying to operate on an old version?""" return True def infer_axes(self): - """ infer the axes of my storer - return a boolean indicating if we have a valid storer or not """ + """infer the axes of my storer return a boolean indicating if we have a + valid storer or not.""" s = self.storable if s is None: @@ -2152,7 +2146,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): class GenericFixed(Fixed): - """ a generified fixed version """ + """a generified fixed version.""" _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'} _reverse_index_map = dict([(v, k) for k, v in compat.iteritems(_index_type_map)]) @@ -2191,11 +2185,11 @@ def is_exists(self): return True def set_attrs(self): - """ set our object attributes """ + """set our object attributes.""" self.attrs.encoding = self.encoding def get_attrs(self): - """ retrieve our attributes """ + """retrieve our attributes.""" self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None)) for n in self.attributes: setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None))) @@ -2204,7 +2198,7 @@ def write(self, obj, **kwargs): self.set_attrs() def read_array(self, key): - """ read an array for the specified node (off of group """ + """read an array for the specified node (off of group.""" import tables node = getattr(self.group, key) data = node[:] @@ -2560,7 +2554,7 @@ def read(self, **kwargs): default_fill_value=self.default_fill_value) def write(self, obj, **kwargs): - """ write it as a collection of individual sparse series """ + """write it as a collection of individual sparse series.""" super(SparseFrameFixed, self).write(obj, **kwargs) for name, ss in compat.iteritems(obj): key = 'sparse_series_%s' % name @@ -2744,7 +2738,7 @@ def format_type(self): return 'table' def __unicode__(self): - """ return a pretty representatgion of myself """ + """return a pretty representatgion of myself.""" self.infer_axes() dc = ",dc->[%s]" % ','.join( self.data_columns) if 
len(self.data_columns) else '' @@ -2759,14 +2753,14 @@ def __unicode__(self): ) def __getitem__(self, c): - """ return the axis for c """ + """return the axis for c.""" for a in self.axes: if c == a.name: return a return None def validate(self, other): - """ validate against an existing table """ + """validate against an existing table.""" if other is None: return @@ -2811,12 +2805,12 @@ def validate_multiindex(self, obj): @property def nrows_expected(self): - """ based on our axes, compute the expected nrows """ + """based on our axes, compute the expected nrows.""" return np.prod([i.cvalues.shape[0] for i in self.index_axes]) @property def is_exists(self): - """ has this table been created """ + """has this table been created.""" return u('table') in self.group @property @@ -2825,7 +2819,7 @@ def storable(self): @property def table(self): - """ return the table group (this is my storable) """ + """return the table group (this is my storable)""" return self.storable @property @@ -2842,7 +2836,7 @@ def axes(self): @property def ncols(self): - """ the number of total columns in the values axes """ + """the number of total columns in the values axes.""" return sum([len(a.values) for a in self.values_axes]) @property @@ -2851,12 +2845,12 @@ def is_transposed(self): @property def data_orientation(self): - """return a tuple of my permutated axes, non_indexable at the front""" + """return a tuple of my permutated axes, non_indexable at the front.""" return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes])) def queryables(self): - """ return a dict of the kinds allowable columns for this object """ + """return a dict of the kinds allowable columns for this object.""" # compute the values_axes queryables return dict( @@ -2868,19 +2862,19 @@ def queryables(self): ) def index_cols(self): - """ return a list of my index cols """ + """return a list of my index cols.""" return [(i.axis, i.cname) for i in self.index_axes] def 
values_cols(self): - """ return a list of my values cols """ + """return a list of my values cols.""" return [i.cname for i in self.values_axes] def set_info(self): - """ update our table index info """ + """update our table index info.""" self.attrs.info = self.info def set_attrs(self): - """ set our table type & indexables """ + """set our table type & indexables.""" self.attrs.table_type = str(self.table_type) self.attrs.index_cols = self.index_cols() self.attrs.values_cols = self.values_cols() @@ -2892,7 +2886,7 @@ def set_attrs(self): self.set_info() def get_attrs(self): - """ retrieve our attributes """ + """retrieve our attributes.""" self.non_index_axes = getattr( self.attrs, 'non_index_axes', None) or [] self.data_columns = getattr( @@ -2913,7 +2907,7 @@ def get_attrs(self): ] def validate_version(self, where=None): - """ are we trying to operate on an old version? """ + """are we trying to operate on an old version?""" if where is not None: if (self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1): @@ -2923,8 +2917,7 @@ def validate_version(self, where=None): def validate_min_itemsize(self, min_itemsize): """validate the min_itemisze doesn't contain items that are not in the - axes this needs data_columns to be defined - """ + axes this needs data_columns to be defined.""" if min_itemsize is None: return if not isinstance(min_itemsize, dict): @@ -2943,7 +2936,7 @@ def validate_min_itemsize(self, min_itemsize): @property def indexables(self): - """ create/cache the indexables if they don't exist """ + """create/cache the indexables if they don't exist.""" if self._indexables is None: self._indexables = [] @@ -3055,13 +3048,12 @@ def read_axes(self, where, **kwargs): return True def get_object(self, obj): - """ return the data for this obj """ + """return the data for this obj.""" return obj def validate_data_columns(self, data_columns, min_itemsize): """take the input data_columns and min_itemize and create a data - columns spec - """ + 
columns spec.""" if not len(self.non_index_axes): return [] @@ -3287,7 +3279,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, self.validate(existing_table) def process_axes(self, obj, columns=None): - """ process axes filters """ + """process axes filters.""" # make sure to include levels if we have them if columns is not None and self.is_multi_index: @@ -3344,7 +3336,7 @@ def process_filter(field, filt): def create_description(self, complib=None, complevel=None, fletcher32=False, expectedrows=None): - """ create the description of the table from the axes & values """ + """create the description of the table from the axes & values.""" # expected rows estimate if expectedrows is None: @@ -3368,8 +3360,7 @@ def create_description(self, complib=None, complevel=None, def read_coordinates(self, where=None, start=None, stop=None, **kwargs): """select coordinates (row numbers) from a table; return the - coordinates object - """ + coordinates object.""" # validate the version self.validate_version(where) @@ -3390,9 +3381,8 @@ def read_coordinates(self, where=None, start=None, stop=None, **kwargs): return Index(coords) def read_column(self, column, where=None, start=None, stop=None, **kwargs): - """return a single column from the table, generally only indexables - are interesting - """ + """return a single column from the table, generally only indexables are + interesting.""" # validate the version self.validate_version() @@ -3432,8 +3422,8 @@ class WORMTable(Table): table_type = u('worm') def read(self, **kwargs): - """ read the indicies and the indexing array, calculate offset rows and - return """ + """read the indicies and the indexing array, calculate offset rows and + return.""" raise NotImplementedError("WORMTable needs to implement read") def write(self, **kwargs): @@ -3465,8 +3455,7 @@ def write(self, **kwargs): def read(self, where=None, columns=None, **kwargs): """we have n indexable columns, with an arbitrary number of data - axes - """ + 
axes.""" if not self.read_axes(where=where, **kwargs): return None @@ -3549,7 +3538,7 @@ def read(self, where=None, columns=None, **kwargs): class LegacyFrameTable(LegacyTable): - """ support the legacy frame table """ + """support the legacy frame table.""" pandas_kind = u('frame_table') table_type = u('legacy_frame') obj_type = Panel @@ -3560,14 +3549,14 @@ def read(self, *args, **kwargs): class LegacyPanelTable(LegacyTable): - """ support the legacy panel table """ + """support the legacy panel table.""" table_type = u('legacy_panel') obj_type = Panel class AppendableTable(LegacyTable): - """ suppor the new appendable table formats """ + """suppor the new appendable table formats.""" _indexables = None table_type = u('appendable') @@ -3780,7 +3769,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): class AppendableFrameTable(AppendableTable): - """ suppor the new appendable table formats """ + """suppor the new appendable table formats.""" pandas_kind = u('frame_table') table_type = u('appendable_frame') ndim = 2 @@ -3791,7 +3780,7 @@ def is_transposed(self): return self.index_axes[0].axis == 1 def get_object(self, obj): - """ these are written transposed """ + """these are written transposed.""" if self.is_transposed: obj = obj.T return obj @@ -3846,7 +3835,7 @@ def read(self, where=None, columns=None, **kwargs): class AppendableSeriesTable(AppendableFrameTable): - """ support the new appendable table formats """ + """support the new appendable table formats.""" pandas_kind = u('series_table') table_type = u('appendable_series') ndim = 2 @@ -3861,7 +3850,7 @@ def get_object(self, obj): return obj def write(self, obj, data_columns=None, **kwargs): - """ we are going to write this as a frame table """ + """we are going to write this as a frame table.""" if not isinstance(obj, DataFrame): name = obj.name or 'values' obj = DataFrame({name: obj}, index=obj.index) @@ -3889,12 +3878,12 @@ def read(self, columns=None, **kwargs): class 
AppendableMultiSeriesTable(AppendableSeriesTable): - """ support the new appendable table formats """ + """support the new appendable table formats.""" pandas_kind = u('series_table') table_type = u('appendable_multiseries') def write(self, obj, **kwargs): - """ we are going to write this as a frame table """ + """we are going to write this as a frame table.""" name = obj.name or 'values' obj, self.levels = self.validate_multiindex(obj) cols = list(self.levels) @@ -3904,7 +3893,7 @@ def write(self, obj, **kwargs): class GenericTable(AppendableFrameTable): - """ a table that read/writes the generic pytables table format """ + """a table that read/writes the generic pytables table format.""" pandas_kind = u('frame_table') table_type = u('generic_table') ndim = 2 @@ -3919,7 +3908,7 @@ def storable(self): return getattr(self.group, 'table', None) or self.group def get_attrs(self): - """ retrieve our attributes """ + """retrieve our attributes.""" self.non_index_axes = [] self.nan_rep = None self.levels = [] @@ -3932,7 +3921,7 @@ def get_attrs(self): @property def indexables(self): - """ create the indexables from the table description """ + """create the indexables from the table description.""" if self._indexables is None: d = self.description @@ -3990,13 +3979,13 @@ def read(self, **kwargs): class AppendablePanelTable(AppendableTable): - """ suppor the new appendable table formats """ + """suppor the new appendable table formats.""" table_type = u('appendable_panel') ndim = 3 obj_type = Panel def get_object(self, obj): - """ these are written transposed """ + """these are written transposed.""" if self.is_transposed: obj = obj.transpose(*self.data_orientation) return obj @@ -4008,7 +3997,7 @@ def is_transposed(self): class AppendableNDimTable(AppendablePanelTable): - """ suppor the new appendable table formats """ + """suppor the new appendable table formats.""" table_type = u('appendable_ndim') ndim = 4 obj_type = Panel4D @@ -4036,7 +4025,7 @@ def _reindex_axis(obj, 
axis, labels, other=None): def _get_info(info, name): - """ get/create the info for this name """ + """get/create the info for this name.""" try: idx = info[name] except: @@ -4171,7 +4160,7 @@ def _convert_string_array(data, encoding, itemsize=None): return data def _unconvert_string_array(data, nan_rep=None, encoding=None): - """ deserialize a string array, possibly decoding """ + """deserialize a string array, possibly decoding.""" shape = data.shape data = np.array(data.ravel(), dtype=object) @@ -4227,8 +4216,7 @@ def _need_convert(kind): class Selection(object): - """ - Carries out a selection operation on a tables.Table object. + """Carries out a selection operation on a tables.Table object. Parameters ---------- @@ -4307,9 +4295,7 @@ def generate(self, where): ) def select(self): - """ - generate the selection - """ + """generate the selection.""" if self.condition is not None: return self.table.table.readWhere(self.condition.format(), start=self.start, stop=self.stop) @@ -4318,9 +4304,7 @@ def select(self): return self.table.table.read(start=self.start, stop=self.stop) def select_coords(self): - """ - generate the selection - """ + """generate the selection.""" start, stop = self.start, self.stop nrows = self.table.nrows if start is None: diff --git a/pandas/io/sql.py b/pandas/io/sql.py index ac90555526a5e..5716e7069e966 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -80,8 +80,7 @@ def _parse_date_columns(data_frame, parse_dates): def execute(sql, con, cur=None, params=None, flavor='sqlite'): - """ - Execute the given SQL query using the provided connection object. + """Execute the given SQL query using the provided connection object. 
Parameters ---------- @@ -100,6 +99,7 @@ def execute(sql, con, cur=None, params=None, flavor='sqlite'): Returns ------- Results Iterable + """ pandas_sql = pandasSQL_builder(con, flavor=flavor) args = _convert_params(sql, params) @@ -107,9 +107,7 @@ def execute(sql, con, cur=None, params=None, flavor='sqlite'): def tquery(sql, con, cur=None, params=None, flavor='sqlite'): - """ - Returns list of tuples corresponding to each row in given sql - query. + """Returns list of tuples corresponding to each row in given sql query. If only one column selected, then plain list is returned. @@ -131,6 +129,7 @@ def tquery(sql, con, cur=None, params=None, flavor='sqlite'): Returns ------- Results Iterable + """ warnings.warn( "tquery is depreciated, and will be removed in future versions", @@ -142,8 +141,7 @@ def tquery(sql, con, cur=None, params=None, flavor='sqlite'): def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'): - """ - Does the same thing as tquery, but instead of returning results, it + """Does the same thing as tquery, but instead of returning results, it returns the number of rows affected. Good for update queries. Parameters @@ -164,6 +162,7 @@ def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'): Returns ------- Number of affected rows + """ warnings.warn( "uquery is depreciated, and will be removed in future versions", @@ -179,9 +178,7 @@ def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'): def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, params=None, parse_dates=None): - """ - Returns a DataFrame corresponding to the result set of the query - string. + """Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. 
@@ -234,8 +231,7 @@ def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True, def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=None): - """ - Write records stored in a DataFrame to a SQL database. + """Write records stored in a DataFrame to a SQL database. Parameters ---------- @@ -273,8 +269,7 @@ def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, def has_table(table_name, con, meta=None, flavor='sqlite'): - """ - Check if DataBase has named table. + """Check if DataBase has named table. Parameters ---------- @@ -291,6 +286,7 @@ def has_table(table_name, con, meta=None, flavor='sqlite'): Returns ------- boolean + """ pandas_sql = pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) @@ -349,10 +345,8 @@ def read_table(table_name, con, meta=None, index_col=None, coerce_float=True, def pandasSQL_builder(con, flavor=None, meta=None): - """ - Convenience function to return the correct PandasSQL subclass based on the - provided parameters - """ + """Convenience function to return the correct PandasSQL subclass based on + the provided parameters.""" try: import sqlalchemy @@ -378,12 +372,12 @@ def pandasSQL_builder(con, flavor=None, meta=None): class PandasSQLTable(PandasObject): - """ - For mapping Pandas tables to SQL tables. - Uses fact that table is reflected by SQLAlchemy to - do better type convertions. - Also holds various flags needed to avoid having to - pass them between functions all the time. + """For mapping Pandas tables to SQL tables. + + Uses fact that table is reflected by SQLAlchemy to do better type + convertions. Also holds various flags needed to avoid having to pass + them between functions all the time. 
+ """ # TODO: support for multiIndex def __init__(self, name, pandas_sql_engine, frame=None, index=True, @@ -514,17 +508,16 @@ def _create_table_statement(self): return Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): - """ Make a data_frame's column type align with an sql_table - column types - Need to work around limited NA value support. - Floats are always fine, ints must always - be floats if there are Null values. - Booleans are hard because converting bool column with None replaces - all Nones with false. Therefore only convert bool if there are no - NA values. - Datetimes should already be converted - to np.datetime if supported, but here we also force conversion - if required + """Make a data_frame's column type align with an sql_table column types + Need to work around limited NA value support. + + Floats are always fine, ints must always be floats if there are + Null values. Booleans are hard because converting bool column + with None replaces all Nones with false. Therefore only convert + bool if there are no NA values. 
Datetimes should already be + converted to np.datetime if supported, but here we also force + conversion if required + """ # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: @@ -606,9 +599,7 @@ def _numpy_type(self, sqltype): class PandasSQL(PandasObject): - """ - Subclasses Should define read_sql and to_sql - """ + """Subclasses Should define read_sql and to_sql.""" def read_sql(self, *args, **kwargs): raise ValueError( @@ -620,10 +611,8 @@ def to_sql(self, *args, **kwargs): class PandasSQLAlchemy(PandasSQL): - """ - This class enables convertion between DataFrame and SQL databases - using SQLAlchemy to handle DataBase abstraction - """ + """This class enables convertion between DataFrame and SQL databases using + SQLAlchemy to handle DataBase abstraction.""" def __init__(self, engine, meta=None): self.engine = engine @@ -635,7 +624,7 @@ def __init__(self, engine, meta=None): self.meta = meta def execute(self, *args, **kwargs): - """Simple passthrough to SQLAlchemy engine""" + """Simple passthrough to SQLAlchemy engine.""" return self.engine.execute(*args, **kwargs) def tquery(self, *args, **kwargs): @@ -750,8 +739,10 @@ def _create_sql_schema(self, frame, table_name): class PandasSQLTableLegacy(PandasSQLTable): """Patch the PandasSQLTable for legacy support. - Instead of a table variable just use the Create Table - statement""" + + Instead of a table variable just use the Create Table statement + + """ def sql_schema(self): return str(self.table) @@ -787,7 +778,8 @@ def insert(self): cur.close() def _create_table_statement(self): - "Return a CREATE TABLE statement to suit the contents of a DataFrame." + """Return a CREATE TABLE statement to suit the contents of a + DataFrame.""" # Replace spaces in DataFrame column names with _. 
safe_columns = [_safe_col_name(n) for n in self.frame.dtypes.index] @@ -898,8 +890,7 @@ def _fetchall_as_list(self, cur): def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): - """ - Write records stored in a DataFrame to a SQL database. + """Write records stored in a DataFrame to a SQL database. Parameters ---------- @@ -911,6 +902,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True, replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if does not exist. index_label : ignored (only used in sqlalchemy mode) + """ table = PandasSQLTableLegacy( name, self, frame=frame, index=index, if_exists=if_exists) @@ -935,8 +927,7 @@ def drop_table(self, name): # legacy names, with depreciation warnings and copied docs def get_schema(frame, name, con, flavor='sqlite'): - """ - Get the SQL db table schema for the given frame + """Get the SQL db table schema for the given frame. Parameters ---------- diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 75aaf68b4dd0a..e69e30469d8ba 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1,5 +1,4 @@ -""" -Module contains tools for processing Stata files into DataFrames +"""Module contains tools for processing Stata files into DataFrames. The StataReader below was originally written by Joe Presbrey as part of PyDTA. It has been extended and improved by Skipper Seabold from the Statsmodels @@ -8,6 +7,7 @@ You can find more information on http://presbrey.mit.edu/PyDTA and http://statsmodels.sourceforge.net/devel/ + """ # TODO: Fix this module so it can use cross-compatible zip, map, and range import numpy as np @@ -27,8 +27,7 @@ def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True, encoding=None, index=None): - """ - Read Stata file into DataFrame + """Read Stata file into DataFrame. Parameters ---------- @@ -43,6 +42,7 @@ def read_stata(filepath_or_buffer, convert_dates=True, support unicode. 
None defaults to cp1252. index : identifier of index column identifier of column that should be used as index of the DataFrame + """ reader = StataReader(filepath_or_buffer, encoding) @@ -207,9 +207,9 @@ class InvalidColumnName(Warning): """ def _cast_to_stata_types(data): - """Checks the dtypes of the columns of a pandas DataFrame for - compatibility with the data types and ranges supported by Stata, and - converts if necessary. + """Checks the dtypes of the columns of a pandas DataFrame for compatibility + with the data types and ranges supported by Stata, and converts if + necessary. Parameters ---------- @@ -226,6 +226,7 @@ def _cast_to_stata_types(data): sidecast to float64 when larger than this range. If the int64 values are outside of the range of those perfectly representable as float64 values, a warning is raised. + """ ws = '' for col in data: @@ -253,8 +254,7 @@ def _cast_to_stata_types(data): class StataMissingValue(StringMixin): - """ - An observation's missing value. + """An observation's missing value. Parameters ----------- @@ -269,6 +269,7 @@ class StataMissingValue(StringMixin): Notes ----- More information: <http://www.stata.com/help.cgi?missing> + """ # TODO: Needs test def __init__(self, offset, value): @@ -721,9 +722,7 @@ def _next(self): ) def _dataset(self): - """ - Returns a Python generator object for iterating over the dataset. - + """Returns a Python generator object for iterating over the dataset. Parameters ---------- @@ -738,6 +737,7 @@ def _dataset(self): If missing_values is True during instantiation of StataReader then observations with _StataMissingValue(s) are not filtered and should be handled by your applcation. 
+ """ self.path_or_buf.seek(self.data_location) @@ -812,8 +812,8 @@ def _read_strls(self): self.path_or_buf.read(1) # zero-termination def data(self, convert_dates=True, convert_categoricals=True, index=None): - """ - Reads observations from Stata file, converting them into a dataframe + """Reads observations from Stata file, converting them into a + dataframe. Parameters ---------- @@ -828,6 +828,7 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None): Returns ------- y : DataFrame instance + """ if self._data_read: raise Exception("Data has already been read.") @@ -887,19 +888,17 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None): return data def data_label(self): - """Returns data label of Stata file""" + """Returns data label of Stata file.""" return self.data_label def variable_labels(self): """Returns variable labels as a dict, associating each variable name - with corresponding label - """ + with corresponding label.""" return dict(zip(self.varlist, self.vlblist)) def value_labels(self): """Returns a dict, associating each variable name a dict, associating - each value its corresponding label - """ + each value its corresponding label.""" if not self._value_labels_read: self._read_value_labels() @@ -923,23 +922,22 @@ def _set_endianness(endianness): def _pad_bytes(name, length): - """ - Takes a char string and pads it wih null bytes until it's length chars - """ + """Takes a char string and pads it wih null bytes until it's length + chars.""" return name + "\x00" * (length - len(name)) def _default_names(nvar): - """ - Returns default Stata names v1, v2, ... vnvar + """Returns default Stata names v1, v2, ... 
+ + vnvar + """ return ["v%d" % i for i in range(1, nvar+1)] def _convert_datetime_to_stata_type(fmt): - """ - Converts from one of the stata date formats to a type in TYPE_MAP - """ + """Converts from one of the stata date formats to a type in TYPE_MAP.""" if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq", "%tq", "th", "%th", "ty", "%ty"]: return np.float64 # Stata expects doubles for SIFs @@ -964,10 +962,10 @@ def _maybe_convert_to_int_keys(convert_dates, varlist): def _dtype_to_stata_type(dtype): - """ - Converts dtype types to stata types. Returns the byte of the given ordinal. - See TYPE_MAP and comments for an explanation. This is also explained in - the dta spec. + """Converts dtype types to stata types. Returns the byte of the given + ordinal. See TYPE_MAP and comments for an explanation. This is also + explained in the dta spec. + 1 - 244 are strings of this length Pandas Stata 251 - chr(251) - for int8 byte @@ -978,6 +976,7 @@ def _dtype_to_stata_type(dtype): If there are dates to convert, then dtype will already have the correct type inserted. + """ #TODO: expand to handle datetime to integer conversion if dtype.type == np.string_: @@ -1002,9 +1001,8 @@ def _dtype_to_stata_type(dtype): def _dtype_to_default_stata_fmt(dtype): - """ - Maps numpy dtype to stata's default format for this type. Not terribly - important since users can change this in Stata. Semantics are + """Maps numpy dtype to stata's default format for this type. Not terribly + important since users can change this in Stata. Semantics are. string -> "%DDs" where DD is the length of the string float64 -> "%10.0g" @@ -1013,6 +1011,7 @@ def _dtype_to_default_stata_fmt(dtype): int32 -> "%12.0g" int16 -> "%8.0g" int8 -> "%8.0g" + """ #TODO: expand this to handle a default datetime format? 
if dtype.type == np.string_: @@ -1094,9 +1093,7 @@ def __init__(self, fname, data, convert_dates=None, write_index=True, self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8} def _write(self, to_write): - """ - Helper to call encode before writing to file for Python 3 compat. - """ + """Helper to call encode before writing to file for Python 3 compat.""" if compat.PY3: self._file.write(to_write.encode(self._encoding or self._default_encoding)) diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py index 08f63b0179db2..293aa7889b02d 100644 --- a/pandas/io/tests/generate_legacy_pickles.py +++ b/pandas/io/tests/generate_legacy_pickles.py @@ -51,7 +51,7 @@ def _create_sp_frame(): return SparseDataFrame(data, index=dates) def create_data(): - """ create the pickle data """ + """create the pickle data.""" import numpy as np import pandas diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py index 6cfe4bea01045..2f868fd97b942 100644 --- a/pandas/io/tests/test_cparser.py +++ b/pandas/io/tests/test_cparser.py @@ -1,6 +1,4 @@ -""" -C/Cython ascii file parser tests -""" +"""C/Cython ascii file parser tests.""" from pandas.compat import StringIO, BytesIO, map from datetime import datetime diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 612840e82e3ff..d0ef42f2a378f 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -39,9 +39,7 @@ class ParserTests(object): - """ - Want to be able to test either C+Cython or Python+Cython parsers - """ + """Want to be able to test either C+Cython or Python+Cython parsers.""" data1 = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index b70248d1ef3f4..088711699c840 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -1,6 +1,6 @@ # pylint: disable=E1101,E1103,W0232 -""" manage legacy pickle 
tests """ +"""manage legacy pickle tests.""" from datetime import datetime, timedelta import operator diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 9c9d20e51be64..a96c911711902 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -56,7 +56,7 @@ def safe_close(store): def create_tempfile(path): - """ create an unopened named temporary file """ + """create an unopened named temporary file.""" return os.path.join(tempfile.gettempdir(),path) @@ -81,11 +81,9 @@ def ensure_clean_store(path, mode='a', complevel=None, complib=None, @contextmanager def ensure_clean_path(path): - """ - return essentially a named temporary file that is not opened - and deleted on existing; if path is a list, then create and - return list of filenames - """ + """return essentially a named temporary file that is not opened and deleted + on existing; if path is a list, then create and return list of + filenames.""" try: if isinstance(path, list): filenames = [ create_tempfile(p) for p in path ] @@ -104,8 +102,8 @@ def ensure_clean_path(path): tables.parameters.MAX_THREADS = 1 def _maybe_remove(store, key): - """For tests using tables, try removing the table to be sure there is - no content from previous tests using the same table name.""" + """For tests using tables, try removing the table to be sure there is no + content from previous tests using the same table name.""" try: store.remove(key) except: @@ -113,7 +111,7 @@ def _maybe_remove(store, key): def compat_assert_produces_warning(w,f): - """ don't produce a warning under PY3 """ + """don't produce a warning under PY3.""" if compat.PY3: f() else: @@ -1055,7 +1053,7 @@ def test_append_with_different_block_ordering(self): self.assertRaises(ValueError, store.append, 'df', df) def test_ndim_indexables(self): - """ test using ndim tables in new ways""" + """test using ndim tables in new ways.""" with ensure_clean_store(self.path) as store: diff --git 
a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 80da7ae6bf391..15419760e11fc 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -119,9 +119,8 @@ class PandasSQLTest(unittest.TestCase): - """Base class with common private methods for - SQLAlchemy and fallback cases. - """ + """Base class with common private methods for SQLAlchemy and fallback + cases.""" def drop_table(self, table_name): self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name) @@ -287,8 +286,8 @@ def _tquery(self): class _TestSQLApi(PandasSQLTest): - """Test the public API as it would be used - directly, including legacy names + """Test the public API as it would be used directly, including legacy + names. Notes: flavor can always be passed even in SQLAlchemy mode, @@ -311,7 +310,7 @@ def test_read_sql_iris(self): self._check_iris_loaded_frame(iris_frame) def test_legacy_read_frame(self): - """Test legacy name read_frame""" + """Test legacy name read_frame.""" iris_frame = sql.read_frame( "SELECT * FROM iris", self.conn, flavor='sqlite') self._check_iris_loaded_frame(iris_frame) @@ -375,7 +374,11 @@ def test_to_sql_panel(self): def test_legacy_write_frame(self): """Test legacy write frame name. 
- Assume that functionality is already tested above so just do quick check that it basically works""" + + Assume that functionality is already tested above so just do + quick check that it basically works + + """ sql.write_frame( self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite') self.assertTrue( @@ -410,7 +413,7 @@ def test_tquery(self): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) def test_date_parsing(self): - """ Test date parsing in read_sql """ + """Test date parsing in read_sql.""" # No Parsing df = sql.read_sql( "SELECT * FROM types_test_data", self.conn, flavor='sqlite') @@ -446,7 +449,7 @@ def test_date_parsing(self): "IntDateCol loaded with incorrect type") def test_date_and_index(self): - """ Test case where same column appears in parse_date and index_col""" + """Test case where same column appears in parse_date and index_col.""" df = sql.read_sql("SELECT * FROM types_test_data", self.conn, flavor='sqlite', @@ -461,8 +464,7 @@ def test_date_and_index(self): "IntDateCol loaded with incorrect type") class TestSQLApi(_TestSQLApi): - """Test the public API as it would be used directly - """ + """Test the public API as it would be used directly.""" flavor = 'sqlite' def connect(self): @@ -503,8 +505,7 @@ def test_to_sql_index_label(self): class TestSQLLegacyApi(_TestSQLApi): - """Test the public legacy API - """ + """Test the public legacy API.""" flavor = 'sqlite' def connect(self): @@ -512,10 +513,11 @@ def connect(self): class _TestSQLAlchemy(PandasSQLTest): - """ - Base class for testing the sqlalchemy backend. Subclasses for specific - database types are created below. - Assume that sqlalchemy takes case of the DB specifics + """Base class for testing the sqlalchemy backend. + + Subclasses for specific database types are created below. 
Assume + that sqlalchemy takes case of the DB specifics + """ def test_read_sql(self): diff --git a/pandas/io/wb.py b/pandas/io/wb.py index d815bb19ec8b8..94a80f675e993 100644 --- a/pandas/io/wb.py +++ b/pandas/io/wb.py @@ -9,8 +9,7 @@ def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'], start=2003, end=2005): - """ - Download data series from the World Bank's World Development Indicators + """Download data series from the World Bank's World Development Indicators. Parameters ---------- @@ -29,6 +28,7 @@ def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'], ------- ``pandas`` DataFrame with columns: country, iso2c, year, indicator value. + """ # Are ISO-2 country codes valid? @@ -104,8 +104,7 @@ def _get_data(indicator="NY.GNS.ICTR.GN.ZS", country='US', def get_countries(): - '''Query information about countries - ''' + """Query information about countries.""" url = 'http://api.worldbank.org/countries/?per_page=1000&format=json' with urlopen(url) as response: data = response.read() @@ -120,8 +119,7 @@ def get_countries(): def get_indicators(): - '''Download information about all World Bank data series - ''' + """Download information about all World Bank data series.""" url = 'http://api.worldbank.org/indicators?per_page=50000&format=json' with urlopen(url) as response: data = response.read() @@ -151,8 +149,7 @@ def get_value(x): def search(string='gdp.*capi', field='name', case=False): - """ - Search available data series from the world bank + """Search available data series from the world bank. 
Parameters ---------- @@ -181,6 +178,7 @@ def search(string='gdp.*capi', field='name', case=False): note: sourceNote: topics: + """ # Create cached list of series if it does not exist global _cached_series diff --git a/pandas/rpy/base.py b/pandas/rpy/base.py index 4cd86d3c3f4e3..70986516c901a 100644 --- a/pandas/rpy/base.py +++ b/pandas/rpy/base.py @@ -2,11 +2,11 @@ class lm(object): - """ - Examples - -------- - >>> model = lm('x ~ y + z', data) + """Examples. + +>>> model = lm('x ~ y + z', data) >>> model.coef + """ def __init__(self, formula, data): pass diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py index 5747285deb988..4d4855bce12c9 100644 --- a/pandas/rpy/common.py +++ b/pandas/rpy/common.py @@ -37,9 +37,7 @@ def load_data(name, package=None, convert=True): def _rclass(obj): - """ - Return R class name for input object - """ + """Return R class name for input object.""" return r['class'](obj)[0] @@ -48,9 +46,7 @@ def _is_null(obj): def _convert_list(obj): - """ - Convert named Vector to dict, factors to list - """ + """Convert named Vector to dict, factors to list.""" try: values = [convert_robj(x) for x in obj] keys = r['names'](obj) @@ -64,9 +60,7 @@ def _convert_list(obj): def _convert_array(obj): - """ - Convert Array to DataFrame - """ + """Convert Array to DataFrame.""" def _list(item): try: return list(item) @@ -225,9 +219,8 @@ def convert_robj(obj, use_pandas=True): def convert_to_r_posixct(obj): - """ - Convert DatetimeIndex or np.datetime array to R POSIXct using - m8[s] format. + """Convert DatetimeIndex or np.datetime array to R POSIXct using m8[s] + format. Parameters ---------- @@ -280,8 +273,7 @@ def convert_to_r_posixct(obj): def convert_to_r_dataframe(df, strings_as_factors=False): - """ - Convert a pandas DataFrame to a R data.frame. + """Convert a pandas DataFrame to a R data.frame. 
Parameters ---------- @@ -329,8 +321,7 @@ def convert_to_r_dataframe(df, strings_as_factors=False): def convert_to_r_matrix(df, strings_as_factors=False): - """ - Convert a pandas DataFrame to a R matrix. + """Convert a pandas DataFrame to a R matrix. Parameters ---------- diff --git a/pandas/rpy/tests/test_common.py b/pandas/rpy/tests/test_common.py index a2e6d08d07b58..0304edabeeac1 100644 --- a/pandas/rpy/tests/test_common.py +++ b/pandas/rpy/tests/test_common.py @@ -1,6 +1,4 @@ -""" -Testing that functions from rpy work as expected -""" +"""Testing that functions from rpy work as expected.""" import pandas as pd import numpy as np @@ -135,9 +133,10 @@ def test_dist(self): assert np.array_equal(df.columns, labels) def test_timeseries(self): - """ - Test that the series has an informative index. + """Test that the series has an informative index. + Unfortunately the code currently does not build a DateTimeIndex + """ for name in ( 'austres', 'co2', 'fdeaths', 'freeny.y', 'JohnsonJohnson', diff --git a/pandas/rpy/vars.py b/pandas/rpy/vars.py index 4756b2779224c..19889af15466c 100644 --- a/pandas/rpy/vars.py +++ b/pandas/rpy/vars.py @@ -2,18 +2,17 @@ class VAR(object): - """ + """Parameters. - Parameters - ---------- y : - p : - type : {"const", "trend", "both", "none"} - season : - exogen : - lag_max : - ic : {"AIC", "HQ", "SC", "FPE"} - Information criterion to use, if lag_max is not None + p : + type : {"const", "trend", "both", "none"} + season : + exogen : + lag_max : + ic : {"AIC", "HQ", "SC", "FPE"} + Information criterion to use, if lag_max is not None + """ def __init__(y, p=1, type="none", season=None, exogen=None, lag_max=None, ic=None): diff --git a/pandas/sandbox/qtpandas.py b/pandas/sandbox/qtpandas.py index 3f284990efd40..a30522f44a2cb 100644 --- a/pandas/sandbox/qtpandas.py +++ b/pandas/sandbox/qtpandas.py @@ -1,8 +1,8 @@ -''' -Easy integration of DataFrame into pyqt framework +"""Easy integration of DataFrame into pyqt framework. 
@author: Jev Kuznetsov -''' + +""" try: from PyQt4.QtCore import QAbstractTableModel, Qt, QVariant, QModelIndex from PyQt4.QtGui import ( @@ -17,7 +17,7 @@ class DataFrameModel(QAbstractTableModel): - ''' data model for a DataFrame class ''' + """data model for a DataFrame class.""" def __init__(self): super(DataFrameModel, self).__init__() self.df = DataFrame() @@ -26,8 +26,8 @@ def setDataFrame(self, dataFrame): self.df = dataFrame def signalUpdate(self): - ''' tell viewers to update their data (this is full update, not - efficient)''' + """tell viewers to update their data (this is full update, not + efficient)""" self.layoutChanged.emit() #------------- table display functions ----------------- @@ -83,7 +83,7 @@ def columnCount(self, index=QModelIndex()): class DataFrameWidget(QWidget): - ''' a simple widget for using DataFrames in a gui ''' + """a simple widget for using DataFrames in a gui.""" def __init__(self, dataFrame, parent=None): super(DataFrameWidget, self).__init__(parent) @@ -106,7 +106,7 @@ def setDataFrame(self, dataFrame): def testDf(): - ''' creates test dataframe ''' + """creates test dataframe.""" data = {'int': [1, 2, 3], 'float': [1.5, 2.5, 3.5], 'string': ['a', 'b', 'c'], 'nan': [np.nan, np.nan, np.nan]} return DataFrame(data, index=Index(['AAA', 'BBB', 'CCC']), diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index 7b23b306d2927..e7e24fb754837 100644 --- a/pandas/sparse/array.py +++ b/pandas/sparse/array.py @@ -1,6 +1,4 @@ -""" -SparseArray data structure -""" +"""SparseArray data structure.""" from __future__ import division # pylint: disable=E1101,E1103,W0231 @@ -21,10 +19,8 @@ def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, **eval_kwargs): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. 
- """ + """Wrapper function for Series arithmetic operations, to avoid code + duplication.""" def wrapper(self, other): if isinstance(other, np.ndarray): if len(self) != len(other): @@ -96,22 +92,23 @@ def _sparse_fillop(this, other, name): class SparseArray(PandasObject, np.ndarray): - """Data structure for labeled, sparse floating point data - -Parameters ----------- -data : {array-like, Series, SparseSeries, dict} -kind : {'block', 'integer'} -fill_value : float - Defaults to NaN (code for missing) -sparse_index : {BlockIndex, IntIndex}, optional - Only if you have one. Mainly used internally - -Notes ------ -SparseArray objects are immutable via the typical Python means. If you -must change values, convert to dense, make your changes, then convert back -to sparse + """Data structure for labeled, sparse floating point data. + + Parameters + ---------- + data : {array-like, Series, SparseSeries, dict} + kind : {'block', 'integer'} + fill_value : float + Defaults to NaN (code for missing) + sparse_index : {BlockIndex, IntIndex}, optional + Only if you have one. Mainly used internally + + Notes + ----- + SparseArray objects are immutable via the typical Python means. If you + must change values, convert to dense, make your changes, then convert back + to sparse + """ __array_priority__ = 15 _typ = 'array' @@ -189,22 +186,20 @@ def kind(self): return 'integer' def __array_finalize__(self, obj): - """ - Gets called after any ufunc or other array operations, necessary - to pass on the index. 
- """ + """Gets called after any ufunc or other array operations, necessary to + pass on the index.""" self.sp_index = getattr(obj, 'sp_index', None) self.fill_value = getattr(obj, 'fill_value', None) def __reduce__(self): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" object_state = list(ndarray.__reduce__(self)) subclass_state = self.fill_value, self.sp_index object_state[2] = (object_state[2], subclass_state) return tuple(object_state) def __setstate__(self, state): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" nd_state, own_state = state ndarray.__setstate__(self, nd_state) @@ -239,9 +234,7 @@ def disable(self, other): @property def values(self): - """ - Dense values - """ + """Dense values.""" output = np.empty(len(self), dtype=np.float64) int_index = self.sp_index.to_int_index() output.fill(self.fill_value) @@ -254,13 +247,11 @@ def sp_values(self): return self.view(np.ndarray) def get_values(self, fill=None): - """ return a dense representation """ + """return a dense representation.""" return self.to_dense(fill=fill) def to_dense(self, fill=None): - """ - Convert SparseSeries to (dense) Series - """ + """Convert SparseSeries to (dense) Series.""" values = self.values # fill the nans @@ -277,9 +268,7 @@ def __iter__(self): raise StopIteration def __getitem__(self, key): - """ - - """ + """""" if com.is_integer(key): return self._get_val_at(key) else: @@ -369,18 +358,17 @@ def __setslice__(self, i, j, value): "SparseArray does not support item assignment via slices") def astype(self, dtype=None): - """ - - """ + """""" dtype = np.dtype(dtype) if dtype is not None and dtype not in (np.float_, float): raise TypeError('Can only support floating point data for now') return self.copy() def copy(self, deep=True): - """ - Make a copy of the SparseSeries. Only the actual sparse values need to - be copied + """Make a copy of the SparseSeries. 
+ + Only the actual sparse values need to be copied + """ if deep: values = self.sp_values.copy() @@ -434,14 +422,14 @@ def sum(self, axis=None, dtype=None, out=None): return sp_sum + self.fill_value * nsparse def cumsum(self, axis=0, dtype=None, out=None): - """ - Cumulative sum of values. Preserves locations of NaN values + """Cumulative sum of values. Preserves locations of NaN values. Extra parameters are to preserve ndarray interface. Returns ------- cumsum : Series + """ if com.notnull(self.fill_value): return self.to_dense().cumsum() @@ -470,7 +458,7 @@ def mean(self, axis=None, dtype=None, out=None): def _maybe_to_dense(obj): - """ try to convert to dense """ + """try to convert to dense.""" if hasattr(obj, 'to_dense'): return obj.to_dense() return obj @@ -486,8 +474,7 @@ def _maybe_to_sparse(array): def make_sparse(arr, kind='block', fill_value=nan): - """ - Convert ndarray to sparse format + """Convert ndarray to sparse format. Parameters ---------- @@ -498,6 +485,7 @@ def make_sparse(arr, kind='block', fill_value=nan): Returns ------- (sparse_values, index) : (ndarray, SparseIndex) + """ if hasattr(arr, 'values'): arr = arr.values diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index bd34c7e5f02b2..950e0ac8ea00a 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -1,6 +1,7 @@ -""" -Data structures for sparse float data. Life is made simpler by dealing only -with float64 data +"""Data structures for sparse float data. + +Life is made simpler by dealing only with float64 data + """ from __future__ import division # pylint: disable=E1101,E1103,W0231,E0202 @@ -30,9 +31,8 @@ class SparseDataFrame(DataFrame): - """ - DataFrame containing sparse floating point data in the form of SparseSeries - objects + """DataFrame containing sparse floating point data in the form of + SparseSeries objects. 
Parameters ---------- @@ -45,6 +45,7 @@ class SparseDataFrame(DataFrame): default_fill_value : float Default fill_value for converting Series to SparseSeries. Will not override SparseSeries passed in + """ _constructor_sliced = SparseSeries _subtyp = 'sparse_frame' @@ -216,7 +217,7 @@ def __getstate__(self): _default_kind=self._default_kind) def _unpickle_sparse_frame_compat(self, state): - """ original pickle format """ + """original pickle format.""" series, cols, idx, fv, kind = state if not isinstance(cols, Index): # pragma: no cover @@ -239,12 +240,12 @@ def _unpickle_sparse_frame_compat(self, state): self._default_kind = kind def to_dense(self): - """ - Convert to dense DataFrame + """Convert to dense DataFrame. Returns ------- df : DataFrame + """ data = dict((k, v.to_dense()) for k, v in compat.iteritems(self)) return DataFrame(data, index=self.index) @@ -253,9 +254,7 @@ def astype(self, dtype): raise NotImplementedError def copy(self, deep=True): - """ - Make a copy of this SparseDataFrame - """ + """Make a copy of this SparseDataFrame.""" result = super(SparseDataFrame, self).copy(deep=deep) result._default_fill_value = self._default_fill_value result._default_kind = self._default_kind @@ -334,9 +333,7 @@ def _sanitize_column(self, key, value): return clean def __getitem__(self, key): - """ - Retrieve column or slice from DataFrame - """ + """Retrieve column or slice from DataFrame.""" if isinstance(key, slice): date_rng = self.index[key] return self.reindex(date_rng) @@ -355,8 +352,7 @@ def get_value(self, index, col, takeable=False): return series.get_value(index, takeable=takeable) def set_value(self, index, col, value, takeable=False): - """ - Put single value at passed column and index + """Put single value at passed column and index. 
Parameters ---------- @@ -374,6 +370,7 @@ def set_value(self, index, col, value, takeable=False): Returns ------- frame : DataFrame + """ dense = self.to_dense().set_value(index, col, value, takeable=takeable) return dense.to_sparse(kind=self._default_kind, @@ -656,9 +653,7 @@ def rrenamer(x): return this, other def transpose(self): - """ - Returns a DataFrame with the rows/columns switched. - """ + """Returns a DataFrame with the rows/columns switched.""" return SparseDataFrame(self.values.T, index=self.columns, columns=self.index, default_fill_value=self._default_fill_value, @@ -670,8 +665,7 @@ def count(self, axis=0, **kwds): return self.apply(lambda x: x.count(), axis=axis) def cumsum(self, axis=0): - """ - Return SparseDataFrame of cumulative sums over requested axis. + """Return SparseDataFrame of cumulative sums over requested axis. Parameters ---------- @@ -681,12 +675,12 @@ def cumsum(self, axis=0): Returns ------- y : SparseDataFrame + """ return self.apply(lambda x: x.cumsum(), axis=axis) def apply(self, func, axis=0, broadcast=False, reduce=False): - """ - Analogous to DataFrame.apply, for SparseDataFrame + """Analogous to DataFrame.apply, for SparseDataFrame. Parameters ---------- @@ -700,6 +694,7 @@ def apply(self, func, axis=0, broadcast=False, reduce=False): Returns ------- applied : Series or SparseDataFrame + """ if not len(self.columns): return self @@ -722,10 +717,9 @@ def apply(self, func, axis=0, broadcast=False, reduce=False): return self._apply_broadcast(func, axis) def applymap(self, func): - """ - Apply a function to a DataFrame that is intended to operate + """Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the - DataFrame + DataFrame. 
Parameters ---------- @@ -735,11 +729,13 @@ def applymap(self, func): Returns ------- applied : DataFrame + """ return self.apply(lambda x: lmap(func, x)) def dict_to_manager(sdict, columns, index): - """ create and return the block manager from a dict of series, columns, index """ + """create and return the block manager from a dict of series, columns, + index.""" # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] @@ -748,9 +744,7 @@ def dict_to_manager(sdict, columns, index): def stack_sparse_frame(frame): - """ - Only makes sense when fill_value is NaN - """ + """Only makes sense when fill_value is NaN.""" lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)] nobs = sum(lengths) @@ -782,9 +776,8 @@ def stack_sparse_frame(frame): def homogenize(series_dict): - """ - Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex - corresponding to the locations where they all have data + """Conform a set of SparseSeries (with NaN fill_value) to a common + SparseIndex corresponding to the locations where they all have data. Parameters ---------- @@ -798,6 +791,7 @@ def homogenize(series_dict): Returns ------- homogenized : dict of SparseSeries + """ index = None diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py index bfc4ab9d3eb48..5d02b431882dd 100644 --- a/pandas/sparse/list.py +++ b/pandas/sparse/list.py @@ -8,14 +8,14 @@ class SparseList(PandasObject): - """ - Data structure for accumulating data to be converted into a - SparseArray. Has similar API to the standard Python list + """Data structure for accumulating data to be converted into a SparseArray. + Has similar API to the standard Python list. 
Parameters ---------- data : scalar or array-like fill_value : scalar, default NaN + """ def __init__(self, data=None, fill_value=np.nan): @@ -57,8 +57,7 @@ def is_consolidated(self): return self.nchunks == 1 def consolidate(self, inplace=True): - """ - Internally consolidate chunks of data + """Internally consolidate chunks of data. Parameters ---------- @@ -70,6 +69,7 @@ def consolidate(self, inplace=True): splist : SparseList If inplace=False, new object, otherwise reference to existing object + """ if not inplace: result = self.copy() @@ -90,24 +90,24 @@ def _consolidate_inplace(self): self._chunks = [new_arr] def copy(self): - """ - Return copy of the list + """Return copy of the list. Returns ------- new_list : SparseList + """ new_splist = SparseList(fill_value=self.fill_value) new_splist._chunks = list(self._chunks) return new_splist def to_array(self): - """ - Return SparseArray from data stored in the SparseList + """Return SparseArray from data stored in the SparseList. Returns ------- sparr : SparseArray + """ self.consolidate(inplace=True) return self._chunks[0] diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 20bbc58cc908f..913fcde8646a2 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -1,6 +1,7 @@ -""" -Data structures for sparse float data. Life is made simpler by dealing only -with float64 data +"""Data structures for sparse float data. + +Life is made simpler by dealing only with float64 data + """ # pylint: disable=E1101,E1103,W0231 @@ -42,8 +43,7 @@ def __set__(self, obj, value): class SparsePanel(Panel): - """ - Sparse version of Panel + """Sparse version of Panel. 
Parameters ---------- @@ -60,6 +60,7 @@ class SparsePanel(Panel): Notes ----- + """ ndim = 3 _typ = 'panel' @@ -120,18 +121,16 @@ def __array_wrap__(self, result): @classmethod def from_dict(cls, data): - """ - Analogous to Panel.from_dict - """ + """Analogous to Panel.from_dict.""" return SparsePanel(data) def to_dense(self): - """ - Convert SparsePanel to (dense) Panel + """Convert SparsePanel to (dense) Panel. Returns ------- dense : Panel + """ return Panel(self.values, self.items, self.major_axis, self.minor_axis) @@ -188,9 +187,7 @@ def _ixs(self, i, axis=0): return self.xs(key, axis=axis) def _slice(self, slobj, axis=0, typ=None): - """ - for compat as we don't support Block Manager here - """ + """for compat as we don't support Block Manager here.""" axis = self._get_axis_name(axis) index = self._get_axis(axis) @@ -215,8 +212,7 @@ def __setitem__(self, key, value): self._items = Index(list(self.items) + [key]) def set_value(self, item, major, minor, value): - """ - Quickly set single value at (item, major, minor) location + """Quickly set single value at (item, major, minor) location. Parameters ---------- @@ -233,6 +229,7 @@ def set_value(self, item, major, minor, value): Returns ------- panel : SparsePanel + """ dense = self.to_dense().set_value(item, major, minor, value) return dense.to_sparse(kind=self.default_kind, @@ -262,12 +259,12 @@ def __setstate__(self, state): self._frames = frames def copy(self, deep=True): - """ - Make a copy of the sparse panel + """Make a copy of the sparse panel. Returns ------- copy : SparsePanel + """ d = self._construct_axes_dict() @@ -282,12 +279,12 @@ def copy(self, deep=True): return SparsePanel(new_data, **d) def to_frame(self, filter_observations=True): - """ - Convert SparsePanel to (dense) DataFrame + """Convert SparsePanel to (dense) DataFrame. 
Returns ------- frame : DataFrame + """ if not filter_observations: raise TypeError('filter_observations=False not supported for ' @@ -337,8 +334,7 @@ def to_frame(self, filter_observations=True): def reindex(self, major=None, items=None, minor=None, major_axis=None, minor_axis=None, copy=False): - """ - Conform / reshape panel axis labels to new input labels + """Conform / reshape panel axis labels to new input labels. Parameters ---------- @@ -351,6 +347,7 @@ def reindex(self, major=None, items=None, minor=None, major_axis=None, Returns ------- reindexed : SparsePanel + """ major = com._mut_exclusive(major=major, major_axis=major_axis) minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis) @@ -452,8 +449,7 @@ def _combinePanel(self, other, func): default_kind=self.default_kind) def major_xs(self, key): - """ - Return slice of panel along major axis + """Return slice of panel along major axis. Parameters ---------- @@ -464,13 +460,13 @@ def major_xs(self, key): ------- y : DataFrame index -> minor axis, columns -> items + """ slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self)) return DataFrame(slices, index=self.minor_axis, columns=self.items) def minor_xs(self, key): - """ - Return slice of panel along minor axis + """Return slice of panel along minor axis. 
Parameters ---------- @@ -481,6 +477,7 @@ def minor_xs(self, key): ------- y : SparseDataFrame index -> major axis, columns -> items + """ slices = dict((k, v[key]) for k, v in compat.iteritems(self)) return SparseDataFrame(slices, index=self.major_axis, @@ -495,7 +492,7 @@ def pow(self, val, *args, **kwargs): return self.__pow__(val) def mod(self, val, *args, **kwargs): - """wrapper around `__mod__` (only works for scalar values""" + """wrapper around `__mod__` (only works for scalar values.""" return self.__mod__(val) # Sparse objects opt out of numexpr diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 1c599653f9fc5..06e732012fbab 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -1,6 +1,7 @@ -""" -Data structures for sparse float data. Life is made simpler by dealing only -with float64 data +"""Data structures for sparse float data. + +Life is made simpler by dealing only with float64 data + """ # pylint: disable=E1101,E1103,W0231 @@ -35,12 +36,12 @@ def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, **eval_kwargs): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. + """Wrapper function for Series arithmetic operations, to avoid code + duplication. + + str_rep, default_axis, fill_zeros and eval_kwargs are not used, but + are present for compatibility. - str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are present - for compatibility. """ def wrapper(self, other): @@ -84,7 +85,7 @@ def _sparse_series_op(left, right, op, name): class SparseSeries(Series): - """Data structure for labeled, sparse floating point data + """Data structure for labeled, sparse floating point data. Parameters ---------- @@ -100,6 +101,7 @@ class SparseSeries(Series): SparseSeries objects are immutable via the typical Python means. 
If you must change values, convert to dense, make your changes, then convert back to sparse + """ _subtyp = 'sparse_series' @@ -210,11 +212,11 @@ def __init__(self, data, index=None, sparse_index=None, kind='block', @property def values(self): - """ return the array """ + """return the array.""" return self._data._values def get_values(self): - """ same as values """ + """same as values.""" return self._data._values.to_dense().view() @property @@ -243,9 +245,7 @@ def npoints(self): @classmethod def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): - """ - Simplified alternate constructor - """ + """Simplified alternate constructor.""" return cls(arr, index=index, name=name, copy=copy, fill_value=fill_value, fastpath=fastpath) @property @@ -260,7 +260,7 @@ def kind(self): return 'integer' def as_sparse_array(self, kind=None, fill_value=None, copy=False): - """ return my self as a sparse array, do not copy by default """ + """return my self as a sparse array, do not copy by default.""" if fill_value is None: fill_value = self.fill_value @@ -282,9 +282,7 @@ def __unicode__(self): return rep def __array_wrap__(self, result): - """ - Gets called prior to a ufunc (and after) - """ + """Gets called prior to a ufunc (and after)""" return self._constructor(result, index=self.index, sparse_index=self.sp_index, @@ -292,10 +290,8 @@ def __array_wrap__(self, result): copy=False).__finalize__(self) def __array_finalize__(self, obj): - """ - Gets called after any ufunc or other array operations, necessary - to pass on the index. 
- """ + """Gets called after any ufunc or other array operations, necessary to + pass on the index.""" self.name = getattr(obj, 'name', None) self.fill_value = getattr(obj, 'fill_value', None) @@ -333,7 +329,7 @@ def _unpickle_series_compat(self, state): self.name = name def __iter__(self): - """ forward to the array """ + """forward to the array.""" return iter(self.values) def _set_subtyp(self, is_all_dates): @@ -343,13 +339,11 @@ def _set_subtyp(self, is_all_dates): object.__setattr__(self, '_subtyp', 'sparse_series') def _get_val_at(self, loc): - """ forward to the array """ + """forward to the array.""" return self.block.values._get_val_at(loc) def __getitem__(self, key): - """ - - """ + """""" try: return self._get_val_at(self.index.get_loc(key)) @@ -374,13 +368,13 @@ def _set_with_engine(self, key, value): return self.set_value(key, value) def abs(self): - """ - Return an object with absolute value taken. Only applicable to objects - that are all numeric + """Return an object with absolute value taken. Only applicable to + objects that are all numeric. Returns ------- abs: type of caller + """ res_sp_values = np.abs(self.sp_values) return self._constructor(res_sp_values, index=self.index, @@ -388,9 +382,8 @@ def abs(self): fill_value=self.fill_value) def get(self, label, default=None): - """ - Returns value occupying requested label, default to specified - missing value if not present. Analogous to dict.get + """Returns value occupying requested label, default to specified + missing value if not present. Analogous to dict.get. Parameters ---------- @@ -402,6 +395,7 @@ def get(self, label, default=None): Returns ------- y : scalar + """ if label in self.index: loc = self.index.get_loc(label) @@ -410,8 +404,7 @@ def get(self, label, default=None): return default def get_value(self, label, takeable=False): - """ - Retrieve single value at passed index label + """Retrieve single value at passed index label. 
Parameters ---------- @@ -421,15 +414,15 @@ def get_value(self, label, takeable=False): Returns ------- value : scalar value + """ loc = label if takeable is True else self.index.get_loc(label) return self._get_val_at(loc) def set_value(self, label, value, takeable=False): - """ - Quickly set single value at passed label. If label is not contained, a - new object is created with the label placed at the end of the result - index + """Quickly set single value at passed label. If label is not contained, + a new object is created with the label placed at the end of the result + index. Parameters ---------- @@ -447,6 +440,7 @@ def set_value(self, label, value, takeable=False): Returns ------- series : SparseSeries + """ values = self.to_dense() @@ -477,9 +471,7 @@ def _set_values(self, key, value): self._data = SingleBlockManager(values, self.index) def to_dense(self, sparse_only=False): - """ - Convert SparseSeries to (dense) Series - """ + """Convert SparseSeries to (dense) Series.""" if sparse_only: int_index = self.sp_index.to_int_index() index = self.index.take(int_index.indices) @@ -493,9 +485,10 @@ def density(self): return r def copy(self, deep=True): - """ - Make a copy of the SparseSeries. Only the actual sparse values need to - be copied + """Make a copy of the SparseSeries. + + Only the actual sparse values need to be copied + """ new_data = self._data if deep: @@ -506,14 +499,14 @@ def copy(self, deep=True): fill_value=self.fill_value).__finalize__(self) def reindex(self, index=None, method=None, copy=True, limit=None): - """ - Conform SparseSeries to new Index + """Conform SparseSeries to new Index. 
See Series.reindex docstring for general behavior Returns ------- reindexed : SparseSeries + """ new_index = _ensure_index(index) @@ -526,8 +519,7 @@ def reindex(self, index=None, method=None, copy=True, limit=None): index=new_index).__finalize__(self) def sparse_reindex(self, new_index): - """ - Conform sparse values to new SparseIndex + """Conform sparse values to new SparseIndex. Parameters ---------- @@ -536,6 +528,7 @@ def sparse_reindex(self, new_index): Returns ------- reindexed : SparseSeries + """ if not isinstance(new_index, splib.SparseIndex): raise TypeError('new index must be a SparseIndex') @@ -559,12 +552,12 @@ def take(self, indices, axis=0, convert=True): return self._constructor(new_values, index=new_index).__finalize__(self) def cumsum(self, axis=0, dtype=None, out=None): - """ - Cumulative sum of values. Preserves locations of NaN values + """Cumulative sum of values. Preserves locations of NaN values. Returns ------- cumsum : Series or SparseSeries + """ new_array = SparseArray.cumsum(self.values) if isinstance(new_array, SparseArray): @@ -572,8 +565,10 @@ def cumsum(self, axis=0, dtype=None, out=None): return Series(new_array, index=self.index).__finalize__(self) def dropna(self, axis=0, inplace=False, **kwargs): - """ - Analogous to Series.dropna. If fill_value=NaN, returns a dense Series + """Analogous to Series.dropna. 
+ + If fill_value=NaN, returns a dense Series + """ # TODO: make more efficient axis = self._get_axis_number(axis or 0) @@ -588,9 +583,7 @@ def dropna(self, axis=0, inplace=False, **kwargs): return dense_valid.to_sparse(fill_value=self.fill_value) def shift(self, periods, freq=None, **kwds): - """ - Analogous to Series.shift - """ + """Analogous to Series.shift.""" from pandas.core.datetools import _resolve_offset offset = _resolve_offset(freq, kwds) @@ -627,9 +620,8 @@ def shift(self, periods, freq=None, **kwds): fill_value=self.fill_value).__finalize__(self) def combine_first(self, other): - """ - Combine Series values, choosing the calling Series's values - first. Result index will be the union of the two indexes + """Combine Series values, choosing the calling Series's values first. + Result index will be the union of the two indexes. Parameters ---------- @@ -638,6 +630,7 @@ def combine_first(self, other): Returns ------- y : Series + """ if isinstance(other, SparseSeries): other = other.to_dense() diff --git a/pandas/stats/api.py b/pandas/stats/api.py index 3732f9ed39524..409c2089450de 100644 --- a/pandas/stats/api.py +++ b/pandas/stats/api.py @@ -1,6 +1,4 @@ -""" -Common namespace of statistical functions -""" +"""Common namespace of statistical functions.""" # pylint: disable-msg=W0611,W0614,W0401 diff --git a/pandas/stats/common.py b/pandas/stats/common.py index c30b3e7a4bf61..694bb324a9f0e 100644 --- a/pandas/stats/common.py +++ b/pandas/stats/common.py @@ -30,9 +30,7 @@ def _get_window_type(window_type): return final_type def banner(text, width=80): - """ - - """ + """""" toFill = width - len(text) left = toFill // 2 diff --git a/pandas/stats/math.py b/pandas/stats/math.py index 505415bebf89e..231203a8e022a 100644 --- a/pandas/stats/math.py +++ b/pandas/stats/math.py @@ -9,10 +9,8 @@ def rank(X, cond=1.0e-12): - """ - Return the rank of a matrix X based on its generalized inverse, - not the SVD. 
- """ + """Return the rank of a matrix X based on its generalized inverse, not the + SVD.""" X = np.asarray(X) if len(X.shape) == 2: import scipy.linalg as SL diff --git a/pandas/stats/misc.py b/pandas/stats/misc.py index c79bae34f20c4..d73ced18a0268 100644 --- a/pandas/stats/misc.py +++ b/pandas/stats/misc.py @@ -12,8 +12,7 @@ def zscore(series): def correl_ts(frame1, frame2): - """ - Pairwise correlation of columns of two DataFrame objects + """Pairwise correlation of columns of two DataFrame objects. Parameters ---------- @@ -21,6 +20,7 @@ def correl_ts(frame1, frame2): Returns ------- y : Series + """ results = {} for col, series in compat.iteritems(frame1): @@ -99,8 +99,7 @@ def percentileRank(frame, column=None, kind='mean'): def bucket(series, k, by=None): - """ - Produce DataFrame representing quantiles of a Series + """Produce DataFrame representing quantiles of a Series. Parameters ---------- @@ -113,6 +112,7 @@ def bucket(series, k, by=None): Returns ------- DataFrame + """ if by is None: by = series @@ -138,8 +138,7 @@ def _split_quantile(arr, k): def bucketcat(series, cats): - """ - Produce DataFrame representing quantiles of a Series + """Produce DataFrame representing quantiles of a Series. Parameters ---------- @@ -150,6 +149,7 @@ def bucketcat(series, cats): Returns ------- DataFrame + """ if not isinstance(series, Series): series = Series(series, index=np.arange(len(series))) @@ -169,8 +169,7 @@ def bucketcat(series, cats): def bucketpanel(series, bins=None, by=None, cat=None): - """ - Bucket data by two Series to create summary panel + """Bucket data by two Series to create summary panel. 
Parameters ---------- @@ -185,6 +184,7 @@ def bucketpanel(series, bins=None, by=None, cat=None): Returns ------- DataFrame + """ use_by = by is not None use_cat = cat is not None diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 523f055eaf605..1c717814bb0b2 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -1,7 +1,5 @@ -""" -Provides rolling statistical moments and related descriptive -statistics implemented in Cython -""" +"""Provides rolling statistical moments and related descriptive statistics +implemented in Cython.""" from __future__ import division from functools import wraps @@ -645,6 +643,7 @@ def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, The `freq` keyword is used to conform time series data to a specified frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). + """ def call_cython(arg, window, minp, args=(), kwargs={}): @@ -692,6 +691,7 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None, The `freq` keyword is used to conform time series data to a specified frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). + """ def call_cython(arg, window, minp, args, kwargs): minp = _use_window(minp, window) @@ -704,9 +704,8 @@ def call_cython(arg, window, minp, args, kwargs): def rolling_window(arg, window=None, win_type=None, min_periods=None, freq=None, center=False, mean=True, time_rule=None, axis=0, **kwargs): - """ - Applies a moving window of type ``window_type`` and size ``window`` - on the data. + """Applies a moving window of type ``window_type`` and size ``window`` on + the data. Parameters ---------- @@ -751,13 +750,14 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, * ``gaussian`` (needs std) * ``general_gaussian`` (needs power, width) * ``slepian`` (needs width). 
- + By default, the result is set to the right edge of the window. This can be changed to the center of the window by setting ``center=True``. The `freq` keyword is used to conform time series data to a specified frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). + """ if isinstance(window, (list, tuple, np.ndarray)): if win_type is not None: @@ -905,6 +905,7 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None, The `freq` keyword is used to conform time series data to a specified frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). + """ return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods, freq=freq, center=center, time_rule=time_rule) @@ -986,12 +987,13 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, Returns ------- y : type of input argument - + Notes ----- The `freq` keyword is used to conform time series data to a specified frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). + """ window = len(arg) return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq, diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 9d22068c1612f..4a3a877d63ff5 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -1,6 +1,4 @@ -""" -Ordinary least squares regression -""" +"""Ordinary least squares regression.""" # pylint: disable-msg=W0201 @@ -24,8 +22,7 @@ class OLS(StringMixin): - """ - Runs a full sample ordinary least squares regression. + """Runs a full sample ordinary least squares regression. Parameters ---------- @@ -75,8 +72,7 @@ def __init__(self, y, x, intercept=True, weights=None, nw_lags=None, self._x.get_values()).fit() def _prepare_data(self): - """ - Cleans the input for single OLS. + """Cleans the input for single OLS. 
Parameters ---------- @@ -89,6 +85,7 @@ def _prepare_data(self): ------- Series, DataFrame Cleaned lhs and rhs + """ (filt_lhs, filt_rhs, filt_weights, pre_filt_rhs, index, valid) = _filter_data(self._y_orig, self._x_orig, @@ -145,6 +142,7 @@ def df(self): """Returns the degrees of freedom. This equals the rank of the X matrix. + """ return self._df_raw @@ -208,7 +206,7 @@ def f_stat(self): def f_test(self, hypothesis): """Runs the F test, given a joint hypothesis. The hypothesis is - represented by a collection of equations, in the form + represented by a collection of equations, in the form. A*x_1+B*x_2=C @@ -222,6 +220,7 @@ def f_test(self, hypothesis): o = ols(...) o.f_test('1*x1+2*x2=0,1*x3=0') o.f_test(['1*x1+2*x2=0','1*x3=0']) + """ x_names = self._x.columns @@ -344,9 +343,7 @@ def t_stat(self): @cache_readonly def _var_beta_raw(self): - """ - Returns the raw covariance of beta. - """ + """Returns the raw covariance of beta.""" x = self._x.values y = self._y.values @@ -384,7 +381,11 @@ def _y_fitted_raw(self): @cache_readonly def y_fitted(self): - """Returns the fitted y values. This equals BX.""" + """Returns the fitted y values. + + This equals BX. + + """ if self._weights is None: index = self._x_filtered.index orig_index = index @@ -404,31 +405,33 @@ def _y_predict_raw(self): def y_predict(self): """Returns the predicted y values. - For in-sample, this is same as y_fitted.""" + For in-sample, this is same as y_fitted. + + """ return self.y_fitted def predict(self, beta=None, x=None, fill_value=None, fill_method=None, axis=0): - """ - Parameters - ---------- + """Parameters. + beta : Series - x : Series or DataFrame - fill_value : scalar or dict, default None - fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - axis : {0, 1}, default 0 - See DataFrame.fillna for more details - - Notes - ----- - 1. If both fill_value and fill_method are None then NaNs are dropped - (this is the default behavior) - 2. 
An intercept will be automatically added to the new_y_values if - the model was fitted using an intercept + x : Series or DataFrame + fill_value : scalar or dict, default None + fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + axis : {0, 1}, default 0 + See DataFrame.fillna for more details + + Notes + ----- + 1. If both fill_value and fill_method are None then NaNs are dropped + (this is the default behavior) + 2. An intercept will be automatically added to the new_y_values if + the model was fitted using an intercept + + Returns + ------- + Series of predicted values - Returns - ------- - Series of predicted values """ if beta is None and x is None: return self.y_predict @@ -520,9 +523,7 @@ def summary_as_matrix(self): @cache_readonly def summary(self): - """ - This returns the formatted result of the OLS computation - """ + """This returns the formatted result of the OLS computation.""" template = """ %(bannerTop)s @@ -599,8 +600,7 @@ def _total_times(self): class MovingOLS(OLS): - """ - Runs a rolling/expanding simple OLS. + """Runs a rolling/expanding simple OLS. Parameters ---------- @@ -1263,8 +1263,7 @@ def _safe_update(d, other): def _filter_data(lhs, rhs, weights=None): - """ - Cleans the input for single OLS. + """Cleans the input for single OLS. 
Parameters ---------- @@ -1279,6 +1278,7 @@ def _filter_data(lhs, rhs, weights=None): ------- Series, DataFrame Cleaned lhs and rhs + """ if not isinstance(lhs, Series): if len(lhs) != len(rhs): @@ -1313,10 +1313,8 @@ def _filter_data(lhs, rhs, weights=None): def _combine_rhs(rhs): - """ - Glue input X variables together while checking for potential - duplicates - """ + """Glue input X variables together while checking for potential + duplicates.""" series = {} if isinstance(rhs, Series): diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py index 3c67119427ae0..d797740089c2b 100644 --- a/pandas/stats/plm.py +++ b/pandas/stats/plm.py @@ -1,6 +1,4 @@ -""" -Linear regression objects for panel data -""" +"""Linear regression objects for panel data.""" # pylint: disable-msg=W0231 # pylint: disable-msg=E1101,E1103 @@ -27,6 +25,7 @@ class PanelOLS(OLS): """Implements panel OLS. See ols function docs + """ _panel_model = True @@ -61,7 +60,7 @@ def log(self, msg): print(msg) def _prepare_data(self): - """Cleans and stacks input data into DataFrame objects + """Cleans and stacks input data into DataFrame objects. If time effects is True, then we turn off intercepts and omit an item from every (entity and x) fixed effect. @@ -71,6 +70,7 @@ def _prepare_data(self): - Else, we omit an item from every fixed effect except one of them. The categorical variables will get dropped from x. + """ (x, x_filtered, y, weights, cat_mapping) = self._filter_data() @@ -117,9 +117,7 @@ def _prepare_data(self): return x, x_regressor, x_filtered, y, y_regressor def _filter_data(self): - """ - - """ + """""" data = self._x_orig cat_mapping = {} @@ -199,12 +197,12 @@ def _convert_x(self, x): return x_converted, cat_mapping def _add_dummies(self, panel, mapping): - """ - Add entity and / or categorical dummies to input X DataFrame + """Add entity and / or categorical dummies to input X DataFrame. 
Returns ------- DataFrame + """ panel = self._add_entity_effects(panel) panel = self._add_categorical_dummies(panel, mapping) @@ -212,12 +210,12 @@ def _add_dummies(self, panel, mapping): return panel def _add_entity_effects(self, panel): - """ - Add entity dummies to panel + """Add entity dummies to panel. Returns ------- DataFrame + """ from pandas.core.reshape import make_axis_dummies @@ -248,12 +246,12 @@ def _add_entity_effects(self, panel): return panel def _add_categorical_dummies(self, panel, cat_mappings): - """ - Add categorical dummies to panel + """Add categorical dummies to panel. Returns ------- DataFrame + """ if not self._x_effects: return panel @@ -297,11 +295,8 @@ def _add_categorical_dummies(self, panel, cat_mappings): @property def _use_all_dummies(self): - """ - In the case of using an intercept or including time fixed - effects, completely partitioning the sample would make the X - not full rank. - """ + """In the case of using an intercept or including time fixed effects, + completely partitioning the sample would make the X not full rank.""" return (not self._intercept and not self._time_effects) @cache_readonly @@ -469,8 +464,7 @@ def _is_numeric(df): def add_intercept(panel, name='intercept'): - """ - Add column of ones to input panel + """Add column of ones to input panel. Parameters ---------- @@ -480,6 +474,7 @@ def add_intercept(panel, name='intercept'): Returns ------- New object (same type as input) + """ panel = panel.copy() panel[name] = 1. @@ -491,6 +486,7 @@ class MovingPanelOLS(MovingOLS, PanelOLS): """Implements rolling/expanding panel OLS. See ols function docs + """ _panel_model = True @@ -541,9 +537,8 @@ def y_predict(self): return self._unstack_y(self._y_predict_raw) def lagged_y_predict(self, lag=1): - """ - Compute forecast Y value lagging coefficient by input number - of time periods + """Compute forecast Y value lagging coefficient by input number of time + periods. 
Parameters ---------- @@ -552,6 +547,7 @@ def lagged_y_predict(self, lag=1): Returns ------- DataFrame + """ x = self._x.values betas = self._beta_matrix(lag=lag) diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index 82f96bd444429..a44ab79a15828 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -1,6 +1,4 @@ -""" -Unit test suite for OLS and PanelOLS classes -""" +"""Unit test suite for OLS and PanelOLS classes.""" # pylint: disable-msg=W0212 diff --git a/pandas/stats/tests/test_var.py b/pandas/stats/tests/test_var.py index ab5709d013fa9..31bc62a27eb0a 100644 --- a/pandas/stats/tests/test_var.py +++ b/pandas/stats/tests/test_var.py @@ -100,9 +100,7 @@ def __init__(self): class RVAR(object): - """ - Estimates VAR model using R vars package and rpy - """ + """Estimates VAR model using R vars package and rpy.""" def __init__(self, data, p=1, type='both'): self.rdata = data diff --git a/pandas/stats/var.py b/pandas/stats/var.py index be55507f976cb..0478653e5122b 100644 --- a/pandas/stats/var.py +++ b/pandas/stats/var.py @@ -14,9 +14,8 @@ class VAR(StringMixin): - """ - Estimates VAR(p) regression on multivariate time series data - presented in pandas data structures. + """Estimates VAR(p) regression on multivariate time series data presented + in pandas data structures. Parameters ---------- @@ -51,22 +50,21 @@ def bic(self): @cache_readonly def beta(self): - """ - Returns a DataFrame, where each column x1 contains the betas - calculated by regressing the x1 column of the VAR input with - the lagged input. + """Returns a DataFrame, where each column x1 contains the betas + calculated by regressing the x1 column of the VAR input with the lagged + input. 
Returns ------- DataFrame + """ d = dict([(key, value.beta) for (key, value) in compat.iteritems(self.ols_results)]) return DataFrame(d) def forecast(self, h): - """ - Returns a DataFrame containing the forecasts for 1, 2, ..., n time + """Returns a DataFrame containing the forecasts for 1, 2, ..., n time steps. Each column x1 contains the forecasts of the x1 column. Parameters @@ -77,29 +75,30 @@ def forecast(self, h): Returns ------- DataFrame + """ forecast = self._forecast_raw(h)[:, 0, :] return DataFrame(forecast, index=lrange(1, 1 + h), columns=self._columns) def forecast_cov(self, h): - """ - Returns the covariance of the forecast residuals. + """Returns the covariance of the forecast residuals. Returns ------- DataFrame + """ return [DataFrame(value, index=self._columns, columns=self._columns) for value in self._forecast_cov_raw(h)] def forecast_std_err(self, h): - """ - Returns the standard errors of the forecast residuals. + """Returns the standard errors of the forecast residuals. Returns ------- DataFrame + """ return DataFrame(self._forecast_std_err_raw(h), index=lrange(1, 1 + h), columns=self._columns) @@ -203,14 +202,14 @@ def ols_results(self): @cache_readonly def resid(self): - """ - Returns the DataFrame containing the residuals of the VAR regressions. - Each column x1 contains the residuals generated by regressing the x1 - column of the input against the lagged input. + """Returns the DataFrame containing the residuals of the VAR + regressions. Each column x1 contains the residuals generated by + regressing the x1 column of the input against the lagged input. Returns ------- DataFrame + """ d = dict([(col, series.resid) for (col, series) in compat.iteritems(self.ols_results)]) @@ -323,9 +322,10 @@ def _forecast_cov_raw(self, n): return resid def _forecast_cov_beta_raw(self, n): - """ - Returns the covariance of the beta errors for the forecast at - 1, 2, ..., n timesteps. 
+ """Returns the covariance of the beta errors for the forecast at 1, 2, + + ..., n timesteps. + """ p = self._p @@ -362,10 +362,8 @@ def _forecast_cov_beta_raw(self, n): return results def _forecast_cov_resid_raw(self, h): - """ - Returns the covariance of the residual errors for the forecast at - 1, 2, ..., h timesteps. - """ + """Returns the covariance of the residual errors for the forecast at 1, + 2, ..., h timesteps.""" psi_values = self._psi(h) sum = 0 result = [] @@ -377,9 +375,7 @@ def _forecast_cov_resid_raw(self, h): return result def _forecast_raw(self, h): - """ - Returns the forecast at 1, 2, ..., h timesteps in the future. - """ + """Returns the forecast at 1, 2, ..., h timesteps in the future.""" k = self._k result = [] for i in range(h): @@ -398,18 +394,14 @@ def _forecast_raw(self, h): return np.array(result) def _forecast_std_err_raw(self, h): - """ - Returns the standard error of the forecasts - at 1, 2, ..., n timesteps. - """ + """Returns the standard error of the forecasts at 1, 2, ..., n + timesteps.""" return np.array([np.sqrt(np.diag(value)) for value in self._forecast_cov_raw(h)]) @cache_readonly def _ic(self): - """ - Returns the Akaike/Bayesian information criteria. - """ + """Returns the Akaike/Bayesian information criteria.""" RSS = self._rss k = self._p * (self._k * self._p + 1) n = self._nobs * self._k @@ -443,10 +435,10 @@ def _nobs(self): return len(self._data) - self._p def _psi(self, h): - """ - psi value used for calculating standard error. + """psi value used for calculating standard error. Returns [psi_0, psi_1, ..., psi_(h - 1)] + """ k = self._k result = [np.eye(k)] @@ -484,8 +476,7 @@ def __unicode__(self): def lag_select(data, max_lags=5, ic=None): - """ - Select number of lags based on a variety of information criteria + """Select number of lags based on a variety of information criteria. 
Parameters ---------- @@ -498,18 +489,19 @@ def lag_select(data, max_lags=5, ic=None): Returns ------- None + """ pass class PanelVAR(VAR): - """ - Performs Vector Autoregression on panel data. + """Performs Vector Autoregression on panel data. Parameters ---------- data: Panel or dict of DataFrame lags: int + """ def __init__(self, data, lags, intercept=True): self._data = _prep_panel_data(data) @@ -530,9 +522,7 @@ def _rss(self): return (self.resid.values ** 2).sum() def forecast(self, h): - """ - Returns the forecasts at 1, 2, ..., n timesteps in the future. - """ + """Returns the forecasts at 1, 2, ..., n timesteps in the future.""" forecast = self._forecast_raw(h).T.swapaxes(1, 2) index = lrange(1, 1 + h) w = Panel(forecast, items=self._data.items, major_axis=index, @@ -541,14 +531,14 @@ def forecast(self, h): @cache_readonly def resid(self): - """ - Returns the DataFrame containing the residuals of the VAR regressions. - Each column x1 contains the residuals generated by regressing the x1 - column of the input against the lagged input. + """Returns the DataFrame containing the residuals of the VAR + regressions. Each column x1 contains the residuals generated by + regressing the x1 column of the input against the lagged input. Returns ------- DataFrame + """ d = dict([(key, value.resid) for (key, value) in compat.iteritems(self.ols_results)]) @@ -585,11 +575,11 @@ def _make_param_name(lag, name): def chain_dot(*matrices): - """ - Returns the dot product of the given matrices. + """Returns the dot product of the given matrices. 
Parameters ---------- matrices: argument list of ndarray + """ return reduce(lambda x, y: np.dot(y, x), matrices[::-1]) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 7185b684a1e12..6bbde818635a3 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -272,7 +272,7 @@ def test_iterpairs(): def test_split_ranges(): def _bin(x, width): - "return int(x) as a base2 string of given width" + """return int(x) as a base2 string of given width.""" return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1)) def test_locs(mask): diff --git a/pandas/tests/test_compat.py b/pandas/tests/test_compat.py index 0d38bb23d6aa7..d05e34abe63f0 100644 --- a/pandas/tests/test_compat.py +++ b/pandas/tests/test_compat.py @@ -1,6 +1,4 @@ -""" -Testing that functions from compat work as expected -""" +"""Testing that functions from compat work as expected.""" from pandas.compat import ( range, zip, map, filter, diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index fdea275b7e040..767388f12009a 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -104,10 +104,12 @@ def test_integer_arithmetic(self): def run_binary_test(self, df, other, assert_func, test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge', 'le', 'eq', 'ne'])): - """ - tests solely that the result is the same whether or not numexpr is - enabled. Need to test whether the function does the correct thing + """tests solely that the result is the same whether or not numexpr is + enabled. + + Need to test whether the function does the correct thing elsewhere. 
+ """ expr._MIN_ELEMENTS = 0 expr.set_test_mode(True) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 3ce65c81592a9..8ca12b4f5e0bc 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1604,10 +1604,8 @@ def test_to_html_with_classes(self): self.assertEqual(result, expected) def test_pprint_pathological_object(self): - """ - if the test fails, the stack will overflow and nose crash, - but it won't hang. - """ + """if the test fails, the stack will overflow and nose crash, but it + won't hang.""" class A: def __getitem__(self, key): return 3 # obviously simplified diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 6c6e70b86105f..f20f17963c604 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -55,13 +55,12 @@ def _ndim(self): return self._typ._AXIS_LEN def _axes(self): - """ return the axes for my object typ """ + """return the axes for my object typ.""" return self._typ._AXIS_ORDERS def _construct(self, shape, value=None, dtype=None, **kwargs): - """ construct an object for the given shape - if value is specified use that if its a scalar - if value is an array, repeat it as needed """ + """construct an object for the given shape if value is specified use + that if its a scalar if value is an array, repeat it as needed.""" if isinstance(shape,int): shape = tuple([shape] * self._ndim) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 540ce1cc61929..27ddde8cee4af 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2727,9 +2727,7 @@ def test_groupby_reindex_inside_function(self): periods), 'low': np.arange(periods)}, index=ind) def agg_before(hour, func, fix=False): - """ - Run an aggregate func on the subset of data. 
- """ + """Run an aggregate func on the subset of data.""" def _func(data): d = data.select(lambda x: x.hour < 11).dropna() if fix: diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index ea1e07dbf6acc..9fc7c222f43cc 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -29,10 +29,8 @@ def _generate_indices(f, values=False): - """ generate the indicies - if values is True , use the axis values - is False, use the range - """ + """generate the indicies if values is True , use the axis values is False, + use the range.""" axes = f.axes if values: @@ -41,7 +39,7 @@ def _generate_indices(f, values=False): return itertools.product(*axes) def _get_value(f, i, values=False): - """ return the value for the location i """ + """return the value for the location i.""" # check agains values if values: @@ -55,7 +53,7 @@ def _get_value(f, i, values=False): return f.ix[i] def _get_result(obj, method, key, axis): - """ return the result for this obj with this key and this axis """ + """return the result for this obj with this key and this axis.""" if isinstance(key, dict): key = key[axis] @@ -154,7 +152,7 @@ def check_result(self, name, method1, key1, method2, key2, typs = None, objs = N def _eq(t, o, a, obj, k1, k2): - """ compare equal for these 2 keys """ + """compare equal for these 2 keys.""" if a is not None and a > obj.ndim-1: return diff --git a/pandas/tests/test_msgpack/test_read_size.py b/pandas/tests/test_msgpack/test_read_size.py index db3e1deb04f8f..c9475f6c4aa91 100644 --- a/pandas/tests/test_msgpack/test_read_size.py +++ b/pandas/tests/test_msgpack/test_read_size.py @@ -1,4 +1,4 @@ -"""Test Unpacker's read_array_header and read_map_header methods""" +"""Test Unpacker's read_array_header and read_map_header methods.""" from pandas.msgpack import packb, Unpacker, OutOfData UnexpectedTypeException = ValueError diff --git a/pandas/tests/test_msgpack/test_unpack_raw.py b/pandas/tests/test_msgpack/test_unpack_raw.py 
index 0e96a79cf190a..cbb415d980352 100644 --- a/pandas/tests/test_msgpack/test_unpack_raw.py +++ b/pandas/tests/test_msgpack/test_unpack_raw.py @@ -1,4 +1,4 @@ -"""Tests for cases where the user seeks to obtain packed msgpack objects""" +"""Tests for cases where the user seeks to obtain packed msgpack objects.""" from pandas import compat from pandas.msgpack import Unpacker, packb diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index aef4e3a72c099..e5764bcb4ead1 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -952,7 +952,7 @@ def test_stack_unstack_multiple(self): assert_frame_equal(unstacked, expected.ix[:, unstacked.columns]) def test_stack_multiple_bug(self): - """ bug when some uniques are not present in the data #3170""" + """bug when some uniques are not present in the data #3170.""" id_col = ([1] * 3) + ([2] * 3) name = (['a'] * 3) + (['b'] * 3) date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 198e600e8edc7..0186de0df84b6 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2000,9 +2000,7 @@ def test_update_raise(self): class TestLongPanel(tm.TestCase): - """ - LongPanel no longer exists, but... - """ + """LongPanel no longer exists, but...""" _multiprocess_can_split_ = True def setUp(self): diff --git a/pandas/tests/test_rplot.py b/pandas/tests/test_rplot.py index ddfce477a320d..c5df1b6987bd9 100644 --- a/pandas/tests/test_rplot.py +++ b/pandas/tests/test_rplot.py @@ -24,6 +24,7 @@ def between(a, b, x): Returns: -------- True if x is between a and b, False otherwise + """ if a < b: return x >= a and x <= b @@ -33,9 +34,7 @@ def between(a, b, x): @tm.mplskip class TestUtilityFunctions(tm.TestCase): - """ - Tests for RPlot utility functions. 
- """ + """Tests for RPlot utility functions.""" def setUp(self): path = os.path.join(curpath(), 'data/iris.csv') self.data = read_csv(path, sep=',') diff --git a/pandas/tools/describe.py b/pandas/tools/describe.py index eca5a800b3c6c..b03c202b93337 100644 --- a/pandas/tools/describe.py +++ b/pandas/tools/describe.py @@ -2,8 +2,7 @@ def value_range(df): - """ - Return the minimum and maximum of a dataframe in a series object + """Return the minimum and maximum of a dataframe in a series object. Parameters ---------- diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 90e713d72bdda..9d727048ea25b 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -152,10 +152,8 @@ def _merger(x, y): # TODO: transformations?? # TODO: only copy DataFrames when modification necessary class _MergeOperation(object): - """ - Perform a database (SQL) merge operation between two DataFrame objects - using either columns as keys or their row indexes - """ + """Perform a database (SQL) merge operation between two DataFrame objects + using either columns as keys or their row indexes.""" def __init__(self, left, right, how='inner', on=None, left_on=None, right_on=None, axis=1, @@ -275,9 +273,7 @@ def _get_join_info(self): return join_index, left_indexer, right_indexer def _get_merge_data(self): - """ - Handles overlapping column names etc. - """ + """Handles overlapping column names etc.""" ldata, rdata = self.left._data, self.right._data lsuf, rsuf = self.suffixes ldata, rdata = ldata._maybe_rename_join(rdata, lsuf, rsuf, @@ -423,13 +419,10 @@ def _validate_specification(self): def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'): - """ - - Parameters - ---------- + """Parameters. 
Returns - ------- + ------- """ if len(left_keys) != len(right_keys): @@ -640,11 +633,11 @@ def _sort_labels(uniques, left, right): class _BlockJoinOperation(object): - """ - BlockJoinOperation made generic for N DataFrames + """BlockJoinOperation made generic for N DataFrames. + + Object responsible for orchestrating efficient join operation + between two BlockManager data structures - Object responsible for orchestrating efficient join operation between two - BlockManager data structures """ def __init__(self, data_list, join_index, indexers, axis=1, copy=True): if axis <= 0: # pragma: no cover @@ -685,10 +678,10 @@ def _prepare_blocks(self): return blockmaps def get_result(self): - """ - Returns - ------- + """Returns. + merged : BlockManager + """ blockmaps = self._prepare_blocks() kinds = _get_merge_block_kinds(blockmaps) @@ -781,9 +774,7 @@ def _merge_blocks(self, merge_chunks): class _JoinUnit(object): - """ - Blocks plus indexer - """ + """Blocks plus indexer.""" def __init__(self, blocks, indexer): self.blocks = blocks @@ -822,9 +813,7 @@ def _may_need_upcasting(blocks): def _upcast_blocks(blocks): - """ - Upcast and consolidate if necessary - """ + """Upcast and consolidate if necessary.""" new_blocks = [] for block in blocks: if isinstance(block, TimeDeltaBlock): @@ -877,11 +866,11 @@ def _get_block_dtype(blocks): def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False): - """ - Concatenate pandas objects along a particular axis with optional set logic - along the other axes. Can also add a layer of hierarchical indexing on the - concatenation axis, which may be useful if the labels are the same (or - overlapping) on the passed axis number + """Concatenate pandas objects along a particular axis with optional set + logic along the other axes. Can also add a layer of hierarchical indexing + on the concatenation axis, which may be useful if the labels are the same. 
+ + (or overlapping) on the passed axis number. Parameters ---------- @@ -922,6 +911,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, Returns ------- concatenated : type of objects + """ op = _Concatenator(objs, axis=axis, join_axes=join_axes, ignore_index=ignore_index, join=join, @@ -931,9 +921,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, class _Concatenator(object): - """ - Orchestrates a concatenation operation for BlockManagers - """ + """Orchestrates a concatenation operation for BlockManagers.""" def __init__(self, objs, axis=0, join='outer', join_axes=None, keys=None, levels=None, names=None, diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index c2a929bab77b5..1fe9bd8962004 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -136,11 +136,12 @@ def random_color(column): return colors class _Options(dict): - """ - Stores pandas plotting options. - Allows for parameter aliasing so you can just use parameter names that are - the same as the plot function parameters, but is stored in a canonical - format that makes it easy to breakdown into groups later + """Stores pandas plotting options. + + Allows for parameter aliasing so you can just use parameter names + that are the same as the plot function parameters, but is stored in + a canonical format that makes it easy to breakdown into groups later + """ # alias so the names are same as plotting method parameter names @@ -171,12 +172,12 @@ def __contains__(self, key): return super(_Options, self).__contains__(key) def reset(self): - """ - Reset the option store to its initial state + """Reset the option store to its initial state. Returns ------- None + """ self.__init__() @@ -185,9 +186,10 @@ def _get_canonical_key(self, key): @contextmanager def use(self, key, value): - """ - Temporarily set a parameter value using the with statement. + """Temporarily set a parameter value using the with statement. 
+ Aliasing allowed. + """ old_value = self[key] try: @@ -203,8 +205,7 @@ def use(self, key, value): def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, diagonal='hist', marker='.', density_kwds=None, hist_kwds=None, range_padding=0.05, **kwds): - """ - Draw a matrix of scatter plots. + """Draw a matrix of scatter plots. Parameters ---------- @@ -237,6 +238,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, -------- >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) >>> scatter_matrix(df, alpha=0.2) + """ import matplotlib.pyplot as plt from matplotlib.artist import setp @@ -519,6 +521,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): Returns: -------- fig: matplotlib figure + """ import random import matplotlib.pyplot as plt @@ -602,6 +605,7 @@ def parallel_coordinates(data, class_column, cols=None, ax=None, colors=None, >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv') >>> parallel_coordinates(df, 'Name', colors=('#556270', '#4ECDC4', '#C7F464')) >>> plt.show() + """ import matplotlib.pyplot as plt @@ -678,6 +682,7 @@ def lag_plot(series, lag=1, ax=None, **kwds): Returns: -------- ax: Matplotlib axis object + """ import matplotlib.pyplot as plt @@ -708,6 +713,7 @@ def autocorrelation_plot(series, ax=None, **kwds): Returns: ----------- ax: Matplotlib axis object + """ import matplotlib.pyplot as plt n = len(series) @@ -740,8 +746,7 @@ def r(h): def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, layout=None, sharex=False, sharey=False, rot=90, grid=True, **kwargs): - """ - Grouped histogram + """Grouped histogram. 
Parameters ---------- @@ -761,6 +766,7 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, Returns ------- axes: collection of Matplotlib Axes + """ def plot_group(group, ax): ax.hist(group.dropna().values, bins=bins, **kwargs) @@ -774,8 +780,7 @@ def plot_group(group, ax): class MPLPlot(object): - """ - Base class for assembling a pandas plot using matplotlib + """Base class for assembling a pandas plot using matplotlib. Parameters ---------- @@ -1075,11 +1080,13 @@ def _get_xticks(self, convert_period=False): self.data = self.data.reindex(index=index.order()) x = self.data.index.to_timestamp()._mpl_repr() elif index.is_numeric(): - """ - Matplotlib supports numeric values or datetime objects as - xaxis values. Taking LBYL approach here, by the time - matplotlib raises exception when using non numeric/datetime - values for xaxis, several actions are already taken by plt. + """Matplotlib supports numeric values or datetime objects as + xaxis values. + + Taking LBYL approach here, by the time matplotlib raises + exception when using non numeric/datetime values for + xaxis, several actions are already taken by plt. + """ x = index._mpl_repr() elif is_datetype: @@ -1100,10 +1107,8 @@ def _is_datetype(self): 'time')) def _get_plot_function(self): - ''' - Returns the matplotlib plotting function (plot or errorbar) based on - the presence of errorbar keywords. - ''' + """Returns the matplotlib plotting function (plot or errorbar) based on + the presence of errorbar keywords.""" if ('xerr' not in self.kwds) and \ ('yerr' not in self.kwds): @@ -1193,9 +1198,8 @@ def _get_marked_label(self, label, col_num): return label def _parse_errorbars(self, error_dim='y', **kwds): - ''' - Look for error keyword arguments and return the actual errorbar data - or return the error DataFrame/dict + """Look for error keyword arguments and return the actual errorbar data + or return the error DataFrame/dict. 
Error bars can be specified in several ways: Series: the user provides a pandas.Series object of the same @@ -1204,7 +1208,8 @@ def _parse_errorbars(self, error_dim='y', **kwds): DataFrame/dict: error values are paired with keys matching the key in the plotted DataFrame str: the name of the column within the plotted DataFrame - ''' + + """ err_kwd = kwds.pop(error_dim+'err', None) if err_kwd is None: @@ -2104,9 +2109,8 @@ def plot_series(series, label=None, kind='line', use_index=True, rot=None, def boxplot(data, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, **kwds): - """ - Make a box plot from DataFrame column optionally grouped by some columns or - other inputs + """Make a box plot from DataFrame column optionally grouped by some columns + or other inputs. Parameters ---------- @@ -2126,6 +2130,7 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None, Returns ------- ax : matplotlib.axes.AxesSubplot + """ from pandas import Series, DataFrame if isinstance(data, Series): @@ -2216,8 +2221,7 @@ def format_date_labels(ax, rot): def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs): - """ - Make a scatter plot from two DataFrame columns + """Make a scatter plot from two DataFrame columns. Parameters ---------- @@ -2233,6 +2237,7 @@ def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwarg Returns ------- fig : matplotlib.Figure + """ import matplotlib.pyplot as plt @@ -2265,8 +2270,7 @@ def plot_group(group, ax): def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, sharey=False, figsize=None, layout=None, **kwds): - """ - Draw histogram of the DataFrame's series using matplotlib / pylab. + """Draw histogram of the DataFrame's series using matplotlib / pylab. 
Parameters ---------- @@ -2293,6 +2297,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, layout: (optional) a tuple (rows, columns) for the layout of the histograms kwds : other plotting keyword arguments To be passed to hist function + """ import matplotlib.pyplot as plt @@ -2365,8 +2370,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, figsize=None, **kwds): - """ - Draw histogram of the input series using matplotlib + """Draw histogram of the input series using matplotlib. Parameters ---------- @@ -2440,8 +2444,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, rot=0, grid=True, figsize=None, **kwds): - """ - Make box plots from DataFrameGroupBy data. + """Make box plots from DataFrameGroupBy data. Parameters ---------- @@ -2479,6 +2482,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, >>> >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, subplots=False) + """ if subplots is True: nrows, ncols = _get_layout(len(grouped)) @@ -2691,6 +2695,7 @@ def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, # Four polar axes plt.subplots(2, 2, subplot_kw=dict(polar=True)) + """ import matplotlib.pyplot as plt from pandas.core.frame import DataFrame diff --git a/pandas/tools/rplot.py b/pandas/tools/rplot.py index 1c3d17ee908cb..257115ae760c5 100644 --- a/pandas/tools/rplot.py +++ b/pandas/tools/rplot.py @@ -10,16 +10,12 @@ # class Scale: - """ - Base class for mapping between graphical and data attributes. - """ + """Base class for mapping between graphical and data attributes.""" pass class ScaleGradient(Scale): - """ - A mapping between a data attribute value and a - point in colour space between two specified colours. 
- """ + """A mapping between a data attribute value and a point in colour space + between two specified colours.""" def __init__(self, column, colour1, colour2): """Initialize ScaleGradient instance. @@ -28,6 +24,7 @@ def __init__(self, column, colour1, colour2): column: string, pandas DataFrame column name colour1: tuple, 3 element tuple with float values representing an RGB colour colour2: tuple, 3 element tuple with float values representing an RGB colour + """ self.column = column self.colour1 = colour1 @@ -45,6 +42,7 @@ def __call__(self, data, index): Returns: -------- A three element tuple representing an RGB somewhere between colour1 and colour2 + """ x = data[self.column].iget(index) a = min(data[self.column]) @@ -57,10 +55,8 @@ def __call__(self, data, index): b1 + (b2 - b1) * x_scaled) class ScaleGradient2(Scale): - """ - Create a mapping between a data attribute value and a - point in colour space in a line of three specified colours. - """ + """Create a mapping between a data attribute value and a point in colour + space in a line of three specified colours.""" def __init__(self, column, colour1, colour2, colour3): """Initialize ScaleGradient2 instance. @@ -70,6 +66,7 @@ def __init__(self, column, colour1, colour2, colour3): colour1: tuple, 3 element tuple with float values representing an RGB colour colour2: tuple, 3 element tuple with float values representing an RGB colour colour3: tuple, 3 element tuple with float values representing an RGB colour + """ self.column = column self.colour1 = colour1 @@ -89,6 +86,7 @@ def __call__(self, data, index): -------- A three element tuple representing an RGB somewhere along the line of colour1, colour2 and colour3 + """ x = data[self.column].iget(index) a = min(data[self.column]) @@ -109,10 +107,8 @@ def __call__(self, data, index): b2 + (b3 - b2) * x_scaled) class ScaleSize(Scale): - """ - Provide a mapping between a DataFrame column and matplotlib - scatter plot shape size. 
- """ + """Provide a mapping between a DataFrame column and matplotlib scatter plot + shape size.""" def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x): """Initialize ScaleSize instance. @@ -122,6 +118,7 @@ def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x): min_size: float, minimum point size max_size: float, maximum point size transform: a one argument function of form float -> float (e.g. lambda x: log(x)) + """ self.column = column self.min_size = min_size @@ -136,6 +133,7 @@ def __call__(self, data, index): ----------- data: pandas DataFrame index: pandas DataFrame row index + """ x = data[self.column].iget(index) a = float(min(data[self.column])) @@ -144,16 +142,15 @@ def __call__(self, data, index): (self.max_size - self.min_size)) class ScaleShape(Scale): - """ - Provides a mapping between matplotlib marker shapes - and attribute values. - """ + """Provides a mapping between matplotlib marker shapes and attribute + values.""" def __init__(self, column): """Initialize ScaleShape instance. Parameters: ----------- column: string, pandas DataFrame column name + """ self.column = column self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x'] @@ -171,6 +168,7 @@ def __call__(self, data, index): Returns: -------- a matplotlib marker identifier + """ values = sorted(list(set(data[self.column]))) if len(values) > len(self.shapes): @@ -179,34 +177,35 @@ def __call__(self, data, index): return self.shapes[values.index(x)] class ScaleRandomColour(Scale): - """ - Maps a random colour to a DataFrame attribute. - """ + """Maps a random colour to a DataFrame attribute.""" def __init__(self, column): """Initialize ScaleRandomColour instance. Parameters: ----------- column: string, pandas DataFrame column name + """ self.column = column self.categorical = True def __call__(self, data, index): - """Return a tuple of three floats, representing - an RGB colour. 
+ """Return a tuple of three floats, representing an RGB colour. Parameters: ----------- data: pandas DataFrame index: pandas DataFrame row index + """ random.seed(data[self.column].iget(index)) return [random.random() for _ in range(3)] class ScaleConstant(Scale): - """ - Constant returning scale. Usually used automatically. + """Constant returning scale. + + Usually used automatically. + """ def __init__(self, value): """Initialize ScaleConstant instance. @@ -214,6 +213,7 @@ def __init__(self, value): Parameters: ----------- value: any Python value to be returned when called + """ self.value = value self.categorical = False @@ -229,6 +229,7 @@ def __call__(self, data, index): Returns: -------- A constant value specified during initialisation + """ return self.value @@ -243,6 +244,7 @@ def default_aes(x=None, y=None): Returns: -------- a dictionary with aesthetics bindings + """ return { 'x' : x, @@ -268,6 +270,7 @@ def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None): Returns: -------- a dictionary with aesthetics bindings + """ if not hasattr(size, '__call__') and size is not None: size = ScaleConstant(size) @@ -303,9 +306,7 @@ def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None): } class Layer: - """ - Layer object representing a single plot layer. - """ + """Layer object representing a single plot layer.""" def __init__(self, data=None, **kwds): """Initialize layer object. @@ -313,6 +314,7 @@ def __init__(self, data=None, **kwds): ----------- data: pandas DataFrame instance aes: aesthetics dictionary with bindings + """ self.data = data self.aes = make_aes(**kwds) @@ -329,13 +331,14 @@ def work(self, fig=None, ax=None): Returns: -------- a tuple with the same figure and axis instances + """ return fig, ax class GeomPoint(Layer): def work(self, fig=None, ax=None): - """Render the layer on a matplotlib axis. - You can specify either a figure or an axis to draw on. + """Render the layer on a matplotlib axis. 
You can specify either a + figure or an axis to draw on. Parameters: ----------- @@ -345,6 +348,7 @@ def work(self, fig=None, ax=None): Returns: -------- fig, ax: matplotlib figure and axis objects + """ if ax is None: if fig is None: @@ -379,9 +383,7 @@ def work(self, fig=None, ax=None): return fig, ax class GeomPolyFit(Layer): - """ - Draw a polynomial fit of specified degree. - """ + """Draw a polynomial fit of specified degree.""" def __init__(self, degree, lw=2.0, colour='grey'): """Initialize GeomPolyFit object. @@ -390,6 +392,7 @@ def __init__(self, degree, lw=2.0, colour='grey'): degree: an integer, polynomial degree lw: line width colour: matplotlib colour + """ self.degree = degree self.lw = lw @@ -397,7 +400,7 @@ def __init__(self, degree, lw=2.0, colour='grey'): Layer.__init__(self) def work(self, fig=None, ax=None): - """Draw the polynomial fit on matplotlib figure or axis + """Draw the polynomial fit on matplotlib figure or axis. Parameters: ----------- @@ -407,6 +410,7 @@ def work(self, fig=None, ax=None): Returns: -------- a tuple with figure and axis objects + """ if ax is None: if fig is None: @@ -426,9 +430,7 @@ def work(self, fig=None, ax=None): return fig, ax class GeomScatter(Layer): - """ - An efficient scatter plot, use this instead of GeomPoint for speed. - """ + """An efficient scatter plot, use this instead of GeomPoint for speed.""" def __init__(self, marker='o', colour='lightblue', alpha=1.0): """Initialize GeomScatter instance. @@ -437,6 +439,7 @@ def __init__(self, marker='o', colour='lightblue', alpha=1.0): marker: matplotlib marker string colour: matplotlib colour alpha: matplotlib alpha + """ self.marker = marker self.colour = colour @@ -444,7 +447,7 @@ def __init__(self, marker='o', colour='lightblue', alpha=1.0): Layer.__init__(self) def work(self, fig=None, ax=None): - """Draw a scatter plot on matplotlib figure or axis + """Draw a scatter plot on matplotlib figure or axis. 
Parameters: ----------- @@ -454,6 +457,7 @@ def work(self, fig=None, ax=None): Returns: -------- a tuple with figure and axis objects + """ if ax is None: if fig is None: @@ -466,9 +470,7 @@ def work(self, fig=None, ax=None): return fig, ax class GeomHistogram(Layer): - """ - An efficient histogram, use this instead of GeomBar for speed. - """ + """An efficient histogram, use this instead of GeomBar for speed.""" def __init__(self, bins=10, colour='lightblue'): """Initialize GeomHistogram instance. @@ -476,13 +478,14 @@ def __init__(self, bins=10, colour='lightblue'): ----------- bins: integer, number of histogram bins colour: matplotlib colour + """ self.bins = bins self.colour = colour Layer.__init__(self) def work(self, fig=None, ax=None): - """Draw a histogram on matplotlib figure or axis + """Draw a histogram on matplotlib figure or axis. Parameters: ----------- @@ -492,6 +495,7 @@ def work(self, fig=None, ax=None): Returns: -------- a tuple with figure and axis objects + """ if ax is None: if fig is None: @@ -504,12 +508,10 @@ def work(self, fig=None, ax=None): return fig, ax class GeomDensity(Layer): - """ - A kernel density estimation plot. - """ + """A kernel density estimation plot.""" def work(self, fig=None, ax=None): - """Draw a one dimensional kernel density plot. - You can specify either a figure or an axis to draw on. + """Draw a one dimensional kernel density plot. You can specify either a + figure or an axis to draw on. Parameters: ----------- @@ -519,6 +521,7 @@ def work(self, fig=None, ax=None): Returns: -------- fig, ax: matplotlib figure and axis objects + """ if ax is None: if fig is None: @@ -534,8 +537,8 @@ def work(self, fig=None, ax=None): class GeomDensity2D(Layer): def work(self, fig=None, ax=None): - """Draw a two dimensional kernel density plot. - You can specify either a figure or an axis to draw on. + """Draw a two dimensional kernel density plot. You can specify either a + figure or an axis to draw on. 
Parameters: ----------- @@ -545,6 +548,7 @@ def work(self, fig=None, ax=None): Returns: -------- fig, ax: matplotlib figure and axis objects + """ if ax is None: if fig is None: @@ -574,6 +578,7 @@ def __init__(self, by): Parameters: ----------- by: column names to group by + """ if len(by) != 2: raise ValueError("You must give a list of length 2 to group by") @@ -582,8 +587,8 @@ def __init__(self, by): self.by = by def trellis(self, layers): - """Create a trellis structure for a list of layers. - Each layer will be cloned with different data in to a two dimensional grid. + """Create a trellis structure for a list of layers. Each layer will be + cloned with different data in to a two dimensional grid. Parameters: ----------- @@ -592,6 +597,7 @@ def trellis(self, layers): Returns: -------- trellised_layers: Clones of each layer in the list arranged in a trellised latice + """ trellised_layers = [] for layer in layers: @@ -645,6 +651,7 @@ def dictionary_union(dict1, dict2): -------- A union of the dictionaries. It assumes that values with the same keys are identical. + """ keys1 = list(dict1.keys()) keys2 = list(dict2.keys()) @@ -656,14 +663,15 @@ def dictionary_union(dict1, dict2): return result def merge_aes(layer1, layer2): - """Merges the aesthetics dictionaries for the two layers. - Look up sequence_layers function. Which layer is first and which - one is second is important. + """Merges the aesthetics dictionaries for the two layers. Look up + sequence_layers function. Which layer is first and which one is second is + important. Parameters: ----------- layer1: Layer object layer2: Layer object + """ for key in layer2.aes.keys(): if layer2.aes[key] is None: @@ -686,11 +694,13 @@ def sequence_layers(layers): return layers def sequence_grids(layer_grids): - """Go through the list of layer girds and perform the same thing as sequence_layers. + """Go through the list of layer girds and perform the same thing as + sequence_layers. 
Parameters: ----------- layer_grids: a list of two dimensional layer grids + """ for grid1, grid2 in zip(layer_grids[:-1], layer_grids[1:]): for row1, row2 in zip(grid1, grid2): @@ -701,7 +711,8 @@ def sequence_grids(layer_grids): return layer_grids def work_grid(grid, fig): - """Take a two dimensional grid, add subplots to a figure for each cell and do layer work. + """Take a two dimensional grid, add subplots to a figure for each cell and + do layer work. Parameters: ----------- @@ -711,6 +722,7 @@ def work_grid(grid, fig): Returns: -------- axes: a two dimensional list of matplotlib axes + """ nrows = len(grid) ncols = len(grid[0]) @@ -722,8 +734,8 @@ def work_grid(grid, fig): return axes def adjust_subplots(fig, axes, trellis, layers): - """Adjust the subtplots on matplotlib figure with the - fact that we have a trellis plot in mind. + """Adjust the subtplots on matplotlib figure with the fact that we have a + trellis plot in mind. Parameters: ----------- @@ -731,6 +743,7 @@ def adjust_subplots(fig, axes, trellis, layers): axes: a two dimensional grid of matplotlib axes trellis: TrellisGrid object layers: last grid of layers in the plot + """ # Flatten the axes grid axes = [ax for row in axes for ax in row] @@ -795,8 +808,10 @@ def adjust_subplots(fig, axes, trellis, layers): fig.subplots_adjust(wspace=0.05, hspace=0.2) class RPlot: - """ - The main plot object. Add layers to an instance of this object to create a plot. + """The main plot object. + + Add layers to an instance of this object to create a plot. + """ def __init__(self, data, x=None, y=None): """Initialize RPlot instance. 
@@ -806,6 +821,7 @@ def __init__(self, data, x=None, y=None): data: pandas DataFrame instance x: string, DataFrame column name y: string, DataFrame column name + """ self.layers = [Layer(data, **default_aes(x=x, y=y))] trellised = False @@ -816,6 +832,7 @@ def add(self, layer): Parameters: ----------- layer: Layer instance + """ if not isinstance(layer, Layer): raise TypeError("The operand on the right side of + must be a Layer instance") @@ -827,6 +844,7 @@ def render(self, fig=None): Parameters: ----------- fig: matplotlib figure + """ import matplotlib.pyplot as plt if fig is None: diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index 99fa1eaba79cc..6d5070abe4935 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -1,6 +1,4 @@ -""" -Quantilization functions and related stuff -""" +"""Quantilization functions and related stuff.""" from pandas.core.api import DataFrame, Series from pandas.core.categorical import Categorical diff --git a/pandas/tools/util.py b/pandas/tools/util.py index 6dbefc4b70930..f9f72ca5f6eec 100644 --- a/pandas/tools/util.py +++ b/pandas/tools/util.py @@ -9,9 +9,8 @@ def match(needles, haystack): def cartesian_product(X): - ''' - Numpy version of itertools.product or pandas.compat.product. - Sometimes faster (for large inputs)... + """Numpy version of itertools.product or pandas.compat.product. Sometimes + faster (for large inputs)... 
Examples -------- @@ -19,7 +18,7 @@ def cartesian_product(X): [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'), array([1, 2, 1, 2, 1, 2])] - ''' + """ lenX = np.fromiter((len(x) for x in X), dtype=int) cumprodX = np.cumproduct(lenX) @@ -35,11 +34,11 @@ def cartesian_product(X): def _compose2(f, g): - """Compose 2 callables""" + """Compose 2 callables.""" return lambda *args, **kwargs: f(g(*args, **kwargs)) def compose(*funcs): - """Compose 2 or more callables""" + """Compose 2 or more callables.""" assert len(funcs) > 1, 'At least 2 callables must be passed to compose' return reduce(_compose2, funcs) diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py index c2cc3723802fc..04b870e66be2c 100644 --- a/pandas/tseries/api.py +++ b/pandas/tseries/api.py @@ -1,6 +1,4 @@ -""" - -""" +"""""" from pandas.tseries.index import DatetimeIndex, date_range, bdate_range diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index b9939976fded8..7ea4d7b71da79 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -245,7 +245,7 @@ def __call__(self, x, pos=0): class PandasAutoDateLocator(dates.AutoDateLocator): def get_locator(self, dmin, dmax): - 'Pick the best locator based on a distance.' + """Pick the best locator based on a distance.""" delta = relativedelta(dmax, dmin) num_days = ((delta.years * 12.0) + delta.months * 31.0) + delta.days @@ -348,9 +348,7 @@ def _get_interval(self): return self._interval def autoscale(self): - """ - Set the view limits to include the data range. - """ + """Set the view limits to include the data range.""" dmin, dmax = self.datalim_to_dt() if dmin > dmax: dmax, dmin = dmin, dmax @@ -405,9 +403,7 @@ def _from_ordinal(x, tz=None): def _get_default_annual_spacing(nyears): - """ - Returns a default spacing between consecutive ticks for annual data. 
- """ + """Returns a default spacing between consecutive ticks for annual data.""" if nyears < 11: (min_spacing, maj_spacing) = (1, 1) elif nyears < 20: @@ -427,8 +423,7 @@ def _get_default_annual_spacing(nyears): def period_break(dates, period): - """ - Returns the indices where the given period changes. + """Returns the indices where the given period changes. Parameters ---------- @@ -436,6 +431,7 @@ def period_break(dates, period): Array of intervals to monitor. period : string Name of the period to monitor. + """ current = getattr(dates, period) previous = getattr(dates - 1, period) @@ -443,12 +439,12 @@ def period_break(dates, period): def has_level_label(label_flags, vmin): - """ - Returns true if the ``label_flags`` indicate there is at least one label + """Returns true if the ``label_flags`` indicate there is at least one label for this level. - if the minimum view limit is not an exact integer, then the first tick - label won't be shown, so we must adjust for that. + if the minimum view limit is not an exact integer, then the first + tick label won't be shown, so we must adjust for that. + """ if label_flags.size == 0 or (label_flags.size == 1 and label_flags[0] == 0 and @@ -871,7 +867,7 @@ def __init__(self, freq, minor_locator=False, dynamic_mode=True, self.finder = get_finder(freq) def _get_default_locs(self, vmin, vmax): - "Returns the default locations of ticks." + """Returns the default locations of ticks.""" if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) @@ -883,7 +879,7 @@ def _get_default_locs(self, vmin, vmax): return np.compress(locator['maj'], locator['val']) def __call__(self): - 'Return the locations of the ticks.' 
+ """Return the locations of the ticks.""" # axis calls Locator.set_axis inside set_m<xxxx>_formatter vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: @@ -902,10 +898,8 @@ def __call__(self): return locs def autoscale(self): - """ - Sets the view limits to the nearest multiples of base that contain the - data. - """ + """Sets the view limits to the nearest multiples of base that contain + the data.""" # requires matplotlib >= 0.98.0 (vmin, vmax) = self.axis.get_data_interval() @@ -951,7 +945,7 @@ def __init__(self, freq, minor_locator=False, dynamic_mode=True, self.finder = get_finder(freq) def _set_default_format(self, vmin, vmax): - "Returns the default ticks spacing." + """Returns the default ticks spacing.""" if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) @@ -966,7 +960,7 @@ def _set_default_format(self, vmin, vmax): return self.formatdict def set_locs(self, locs): - 'Sets the locations of the ticks' + """Sets the locations of the ticks.""" # don't actually use the locs. This is just needed to work with # matplotlib. Force to use vmin, vmax self.locs = locs diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7988b01af8c48..891dece6a69a6 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -72,13 +72,11 @@ def get_freq(freq): def get_freq_code(freqstr): - """ - - Parameters - ---------- + """Parameters. Returns - ------- + ------- + """ if isinstance(freqstr, DateOffset): freqstr = (get_offset_name(freqstr), freqstr.n) @@ -255,13 +253,13 @@ def inferTimeRule(index): def to_offset(freqstr): - """ - Return DateOffset object from string representation + """Return DateOffset object from string representation. 
Examples -------- >>> to_offset('5Min') Minute(5) + """ if freqstr is None: return None @@ -306,12 +304,12 @@ def to_offset(freqstr): def _base_and_stride(freqstr): - """ - Return base freq and stride info from string representation + """Return base freq and stride info from string representation. Examples -------- _freq_and_stride('5Min') -> 'Min', 5 + """ groups = opattern.match(freqstr) @@ -340,12 +338,12 @@ def get_base_alias(freqstr): def get_offset(name): - """ - Return DateOffset object associated with rule name + """Return DateOffset object associated with rule name. Examples -------- get_offset('EOM') --> BMonthEnd(1) + """ if name not in _dont_uppercase: name = name.upper() @@ -373,12 +371,12 @@ def get_offset(name): def get_offset_name(offset): - """ - Return rule name associated with a DateOffset object + """Return rule name associated with a DateOffset object. Examples -------- get_offset_name(BMonthEnd(1)) --> 'EOM' + """ if offset is None: raise ValueError("Offset can't be none!") @@ -403,9 +401,7 @@ def get_legacy_offset_name(offset): return _legacy_reverse_map.get(name, name) def get_standard_freq(freq): - """ - Return the standardized frequency string - """ + """Return the standardized frequency string.""" if freq is None: return None @@ -484,10 +480,8 @@ def get_standard_freq(freq): def _period_alias_dictionary(): - """ - Build freq alias dictionary to support freqs from original c_dates.c file - of the scikits.timeseries library. - """ + """Build freq alias dictionary to support freqs from original c_dates.c + file of the scikits.timeseries library.""" alias_dict = {} M_aliases = ["M", "MTH", "MONTH", "MONTHLY"] @@ -629,9 +623,8 @@ def _period_str_to_code(freqstr): def infer_freq(index, warn=True): - """ - Infer the most likely frequency given the input index. If the frequency is - uncertain, a warning will be printed + """Infer the most likely frequency given the input index. If the frequency + is uncertain, a warning will be printed. 
Parameters ---------- @@ -644,6 +637,7 @@ def infer_freq(index, warn=True): freq : string or None None if no discernible frequency TypeError if the index is not datetime-like + """ import pandas as pd @@ -673,9 +667,7 @@ def infer_freq(index, warn=True): class _FrequencyInferer(object): - """ - Not sure if I can avoid the state machine here - """ + """Not sure if I can avoid the state machine here.""" def __init__(self, index, warn=True): self.index = index @@ -884,9 +876,8 @@ def _maybe_add_count(base, count): def is_subperiod(source, target): - """ - Returns True if downsampling is possible between source and target - frequencies + """Returns True if downsampling is possible between source and target + frequencies. Parameters ---------- @@ -898,6 +889,7 @@ def is_subperiod(source, target): Returns ------- is_subperiod : boolean + """ if isinstance(source, offsets.DateOffset): source = source.rule_code @@ -933,9 +925,8 @@ def is_subperiod(source, target): def is_superperiod(source, target): - """ - Returns True if upsampling is possible between source and target - frequencies + """Returns True if upsampling is possible between source and target + frequencies. Parameters ---------- @@ -947,6 +938,7 @@ def is_superperiod(source, target): Returns ------- is_superperiod : boolean + """ if isinstance(source, offsets.DateOffset): source = source.rule_code diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index c58447acec621..af3ebeb9931d3 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -95,10 +95,9 @@ def _ensure_datetime64(other): _midnight = time(0, 0) class DatetimeIndex(Int64Index): - """ - Immutable ndarray of datetime64 data, represented internally as int64, and - which can be boxed to Timestamp objects that are subclasses of datetime and - carry metadata such as frequency information. 
+ """Immutable ndarray of datetime64 data, represented internally as int64, + and which can be boxed to Timestamp objects that are subclasses of datetime + and carry metadata such as frequency information. Parameters ---------- @@ -122,6 +121,7 @@ class DatetimeIndex(Int64Index): the 'left', 'right', or both sides (None) name : object Name to be stored in the index + """ _join_precedence = 10 @@ -461,9 +461,7 @@ def _simple_new(cls, values, name, freq=None, tz=None): @property def tzinfo(self): - """ - Alias for tz attribute - """ + """Alias for tz attribute.""" return self.tz @classmethod @@ -573,14 +571,14 @@ def __unicode__(self): return summary def __reduce__(self): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" object_state = list(np.ndarray.__reduce__(self)) subclass_state = self.name, self.offset, self.tz object_state[2] = (object_state[2], subclass_state) return tuple(object_state) def __setstate__(self, state): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" if len(state) == 2: nd_state, own_state = state self.name = own_state[0] @@ -657,9 +655,8 @@ def _format_native_types(self, na_rep=u('NaT'), justify='all').get_result() def isin(self, values): - """ - Compute boolean array of whether each index value is found in the - passed set of values + """Compute boolean array of whether each index value is found in the + passed set of values. 
Parameters ---------- @@ -668,6 +665,7 @@ def isin(self, values): Returns ------- is_contained : ndarray (boolean dtype) + """ if not isinstance(values, DatetimeIndex): try: @@ -723,9 +721,8 @@ def _get_time_micros(self): return tslib.get_time_micros(values) def to_series(self, keep_tz=False): - """ - Create a Series with both index and values equal to the index keys - useful with map for returning an indexer based on an index + """Create a Series with both index and values equal to the index keys + useful with map for returning an indexer based on an index. Parameters ---------- @@ -745,29 +742,27 @@ def to_series(self, keep_tz=False): Returns ------- Series + """ return super(DatetimeIndex, self).to_series(keep_tz=keep_tz) def _to_embed(self, keep_tz=False): - """ return an array repr of this object, potentially casting to object """ + """return an array repr of this object, potentially casting to + object.""" if keep_tz and self.tz is not None and str(self.tz) != 'UTC': return self.asobject.values return self.values @property def asobject(self): - """ - Convert to Index of datetime objects - """ + """Convert to Index of datetime objects.""" if isnull(self).any(): msg = 'DatetimeIndex with NaT cannot be converted to object' raise ValueError(msg) return self._get_object_index() def tolist(self): - """ - See ndarray.tolist - """ + """See ndarray.tolist.""" return list(self.asobject) def _get_object_index(self): @@ -776,19 +771,17 @@ def _get_object_index(self): return Index(boxed_values, dtype=object) def to_pydatetime(self): - """ - Return DatetimeIndex as object ndarray of datetime.datetime objects + """Return DatetimeIndex as object ndarray of datetime.datetime objects. 
Returns ------- datetimes : ndarray + """ return tslib.ints_to_pydatetime(self.asi8, tz=self.tz) def to_period(self, freq=None): - """ - Cast to PeriodIndex at a particular frequency - """ + """Cast to PeriodIndex at a particular frequency.""" from pandas.tseries.period import PeriodIndex if self.freq is None and freq is None: @@ -801,9 +794,7 @@ def to_period(self, freq=None): return PeriodIndex(self.values, freq=freq, tz=self.tz) def order(self, return_indexer=False, ascending=True): - """ - Return sorted copy of Index - """ + """Return sorted copy of Index.""" if return_indexer: _as = self.argsort() if not ascending: @@ -818,10 +809,7 @@ def order(self, return_indexer=False, ascending=True): self.tz) def snap(self, freq='S'): - """ - Snap time stamps to nearest occurring frequency - - """ + """Snap time stamps to nearest occurring frequency.""" # Superdumb, punting on any optimizing freq = to_offset(freq) @@ -842,8 +830,7 @@ def snap(self, freq='S'): return DatetimeIndex(snapped, freq=freq, verify_integrity=False) def shift(self, n, freq=None): - """ - Specialized shift which produces a DatetimeIndex + """Specialized shift which produces a DatetimeIndex. 
Parameters ---------- @@ -854,6 +841,7 @@ def shift(self, n, freq=None): Returns ------- shifted : DatetimeIndex + """ if freq is not None and freq != self.offset: if isinstance(freq, compat.string_types): @@ -876,16 +864,12 @@ def shift(self, n, freq=None): name=self.name, tz=self.tz) def repeat(self, repeats, axis=None): - """ - Analogous to ndarray.repeat - """ + """Analogous to ndarray.repeat.""" return DatetimeIndex(self.values.repeat(repeats), name=self.name) def take(self, indices, axis=0): - """ - Analogous to ndarray.take - """ + """Analogous to ndarray.take.""" maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices)) if isinstance(maybe_slice, slice): return self[maybe_slice] @@ -894,22 +878,20 @@ def take(self, indices, axis=0): return self._simple_new(taken, self.name, None, self.tz) def unique(self): - """ - Index.unique with handling for DatetimeIndex metadata + """Index.unique with handling for DatetimeIndex metadata. Returns ------- result : DatetimeIndex + """ result = Int64Index.unique(self) return DatetimeIndex._simple_new(result, tz=self.tz, name=self.name) def union(self, other): - """ - Specialized union for DatetimeIndex objects. If combine - overlapping ranges with the same DateOffset, will be much - faster than Index.union + """Specialized union for DatetimeIndex objects. If combine overlapping + ranges with the same DateOffset, will be much faster than Index.union. 
Parameters ---------- @@ -918,6 +900,7 @@ def union(self, other): Returns ------- y : Index or DatetimeIndex + """ if not isinstance(other, DatetimeIndex): try: @@ -938,9 +921,7 @@ def union(self, other): return result def union_many(self, others): - """ - A bit of a hack to accelerate unioning a collection of indexes - """ + """A bit of a hack to accelerate unioning a collection of indexes.""" this = self for other in others: @@ -969,8 +950,7 @@ def union_many(self, others): return this def append(self, other): - """ - Append a collection of Index options together + """Append a collection of Index options together. Parameters ---------- @@ -979,6 +959,7 @@ def append(self, other): Returns ------- appended : Index + """ name = self.name to_concat = [self] @@ -999,9 +980,7 @@ def append(self, other): return factory(to_concat) def join(self, other, how='left', level=None, return_indexers=False): - """ - See Index.join - """ + """See Index.join.""" if (not isinstance(other, DatetimeIndex) and len(other) > 0 and other.inferred_type not in ('floating', 'mixed-integer', 'mixed-integer-float', 'mixed')): @@ -1115,9 +1094,8 @@ def __array_finalize__(self, obj): self._reset_identity() def intersection(self, other): - """ - Specialized intersection for DatetimeIndex objects. May be much faster - than Index.intersection + """Specialized intersection for DatetimeIndex objects. May be much + faster than Index.intersection. Parameters ---------- @@ -1126,6 +1104,7 @@ def intersection(self, other): Returns ------- y : Index or DatetimeIndex + """ if not isinstance(other, DatetimeIndex): try: @@ -1282,12 +1261,12 @@ def get_value_maybe_box(self, series, key): return _maybe_box(self, values, series, key) def get_loc(self, key): - """ - Get integer location for requested label + """Get integer location for requested label. 
Returns ------- loc : int + """ if isinstance(key, datetime): # needed to localize naive datetimes @@ -1320,9 +1299,7 @@ def _get_string_slice(self, key, use_lhs=True, use_rhs=True): return loc def slice_indexer(self, start=None, end=None, step=None): - """ - Index.slice_indexer, customized to handle time slicing - """ + """Index.slice_indexer, customized to handle time slicing.""" if isinstance(start, time) and isinstance(end, time): if step is not None and step != 1: raise ValueError('Must have step size of 1 with time slices') @@ -1383,7 +1360,7 @@ def slice_locs(self, start=None, end=None): return Index.slice_locs(self, start, end) def __getitem__(self, key): - """Override numpy.ndarray's __getitem__ method to work as desired""" + """Override numpy.ndarray's __getitem__ method to work as desired.""" arr_idx = self.view(np.ndarray) if np.isscalar(key): val = arr_idx[key] @@ -1420,7 +1397,7 @@ def map(self, f): # alias to offset @property def freq(self): - """ return the frequency object if its set, otherwise None """ + """return the frequency object if its set, otherwise None.""" return self.offset @cache_readonly @@ -1432,7 +1409,8 @@ def inferred_freq(self): @property def freqstr(self): - """ return the frequency object as a string if its set, otherwise None """ + """return the frequency object as a string if its set, otherwise + None.""" return self.offset.freqstr _year = _field_accessor('year', 'Y') @@ -1453,8 +1431,10 @@ def freqstr(self): @property def _time(self): - """ - Returns numpy array of datetime.time. The time part of the Timestamps. + """Returns numpy array of datetime.time. + + The time part of the Timestamps. + """ # can't call self.map() which tries to treat func as ufunc # and causes recursion warnings on python 2.6 @@ -1462,19 +1442,21 @@ def _time(self): @property def _date(self): - """ - Returns numpy array of datetime.date. The date part of the Timestamps. + """Returns numpy array of datetime.date. + + The date part of the Timestamps. 
+ """ return _algos.arrmap_object(self.asobject, lambda x: x.date()) def normalize(self): - """ - Return DatetimeIndex with times to midnight. Length is unaltered + """Return DatetimeIndex with times to midnight. Length is unaltered. Returns ------- normalized : DatetimeIndex + """ new_values = tslib.date_normalize(self.asi8, self.tz) return DatetimeIndex(new_values, freq='infer', name=self.name, @@ -1517,16 +1499,12 @@ def is_all_dates(self): @cache_readonly def is_normalized(self): - """ - Returns True if all of the dates are at midnight ("no time") - """ + """Returns True if all of the dates are at midnight ("no time")""" return tslib.dates_normalized(self.asi8, self.tz) @cache_readonly def resolution(self): - """ - Returns day, hour, minute, second, or microsecond - """ + """Returns day, hour, minute, second, or microsecond.""" reso = self._resolution return get_reso_string(reso) @@ -1535,9 +1513,7 @@ def _resolution(self): return tslib.resolution(self.asi8, self.tz) def equals(self, other): - """ - Determines if two Index objects contain the same elements. - """ + """Determines if two Index objects contain the same elements.""" if self.is_(other): return True @@ -1563,8 +1539,7 @@ def equals(self, other): return same_zone and np.array_equal(self.asi8, other.asi8) def insert(self, loc, item): - """ - Make new Index inserting new item at location + """Make new Index inserting new item at location. Parameters ---------- @@ -1576,6 +1551,7 @@ def insert(self, loc, item): Returns ------- new_index : Index + """ if isinstance(item, datetime): item = _to_m8(item, tz=self.tz) @@ -1592,12 +1568,12 @@ def insert(self, loc, item): raise TypeError("cannot insert DatetimeIndex with incompatible label") def delete(self, loc): - """ - Make new DatetimeIndex with passed location deleted + """Make new DatetimeIndex with passed location deleted. 
Returns ------- new_index : DatetimeIndex + """ arr = np.delete(self.values, loc) return DatetimeIndex(arr, tz=self.tz) @@ -1610,12 +1586,12 @@ def _view_like(self, ndarray): return result def tz_convert(self, tz): - """ - Convert DatetimeIndex from one time zone to another (using pytz) + """Convert DatetimeIndex from one time zone to another (using pytz) Returns ------- normalized : DatetimeIndex + """ tz = tools._maybe_get_tz(tz) @@ -1738,9 +1714,7 @@ def indexer_between_time(self, start_time, end_time, include_start=True, return mask.nonzero()[0] def min(self, axis=None): - """ - Overridden ndarray.min to return a Timestamp - """ + """Overridden ndarray.min to return a Timestamp.""" if self.is_monotonic: return self[0] else: @@ -1748,9 +1722,7 @@ def min(self, axis=None): return Timestamp(min_stamp, tz=self.tz) def max(self, axis=None): - """ - Overridden ndarray.max to return a Timestamp - """ + """Overridden ndarray.max to return a Timestamp.""" if self.is_monotonic: return self[-1] else: @@ -1758,10 +1730,11 @@ def max(self, axis=None): return Timestamp(max_stamp, tz=self.tz) def to_julian_date(self): - """ - Convert DatetimeIndex to Float64Index of Julian Dates. + """Convert DatetimeIndex to Float64Index of Julian Dates. + 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day + """ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm @@ -1826,9 +1799,8 @@ def _generate_regular_range(start, end, periods, offset): def date_range(start=None, end=None, periods=None, freq='D', tz=None, normalize=False, name=None, closed=None): - """ - Return a fixed frequency datetime index, with day (calendar) as the default - frequency + """Return a fixed frequency datetime index, with day (calendar) as the + default frequency. 
Parameters ---------- @@ -1858,6 +1830,7 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, Returns ------- rng : DatetimeIndex + """ return DatetimeIndex(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, @@ -1866,9 +1839,8 @@ def date_range(start=None, end=None, periods=None, freq='D', tz=None, def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, normalize=True, name=None, closed=None): - """ - Return a fixed frequency datetime index, with business day as the default - frequency + """Return a fixed frequency datetime index, with business day as the + default frequency. Parameters ---------- @@ -1898,6 +1870,7 @@ def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, Returns ------- rng : DatetimeIndex + """ return DatetimeIndex(start=start, end=end, periods=periods, diff --git a/pandas/tseries/interval.py b/pandas/tseries/interval.py index 104e088ee4e84..70ac144da28fb 100644 --- a/pandas/tseries/interval.py +++ b/pandas/tseries/interval.py @@ -4,9 +4,7 @@ class Interval(object): - """ - Represents an interval of time defined by two timestamps - """ + """Represents an interval of time defined by two timestamps.""" def __init__(self, start, end): self.start = start @@ -14,9 +12,8 @@ def __init__(self, start, end): class PeriodInterval(object): - """ - Represents an interval of time defined by two Period objects (time ordinals) - """ + """Represents an interval of time defined by two Period objects (time + ordinals)""" def __init__(self, start, end): self.start = start @@ -24,9 +21,7 @@ def __init__(self, start, end): class IntervalIndex(Index): - """ - - """ + """""" def __new__(self, starts, ends): pass diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 9130d0f3d8102..0a5bad29bea8b 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -48,8 +48,7 @@ class CacheableOffset(object): class DateOffset(object): - """ - Standard kind of 
date increment used for a date range. + """Standard kind of date increment used for a date range. Works exactly like relativedelta in terms of the keyword args you pass in, use of the keyword n is discouraged-- you would be better @@ -89,6 +88,7 @@ def __add__(date): date + BDay(0) == BDay.rollforward(date) Since 0 is a bit weird, we suggest avoiding its use. + """ _cacheable = False _normalize_cache = True @@ -229,7 +229,7 @@ def __neg__(self): return self.__class__(-self.n, **self.kwds) def rollback(self, dt): - """Roll provided date backward to next offset only if not on offset""" + """Roll provided date backward to next offset only if not on offset.""" if type(dt) == date: dt = datetime(dt.year, dt.month, dt.day) @@ -238,7 +238,7 @@ def rollback(self, dt): return dt def rollforward(self, dt): - """Roll provided date forward to next offset only if not on offset""" + """Roll provided date forward to next offset only if not on offset.""" if type(dt) == date: dt = datetime(dt.year, dt.month, dt.day) @@ -292,9 +292,7 @@ def _from_name(cls, suffix=None): class BusinessDay(SingleConstructorOffset): - """ - DateOffset subclass representing possibly n business days - """ + """DateOffset subclass representing possibly n business days.""" _prefix = 'B' def __init__(self, n=1, **kwds): @@ -486,13 +484,13 @@ def _set_busdaycalendar(self): raise def __getstate__(self): - """Return a pickleable state""" + """Return a pickleable state.""" state = self.__dict__.copy() del state['busdaycalendar'] return state def __setstate__(self, state): - """Reconstruct an instance from a pickled state""" + """Reconstruct an instance from a pickled state.""" self.__dict__ = state self._set_busdaycalendar() @@ -574,7 +572,7 @@ def name(self): class MonthEnd(MonthOffset): - """DateOffset of one month end""" + """DateOffset of one month end.""" def apply(self, other): other = datetime(other.year, other.month, other.day, @@ -598,7 +596,7 @@ def onOffset(cls, dt): class MonthBegin(MonthOffset): - 
"""DateOffset of one month at beginning""" + """DateOffset of one month at beginning.""" def apply(self, other): n = self.n @@ -617,7 +615,7 @@ def onOffset(cls, dt): class BusinessMonthEnd(MonthOffset): - """DateOffset increments between business EOM dates""" + """DateOffset increments between business EOM dates.""" def isAnchored(self): return (self.n == 1) @@ -645,7 +643,7 @@ def apply(self, other): class BusinessMonthBegin(MonthOffset): - """DateOffset of one business month at beginning""" + """DateOffset of one business month at beginning.""" def apply(self, other): n = self.n @@ -680,13 +678,13 @@ def onOffset(cls, dt): class Week(DateOffset): - """ - Weekly offset + """Weekly offset. Parameters ---------- weekday : int, default None Always generate specific day of week. 0 for Monday + """ def __init__(self, n=1, **kwds): @@ -770,8 +768,8 @@ class WeekDay(object): class WeekOfMonth(DateOffset): - """ - Describes monthly dates like "the Tuesday of the 2nd week of each month" + """Describes monthly dates like "the Tuesday of the 2nd week of each + month". Parameters ---------- @@ -786,6 +784,7 @@ class WeekOfMonth(DateOffset): 4: Fridays 5: Saturdays 6: Sundays + """ def __init__(self, n=1, **kwds): @@ -856,8 +855,8 @@ def _from_name(cls, suffix=None): return cls(week=week, weekday=weekday) class LastWeekOfMonth(DateOffset): - """ - Describes monthly dates in last week of month like "the last Tuesday of each month" + """Describes monthly dates in last week of month like "the last Tuesday of + each month". 
Parameters ---------- @@ -870,6 +869,7 @@ class LastWeekOfMonth(DateOffset): 4: Fridays 5: Saturdays 6: Sundays + """ def __init__(self, n=1, **kwds): self.n = n @@ -1132,7 +1132,7 @@ def apply(self, other): class YearOffset(DateOffset): - """DateOffset that just needs a month""" + """DateOffset that just needs a month.""" def __init__(self, n=1, **kwds): self.month = kwds.get('month', self._default_month) @@ -1155,7 +1155,7 @@ def rule_code(self): class BYearEnd(YearOffset): - """DateOffset increments between business EOM dates""" + """DateOffset increments between business EOM dates.""" _outputName = 'BusinessYearEnd' _default_month = 12 _prefix = 'BA' @@ -1192,7 +1192,7 @@ def apply(self, other): class BYearBegin(YearOffset): - """DateOffset increments between business year begin dates""" + """DateOffset increments between business year begin dates.""" _outputName = 'BusinessYearBegin' _default_month = 1 _prefix = 'BAS' @@ -1224,7 +1224,7 @@ def apply(self, other): class YearEnd(YearOffset): - """DateOffset increments between calendar year ends""" + """DateOffset increments between calendar year ends.""" _default_month = 12 _prefix = 'A' @@ -1280,7 +1280,7 @@ def onOffset(self, dt): class YearBegin(YearOffset): - """DateOffset increments between calendar year begin dates""" + """DateOffset increments between calendar year begin dates.""" _default_month = 1 _prefix = 'AS' @@ -1838,10 +1838,11 @@ class Nano(Tick): def _get_firstbday(wkday): - """ - wkday is the result of monthrange(year, month) + """wkday is the result of monthrange(year, month) + + If it's a saturday or sunday, increment first business day to + reflect this - If it's a saturday or sunday, increment first business day to reflect this """ first = 1 if wkday == 5: # on Saturday @@ -1853,10 +1854,9 @@ def _get_firstbday(wkday): def generate_range(start=None, end=None, periods=None, offset=BDay(), time_rule=None): - """ - Generates a sequence of dates corresponding to the specified time - offset. 
Similar to dateutil.rrule except uses pandas DateOffset - objects to represent time increments + """Generates a sequence of dates corresponding to the specified time + offset. Similar to dateutil.rrule except uses pandas DateOffset objects to + represent time increments. Parameters ---------- @@ -1948,9 +1948,12 @@ def generate_range(start=None, end=None, periods=None, def _make_offset(key): - """Gets offset based on key. KeyError if prefix is bad, ValueError if - suffix is bad. All handled by `get_offset` in tseries/frequencies. Not - public.""" + """Gets offset based on key. + + KeyError if prefix is bad, ValueError if suffix is bad. All handled + by `get_offset` in tseries/frequencies. Not public. + + """ if key is None: return None split = key.replace('@', '-').split('-') diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 5fca119c14e83..ad47a93ff2770 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -42,8 +42,7 @@ def f(self): class Period(PandasObject): - """ - Represents an period of time + """Represents an period of time. Parameters ---------- @@ -59,6 +58,7 @@ class Period(PandasObject): hour : int, default 0 minute : int, default 0 second : int, default 0 + """ __slots__ = ['freq', 'ordinal'] _comparables = ['name','freqstr'] @@ -182,9 +182,8 @@ def f(self, other): __ge__ = _comp_method(operator.ge, '__ge__') def asfreq(self, freq, how='E'): - """ - Convert Period to desired frequency, either at the start or end of the - interval + """Convert Period to desired frequency, either at the start or end of + the interval. 
Parameters ---------- @@ -195,6 +194,7 @@ def asfreq(self, freq, how='E'): Returns ------- resampled : Period + """ how = _validate_end_alias(how) base1, mult1 = _gfc(self.freq) @@ -218,9 +218,8 @@ def end_time(self): return Timestamp(ordinal) def to_timestamp(self, freq=None, how='start', tz=None): - """ - Return the Timestamp representation of the Period at the target - frequency at the specified end (how) of the Period + """Return the Timestamp representation of the Period at the target + frequency at the specified end (how) of the Period. Parameters ---------- @@ -234,6 +233,7 @@ def to_timestamp(self, freq=None, how='start', tz=None): Returns ------- Timestamp + """ how = _validate_end_alias(how) @@ -277,11 +277,11 @@ def __repr__(self): return "Period('%s', '%s')" % (formatted, freqstr) def __unicode__(self): - """ - Return a string representation for a particular DataFrame + """Return a string representation for a particular DataFrame. Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. + """ base, mult = _gfc(self.freq) formatted = tslib.period_format(self.ordinal, base) @@ -729,9 +729,7 @@ def is_all_dates(self): @property def is_full(self): - """ - Returns True if there are any missing periods from start to end - """ + """Returns True if there are any missing periods from start to end.""" if len(self) == 0: return True if not self.is_monotonic: @@ -740,9 +738,7 @@ def is_full(self): return ((values[1:] - values[:-1]) < 2).all() def factorize(self): - """ - Specialized factorize that boxes uniques - """ + """Specialized factorize that boxes uniques.""" from pandas.core.algorithms import factorize labels, uniques = factorize(self.values) uniques = PeriodIndex(ordinal=uniques, freq=self.freq) @@ -810,23 +806,18 @@ def _mpl_repr(self): return self._get_object_array() def equals(self, other): - """ - Determines if two Index objects contain the same elements. 
- """ + """Determines if two Index objects contain the same elements.""" if self.is_(other): return True return np.array_equal(self.asi8, other.asi8) def tolist(self): - """ - Return a list of Period objects - """ + """Return a list of Period objects.""" return self._get_object_array().tolist() def to_timestamp(self, freq=None, how='start'): - """ - Cast to DatetimeIndex + """Cast to DatetimeIndex. Parameters ---------- @@ -838,6 +829,7 @@ def to_timestamp(self, freq=None, how='start'): Returns ------- DatetimeIndex + """ how = _validate_end_alias(how) @@ -852,8 +844,7 @@ def to_timestamp(self, freq=None, how='start'): return DatetimeIndex(new_data, freq='infer', name=self.name) def shift(self, n): - """ - Specialized shift which produces an PeriodIndex + """Specialized shift which produces an PeriodIndex. Parameters ---------- @@ -864,6 +855,7 @@ def shift(self, n): Returns ------- shifted : PeriodIndex + """ if n == 0: return self @@ -922,12 +914,12 @@ def get_value(self, series, key): return _maybe_box(self, self._engine.get_value(s, key), series, key) def get_loc(self, key): - """ - Get integer location for requested label + """Get integer location for requested label. 
Returns ------- loc : int + """ try: return self._engine.get_loc(key) @@ -1003,9 +995,7 @@ def _get_string_slice(self, key): return slice(left, right) def join(self, other, how='left', level=None, return_indexers=False): - """ - See Index.join - """ + """See Index.join.""" self._assert_can_do_setop(other) result = Int64Index.join(self, other, how=how, level=level, @@ -1037,7 +1027,7 @@ def _apply_meta(self, rawarr): return rawarr def __getitem__(self, key): - """Override numpy.ndarray's __getitem__ method to work as desired""" + """Override numpy.ndarray's __getitem__ method to work as desired.""" arr_idx = self.view(np.ndarray) if np.isscalar(key): val = arr_idx[key] @@ -1110,9 +1100,7 @@ def __str__(self): return self.__bytes__() def take(self, indices, axis=None): - """ - Analogous to ndarray.take - """ + """Analogous to ndarray.take.""" indices = com._ensure_platform_int(indices) taken = self.values.take(indices, axis=axis) taken = taken.view(PeriodIndex) @@ -1121,8 +1109,7 @@ def take(self, indices, axis=None): return taken def append(self, other): - """ - Append a collection of Index options together + """Append a collection of Index options together. 
Parameters ---------- @@ -1131,6 +1118,7 @@ def append(self, other): Returns ------- appended : Index + """ name = self.name to_concat = [self] @@ -1160,14 +1148,14 @@ def append(self, other): return Index(com._concat_compat(to_concat), name=name) def __reduce__(self): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" object_state = list(np.ndarray.__reduce__(self)) subclass_state = (self.name, self.freq) object_state[2] = (object_state[2], subclass_state) return tuple(object_state) def __setstate__(self, state): - """Necessary for making this object picklable""" + """Necessary for making this object picklable.""" if len(state) == 2: nd_state, own_state = state np.ndarray.__setstate__(self, nd_state) @@ -1313,10 +1301,8 @@ def pnow(freq=None): def period_range(start=None, end=None, periods=None, freq='D', name=None): - """ - Return a fixed frequency datetime index, with day (calendar) as the default - frequency - + """Return a fixed frequency datetime index, with day (calendar) as the + default frequency. Parameters ---------- @@ -1332,6 +1318,7 @@ def period_range(start=None, end=None, periods=None, freq='D', name=None): Returns ------- prng : PeriodIndex + """ return PeriodIndex(start=start, end=end, periods=periods, freq=freq, name=name) diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index ae32367a57cd3..f336cecf15d7f 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -27,8 +27,7 @@ def tsplot(series, plotf, **kwargs): - """ - Plots a Series on the given Matplotlib axes or the current axes + """Plots a Series on the given Matplotlib axes or the current axes. 
Parameters ---------- diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 7f243c20fe56e..043faf7214028 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -412,9 +412,7 @@ def _adjust_dates_anchored(first, last, offset, closed='right', base=0): def asfreq(obj, freq, method=None, how=None, normalize=False): - """ - Utility frequency conversion method for Series/DataFrame - """ + """Utility frequency conversion method for Series/DataFrame.""" if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 9dc26f2b01ccc..d059c0e6ee744 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -32,7 +32,7 @@ class TestPeriodProperties(tm.TestCase): - "Test properties such as year, month, weekday, etc...." + """Test properties such as year, month, weekday, etc....""" # def test_quarterly_negative_ordinals(self): @@ -491,7 +491,7 @@ def noWrap(item): class TestFreqConversion(tm.TestCase): - "Test frequency conversion of date objects" + """Test frequency conversion of date objects.""" def test_asfreq_corner(self): val = Period(freq='A', year=2007) @@ -2174,7 +2174,7 @@ def _permute(obj): class TestMethods(tm.TestCase): - "Base test class for MaskedArrays." 
+ """Base test class for MaskedArrays.""" def test_add(self): dt1 = Period(freq='D', year=2008, month=1, day=1) @@ -2186,9 +2186,7 @@ def test_add(self): class TestPeriodRepresentation(tm.TestCase): - """ - Wish to match NumPy units - """ + """Wish to match NumPy units.""" def test_annual(self): self._check_freq('A', 1970) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index f7edd92fce122..974c010521daf 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2192,9 +2192,7 @@ def test_join_with_period_index(self): class TestDatetime64(tm.TestCase): - """ - Also test supoprt for datetime64[ns] in Series / DataFrame - """ + """Also test supoprt for datetime64[ns] in Series / DataFrame.""" def setUp(self): dti = DatetimeIndex(start=datetime(2005, 1, 1), diff --git a/pandas/tseries/tests/test_util.py b/pandas/tseries/tests/test_util.py index b10c4351c8725..9b27b8a087c4c 100644 --- a/pandas/tseries/tests/test_util.py +++ b/pandas/tseries/tests/test_util.py @@ -14,9 +14,7 @@ class TestPivotAnnual(tm.TestCase): - """ - New pandas of scikits.timeseries pivot_annual - """ + """New pandas of scikits.timeseries pivot_annual.""" def test_daily(self): rng = date_range('1/1/2000', '12/31/2004', freq='D') ts = Series(np.random.randn(len(rng)), index=rng) diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index cc01c26f78b70..695801eda4e68 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -1,6 +1,4 @@ -""" -timedelta support tools -""" +"""timedelta support tools.""" import re from datetime import timedelta @@ -15,8 +13,7 @@ repr_timedelta64 = tslib.repr_timedelta64 def to_timedelta(arg, box=True, unit='ns'): - """ - Convert argument to timedelta + """Convert argument to timedelta. 
Parameters ---------- @@ -28,6 +25,7 @@ def to_timedelta(arg, box=True, unit='ns'): Returns ------- ret : timedelta64/arrays of timedelta64 if parsing succeeded + """ if _np_version_under1p7: raise ValueError("to_timedelta is not support for numpy < 1.7") @@ -74,7 +72,7 @@ def _convert_listlike(arg, box, unit): _whitespace = re.compile('^\s*$') def _coerce_scalar_to_timedelta_type(r, unit='ns'): - """ convert strings to timedelta; coerce to np.timedelta64""" + """convert strings to timedelta; coerce to np.timedelta64.""" if isinstance(r, compat.string_types): @@ -86,7 +84,7 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns'): return tslib.convert_to_timedelta(r,unit) def _get_string_converter(r, unit='ns'): - """ return a string converter for r to process the timedelta format """ + """return a string converter for r to process the timedelta format.""" # treat as a nan if _whitespace.search(r): diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 6761b5cbb04b0..c5e0954e961cc 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -68,8 +68,7 @@ def _maybe_get_tz(tz): def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=compat.parse_date, dt_str_split=_DATEUTIL_LEXER_SPLIT): - """ - Guess the datetime format of a given datetime string. + """Guess the datetime format of a given datetime string. Parameters ---------- @@ -89,6 +88,7 @@ def _guess_datetime_format(dt_str, dayfirst=False, Returns ------- ret : datetime formatt string (for `strftime` or `strptime`) + """ if dt_str_parse is None or dt_str_split is None: return None @@ -185,8 +185,7 @@ def _guess_datetime_format_for_array(arr, **kwargs): def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, format=None, coerce=False, unit='ns', infer_datetime_format=False): - """ - Convert argument to datetime + """Convert argument to datetime. 
Parameters ---------- @@ -228,6 +227,7 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, >>> df = df.astype(str) >>> pd.to_datetime(df.day + df.month + df.year, format="%d%m%Y") + """ from pandas import Timestamp from pandas.core.series import Series @@ -370,8 +370,7 @@ def calc_with_mask(carg,mask): def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): - """ - Try hard to parse datetime string, leveraging dateutil plus some extra + """Try hard to parse datetime string, leveraging dateutil plus some extra goodies like quarter recognition. Parameters @@ -387,6 +386,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): Returns ------- datetime, datetime/dateutil.parser._result, str + """ from pandas.core.config import get_option from pandas.tseries.offsets import DateOffset @@ -477,7 +477,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): def dateutil_parse(timestr, default, ignoretz=False, tzinfos=None, **kwargs): - """ lifted from dateutil to get resolution""" + """lifted from dateutil to get resolution.""" from dateutil import tz import time fobj = StringIO(str(timestr)) @@ -574,7 +574,7 @@ def format(dt): def ole2datetime(oledt): - """function for converting excel date to normal date format""" + """function for converting excel date to normal date format.""" val = float(oledt) # Excel has a bug where it thinks the date 2/29/1900 exists diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py index 664a42543822d..acdeb0dfdbdb1 100644 --- a/pandas/tseries/util.py +++ b/pandas/tseries/util.py @@ -9,8 +9,7 @@ def pivot_annual(series, freq=None): - """ - Group a series by years, taking leap years into account. + """Group a series by years, taking leap years into account. 
The output has as many rows as distinct years in the original series, and as many columns as the length of a leap year in the units corresponding @@ -37,6 +36,7 @@ def pivot_annual(series, freq=None): Returns ------- annual : DataFrame + """ index = series.index year = index.year @@ -82,13 +82,13 @@ def pivot_annual(series, freq=None): def isleapyear(year): - """ - Returns true if year is a leap year. + """Returns true if year is a leap year. Parameters ---------- year : integer / sequence A given (list of) year(s). + """ year = np.asarray(year) return np.logical_or(year % 400 == 0, diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py index 476a643b34ff7..c2f0556af0c0b 100644 --- a/pandas/util/decorators.py +++ b/pandas/util/decorators.py @@ -16,7 +16,7 @@ def wrapper(*args, **kwargs): def deprecate_kwarg(old_arg_name, new_arg_name): - """Decorator to deprecate a keyword argument of a function + """Decorator to deprecate a keyword argument of a function. Parameters ---------- @@ -67,8 +67,7 @@ def wrapper(*args, **kwargs): class Substitution(object): - """ - A decorator to take a function's docstring and perform string + """A decorator to take a function's docstring and perform string substitution on it. This decorator should be robust even if func.__doc__ is None @@ -93,6 +92,7 @@ def some_function(x): @sub_first_last_names def some_function(x): "%s %s wrote the Raven" + """ def __init__(self, *args, **kwargs): if (args and kwargs): @@ -105,7 +105,7 @@ def __call__(self, func): return func def update(self, *args, **kwargs): - "Assume self.params is a dict and update it with supplied args" + """Assume self.params is a dict and update it with supplied args.""" self.params.update(*args, **kwargs) @classmethod @@ -122,9 +122,8 @@ def from_params(cls, params): class Appender(object): - """ - A function decorator that will append an addendum to the docstring - of the target function. 
+ """A function decorator that will append an addendum to the docstring of + the target function. This decorator should be robust even if func.__doc__ is None (for example, if -OO was passed to the interpreter). @@ -139,6 +138,7 @@ class Appender(object): def my_dog(has='fleas'): "This docstring will have a copyright below" pass + """ def __init__(self, addendum, join='', indents=0): if indents > 0: @@ -174,13 +174,13 @@ def wrapped(*args, **kwargs): class KnownFailureTest(Exception): - '''Raise this exception to mark a test as a known failing test.''' + """Raise this exception to mark a test as a known failing test.""" pass def knownfailureif(fail_condition, msg=None): - """ - Make function raise KnownFailureTest exception if given condition is true. + """Make function raise KnownFailureTest exception if given condition is + true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly diff --git a/pandas/util/misc.py b/pandas/util/misc.py index 15492cde5a9f7..6fbb03329498d 100644 --- a/pandas/util/misc.py +++ b/pandas/util/misc.py @@ -1,7 +1,7 @@ -""" various miscellaneous utilities """ +"""various miscellaneous utilities.""" def is_little_endian(): - """ am I little endian """ + """am I little endian.""" import sys return sys.byteorder == 'little' diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py index 1c1c2e3224b30..47a2b23da7eb7 100644 --- a/pandas/util/print_versions.py +++ b/pandas/util/print_versions.py @@ -7,7 +7,7 @@ def get_sys_info(): - "Returns system information as a dict" + """Returns system information as a dict.""" blob = [] diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 8abbb37646b49..8cc775d6a97ae 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -72,45 +72,51 @@ def assert_numpy_array_equal(self, np_array, assert_equal): raise AssertionError('{0} is not equal to {1}.'.format(np_array, assert_equal)) def 
assertIs(self, first, second, msg=''): - """Checks that 'first' is 'second'""" + """Checks that 'first' is 'second'.""" a, b = first, second assert a is b, "%s: %r is not %r" % (msg.format(a,b), a, b) def assertIsNot(self, first, second, msg=''): - """Checks that 'first' is not 'second'""" + """Checks that 'first' is not 'second'.""" a, b = first, second assert a is not b, "%s: %r is %r" % (msg.format(a,b), a, b) def assertIsNone(self, expr, msg=''): - """Checks that 'expr' is None""" + """Checks that 'expr' is None.""" self.assertIs(expr, None, msg) def assertIsNotNone(self, expr, msg=''): - """Checks that 'expr' is not None""" + """Checks that 'expr' is not None.""" self.assertIsNot(expr, None, msg) def assertIn(self, first, second, msg=''): - """Checks that 'first' is in 'second'""" + """Checks that 'first' is in 'second'.""" a, b = first, second assert a in b, "%s: %r is not in %r" % (msg.format(a,b), a, b) def assertNotIn(self, first, second, msg=''): - """Checks that 'first' is not in 'second'""" + """Checks that 'first' is not in 'second'.""" a, b = first, second assert a not in b, "%s: %r is in %r" % (msg.format(a,b), a, b) def assertIsInstance(self, obj, cls, msg=''): - """Test that obj is an instance of cls - (which can be a class or a tuple of classes, - as supported by isinstance()).""" + """Test that obj is an instance of cls. + + (which can be a class or a tuple of classes, as supported by + isinstance()). + + """ assert isinstance(obj, cls), ( "%sExpected object to be of type %r, found %r instead" % ( msg, cls, type(obj))) def assertNotIsInstance(self, obj, cls, msg=''): - """Test that obj is not an instance of cls - (which can be a class or a tuple of classes, - as supported by isinstance()).""" + """Test that obj is not an instance of cls. + + (which can be a class or a tuple of classes, as supported by + isinstance()). 
+ + """ assert not isinstance(obj, cls), ( "%sExpected object to be of type %r, found %r instead" % ( msg, cls, type(obj))) @@ -137,7 +143,7 @@ def randu(n): def choice(x, size=10): - """sample with replacement; uniform over the input""" + """sample with replacement; uniform over the input.""" try: return np.random.choice(x, size=size) except AttributeError: @@ -155,7 +161,7 @@ def close(fignum=None): def mplskip(cls): - """Skip a TestCase instance if matplotlib isn't installed""" + """Skip a TestCase instance if matplotlib isn't installed.""" @classmethod def setUpClass(cls): @@ -287,6 +293,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL): This is useful when you want to run a particular block of code under a particular locale, without globally setting the locale. This probably isn't thread-safe. + """ current_locale = locale.getlocale() @@ -318,6 +325,7 @@ def _can_set_locale(lc): ------- isvalid : bool Whether the passed locale can be set + """ try: with set_locale(lc): @@ -343,6 +351,7 @@ def _valid_locales(locales, normalize): ------- valid_locales : list A list of valid locales. + """ if normalize: normalizer = lambda x: locale.normalize(x.strip()) @@ -396,6 +405,7 @@ def ensure_clean(filename=None, return_filelike=False): return_filelike : bool (default False) if True, returns a file-like which is *always* cleaned. Necessary for savefig and other functions which want to append extensions. + """ filename = filename or '' fd = None @@ -433,8 +443,7 @@ def ensure_clean(filename=None, return_filelike=False): def get_data_path(f=''): """Return the path of a data file, these are relative to the current test - directory. - """ + directory.""" # get our callers file _, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1] base_dir = os.path.abspath(os.path.dirname(filename)) @@ -445,21 +454,21 @@ def get_data_path(f=''): def equalContents(arr1, arr2): - """Checks if the set of unique elements of arr1 and arr2 are equivalent. 
- """ + """Checks if the set of unique elements of arr1 and arr2 are equivalent.""" return frozenset(arr1) == frozenset(arr2) def assert_isinstance(obj, class_type_or_tuple, msg=''): - """asserts that obj is an instance of class_type_or_tuple""" + """asserts that obj is an instance of class_type_or_tuple.""" assert isinstance(obj, class_type_or_tuple), ( "%sExpected object to be of type %r, found %r instead" % ( msg, class_type_or_tuple, type(obj))) def assert_equal(a, b, msg=""): - """asserts that a equals b, like nose's assert_equal, but allows custom message to start. - Passes a and b to format string as well. So you can use '{0}' and '{1}' to display a and b. + """asserts that a equals b, like nose's assert_equal, but allows custom + message to start. Passes a and b to format string as well. So you can use + '{0}' and '{1}' to display a and b. Examples -------- @@ -468,6 +477,7 @@ def assert_equal(a, b, msg=""): Traceback (most recent call last): ... AssertionError: 5.2 was really a dead parrot: 5.2 != 1.2 + """ assert a == b, "%s: %r != %r" % (msg.format(a,b), a, b) @@ -483,7 +493,11 @@ def assert_index_equal(left, right): def assert_attr_equal(attr, left, right): - """checks attributes are equal. Both objects must have attribute.""" + """checks attributes are equal. + + Both objects must have attribute. + + """ left_attr = getattr(left, attr) right_attr = getattr(right, attr) assert_equal(left_attr,right_attr,"attr is not equal [{0}]" .format(attr)) @@ -754,7 +768,7 @@ def makePanel4D(nper=None): def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, idx_type=None): - """Create an index/multindex with given dimensions, levels, names, etc' + """Create an index/multindex with given dimensions, levels, names, etc'. nentries - number of entries in index nlevels - number of levels (> 1 produces multindex) @@ -773,6 +787,7 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, "dt" create a datetime index. 
if unspecified, string labels will be generated. + """ if ndupe_l is None: @@ -952,16 +967,16 @@ def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None, data_gen_f=None, c_ndupe_l=None, r_ndupe_l=None, dtype=None, c_idx_type=None, r_idx_type=None): - """ - Parameters - ---------- + """Parameters. + Density : float, optional - Float in (0, 1) that gives the percentage of non-missing numbers in - the DataFrame. - random_state : {np.random.RandomState, int}, optional - Random number generator or random seed. + Float in (0, 1) that gives the percentage of non-missing numbers in + the DataFrame. + random_state : {np.random.RandomState, int}, optional + Random number generator or random seed. + + See makeCustomDataframe for descriptions of the rest of the parameters. - See makeCustomDataframe for descriptions of the rest of the parameters. """ df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names, r_idx_names=r_idx_names, @@ -1058,12 +1073,13 @@ def package_check(pkg_name, version=None, app='pandas', checker=LooseVersion, def skip_if_no_package(*args, **kwargs): - """Raise SkipTest if package_check fails + """Raise SkipTest if package_check fails. Parameters ---------- *args Positional parameters passed to `package_check` *kwargs Keyword parameters passed to `package_check` + """ from nose import SkipTest package_check(exc_failed_import=SkipTest, @@ -1136,7 +1152,7 @@ def dec(f): def can_connect(url, error_classes=_network_error_classes): """Try to connect to the given url. True if succeeds, False if IOError - raised + raised. 
Parameters ---------- @@ -1148,6 +1164,7 @@ def can_connect(url, error_classes=_network_error_classes): connectable : bool Return True if no IOError (unable to connect) or URLError (bad url) was raised + """ try: with urlopen(url): @@ -1166,8 +1183,7 @@ def network(t, url="http://www.google.com", skip_errnos=_network_errno_vals, _skip_on_messages=_network_error_messages, ): - """ - Label a test as requiring network connection and, if an error is + """Label a test as requiring network connection and, if an error is encountered, only raise if it does not find a network connection. In comparison to ``network``, this assumes an added contract to your test: @@ -1248,6 +1264,7 @@ def network(t, url="http://www.google.com", SkipTest Errors not related to networking will always be raised. + """ from nose import SkipTest t.network = True @@ -1294,8 +1311,7 @@ def wrapper(*args, **kwargs): class SimpleMock(object): - """ - Poor man's mocking object + """Poor man's mocking object. Note: only works for new-style classes, assumes __getattribute__ exists. @@ -1306,6 +1322,7 @@ class SimpleMock(object): True >>> a.attr1 == "fizz" and a.attr2 == "buzz" True + """ def __init__(self, obj, *args, **kwds): @@ -1325,8 +1342,7 @@ def __getattribute__(self, name): @contextmanager def stdin_encoding(encoding=None): - """ - Context manager for running bits of code while emulating an arbitrary + """Context manager for running bits of code while emulating an arbitrary stdin encoding. >>> import sys @@ -1346,7 +1362,7 @@ def stdin_encoding(encoding=None): def assertRaises(_exception, _callable=None, *args, **kwargs): - """assertRaises that is usable as context manager or in a with statement + """assertRaises that is usable as context manager or in a with statement. 
Exceptions that don't match the given Exception type fall through:: @@ -1374,6 +1390,7 @@ def assertRaises(_exception, _callable=None, *args, **kwargs): function, just like the normal assertRaises >>> assertRaises(TypeError, ",".join, [1, 3, 5]); + """ manager = _AssertRaisesContextmanager(exception=_exception) # don't return anything if used in function form @@ -1429,7 +1446,8 @@ def assertRaisesRegexp(_exception, _regexp, _callable=None, *args, **kwargs): class _AssertRaisesContextmanager(object): - """handles the behind the scenes work for assertRaises and assertRaisesRegexp""" + """handles the behind the scenes work for assertRaises and + assertRaisesRegexp.""" def __init__(self, exception, regexp=None, *args, **kwargs): self.exception = exception if regexp is not None and not hasattr(regexp, "search"): @@ -1531,8 +1549,7 @@ def disabled(t): class RNGContext(object): - """ - Context manager to set the numpy random number generator speed. Returns + """Context manager to set the numpy random number generator speed. Returns to the original value upon exiting the context manager. Parameters @@ -1545,6 +1562,7 @@ class RNGContext(object): with RNGContext(42): np.random.randn() + """ def __init__(self, seed):
Not really for merging, but I wanted to see how [docformatter](https://github.com/myint/docformatter) handled codebase (e.g. any bugs). Perhaps we can make these changes incrementally. cc #6248
https://api.github.com/repos/pandas-dev/pandas/pulls/6827
2014-04-06T19:53:06Z
2014-06-10T07:08:28Z
null
2014-07-10T00:07:04Z
BENCH: add vbench for issue 6697
diff --git a/vb_suite/replace.py b/vb_suite/replace.py index 517e2da599694..46da3e0691897 100644 --- a/vb_suite/replace.py +++ b/vb_suite/replace.py @@ -13,17 +13,24 @@ date_range = DateRange ts = Series(np.random.randn(N), index=rng) +""" -def replace_slow(ser, old, new): - lib.slow_replace(ser.values, old, new) - return ser +large_dict_setup = """from pandas_vb_common import * +from pandas.compat import range +n = 10 ** 6 +start_value = 10 ** 5 +to_rep = dict((i, start_value + i) for i in range(n)) +s = Series(np.random.randint(n, size=10 ** 3)) """ replace_fillna = Benchmark('ts.fillna(0., inplace=True)', common_setup, + name='replace_fillna', start_date=datetime(2012, 4, 4)) replace_replacena = Benchmark('ts.replace(np.nan, 0., inplace=True)', common_setup, + name='replace_replacena', start_date=datetime(2012, 5, 15)) - -# replace_putmask = Benchmark('replace_slow(ts, np.nan, 0.)', common_setup, -# start_date=datetime(2012, 5, 15)) +replace_large_dict = Benchmark('s.replace(to_rep, inplace=True)', + large_dict_setup, + name='replace_large_dict', + start_date=datetime(2014, 4, 6))
null
https://api.github.com/repos/pandas-dev/pandas/pulls/6825
2014-04-06T18:19:07Z
2014-04-06T18:37:29Z
2014-04-06T18:37:29Z
2014-07-16T09:00:51Z
BUG: Regression from 0.13 with fillna and a Series on datetime-like (6344)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 8b98bdd7a350c..cddfd2e7e37f9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -321,6 +321,7 @@ Bug Fixes - Bug with numpy < 1.7.2 when reading long strings from ``HDFStore`` (:issue:`6166`) - Bug in ``DataFrame._reduce`` where non bool-like (0/1) integers were being coverted into bools. (:issue:`6806`) +- Regression from 0.13 with ``fillna`` and a Series on datetime-like (:issue:`6344`) pandas 0.13.1 ------------- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index e28d4029d4fa0..25afaeaf62c18 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -374,7 +374,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): else: return [self.copy()] - mask = com.isnull(self.values) + mask = isnull(self.values) if limit is not None: if self.ndim > 2: raise NotImplementedError @@ -1306,7 +1306,7 @@ def fill_value(self): def _try_fill(self, value): """ if we are a NaT, return the actual fill value """ - if isinstance(value, type(tslib.NaT)) or isnull(value): + if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all(): value = tslib.iNaT elif isinstance(value, np.timedelta64): pass @@ -1688,7 +1688,7 @@ def fill_value(self): def _try_fill(self, value): """ if we are a NaT, return the actual fill value """ - if isinstance(value, type(tslib.NaT)) or isnull(value): + if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all(): value = tslib.iNaT return value @@ -1697,7 +1697,7 @@ def fillna(self, value, limit=None, # straight putmask here values = self.values if inplace else self.values.copy() - mask = com.isnull(self.values) + mask = isnull(self.values) value = self._try_fill(value) if limit is not None: if self.ndim > 2: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 39e8b4db94994..0a7100ce091c5 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7221,6 +7221,18 @@ 
def test_fillna(self): result = df.fillna(999,limit=1) assert_frame_equal(result, expected) + # with datelike + # GH 6344 + df = DataFrame({ + 'Date':[pd.NaT, Timestamp("2014-1-1")], + 'Date2':[ Timestamp("2013-1-1"), pd.NaT] + }) + + expected = df.copy() + expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2']) + result = df.fillna(value={'Date':df['Date2']}) + assert_frame_equal(result, expected) + def test_fillna_dtype_conversion(self): # make sure that fillna on an empty frame works df = DataFrame(index=["A","B","C"], columns = [1,2,3,4,5])
closes #6344
https://api.github.com/repos/pandas-dev/pandas/pulls/6824
2014-04-06T17:59:24Z
2014-04-06T18:16:27Z
2014-04-06T18:16:27Z
2014-06-25T21:09:37Z
CLN: leftover rebasing from 405018cf
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 216ff40f43e8d..12ba042487ebe 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -440,20 +440,6 @@ def test_unequal(name): frame_shift_axis0 = Benchmark('df.shift(1,axis=0)', setup, start_date=datetime(2014,1,1)) frame_shift_axis1 = Benchmark('df.shift(1,axis=1)', setup, -<<<<<<< HEAD - start_date=datetime(2014,1,1)) - -# -setup = common_setup + """ -df = DataFrame(np.random.rand(10000,500)) -df = df.consolidate() -# note: df._data.blocks are c_contigous -""" -frame_shift_c_order_axis0 = Benchmark('df.shift(1,axis=0)', setup, - start_date=datetime(2014,1,1)) -frame_shift_c_order_axis1 = Benchmark('df.shift(1,axis=1)', setup, - start_date=datetime(2014,1,1)) -======= name = 'frame_shift_axis_1', start_date=datetime(2014,1,1)) @@ -475,4 +461,3 @@ def get_data(n=100000): setup, name='frame_from_records_generator_nrows', start_date=datetime(2013,10,04)) # issue-4911 ->>>>>>> 8aee1cd8b8a711021f70fe62ecc3f548aa6e89c4
null
https://api.github.com/repos/pandas-dev/pandas/pulls/6823
2014-04-06T17:09:07Z
2014-04-06T17:35:25Z
2014-04-06T17:35:25Z
2014-07-16T09:00:48Z
TST/BUG: make the gender variable non random in test_hist_by_no_extra_plots
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index a64f24a61db9b..efefc96b51104 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -182,7 +182,7 @@ def test_plot_fails_with_dupe_color_and_style(self): def test_hist_by_no_extra_plots(self): import matplotlib.pyplot as plt n = 10 - df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n), + df = DataFrame({'gender': ['Male'] * 5 + ['Female'] * 5, 'height': random.normal(66, 4, size=n)}) axes = df.height.hist(by=df.gender) self.assertEqual(len(plt.get_fignums()), 1) @@ -593,9 +593,9 @@ def test_bar_linewidth(self): @slow def test_bar_barwidth(self): df = DataFrame(randn(5, 5)) - + width = 0.9 - + # regular ax = df.plot(kind='bar', width=width) for r in ax.patches: @@ -681,7 +681,7 @@ def _check_bar_alignment(self, df, kind='bar', stacked=False, align=align, width=width, position=position, grid=True) - tick_pos = np.arange(len(df)) + tick_pos = np.arange(len(df)) if not isinstance(axes, np.ndarray): axes = [axes] @@ -780,7 +780,7 @@ def test_bar_subplots_center(self): self.assertEqual(ax.get_xlim(), (-0.5, 4.75)) self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9) - + axes = self._check_bar_alignment(df, kind='barh', subplots=True) for ax in axes: self.assertEqual(ax.get_ylim(), (-0.5, 4.75)) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 8fdd6087bfbb3..c2a929bab77b5 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -221,7 +221,7 @@ def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, either Kernel Density Estimation or Histogram plot in the diagonal marker : str, optional - Matplotlib marker type, default '.' + Matplotlib marker type, default '.' 
hist_kwds : other plotting keyword arguments To be passed to hist function density_kwds : other plotting keyword arguments @@ -1678,7 +1678,7 @@ def __init__(self, data, **kwargs): kwargs['align'] = kwargs.pop('align', 'center') self.tick_pos = np.arange(len(data)) - + self.log = kwargs.pop('log',False) MPLPlot.__init__(self, data, **kwargs)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/6822
2014-04-06T16:59:52Z
2014-04-06T17:39:42Z
2014-04-06T17:39:42Z
2014-07-16T09:00:46Z
BUG: Fix unconverting of long strings from HDF (GH6166)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6545d21a4b4f8..8e0bdcc6a8942 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -318,6 +318,7 @@ Bug Fixes (:issue:`6762`). - Bug in Makefile where it didn't remove Cython generated C files with ``make clean`` (:issue:`6768`) +- Bug with numpy < 1.7.2 when reading long strings from ``HDFStore`` (:issue:`6166`) pandas 0.13.1 ------------- diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 76f630082aa15..c4a839b4842c5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -40,6 +40,7 @@ import pandas.tslib as tslib from contextlib import contextmanager +from distutils.version import LooseVersion # versioning attribute _version = '0.10.1' @@ -47,7 +48,6 @@ # PY3 encoding if we don't specify _default_encoding = 'UTF-8' - def _ensure_decoded(s): """ if we have bytes, decode them to unicde """ if isinstance(s, np.bytes_): @@ -225,7 +225,6 @@ def _tables(): global _table_file_open_policy_is_strict if _table_mod is None: import tables - from distutils.version import LooseVersion _table_mod = tables # version requirements @@ -4171,7 +4170,6 @@ def _convert_string_array(data, encoding, itemsize=None): data = np.array(data, dtype="S%d" % itemsize) return data - def _unconvert_string_array(data, nan_rep=None, encoding=None): """ deserialize a string array, possibly decoding """ shape = data.shape @@ -4181,9 +4179,15 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): # where the passed encoding is actually None) encoding = _ensure_encoding(encoding) if encoding is not None and len(data): + try: - data = data.astype(string_types).astype(object) - except: + itemsize = lib.max_len_string_array(com._ensure_object(data.ravel())) + if compat.PY3: + dtype = "U{0}".format(itemsize) + else: + dtype = "S{0}".format(itemsize) + data = data.astype(dtype).astype(object) + except (Exception) as e: f = np.vectorize(lambda x: x.decode(encoding), otypes=[np.object]) data 
= f(data) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index c579e8502eb84..9c9d20e51be64 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -179,6 +179,21 @@ def roundtrip(key, obj,**kwargs): finally: safe_remove(self.path) + def test_long_strings(self): + + # GH6166 + # unconversion of long strings was being chopped in earlier + # versions of numpy < 1.7.2 + df = DataFrame({'a': [tm.rands(100) for _ in range(10)]}, + index=[tm.rands(100) for _ in range(10)]) + + with ensure_clean_store(self.path) as store: + store.append('df', df, data_columns=['a']) + + result = store.select('df') + assert_frame_equal(df, result) + + def test_api(self): # GH4584
superseeds #6166
https://api.github.com/repos/pandas-dev/pandas/pulls/6821
2014-04-06T16:58:07Z
2014-04-06T17:35:50Z
2014-04-06T17:35:50Z
2014-06-30T14:04:14Z
BUG: fix replace bug where different dtypes in a nested dict would only replace the first value
diff --git a/doc/source/release.rst b/doc/source/release.rst index 20269f407e0f3..13588a771f3c0 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -336,6 +336,8 @@ Bug Fixes coverted into bools. (:issue:`6806`) - Regression from 0.13 with ``fillna`` and a Series on datetime-like (:issue:`6344`) - Bug in adding np.timedelta64 to DatetimeIndex with tz outputs incorrect result (:issue:`6818`) +- Bug in ``DataFrame.replace()`` where changing a dtype through replacement + would only replace the first occurrence of a value (:issue:`6689`) pandas 0.13.1 ------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8ec4655c0a309..7e5e125034189 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2093,7 +2093,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, strings), non-convertibles get NaN convert_timedeltas : if True, attempt to soft convert timedeltas, if 'coerce', force conversion (and non-convertibles get NaT) - copy : Boolean, if True, return copy even if no copy is necessary + copy : Boolean, if True, return copy even if no copy is necessary (e.g. no conversion was done), default is True. It is meant for internal use, not to be confused with `inplace` kw. 
@@ -2410,13 +2410,14 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, new_data = self._data if is_dictlike(to_replace): if is_dictlike(value): # {'A' : NA} -> {'A' : 0} + res = self if inplace else self.copy() for c, src in compat.iteritems(to_replace): if c in value and c in self: - new_data = new_data.replace(to_replace=src, - value=value[c], - filter=[c], - inplace=inplace, - regex=regex) + res[c] = res[c].replace(to_replace=src, + value=value[c], + inplace=False, + regex=regex) + return None if inplace else res # {'A': NA} -> 0 elif not com.is_list_like(value): @@ -2428,7 +2429,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, inplace=inplace, regex=regex) else: - raise TypeError('Fill value must be scalar, dict, or ' + raise TypeError('value argument must be scalar, dict, or ' 'Series') elif com.is_list_like(to_replace): # [NA, ''] -> [0, 'missing'] diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 0a7100ce091c5..aa8350dfdfe78 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -8231,6 +8231,17 @@ def test_replace_str_to_str_chain(self): with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"): df.replace({'a': dict(zip(astr, bstr))}) + def test_replace_swapping_bug(self): + df = pd.DataFrame({'a': [True, False, True]}) + res = df.replace({'a': {True: 'Y', False: 'N'}}) + expect = pd.DataFrame({'a': ['Y', 'N', 'Y']}) + tm.assert_frame_equal(res, expect) + + df = pd.DataFrame({'a': [0, 1, 0]}) + res = df.replace({'a': {0: 'Y', 1: 'N'}}) + expect = pd.DataFrame({'a': ['Y', 'N', 'Y']}) + tm.assert_frame_equal(res, expect) + def test_combine_multiple_frames_dtypes(self): # GH 2759
closes #6689
https://api.github.com/repos/pandas-dev/pandas/pulls/6820
2014-04-06T16:31:13Z
2014-04-08T22:55:12Z
2014-04-08T22:55:12Z
2022-02-05T00:53:07Z
Unconvert
diff --git a/doc/source/release.rst b/doc/source/release.rst index 4291ed1b6c357..51a3e6aff7d3b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -90,6 +90,7 @@ Bug Fixes - ``HDFStore.remove`` now handles start and stop (:issue:`6177`) - ``HDFStore.select_as_multiple`` handles start and stop the same way as ``select`` (:issue:`6177`) - ``HDFStore.select_as_coordinates`` and ``select_column`` works where clauses that result in filters (:issue:`6177`) +- Bug with numpy < 1.7.2 when reading long strings from ``HDFStore`` (:issue:`6166`) pandas 0.13.1 ------------- diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 85a9cf4ea0f9f..24cec489ce063 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -40,6 +40,7 @@ import pandas.tslib as tslib from contextlib import contextmanager +from distutils.version import LooseVersion # versioning attribute _version = '0.10.1' @@ -47,7 +48,6 @@ # PY3 encoding if we don't specify _default_encoding = 'UTF-8' - def _ensure_decoded(s): """ if we have bytes, decode them to unicde """ if isinstance(s, np.bytes_): @@ -776,8 +776,8 @@ def func(_start, _stop): c = s.read_coordinates(where=where, start=_start, stop=_stop, **kwargs) else: c = None - - objs = [t.read(where=c, start=_start, stop=_stop, + + objs = [t.read(where=c, start=_start, stop=_stop, columns=columns, **kwargs) for t in tbls] # concat and return @@ -4165,7 +4165,6 @@ def _convert_string_array(data, encoding, itemsize=None): data = np.array(data, dtype="S%d" % itemsize) return data - def _unconvert_string_array(data, nan_rep=None, encoding=None): """ deserialize a string array, possibly decoding """ shape = data.shape @@ -4176,7 +4175,12 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): encoding = _ensure_encoding(encoding) if encoding is not None and len(data): try: - data = data.astype(string_types).astype(object) + itemsize = lib.max_len_string_array(com._ensure_object(data.ravel())) + if compat.PY3: + dtype = 
"U{0}".format(itemsize) + else: + dtype = "S{0}".format(itemsize) + data = data.astype(dtype).astype(object) except: f = np.vectorize(lambda x: x.decode(encoding), otypes=[np.object]) data = f(data) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 3c5662a6fe268..8fd710e024b2c 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -176,6 +176,24 @@ def roundtrip(key, obj,**kwargs): finally: safe_remove(self.path) + def test_long_strings(self): + df = DataFrame({'a': [tm.rands(100) for _ in range(10)]}, + index=[tm.rands(100) for _ in range(10)]) + + with ensure_clean_store(self.path) as store: + store.append('df', df, data_columns=['a']) + assert_frame_equal(store['df'], df) + + # test with an encoding + if LooseVersion(tables.__version__) < '3.0.0': + raise nose.SkipTest('tables version does not support proper encoding') + if sys.byteorder != 'little': + raise nose.SkipTest('system byteorder is not little') + + with ensure_clean_store(self.path) as store: + store.append('df', df, data_columns=['a'], encoding='ascii') + assert_frame_equal(store['df'], df) + def test_api(self): # GH4584 @@ -2199,7 +2217,7 @@ def test_remove_startstop(self): # GH #4835 and #6177 with ensure_clean_store(self.path) as store: - + wp = tm.makePanel() # start @@ -2246,7 +2264,7 @@ def test_remove_startstop(self): result = store.select('wp6') expected = wp.reindex(major_axis=wp.major_axis) assert_panel_equal(result, expected) - + # with where date = wp.major_axis.take(np.arange(0,30,3)) crit = Term('major_axis=date') @@ -2256,7 +2274,7 @@ def test_remove_startstop(self): result = store.select('wp7') expected = wp.reindex(major_axis=wp.major_axis-wp.major_axis[np.arange(0,20,3)]) assert_panel_equal(result, expected) - + def test_remove_crit(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/6819
2014-04-06T16:02:19Z
2014-04-06T16:34:00Z
null
2014-04-06T16:34:00Z
BUG: adding np.timedelta64 to DatetimeIndex with tz outputs incorrect
diff --git a/doc/source/release.rst b/doc/source/release.rst index a5d41b9f6a4af..a9b65b92233c4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -315,7 +315,6 @@ Bug Fixes as regexs even when ``regex=False`` (:issue:`6777`). - Bug in timedelta ops on 32-bit platforms (:issue:`6808`) - Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) -<<<<<<< HEAD - Bug in expressions.py where numexpr would try to evaluate arithmetic ops (:issue:`6762`). - Bug in Makefile where it didn't remove Cython generated C files with ``make @@ -324,6 +323,7 @@ Bug Fixes - Bug in ``DataFrame._reduce`` where non bool-like (0/1) integers were being coverted into bools. (:issue:`6806`) - Regression from 0.13 with ``fillna`` and a Series on datetime-like (:issue:`6344`) +- Bug in adding np.timedelta64 to DatetimeIndex with tz outputs incorrect result (:issue:`6818`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index c58447acec621..c5915f4bddcbc 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -624,17 +624,15 @@ def _add_delta(self, delta): if isinstance(delta, (Tick, timedelta)): inc = offsets._delta_to_nanoseconds(delta) new_values = (self.asi8 + inc).view(_NS_DTYPE) - tz = 'UTC' if self.tz is not None else None - result = DatetimeIndex(new_values, tz=tz, freq='infer') - utc = _utc() - if self.tz is not None and self.tz is not utc: - result = result.tz_convert(self.tz) elif isinstance(delta, np.timedelta64): new_values = self.to_series() + delta - result = DatetimeIndex(new_values, tz=self.tz, freq='infer') else: new_values = self.astype('O') + delta - result = DatetimeIndex(new_values, tz=self.tz, freq='infer') + tz = 'UTC' if self.tz is not None else None + result = DatetimeIndex(new_values, tz=tz, freq='infer') + utc = _utc() + if self.tz is not None and self.tz is not utc: + result = result.tz_convert(self.tz) return result def __contains__(self, key): diff --git 
a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index dda722366e53e..db0690c5acfe9 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -31,6 +31,7 @@ from pandas.core.datetools import BDay import pandas.core.common as com +from pandas import _np_version_under1p7 def _skip_if_no_pytz(): try: @@ -961,6 +962,21 @@ def test_tzaware_offset(self): offset = dates + offsets.Hour(5) self.assertEqual(dates[0] + offsets.Hour(5), offset[0]) + # GH 6818 + for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']: + dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H') + expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00', + '2010-11-01 07:00'], freq='H', tz=tz) + + offset = dates + offsets.Hour(5) + self.assert_(offset.equals(expected)) + if not _np_version_under1p7: + offset = dates + np.timedelta64(5, 'h') + self.assert_(offset.equals(expected)) + offset = dates + timedelta(hours=5) + self.assert_(offset.equals(expected)) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
``` >>> idx = pd.date_range(start='2010-11-02 01:00:00', periods=3, tz='US/Pacific', freq='1H') >>> idx + offsets.Hour(3) [2010-11-02 04:00:00-07:00, ..., 2010-11-02 06:00:00-07:00] Length: 3, Freq: H, Timezone: US/Pacific >>> idx + datetime.timedelta(hours=3) [2010-11-02 04:00:00-07:00, ..., 2010-11-02 06:00:00-07:00] Length: 3, Freq: H, Timezone: US/Pacific >>> idx + np.timedelta64(3, 'h') # incorrect [2010-11-02 11:00:00-07:00, ..., 2010-11-02 13:00:00-07:00] Length: 3, Freq: H, Timezone: US/Pacific ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6818
2014-04-06T13:59:40Z
2014-04-07T12:37:33Z
2014-04-07T12:37:32Z
2014-06-26T02:50:17Z
ENH: Use Welford's method in stats.moments.rolling_var
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7824a69c92561..c494db2ae91c4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -276,6 +276,7 @@ Improvements to existing features - Add option to turn off escaping in ``DataFrame.to_latex`` (:issue:`6472`) - Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) +- ``pd.stats.moments.rolling_var`` now uses Welford's method for increased numerical stability (:issue:`6817`) .. _release.bug_fixes-0.14.0: diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 27e25c3954dad..bba6b46c52e37 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -1122,7 +1122,10 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1): # Rolling variance def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1): - cdef double val, prev, sum_x = 0, sum_xx = 0, nobs = 0 + """ + Numerically stable implementation using Welford's method. 
+ """ + cdef double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta cdef Py_ssize_t i cdef Py_ssize_t N = len(input) @@ -1130,48 +1133,71 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1): minp = _check_minp(win, minp, N) - for i from 0 <= i < minp - 1: + for i from 0 <= i < win: val = input[i] # Not NaN if val == val: nobs += 1 - sum_x += val - sum_xx += val * val + delta = (val - mean_x) + mean_x += delta / nobs + ssqdm_x += delta * (val - mean_x) - output[i] = NaN + if nobs >= minp: + #pathological case + if nobs == 1: + val = 0 + else: + val = ssqdm_x / (nobs - ddof) + if val < 0: + val = 0 + else: + val = NaN - for i from minp - 1 <= i < N: + output[i] = val + + for i from win <= i < N: val = input[i] + prev = input[i - win] if val == val: - nobs += 1 - sum_x += val - sum_xx += val * val - - if i > win - 1: - prev = input[i - win] if prev == prev: - sum_x -= prev - sum_xx -= prev * prev - nobs -= 1 + delta = val - prev + prev -= mean_x + mean_x += delta / nobs + val -= mean_x + ssqdm_x += (val + prev) * delta + else: + nobs += 1 + delta = (val - mean_x) + mean_x += delta / nobs + ssqdm_x += delta * (val - mean_x) + elif prev == prev: + nobs -= 1 + if nobs: + delta = (prev - mean_x) + mean_x -= delta / nobs + ssqdm_x -= delta * (prev - mean_x) + else: + mean_x = 0 + ssqdm_x = 0 if nobs >= minp: - # pathological case + #pathological case if nobs == 1: - output[i] = 0 - continue - - val = (nobs * sum_xx - sum_x * sum_x) / (nobs * (nobs - ddof)) - if val < 0: val = 0 - - output[i] = val + else: + val = ssqdm_x / (nobs - ddof) + if val < 0: + val = 0 else: - output[i] = NaN + val = NaN + + output[i] = val return output + #------------------------------------------------------------------------------- # Rolling skewness diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 246037c7d7009..42da19f1a241d 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -751,7 +751,7 @@ def rolling_window(arg, window=None, 
win_type=None, min_periods=None, * ``gaussian`` (needs std) * ``general_gaussian`` (needs power, width) * ``slepian`` (needs width). - + By default, the result is set to the right edge of the window. This can be changed to the center of the window by setting ``center=True``. @@ -978,7 +978,7 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, Returns ------- y : type of input argument - + Notes ----- The `freq` keyword is used to conform time series data to a specified diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 8c9eb080cfc61..06e7484bbd536 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -295,7 +295,8 @@ def test_rolling_std_neg_sqrt(self): def test_rolling_var(self): self._check_moment_func(mom.rolling_var, - lambda x: np.var(x, ddof=1)) + lambda x: np.var(x, ddof=1), + test_stable=True) self._check_moment_func(functools.partial(mom.rolling_var, ddof=0), lambda x: np.var(x, ddof=0)) @@ -349,13 +350,15 @@ def _check_moment_func(self, func, static_comp, window=50, has_center=True, has_time_rule=True, preserve_nan=True, - fill_value=None): + fill_value=None, + test_stable=False): self._check_ndarray(func, static_comp, window=window, has_min_periods=has_min_periods, preserve_nan=preserve_nan, has_center=has_center, - fill_value=fill_value) + fill_value=fill_value, + test_stable=test_stable) self._check_structures(func, static_comp, has_min_periods=has_min_periods, @@ -367,7 +370,8 @@ def _check_ndarray(self, func, static_comp, window=50, has_min_periods=True, preserve_nan=True, has_center=True, - fill_value=None): + fill_value=None, + test_stable=False): result = func(self.arr, window) assert_almost_equal(result[-1], @@ -425,6 +429,12 @@ def _check_ndarray(self, func, static_comp, window=50, self.assert_(np.isnan(expected[-5])) self.assert_(np.isnan(result[-14])) + if test_stable: + result = func(self.arr + 1e9, window) + assert_almost_equal(result[-1], + 
static_comp(self.arr[-50:] + 1e9)) + + def _check_structures(self, func, static_comp, has_min_periods=True, has_time_rule=True, has_center=True,
This PR implements a modified version of Welford's method to compute the rolling variance. Instead of keeping track of the sum and sum of the squares of the items in the window, it tracks the mean and the sum of squared differences from the mean. This turns out to be (much) more numerically stable. The formulas to update these two variables when adding or removing an item from the sequence are well known, see e.g. http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance The formulas used when both adding one and removing one item I have not seen explicitly worked out anywhere, but are not too hard to come up with if you put pen to (a lot of) paper.
https://api.github.com/repos/pandas-dev/pandas/pulls/6817
2014-04-06T08:47:22Z
2014-04-22T21:58:47Z
2014-04-22T21:58:47Z
2014-09-16T14:33:52Z
DEPR: Deprecate DateRange
diff --git a/bench/bench_dense_to_sparse.py b/bench/bench_dense_to_sparse.py index f76daab5d8289..e1dcd3456e88d 100644 --- a/bench/bench_dense_to_sparse.py +++ b/bench/bench_dense_to_sparse.py @@ -2,7 +2,7 @@ K = 100 N = 100000 -rng = DateRange('1/1/2000', periods=N, offset=datetools.Minute()) +rng = DatetimeIndex('1/1/2000', periods=N, offset=datetools.Minute()) rng2 = np.asarray(rng).astype('M8[us]').astype('i8') diff --git a/bench/io_roundtrip.py b/bench/io_roundtrip.py index fa4e0755f40df..d87da0ec6321a 100644 --- a/bench/io_roundtrip.py +++ b/bench/io_roundtrip.py @@ -6,7 +6,7 @@ import la import pandas from pandas.compat import range -from pandas import datetools, DateRange +from pandas import datetools, DatetimeIndex def timeit(f, iterations): @@ -23,7 +23,7 @@ def rountrip_archive(N, K=50, iterations=10): arr = np.random.randn(N, K) # lar = la.larry(arr) dma = pandas.DataFrame(arr, - DateRange('1/1/2000', periods=N, + DatetimeIndex('1/1/2000', periods=N, offset=datetools.Minute())) dma[201] = 'bar' diff --git a/doc/source/release.rst b/doc/source/release.rst index 7188851214f7f..64604e918688d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -180,6 +180,8 @@ Deprecations Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- Remove :class:`DateRange` in favor of :class:`DatetimeIndex` (:issue:`6816`) + - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) Experimental Features diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 23ab8f10116c1..e53a1e5126db1 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -326,6 +326,8 @@ Prior Version Deprecations/Changes Therse are prior version deprecations that are taking effect as of 0.14.0. 
+- Remove :class:`DateRange` in favor of :class:`DatetimeIndex` (:issue:`6816`) + - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) Deprecations diff --git a/examples/regressions.py b/examples/regressions.py index 6351c6730d838..bc58408a6842b 100644 --- a/examples/regressions.py +++ b/examples/regressions.py @@ -3,13 +3,13 @@ import numpy as np -from pandas.core.api import Series, DataFrame, DateRange +from pandas.core.api import Series, DataFrame, DatetimeIndex from pandas.stats.api import ols N = 100 start = datetime(2009, 9, 2) -dateRange = DateRange(start, periods=N) +dateRange = DatetimeIndex(start, periods=N) def makeDataFrame(): diff --git a/pandas/core/api.py b/pandas/core/api.py index 3ebcb46cd98fa..b7e02917cd476 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -28,7 +28,6 @@ from pandas.tseries.period import Period, PeriodIndex # legacy -from pandas.core.daterange import DateRange # deprecated from pandas.core.common import save, load # deprecated, remove in 0.13 import pandas.core.datetools as datetools diff --git a/pandas/core/daterange.py b/pandas/core/daterange.py deleted file mode 100644 index bdaf546789c39..0000000000000 --- a/pandas/core/daterange.py +++ /dev/null @@ -1,48 +0,0 @@ -# pylint: disable=E1101,E1103 - -from pandas.core.index import Index -from pandas.tseries.index import DatetimeIndex -import pandas.core.datetools as datetools - - -#----------------------------------------------------------------------------- -# DateRange class - -class DateRange(Index): - - """Deprecated - """ - - offset = tzinfo = None - - def __new__(cls, start=None, end=None, periods=None, - offset=datetools.bday, time_rule=None, - tzinfo=None, name=None, **kwds): - - import warnings - warnings.warn("DateRange is deprecated, use DatetimeIndex instead", - FutureWarning) - - if time_rule is None: - time_rule = kwds.get('timeRule') - if time_rule is not None: - offset = datetools.get_offset(time_rule) - - return DatetimeIndex(start=start, 
end=end, - periods=periods, freq=offset, - tzinfo=tzinfo, name=name, **kwds) - - def __setstate__(self, aug_state): - """Necessary for making this object picklable""" - index_state = aug_state[:1] - offset = aug_state[1] - - # for backwards compatibility - if len(aug_state) > 2: - tzinfo = aug_state[2] - else: # pragma: no cover - tzinfo = None - - self.offset = offset - self.tzinfo = tzinfo - Index.__setstate__(self, *index_state) diff --git a/pandas/core/index.py b/pandas/core/index.py index bae4a2c455ec6..c162365a39bf8 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -3888,26 +3888,6 @@ def _sanitize_and_check(indexes): return indexes, 'array' -def _handle_legacy_indexes(indexes): - from pandas.core.daterange import DateRange - from pandas.tseries.index import DatetimeIndex - - converted = [] - for index in indexes: - if isinstance(index, DateRange): - if len(index) == 0: - kwds = dict(data=[], freq=index.offset, tz=index.tzinfo) - else: - kwds = dict(start=index[0], end=index[-1], - freq=index.offset, tz=index.tzinfo) - - index = DatetimeIndex(**kwds) - - converted.append(index) - - return converted - - def _get_consensus_names(indexes): # find the non-none names, need to tupleify to make diff --git a/pandas/core/internals.py b/pandas/core/internals.py index e28d4029d4fa0..0560480a9c2db 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -12,8 +12,7 @@ _NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like, ABCSparseSeries, _infer_dtype_from_scalar, _values_from_object, _is_null_datelike_scalar) -from pandas.core.index import (Index, MultiIndex, _ensure_index, - _handle_legacy_indexes) +from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_convert_indices, _length_of_indexer) import pandas.core.common as com from pandas.sparse.array import _maybe_to_sparse, SparseArray @@ -2369,7 +2368,6 @@ def __setstate__(self, state): ax_arrays, bvalues, bitems = state[:3] self.axes = 
[_ensure_index(ax) for ax in ax_arrays] - self.axes = _handle_legacy_indexes(self.axes) blocks = [] for values, items in zip(bvalues, bitems): diff --git a/pandas/core/series.py b/pandas/core/series.py index 4ab7855ec2f84..763d14b629508 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -26,7 +26,7 @@ _ensure_object, SettingWithCopyError) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, - _ensure_index, _handle_legacy_indexes) + _ensure_index) from pandas.core.indexing import ( _check_bool_indexer, _is_index_slice, _maybe_convert_indices) @@ -426,7 +426,6 @@ def _unpickle_series_compat(self, state): index, name = own_state[0], None if len(own_state) > 1: name = own_state[1] - index = _handle_legacy_indexes([index])[0] # recreate self._data = SingleBlockManager(data, index, fastpath=True) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 64da6f76f3697..1bcf6c0b4431a 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -15,7 +15,6 @@ isnull, date_range, Timestamp, Period, DatetimeIndex, Int64Index, to_datetime, bdate_range, Float64Index) -from pandas.core.daterange import DateRange import pandas.core.datetools as datetools import pandas.tseries.offsets as offsets import pandas.tseries.tools as tools diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py index 3155f0f6e1a80..f9c031576f554 100644 --- a/pandas/tseries/tests/test_timeseries_legacy.py +++ b/pandas/tseries/tests/test_timeseries_legacy.py @@ -13,7 +13,6 @@ isnull, date_range, Timestamp, DatetimeIndex, Int64Index, to_datetime, bdate_range) -from pandas.core.daterange import DateRange import pandas.core.datetools as datetools import pandas.tseries.offsets as offsets import pandas.tseries.frequencies as fmod @@ -284,14 +283,6 @@ def test_inferTimeRule(self): self.assertRaises(Exception, inferTimeRule, index1[:2]) 
self.assertRaises(Exception, inferTimeRule, index3) - def test_time_rule(self): - result = DateRange('1/1/2000', '1/30/2000', time_rule='WEEKDAY') - result2 = DateRange('1/1/2000', '1/30/2000', timeRule='WEEKDAY') - expected = date_range('1/1/2000', '1/30/2000', freq='B') - - self.assert_(result.equals(expected)) - self.assert_(result2.equals(expected)) - def tearDown(self): sys.stderr = sys.__stderr__ diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index dda722366e53e..245a21eb2c112 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -12,7 +12,6 @@ from pandas import DatetimeIndex, Int64Index, to_datetime, NaT -from pandas.core.daterange import DateRange import pandas.core.datetools as datetools import pandas.tseries.offsets as offsets from pandas.tseries.index import bdate_range, date_range diff --git a/scripts/bench_join.py b/scripts/bench_join.py index c9f2475566519..5223aac40d63b 100644 --- a/scripts/bench_join.py +++ b/scripts/bench_join.py @@ -12,8 +12,8 @@ a = np.arange(n, dtype=np.int64) b = np.arange(n * pct_overlap, n * (1 + pct_overlap), dtype=np.int64) -dr1 = DateRange('1/1/2000', periods=n, offset=datetools.Minute()) -dr2 = DateRange( +dr1 = DatetimeIndex('1/1/2000', periods=n, offset=datetools.Minute()) +dr2 = DatetimeIndex( dr1[int(pct_overlap * n)], periods=n, offset=datetools.Minute(2)) aobj = a.astype(object) diff --git a/scripts/groupby_speed.py b/scripts/groupby_speed.py index 4e60c34556968..34f293d5008c6 100644 --- a/scripts/groupby_speed.py +++ b/scripts/groupby_speed.py @@ -1,12 +1,12 @@ from __future__ import print_function from pandas import * -rng = DateRange('1/3/2011', '11/30/2011', offset=datetools.Minute()) +rng = DatetimeIndex('1/3/2011', '11/30/2011', offset=datetools.Minute()) df = DataFrame(np.random.randn(len(rng), 5), index=rng, columns=list('OHLCV')) -rng5 = DateRange('1/3/2011', '11/30/2011', offset=datetools.Minute(5)) +rng5 = 
DatetimeIndex('1/3/2011', '11/30/2011', offset=datetools.Minute(5)) gp = rng5.asof grouped = df.groupby(gp) diff --git a/scripts/hdfstore_panel_perf.py b/scripts/hdfstore_panel_perf.py index 06c2a15bdc7c2..66b0b52444bc1 100644 --- a/scripts/hdfstore_panel_perf.py +++ b/scripts/hdfstore_panel_perf.py @@ -6,7 +6,7 @@ panel = Panel(np.random.randn(i, j, k), items=[rands(10) for _ in range(i)], - major_axis=DateRange('1/1/2000', periods=j, + major_axis=DatetimeIndex('1/1/2000', periods=j, offset=datetools.Minute()), minor_axis=[rands(10) for _ in range(k)]) diff --git a/scripts/preepoch_test.py b/scripts/preepoch_test.py index 59066ba832cd0..36a3d768e671f 100644 --- a/scripts/preepoch_test.py +++ b/scripts/preepoch_test.py @@ -7,7 +7,7 @@ def panda_test(): # generate some data data = np.random.rand(50, 5) # generate some dates - dates = DateRange('1/1/1969', periods=50) + dates = DatetimeIndex('1/1/1969', periods=50) # generate column headings cols = ['A', 'B', 'C', 'D', 'E'] diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py index 2cfdffdc38541..cb13d63cd726c 100644 --- a/vb_suite/index_object.py +++ b/vb_suite/index_object.py @@ -11,7 +11,7 @@ # intersection, union setup = common_setup + """ -rng = DateRange('1/1/2000', periods=10000, offset=datetools.Minute()) +rng = DatetimeIndex('1/1/2000', periods=10000, offset=datetools.Minute()) if rng.dtype == object: rng = rng.view(Index) else: diff --git a/vb_suite/panel_ctor.py b/vb_suite/panel_ctor.py index 07d7326ecb879..e304a48e5d73f 100644 --- a/vb_suite/panel_ctor.py +++ b/vb_suite/panel_ctor.py @@ -11,7 +11,7 @@ setup_same_index = common_setup + """ # create 100 dataframes with the same index -dr = np.asarray(DateRange(datetime(1990,1,1), datetime(2012,1,1))) +dr = np.asarray(DatetimeIndex(datetime(1990,1,1), datetime(2012,1,1))) data_frames = {} for x in xrange(100): df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), @@ -27,7 +27,7 @@ setup_equiv_indexes = common_setup + """ data_frames = {} for x in 
xrange(100): - dr = np.asarray(DateRange(datetime(1990,1,1), datetime(2012,1,1))) + dr = np.asarray(DatetimeIndex(datetime(1990,1,1), datetime(2012,1,1))) df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), "c": [2]*len(dr)}, index=dr) data_frames[x] = df diff --git a/vb_suite/plotting.py b/vb_suite/plotting.py index 735ed78c1441e..88d272e7be4b3 100644 --- a/vb_suite/plotting.py +++ b/vb_suite/plotting.py @@ -7,7 +7,7 @@ from pandas import date_range except ImportError: def date_range(start=None, end=None, periods=None, freq=None): - return DateRange(start, end, periods=periods, offset=freq) + return DatetimeIndex(start, end, periods=periods, offset=freq) """ diff --git a/vb_suite/reindex.py b/vb_suite/reindex.py index de0f397334e94..ca82ee9b82649 100644 --- a/vb_suite/reindex.py +++ b/vb_suite/reindex.py @@ -18,7 +18,7 @@ #---------------------------------------------------------------------- setup = common_setup + """ -rng = DateRange('1/1/1970', periods=10000, offset=datetools.Minute()) +rng = DatetimeIndex('1/1/1970', periods=10000, offset=datetools.Minute()) df = DataFrame(np.random.rand(10000, 10), index=rng, columns=range(10)) df['foo'] = 'bar' diff --git a/vb_suite/replace.py b/vb_suite/replace.py index 517e2da599694..50ab9f429a471 100644 --- a/vb_suite/replace.py +++ b/vb_suite/replace.py @@ -9,7 +9,7 @@ try: rng = date_range('1/1/2000', periods=N, freq='min') except NameError: - rng = DateRange('1/1/2000', periods=N, offset=datetools.Minute()) + rng = DatetimeIndex('1/1/2000', periods=N, offset=datetools.Minute()) date_range = DateRange ts = Series(np.random.randn(N), index=rng) diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index ccd4bd7ae371a..3ea970baeff7a 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -8,9 +8,9 @@ try: rng = date_range('1/1/2000', periods=N, freq='min') except NameError: - rng = DateRange('1/1/2000', periods=N, offset=datetools.Minute()) + rng = DatetimeIndex('1/1/2000', periods=N, 
offset=datetools.Minute()) def date_range(start=None, end=None, periods=None, freq=None): - return DateRange(start, end, periods=periods, offset=freq) + return DatetimeIndex(start, end, periods=periods, offset=freq) if hasattr(Series, 'convert'): Series.resample = Series.convert
Deprecate DateRange in favor of DatetimeIndex, perfromed in commit 6fe2db57. Deprecation warning was first given in v0.8. Related: #6641, #6813
https://api.github.com/repos/pandas-dev/pandas/pulls/6816
2014-04-06T02:28:26Z
2014-04-08T23:05:55Z
null
2014-06-14T20:22:26Z
BUG: DataFrame._reduce was converting integers to strings in mixed-type case.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6545d21a4b4f8..c57c4560a75e9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -318,6 +318,8 @@ Bug Fixes (:issue:`6762`). - Bug in Makefile where it didn't remove Cython generated C files with ``make clean`` (:issue:`6768`) +- Bug in ``DataFrame._reduce`` where non bool-like (0/1) integers were being + coverted into bools. (:issue:`6806`) pandas 0.13.1 ------------- diff --git a/pandas/core/common.py b/pandas/core/common.py index 84d22a31531f8..18a3dba1a44a4 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -840,6 +840,9 @@ def conv(r, dtype): elif dtype == _TD_DTYPE: r = _coerce_scalar_to_timedelta_type(r) elif dtype == np.bool_: + # messy. non 0/1 integers do not get converted. + if is_integer(r) and r not in [0,1]: + return int(r) r = bool(r) elif dtype.kind == 'f': r = float(r) @@ -850,7 +853,7 @@ def conv(r, dtype): return r - return np.array([conv(r, dtype) for r, dtype in zip(result, dtypes)]) + return [conv(r, dtype) for r, dtype in zip(result, dtypes)] def _infer_dtype_from_scalar(val): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 877e3839ee11f..39e8b4db94994 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10476,6 +10476,18 @@ def test_bool_describe_in_mixed_frame(self): assert_almost_equal(bool_describe['mean'], 0.4) assert_almost_equal(bool_describe['50%'], 0) + def test_reduce_mixed_frame(self): + # GH 6806 + df = DataFrame({ + 'bool_data': [True, True, False, False, False], + 'int_data': [10, 20, 30, 40, 50], + 'string_data': ['a', 'b', 'c', 'd', 'e'], + }) + df.reindex(columns=['bool_data', 'int_data', 'string_data']) + test = df.sum(axis=0) + assert_almost_equal(test.values, [2, 150, 'abcde']) + assert_series_equal(test, df.T.sum(axis=1)) + def test_count(self): f = lambda s: notnull(s).sum() self._check_stat_op('count', f,
closes #6806
https://api.github.com/repos/pandas-dev/pandas/pulls/6814
2014-04-06T00:45:11Z
2014-04-06T18:13:51Z
2014-04-06T18:13:51Z
2014-06-23T13:27:19Z
Remove number of deprecated parameters/functions/classes [fix #6641]
diff --git a/doc/source/release.rst b/doc/source/release.rst index 8bf6a8d7b9488..b541ef4d2dd4b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -180,6 +180,28 @@ Prior Version Deprecations/Changes - Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) +- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`6641`) + +- Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, + :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these function + encode in unicode by default (:issue:`6641`) + +- Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and + :meth:`DataFrame.to_string` (:issue:`6641`) + +- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`6641`) + +- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`6641`) + +- Remove ``name`` keyword from :func:`get_data_yahoo` and + :func:`get_data_google` (:issue:`6641`) + +- Remove ``offset`` keyword from :class:`DatetimeIndex` constructor + (:issue:`6641`) + +- Remove ``time_rule`` from several rolling-moment statistical functions, such + as :func:`rolling_sum` (:issue:`6641`) + Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 23ab8f10116c1..335546a983e6f 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -328,6 +328,29 @@ Therse are prior version deprecations that are taking effect as of 0.14.0. 
- Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) +- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`6641`) + +- Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, + :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these function + encode in unicode by default (:issue:`6641`) + +- Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and + :meth:`DataFrame.to_string` (:issue:`6641`) + +- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`6641`) + +- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`6641`) + +- Remove ``name`` keyword from :func:`get_data_yahoo` and + :func:`get_data_google` (:issue:`6641`) + +- Remove ``offset`` keyword from :class:`DatetimeIndex` constructor + (:issue:`6641`) + +- Remove ``time_rule`` from several rolling-moment statistical functions, such + as :func:`rolling_sum` (:issue:`6641`) + + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/format.py b/pandas/core/format.py index 636b3f452a20c..a7cbf2c70a5d3 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -358,15 +358,10 @@ def _to_str_columns(self): return strcols - def to_string(self, force_unicode=None): + def to_string(self): """ Render a DataFrame to a console-friendly tabular output. """ - import warnings - if force_unicode is not None: # pragma: no cover - warnings.warn( - "force_unicode is deprecated, it will have no effect", - FutureWarning) frame = self.frame @@ -423,8 +418,7 @@ def _join_multiline(self, *strcols): st = ed return '\n\n'.join(str_lst) - def to_latex(self, force_unicode=None, column_format=None, - longtable=False): + def to_latex(self, column_format=None, longtable=False): """ Render a DataFrame to a LaTeX tabular/longtable environment output. 
""" @@ -435,12 +429,6 @@ def get_col_type(dtype): else: return 'l' - import warnings - if force_unicode is not None: # pragma: no cover - warnings.warn( - "force_unicode is deprecated, it will have no effect", - FutureWarning) - frame = self.frame if len(frame.columns) == 0 or len(frame.index) == 0: @@ -2139,7 +2127,7 @@ def __call__(self, num): return formatted # .strip() -def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False): +def set_eng_float_format(accuracy=3, use_eng_prefix=False): """ Alter default behavior on how float is formatted in DataFrame. Format float in engineering format. By accuracy, we mean the number of @@ -2147,11 +2135,6 @@ def set_eng_float_format(precision=None, accuracy=3, use_eng_prefix=False): See also EngFormatter. """ - if precision is not None: # pragma: no cover - import warnings - warnings.warn("'precision' parameter in set_eng_float_format is " - "being renamed to 'accuracy'", FutureWarning) - accuracy = precision set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) set_option("display.column_space", max(12, accuracy + 9)) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8875d2fdfb39a..a5d93f09c9e07 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1071,7 +1071,7 @@ def to_panel(self): @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, - mode='w', nanRep=None, encoding=None, quoting=None, + mode='w', encoding=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, **kwds): @@ -1128,10 +1128,6 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, Format string for datetime objects cols : kwarg only alias of columns [deprecated] """ - if nanRep is not None: # pragma: no cover - warnings.warn("nanRep is 
deprecated, use na_rep", - FutureWarning) - na_rep = nanRep formatter = fmt.CSVFormatter(self, path_or_buf, line_terminator=line_terminator, @@ -1275,21 +1271,12 @@ def to_stata( @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, - float_format=None, sparsify=None, nanRep=None, - index_names=True, justify=None, force_unicode=None, - line_width=None, max_rows=None, max_cols=None, + float_format=None, sparsify=None, index_names=True, + justify=None, line_width=None, max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame to a console-friendly tabular output. """ - if force_unicode is not None: # pragma: no cover - warnings.warn("force_unicode is deprecated, it will have no " - "effect", FutureWarning) - - if nanRep is not None: # pragma: no cover - warnings.warn("nanRep is deprecated, use na_rep", - FutureWarning) - na_rep = nanRep if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", @@ -1318,9 +1305,8 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, - justify=None, force_unicode=None, bold_rows=True, - classes=None, escape=True, max_rows=None, max_cols=None, - show_dimensions=False): + justify=None, bold_rows=True, classes=None, escape=True, + max_rows=None, max_cols=None, show_dimensions=False): """ Render a DataFrame as an HTML table. 
@@ -1341,10 +1327,6 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, """ - if force_unicode is not None: # pragma: no cover - warnings.warn("force_unicode is deprecated, it will have no " - "effect", FutureWarning) - if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) @@ -1372,7 +1354,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, - bold_rows=True, force_unicode=None, longtable=False): + bold_rows=True, longtable=False): """ Render a DataFrame to a tabular environment table. You can splice this into a LaTeX document. Requires \\usepackage(booktabs}. @@ -1387,10 +1369,6 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, """ - if force_unicode is not None: # pragma: no cover - warnings.warn("force_unicode is deprecated, it will have no " - "effect", FutureWarning) - if colSpace is not None: # pragma: no cover warnings.warn("colSpace is deprecated, use col_space", FutureWarning) diff --git a/pandas/core/series.py b/pandas/core/series.py index 4ab7855ec2f84..bf6d96848b41b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -881,7 +881,7 @@ def _repr_footer(self): str(self.dtype.name)) def to_string(self, buf=None, na_rep='NaN', float_format=None, - nanRep=None, length=False, dtype=False, name=False): + length=False, dtype=False, name=False): """ Render a string representation of the Series @@ -906,10 +906,6 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, formatted : string (if not buffer passed) """ - if nanRep is not None: # pragma: no cover - warnings.warn("nanRep is deprecated, use na_rep", FutureWarning) - na_rep = nanRep - the_repr = self._get_repr(float_format=float_format, na_rep=na_rep, length=length, 
dtype=dtype, name=name) diff --git a/pandas/io/data.py b/pandas/io/data.py index dc5dd2b4b7d80..e875e8aa3c6db 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -338,11 +338,7 @@ def _dl_mult_symbols(symbols, start, end, chunksize, retry_count, pause, def _get_data_from(symbols, start, end, retry_count, pause, adjust_price, - ret_index, chunksize, source, name): - if name is not None: - warnings.warn("Arg 'name' is deprecated, please use 'symbols' " - "instead.", FutureWarning) - symbols = name + ret_index, chunksize, source): src_fn = _source_functions[source] @@ -367,7 +363,7 @@ def _get_data_from(symbols, start, end, retry_count, pause, adjust_price, def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, - chunksize=25, name=None): + chunksize=25): """ Returns DataFrame/Panel of historical stock prices from symbols, over date range, start to end. To avoid being penalized by Yahoo! Finance servers, @@ -402,12 +398,12 @@ def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, hist_data : DataFrame (str) or Panel (array-like object, DataFrame) """ return _get_data_from(symbols, start, end, retry_count, pause, - adjust_price, ret_index, chunksize, 'yahoo', name) + adjust_price, ret_index, chunksize, 'yahoo') def get_data_google(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, - chunksize=25, name=None): + chunksize=25): """ Returns DataFrame/Panel of historical stock prices from symbols, over date range, start to end. 
To avoid being penalized by Google Finance servers, @@ -436,7 +432,7 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3, hist_data : DataFrame (str) or Panel (array-like object, DataFrame) """ return _get_data_from(symbols, start, end, retry_count, pause, - adjust_price, ret_index, chunksize, 'google', name) + adjust_price, ret_index, chunksize, 'google') _FRED_URL = "http://research.stlouisfed.org/fred2/series/" diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 76f630082aa15..8d2ca794be6b8 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -685,13 +685,6 @@ def select_as_coordinates( return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs) - def unique(self, key, column, **kwargs): - warnings.warn("unique(key,column) is deprecated\n" - "use select_column(key,column).unique() instead", - FutureWarning) - return self.get_storer(key).read_column(column=column, - **kwargs).unique() - def select_column(self, key, column, **kwargs): """ return a single column from the table. This is generally only useful to diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 523f055eaf605..f98c06a4d63a1 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -54,8 +54,7 @@ (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Set the labels at the center of the window. 
""" @@ -83,7 +82,6 @@ beginning) freq : None or string alias / date offset object, default=None Frequency to conform to before computing statistic - time_rule is a legacy alias for freq adjust : boolean, default True Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings (viewing EWMA as a moving average) @@ -109,8 +107,7 @@ (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. """ @@ -151,7 +148,7 @@ """ -def rolling_count(arg, window, freq=None, center=False, time_rule=None): +def rolling_count(arg, window, freq=None, center=False): """ Rolling count of number of non-NaN observations inside provided window. @@ -163,8 +160,7 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): calculating the statistic. freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq` + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window @@ -178,7 +174,7 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). 
""" - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) window = min(window, len(arg)) return_hook, values = _process_data_structure(arg, kill_inf=False) @@ -197,7 +193,7 @@ def rolling_count(arg, window, freq=None, center=False, time_rule=None): _roll_kw+_pairwise_kw, _flex_retval, _roll_notes) @Appender(_doc_template) def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, - center=False, time_rule=None, pairwise=None): + center=False, pairwise=None): if window is None and isinstance(arg2, (int, float)): window = arg2 arg2 = arg1 @@ -205,8 +201,8 @@ def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None, elif arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise # only default unset - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) window = min(window, len(arg1), len(arg2)) def _get_cov(X, Y): @@ -222,7 +218,7 @@ def _get_cov(X, Y): _roll_kw+_pairwise_kw, _flex_retval, _roll_notes) @Appender(_doc_template) def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, - center=False, time_rule=None, pairwise=None): + center=False, pairwise=None): if window is None and isinstance(arg2, (int, float)): window = arg2 arg2 = arg1 @@ -230,17 +226,17 @@ def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None, elif arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise # only default unset - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) window = min(window, len(arg1), len(arg2)) def _get_corr(a, b): num = rolling_cov(a, b, window, min_periods, freq=freq, - center=center, time_rule=time_rule) + center=center) den = (rolling_std(a, window, min_periods, freq=freq, - center=center, time_rule=time_rule) * + 
center=center) * rolling_std(b, window, min_periods, freq=freq, - center=center, time_rule=time_rule)) + center=center)) return num / den return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise)) @@ -296,16 +292,16 @@ def _flex_binary_moment(arg1, arg2, f, pairwise=False): _roll_kw, _pairwise_retval, _roll_notes) @Appender(_doc_template) def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None, - freq=None, center=False, time_rule=None): + freq=None, center=False): import warnings warnings.warn("rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)", FutureWarning) return rolling_corr(df1, df2, window=window, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule, + freq=freq, center=center, pairwise=True) def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, - time_rule=None, args=(), kwargs={}, **kwds): + args=(), kwargs={}, **kwds): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. 
@@ -322,7 +318,6 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, Frequency to conform to before computing statistic center : boolean, default False Whether the label should correspond with center of window - time_rule : Legacy alias for freq args : tuple Passed on to func kwargs : dict @@ -332,7 +327,7 @@ def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False, ------- y : type of input """ - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) calc = lambda x: func(x, window, minp=minp, args=args, kwargs=kwargs, **kwds) return_hook, values = _process_data_structure(arg) @@ -417,10 +412,10 @@ def _get_center_of_mass(com, span, halflife): @Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) -def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None, time_rule=None, +def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None, adjust=True): com = _get_center_of_mass(com, span, halflife) - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) def _ewma(v): result = algos.ewma(v, com, int(adjust)) @@ -442,9 +437,9 @@ def _first_valid_index(arr): _ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, - freq=None, time_rule=None): + freq=None): com = _get_center_of_mass(com, span, halflife) - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) moment2nd = ewma(arg * arg, com=com, min_periods=min_periods) moment1st = ewma(arg, com=com, min_periods=min_periods) @@ -458,9 +453,8 @@ def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, @Substitution("Exponentially-weighted moving std", _unary_arg, _ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) -def ewmstd(arg, com=None, span=None, 
halflife=None, min_periods=0, bias=False, - time_rule=None): - result = ewmvar(arg, com=com, span=span, halflife=halflife, time_rule=time_rule, +def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False): + result = ewmvar(arg, com=com, span=span, halflife=halflife, min_periods=min_periods, bias=bias) return _zsqrt(result) @@ -470,8 +464,8 @@ def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False, @Substitution("Exponentially-weighted moving covariance", _binary_arg_flex, _ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) -def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, bias=False, - freq=None, time_rule=None, pairwise=None): +def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, + bias=False, freq=None, pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -479,8 +473,8 @@ def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, b com = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) def _get_ewmcov(X, Y): mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods) @@ -498,7 +492,7 @@ def _get_ewmcov(X, Y): _ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes) @Appender(_doc_template) def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, - freq=None, time_rule=None, pairwise=None): + freq=None, pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -506,8 +500,8 @@ def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0, com = arg2 arg2 = arg1 pairwise = True if pairwise is None else pairwise - arg1 = _conv_timerule(arg1, freq, time_rule) - arg2 = _conv_timerule(arg2, 
freq, time_rule) + arg1 = _conv_timerule(arg1, freq) + arg2 = _conv_timerule(arg2, freq) def _get_ewmcorr(X, Y): mean = lambda x: ewma(x, com=com, span=span, halflife=halflife, min_periods=min_periods) @@ -547,13 +541,7 @@ def _prep_binary(arg1, arg2): # Python interface to Cython functions -def _conv_timerule(arg, freq, time_rule): - if time_rule is not None: - import warnings - warnings.warn("time_rule argument is deprecated, replace with freq", - FutureWarning) - - freq = time_rule +def _conv_timerule(arg, freq): types = (DataFrame, Series) if freq is not None and isinstance(arg, types): @@ -584,13 +572,12 @@ def _rolling_func(func, desc, check_minp=_use_window): @Appender(_doc_template) @wraps(func) def f(arg, window, min_periods=None, freq=None, center=False, - time_rule=None, **kwargs): + **kwargs): def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): minp = check_minp(minp, window) return func(arg, window, minp, **kwds) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, - time_rule=time_rule, **kwargs) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center, **kwargs) return f @@ -612,7 +599,7 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, - center=False, time_rule=None): + center=False): """Moving quantile. Parameters @@ -628,8 +615,7 @@ def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq` + as a frequency string or DateOffset object. 
center : boolean, default False Whether the label should correspond with center of window @@ -650,12 +636,12 @@ def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, def call_cython(arg, window, minp, args=(), kwargs={}): minp = _use_window(minp, window) return algos.roll_quantile(arg, window, minp, quantile) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, time_rule=time_rule) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center) def rolling_apply(arg, window, func, min_periods=None, freq=None, - center=False, time_rule=None, args=(), kwargs={}): + center=False, args=(), kwargs={}): """Generic moving function application. Parameters @@ -671,8 +657,7 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. 
center : boolean, default False Whether the label should correspond with center of window args : tuple @@ -696,13 +681,12 @@ def rolling_apply(arg, window, func, min_periods=None, freq=None, def call_cython(arg, window, minp, args, kwargs): minp = _use_window(minp, window) return algos.roll_generic(arg, window, minp, func, args, kwargs) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, time_rule=time_rule, - args=args, kwargs=kwargs) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center, args=args, kwargs=kwargs) def rolling_window(arg, window=None, win_type=None, min_periods=None, - freq=None, center=False, mean=True, time_rule=None, + freq=None, center=False, mean=True, axis=0, **kwargs): """ Applies a moving window of type ``window_type`` and size ``window`` @@ -721,8 +705,7 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. 
center : boolean, default False Whether the label should correspond with center of window mean : boolean, default True @@ -778,7 +761,7 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, minp = _use_window(min_periods, len(window)) - arg = _conv_timerule(arg, freq, time_rule) + arg = _conv_timerule(arg, freq) return_hook, values = _process_data_structure(arg) f = lambda x: algos.roll_window(x, window, minp, avg=mean) @@ -816,16 +799,14 @@ def _expanding_func(func, desc, check_minp=_use_window): @Substitution(desc, _unary_arg, _expanding_kw, _type_of_input_retval, "") @Appender(_doc_template) @wraps(func) - def f(arg, min_periods=1, freq=None, center=False, time_rule=None, - **kwargs): + def f(arg, min_periods=1, freq=None, center=False, **kwargs): window = len(arg) def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): minp = check_minp(minp, window) return func(arg, window, minp, **kwds) - return _rolling_moment(arg, window, call_cython, min_periods, - freq=freq, center=center, - time_rule=time_rule, **kwargs) + return _rolling_moment(arg, window, call_cython, min_periods, freq=freq, + center=center, **kwargs) return f @@ -849,7 +830,7 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds): check_minp=_require_min_periods(4)) -def expanding_count(arg, freq=None, center=False, time_rule=None): +def expanding_count(arg, freq=None, center=False): """ Expanding count of number of non-NaN observations. @@ -858,8 +839,7 @@ def expanding_count(arg, freq=None, center=False, time_rule=None): arg : DataFrame or numpy ndarray-like freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window. 
@@ -873,12 +853,11 @@ def expanding_count(arg, freq=None, center=False, time_rule=None): frequency by resampling the data. This is done with the default parameters of :meth:`~pandas.Series.resample` (i.e. using the `mean`). """ - return rolling_count(arg, len(arg), freq=freq, center=center, - time_rule=time_rule) + return rolling_count(arg, len(arg), freq=freq, center=center) def expanding_quantile(arg, quantile, min_periods=1, freq=None, - center=False, time_rule=None): + center=False): """Expanding quantile. Parameters @@ -891,8 +870,7 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window. @@ -907,14 +885,14 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None, of :meth:`~pandas.Series.resample` (i.e. using the `mean`). 
""" return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule) + freq=freq, center=center) @Substitution("Unbiased expanding covariance.", _binary_arg_flex, _expanding_kw+_pairwise_kw, _flex_retval, "") @Appender(_doc_template) def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False, - time_rule=None, pairwise=None): + pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -925,14 +903,14 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, center=False, window = max(len(arg1), len(arg2)) return rolling_cov(arg1, arg2, window, min_periods=min_periods, freq=freq, - center=center, time_rule=time_rule, pairwise=pairwise) + center=center, pairwise=pairwise) @Substitution("Expanding sample correlation.", _binary_arg_flex, _expanding_kw+_pairwise_kw, _flex_retval, "") @Appender(_doc_template) def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, - time_rule=None, pairwise=None): + pairwise=None): if arg2 is None: arg2 = arg1 pairwise = True if pairwise is None else pairwise @@ -943,8 +921,7 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, window = max(len(arg1), len(arg2)) return rolling_corr(arg1, arg2, window, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule, - pairwise=pairwise) + freq=freq, center=center, pairwise=pairwise) @Substitution("Deprecated. 
Use expanding_corr(..., pairwise=True) instead.\n\n" @@ -952,16 +929,15 @@ def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, center=False, _expanding_kw, _pairwise_retval, "") @Appender(_doc_template) def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None, - center=False, time_rule=None): + center=False): import warnings warnings.warn("expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)", FutureWarning) return expanding_corr(df1, df2, min_periods=min_periods, - freq=freq, center=center, time_rule=time_rule, - pairwise=True) + freq=freq, center=center, pairwise=True) def expanding_apply(arg, func, min_periods=1, freq=None, center=False, - time_rule=None, args=(), kwargs={}): + args=(), kwargs={}): """Generic expanding function application. Parameters @@ -974,8 +950,7 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, (otherwise result is NA). freq : string or DateOffset object, optional (default None) Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. `time_rule` is a legacy alias - for `freq`. + as a frequency string or DateOffset object. center : boolean, default False Whether the label should correspond with center of window. 
args : tuple @@ -995,5 +970,4 @@ def expanding_apply(arg, func, min_periods=1, freq=None, center=False, """ window = len(arg) return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq, - center=center, time_rule=time_rule, args=args, - kwargs=kwargs) + center=center, args=args, kwargs=kwargs) diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 97f08e7052c87..22661ea7cacda 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -485,27 +485,6 @@ def _check_structures(self, func, static_comp, assert_series_equal(series_xp, series_rs) assert_frame_equal(frame_xp, frame_rs) - def test_legacy_time_rule_arg(self): - # suppress deprecation warnings - sys.stderr = StringIO() - - rng = bdate_range('1/1/2000', periods=20) - ts = Series(np.random.randn(20), index=rng) - ts = ts.take(np.random.permutation(len(ts))[:12]).sort_index() - - try: - result = mom.rolling_mean(ts, 1, min_periods=1, freq='B') - expected = mom.rolling_mean(ts, 1, min_periods=1, - time_rule='WEEKDAY') - tm.assert_series_equal(result, expected) - - result = mom.ewma(ts, span=5, freq='B') - expected = mom.ewma(ts, span=5, time_rule='WEEKDAY') - tm.assert_series_equal(result, expected) - - finally: - sys.stderr = sys.__stderr__ - def test_ewma(self): self._check_ew(mom.ewma) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7988b01af8c48..a75d30c3323d6 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -239,21 +239,6 @@ def get_period_alias(offset_str): _legacy_reverse_map = dict((v, k) for k, v in reversed(sorted(compat.iteritems(_rule_aliases)))) - -def inferTimeRule(index): - from pandas.tseries.index import DatetimeIndex - import warnings - warnings.warn("This method is deprecated, use infer_freq or inferred_freq" - " attribute of DatetimeIndex", FutureWarning) - - freq = DatetimeIndex(index).inferred_freq - if freq is None: - raise Exception('Unable 
to infer time rule') - - offset = to_offset(freq) - return get_legacy_offset_name(offset) - - def to_offset(freqstr): """ Return DateOffset object from string representation diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index c58447acec621..353f5f1c472ba 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -158,10 +158,6 @@ def __new__(cls, data=None, dayfirst = kwds.pop('dayfirst', None) yearfirst = kwds.pop('yearfirst', None) infer_dst = kwds.pop('infer_dst', False) - warn = False - if 'offset' in kwds and kwds['offset']: - freq = kwds['offset'] - warn = True freq_infer = False if not isinstance(freq, DateOffset): @@ -173,14 +169,6 @@ def __new__(cls, data=None, freq_infer = True freq = None - if warn: - import warnings - warnings.warn("parameter 'offset' is deprecated, " - "please use 'freq' instead", - FutureWarning) - - offset = freq - if periods is not None: if com.is_float(periods): periods = int(periods) @@ -188,12 +176,12 @@ def __new__(cls, data=None, raise ValueError('Periods must be a number, got %s' % str(periods)) - if data is None and offset is None: + if data is None and freq is None: raise ValueError("Must provide freq argument if no data is " "supplied") if data is None: - return cls._generate(start, end, periods, name, offset, + return cls._generate(start, end, periods, name, freq, tz=tz, normalize=normalize, closed=closed, infer_dst=infer_dst) @@ -211,11 +199,11 @@ def __new__(cls, data=None, # try a few ways to make it datetime64 if lib.is_string_array(data): - data = _str_to_dt_array(data, offset, dayfirst=dayfirst, + data = _str_to_dt_array(data, freq, dayfirst=dayfirst, yearfirst=yearfirst) else: data = tools.to_datetime(data, errors='raise') - data.offset = offset + data.offset = freq if isinstance(data, DatetimeIndex): if name is not None: data.name = name @@ -226,7 +214,7 @@ def __new__(cls, data=None, return data if issubclass(data.dtype.type, compat.string_types): - data = _str_to_dt_array(data, offset, 
dayfirst=dayfirst, + data = _str_to_dt_array(data, freq, dayfirst=dayfirst, yearfirst=yearfirst) if issubclass(data.dtype.type, np.datetime64): @@ -238,8 +226,8 @@ def __new__(cls, data=None, subarr = data.values - if offset is None: - offset = data.offset + if freq is None: + freq = data.offset verify_integrity = False else: if data.dtype != _NS_DTYPE: @@ -287,13 +275,13 @@ def __new__(cls, data=None, subarr = subarr.view(cls) subarr.name = name - subarr.offset = offset + subarr.offset = freq subarr.tz = tz if verify_integrity and len(subarr) > 0: - if offset is not None and not freq_infer: + if freq is not None and not freq_infer: inferred = subarr.inferred_freq - if inferred != offset.freqstr: + if inferred != freq.freqstr: raise ValueError('Dates do not conform to passed ' 'frequency') diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py index 3155f0f6e1a80..0315cb598b88a 100644 --- a/pandas/tseries/tests/test_timeseries_legacy.py +++ b/pandas/tseries/tests/test_timeseries_legacy.py @@ -262,28 +262,6 @@ def setUp(self): # suppress deprecation warnings sys.stderr = StringIO() - def test_inferTimeRule(self): - from pandas.tseries.frequencies import inferTimeRule - - index1 = [datetime(2010, 1, 29, 0, 0), - datetime(2010, 2, 26, 0, 0), - datetime(2010, 3, 31, 0, 0)] - - index2 = [datetime(2010, 3, 26, 0, 0), - datetime(2010, 3, 29, 0, 0), - datetime(2010, 3, 30, 0, 0)] - - index3 = [datetime(2010, 3, 26, 0, 0), - datetime(2010, 3, 27, 0, 0), - datetime(2010, 3, 29, 0, 0)] - - # LEGACY - assert inferTimeRule(index1) == 'EOM' - assert inferTimeRule(index2) == 'WEEKDAY' - - self.assertRaises(Exception, inferTimeRule, index1[:2]) - self.assertRaises(Exception, inferTimeRule, index3) - def test_time_rule(self): result = DateRange('1/1/2000', '1/30/2000', time_rule='WEEKDAY') result2 = DateRange('1/1/2000', '1/30/2000', timeRule='WEEKDAY')
Fixes #6641 Line numbers as of commit 70de129: #### Deprecated since 0.11 or before The following are fixed in this PR: - [x] Deprecate precision in favor of accuracy (#395) -- v0.7.0 .\pandas\core\format.py:{2143} - [x] Deprecate force_unicode (#2224, #2225) -- v0.10.0 .\pandas\core\format.py:{442, 369} .\pandas\core\frame.py:{1387, 1342, 1283} - [x] Deprecate nanRep in favor of na_rep (#275) -- v0.5.0 .\pandas\core\frame.py:{1285, 1133} .\pandas\core\series.py:{910} - [x] Deprecate unique in HDFStore (#3256) -- v0.11.0 .\pandas\io\pytables.py:{691} - [x] Deprecate time_rule favor freq (#1042) -- v0.8.0 .\pandas\stats\moments.py:{554} - [x] Deprecate inferTimeRule in favor of infer_freq (#391) -- v0.8.0 .\pandas\tseries\frequencies.py:{247} - [x] Deprecate DateRange in favor of DatetimeIndex (no issue number, commit 6fe2db57) -- v0.8.0 .\pandas\core\daterange.py:{24} (will be done in #6816) - [x] Deprecate 'name' in favor of 'symbols' (no issue, commit b921d1a2) -- v0.11.0 .\pandas\io\data.py:{344} - [x] Deprecate offset in favor of freq (no issue, commit 31363905c20) -- v0.8.0 .\pandas\tseries\index.py:{180} ##### Deprecated since 0.12 or after Issue #6581 documents the following for future removal. 
**These are not included in this PR** - [ ] ~~Deprecate na_last in favor of na_position (#5231, #3917)~~ .\pandas\core\series.py:{1770} - [ ] ~~Deprecate rolling_corr_pairwise and expanding_corr_pairwise (#4950)~~ .\pandas\stats\moments.py:{301,957} - [ ] ~~Deprecate broadcasting TimeSeries along DataFrame index (#2304)~~ .\pandas\core\frame.py:{2837} - [ ] ~~Deprecate load and save in favor of pickle and to_pickle (#3787)~~ .\pandas\core\common56.py:{2740, 2756} .\pandas\core\generic.py:{960, 967} - [ ] ~~Deprecate timeRule and offset in favor of freq (#4853, #4864)~~ .\pandas\core\datetools.py:{55} - [ ] ~~Deprecate colSpace in favor of col_space (no issue, commit 4a5a677c)~~ .\pandas\core\frame.py:{1292,1346,1392} - [ ] ~~Deprecate year/month parameters in finance option signatures (#3822, #3817)~~ .\pandas\io\data.py:{588,811,848} - [ ] ~~Remove kind from read_excel (#4713)~~ .\pandas\io\excel.py:{103} - [ ] ~~Deprecate infer_types to have no effect (#4770)~~ .\pandas\io\html.py:{830} - [ ] ~~Deprecate table keyword in HDFStore (#4645)~~ .\pandas\io\pytables.py:{1124}
https://api.github.com/repos/pandas-dev/pandas/pulls/6813
2014-04-05T19:32:39Z
2014-04-08T22:37:52Z
2014-04-08T22:37:52Z
2014-06-14T21:54:29Z
BUG/ENH: Fix to_excel representation of inf values
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2576982d6976f..d1c3041d6bd6b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -153,6 +153,9 @@ API Changes - all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) - ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) +- ``to_excel`` now converts ``np.inf`` into a string representation, + customizable by the ``inf_rep`` keyword argument (Excel has no native inf + representation) (:issue:`6782`) Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/format.py b/pandas/core/format.py index a2a68b23c2018..636b3f452a20c 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1369,10 +1369,14 @@ class ExcelFormatter(object): sequence should be given if the DataFrame uses MultiIndex. merge_cells : boolean, default False Format MultiIndex and Hierarchical Rows as merged cells. + inf_rep : string, default `'inf'` + representation for np.inf values (which aren't representable in Excel) + A `'-'` sign will be added in front of -inf. 
""" def __init__(self, df, na_rep='', float_format=None, cols=None, - header=True, index=True, index_label=None, merge_cells=False): + header=True, index=True, index_label=None, merge_cells=False, + inf_rep='inf'): self.df = df self.rowcounter = 0 self.na_rep = na_rep @@ -1384,12 +1388,18 @@ def __init__(self, df, na_rep='', float_format=None, cols=None, self.index_label = index_label self.header = header self.merge_cells = merge_cells + self.inf_rep = inf_rep def _format_value(self, val): if lib.checknull(val): val = self.na_rep - if self.float_format is not None and com.is_float(val): - val = float(self.float_format % val) + elif com.is_float(val): + if np.isposinf(val): + val = '-%s' % self.inf_rep + elif np.isneginf(val): + val = self.inf_rep + elif self.float_format is not None: + val = float(self.float_format % val) return val def _format_header_mi(self): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8a30b74575ec2..8875d2fdfb39a 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1155,7 +1155,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, - merge_cells=True, encoding=None): + merge_cells=True, encoding=None, inf_rep='inf'): """ Write DataFrame to a excel sheet @@ -1194,6 +1194,9 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', encoding of the resulting excel file. Only necessary for xlwt, other writers support unicode natively. 
cols : kwarg only alias of columns [deprecated] + inf_rep : string, default 'inf' + Representation for infinity (there is no native representation for + infinity in Excel) Notes ----- @@ -1207,7 +1210,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', >>> writer.save() """ from pandas.io.excel import ExcelWriter - + need_save = False if encoding == None: encoding = 'ascii' @@ -1223,7 +1226,8 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=float_format, index=index, index_label=index_label, - merge_cells=merge_cells) + merge_cells=merge_cells, + inf_rep=inf_rep) formatted_cells = formatter.get_formatted_cells() excel_writer.write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol) diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 8ba2a5dfc3d9c..fde5764993e76 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -538,6 +538,16 @@ def test_bool_types(self): recons = reader.parse('test1').astype(np_type) tm.assert_frame_equal(frame, recons) + def test_inf_roundtrip(self): + _skip_if_no_xlrd() + + frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)]) + with ensure_clean(self.ext) as path: + frame.to_excel(path, 'test1') + reader = ExcelFile(path) + recons = reader.parse('test1') + tm.assert_frame_equal(frame, recons) + def test_sheets(self): _skip_if_no_xlrd()
Fixes #6782 by converting inf values to string (default is `'inf'`). This still round trips successfully. According to Stack Overflow post I found, there isn't a specific representation of `INF` values - http://stackoverflow.com/questions/15704538/how-can-i-express-nan-values-in-excel so we're going to make the decision _for_ the writers. This also (implicitly) fixes an issues where round-tripping an Excel file with an inf value would cause an `OverflowError` and fail to read back. cc @jmcnamara and @arthurgerigk-rocket
https://api.github.com/repos/pandas-dev/pandas/pulls/6812
2014-04-05T15:28:06Z
2014-04-05T18:06:13Z
2014-04-05T18:06:13Z
2014-08-05T22:20:25Z
BUG: bug in timedelta ops on 32-bit platforms (GH6808)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2576982d6976f..94c5570d12844 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -303,6 +303,7 @@ Bug Fixes - Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) - Bug in ``DataFrame.replace()`` where regex metacharacters were being treated as regexs even when ``regex=False`` (:issue:`6777`). +- Bug in timedelta ops on 32-bit platforms (:issue:`6808`) pandas 0.13.1 ------------- diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 341feec67fb9b..df7cad5580b66 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -243,6 +243,39 @@ def test_timedelta_ops(self): expected = to_timedelta('00:01:21') tm.assert_almost_equal(result, expected) + def test_timedelta_ops_scalar(self): + + # GH 6808 + base = pd.to_datetime('20130101 09:01:12.123456') + expected_add = pd.to_datetime('20130101 09:01:22.123456') + expected_sub = pd.to_datetime('20130101 09:01:02.123456') + + for offset in [pd.to_timedelta(10,unit='s'), + timedelta(seconds=10), + np.timedelta64(10,'s'), + np.timedelta64(10000000000,'ns'), + pd.offsets.Second(10)]: + result = base + offset + self.assertEquals(result, expected_add) + + result = base - offset + self.assertEquals(result, expected_sub) + + base = pd.to_datetime('20130102 09:01:12.123456') + expected_add = pd.to_datetime('20130103 09:01:22.123456') + expected_sub = pd.to_datetime('20130101 09:01:02.123456') + + for offset in [pd.to_timedelta('1 day, 00:00:10'), + pd.to_timedelta('1 days, 00:00:10'), + timedelta(days=1,seconds=10), + np.timedelta64(1,'D')+np.timedelta64(10,'s'), + pd.offsets.Day()+pd.offsets.Second(10)]: + result = base + offset + self.assertEquals(result, expected_add) + + result = base - offset + self.assertEquals(result, expected_sub) + def test_to_timedelta_on_missing_values(self): _skip_if_numpy_not_friendly() diff --git a/pandas/tseries/timedeltas.py 
b/pandas/tseries/timedeltas.py index 78dbd246648c8..cc01c26f78b70 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -68,7 +68,7 @@ def _convert_listlike(arg, box, unit): _short_search = re.compile( "^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE) _full_search = re.compile( - "^\s*(?P<neg>-?)\s*(?P<days>\d+)?\s*(days|d)?,?\s*(?P<time>\d{2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE) + "^\s*(?P<neg>-?)\s*(?P<days>\d+)?\s*(days|d|day)?,?\s*(?P<time>\d{2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE) _nat_search = re.compile( "^\s*(nat|nan)\s*$",re.IGNORECASE) _whitespace = re.compile('^\s*$') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 497c0a33e0cd0..278e8effb534b 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -690,19 +690,19 @@ cdef class _Timestamp(datetime): dts.us, ts.tzinfo) def __add__(self, other): - cdef Py_ssize_t other_int + cdef int64_t other_int if is_timedelta64_object(other): - other_int = other.astype('timedelta64[ns]').astype(int) + other_int = other.astype('timedelta64[ns]').view('i8') return Timestamp(self.value + other_int, tz=self.tzinfo, offset=self.offset) - if is_integer_object(other): + elif is_integer_object(other): if self.offset is None: raise ValueError("Cannot add integral value to Timestamp " "without offset.") return Timestamp((self.offset * other).apply(self), offset=self.offset) - if isinstance(other, timedelta) or hasattr(other, 'delta'): + elif isinstance(other, timedelta) or hasattr(other, 'delta'): nanos = _delta_to_nanoseconds(other) return Timestamp(self.value + nanos, tz=self.tzinfo, offset=self.offset)
closes #6808
https://api.github.com/repos/pandas-dev/pandas/pulls/6811
2014-04-05T14:52:26Z
2014-04-05T15:23:20Z
null
2014-06-30T14:04:14Z
CLN: replace pandas.compat.scipy.scoreatpercentile with numpy.percentile
diff --git a/doc/source/release.rst b/doc/source/release.rst index fb4f06ac03ff9..cc8e271d62183 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -172,6 +172,10 @@ API Changes (and numpy defaults) - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) +- Replace ``pandas.compat.scipy.scoreatpercentile`` with ``numpy.percentile`` (:issue:`6810`) +- ``.quantile`` on a ``datetime[ns]`` series now returns ``Timestamp`` instead + of ``np.datetime64`` objects (:issue:`6810`) + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/compat/scipy.py b/pandas/compat/scipy.py index 81601ffe25609..06da8799d0c96 100644 --- a/pandas/compat/scipy.py +++ b/pandas/compat/scipy.py @@ -6,88 +6,6 @@ import numpy as np -def scoreatpercentile(a, per, limit=(), interpolation_method='fraction'): - """Calculate the score at the given `per` percentile of the sequence `a`. - - For example, the score at `per=50` is the median. If the desired quantile - lies between two data points, we interpolate between them, according to - the value of `interpolation`. If the parameter `limit` is provided, it - should be a tuple (lower, upper) of two values. Values of `a` outside - this (closed) interval will be ignored. - - The `interpolation_method` parameter supports three values, namely - `fraction` (default), `lower` and `higher`. Interpolation is done only, - if the desired quantile lies between two data points `i` and `j`. For - `fraction`, the result is an interpolated value between `i` and `j`; - for `lower`, the result is `i`, for `higher` the result is `j`. - - Parameters - ---------- - a : ndarray - Values from which to extract score. - per : scalar - Percentile at which to extract score. - limit : tuple, optional - Tuple of two scalars, the lower and upper limits within which to - compute the percentile. 
- interpolation_method : {'fraction', 'lower', 'higher'}, optional - This optional parameter specifies the interpolation method to use, - when the desired quantile lies between two data points `i` and `j`: - - - fraction: `i + (j - i)*fraction`, where `fraction` is the - fractional part of the index surrounded by `i` and `j`. - - lower: `i`. - - higher: `j`. - - Returns - ------- - score : float - Score at percentile. - - See Also - -------- - percentileofscore - - Examples - -------- - >>> from scipy import stats - >>> a = np.arange(100) - >>> stats.scoreatpercentile(a, 50) - 49.5 - - """ - # TODO: this should be a simple wrapper around a well-written quantile - # function. GNU R provides 9 quantile algorithms (!), with differing - # behaviour at, for example, discontinuities. - values = np.sort(a, axis=0) - if limit: - values = values[(limit[0] <= values) & (values <= limit[1])] - - idx = per / 100. * (values.shape[0] - 1) - if idx % 1 == 0: - score = values[idx] - else: - if interpolation_method == 'fraction': - score = _interpolate(values[int(idx)], values[int(idx) + 1], - idx % 1) - elif interpolation_method == 'lower': - score = values[np.floor(idx)] - elif interpolation_method == 'higher': - score = values[np.ceil(idx)] - else: - raise ValueError("interpolation_method can only be 'fraction', " - "'lower' or 'higher'") - - return score - - -def _interpolate(a, b, fraction): - """Returns the point at the given fraction between a and b, where - 'fraction' must be between 0 and 1. - """ - return a + (b - a) * fraction - - def rankdata(a): """ Ranks the data, dealing with ties appropriately. 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a00b729f1735a..2f8c70024a1e7 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -38,7 +38,7 @@ import pandas.computation.expressions as expressions from pandas.computation.eval import eval as _eval from pandas.computation.scope import _ensure_scope -from pandas.compat.scipy import scoreatpercentile as _quantile +from numpy import percentile as _quantile from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) from pandas import compat diff --git a/pandas/core/series.py b/pandas/core/series.py index 4d32481e30e55..6172f87ead246 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -52,7 +52,7 @@ import pandas.tslib as tslib import pandas.index as _index -from pandas.compat.scipy import scoreatpercentile as _quantile +from numpy import percentile as _quantile from pandas.core.config import get_option __all__ = ['Series'] @@ -1235,10 +1235,11 @@ def quantile(self, q=0.5): valid_values = self.dropna().values if len(valid_values) == 0: return pa.NA - result = _quantile(valid_values, q * 100) - if not np.isscalar and com.is_timedelta64_dtype(result): - from pandas.tseries.timedeltas import to_timedelta - return to_timedelta(result) + if com.is_datetime64_dtype(self): + values = _values_from_object(self).view('i8') + result = lib.Timestamp(_quantile(values, q * 100)) + else: + result = _quantile(valid_values, q * 100) return result diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index f273c794a7f05..01b42457e72f5 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10915,13 +10915,13 @@ def wrapper(x): check_dtype=False, check_dates=True) def test_quantile(self): - from pandas.compat.scipy import scoreatpercentile + from numpy import percentile q = self.tsframe.quantile(0.1, axis=0) - self.assertEqual(q['A'], scoreatpercentile(self.tsframe['A'], 10)) + self.assertEqual(q['A'], 
percentile(self.tsframe['A'], 10)) q = self.tsframe.quantile(0.9, axis=1) q = self.intframe.quantile(0.1) - self.assertEqual(q['A'], scoreatpercentile(self.intframe['A'], 10)) + self.assertEqual(q['A'], percentile(self.intframe['A'], 10)) # test degenerate case q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index a7f7223172848..c0b7425485cba 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1907,17 +1907,17 @@ def test_groupby_with_hier_columns(self): self.assert_(result.columns.equals(df.columns[:-1])) def test_pass_args_kwargs(self): - from pandas.compat.scipy import scoreatpercentile + from numpy import percentile - def f(x, q=None): - return scoreatpercentile(x, q) - g = lambda x: scoreatpercentile(x, 80) + def f(x, q=None, axis=0): + return percentile(x, q, axis=axis) + g = lambda x: percentile(x, 80, axis=0) # Series ts_grouped = self.ts.groupby(lambda x: x.month) - agg_result = ts_grouped.agg(scoreatpercentile, 80) - apply_result = ts_grouped.apply(scoreatpercentile, 80) - trans_result = ts_grouped.transform(scoreatpercentile, 80) + agg_result = ts_grouped.agg(percentile, 80, axis=0) + apply_result = ts_grouped.apply(percentile, 80, axis=0) + trans_result = ts_grouped.transform(percentile, 80, axis=0) agg_expected = ts_grouped.quantile(.8) trans_expected = ts_grouped.transform(g) @@ -1935,7 +1935,7 @@ def f(x, q=None): # DataFrame df_grouped = self.tsframe.groupby(lambda x: x.month) - agg_result = df_grouped.agg(scoreatpercentile, 80) + agg_result = df_grouped.agg(percentile, 80, axis=0) apply_result = df_grouped.apply(DataFrame.quantile, .8) expected = df_grouped.quantile(.8) assert_frame_equal(apply_result, expected) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 744a020347af9..d1775177d3c1d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2137,17 +2137,28 @@ def 
test_prod_numpy16_bug(self): self.assertNotIsInstance(result, Series) def test_quantile(self): - from pandas.compat.scipy import scoreatpercentile + from numpy import percentile q = self.ts.quantile(0.1) - self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10)) + self.assertEqual(q, percentile(self.ts.valid(), 10)) q = self.ts.quantile(0.9) - self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90)) + self.assertEqual(q, percentile(self.ts.valid(), 90)) # object dtype q = Series(self.ts,dtype=object).quantile(0.9) - self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90)) + self.assertEqual(q, percentile(self.ts.valid(), 90)) + + # datetime64[ns] dtype + dts = self.ts.index.to_series() + q = dts.quantile(.2) + self.assertEqual(q, Timestamp('2000-01-10 19:12:00')) + + if not _np_version_under1p7: + # timedelta64[ns] dtype + tds = dts.diff() + q = tds.quantile(.25) + self.assertEqual(q, pd.to_timedelta('24:00:00')) def test_describe(self): _ = self.series.describe() diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 57d8bf5623a78..215e6e62c685e 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -240,7 +240,7 @@ def test_timedelta_ops(self): result = td.quantile(.1) # This properly returned a scalar. - expected = to_timedelta('00:00:02.6') + expected = np.timedelta64(2599999999,'ns') tm.assert_almost_equal(result, expected) result = td.median()[0]
PR to fix #5824. - Replaces `compat.scipy.scoreatpercentile` with `numpy.percentile`. - Sets `axis=0` in a few tests, because `axis=None` by default on `numpy.percentile`, which returns a scalar (i.e., operates on a flattened version of the array). - Fixes a test that fails after the switch due to differences in how fractions are computed. (Created a `np.timedelta64` directly since `to_timedelta` doesn't support enough precision.) Let me know if you'd like to see any changes..
https://api.github.com/repos/pandas-dev/pandas/pulls/6810
2014-04-05T14:27:01Z
2014-04-16T13:04:41Z
2014-04-16T13:04:41Z
2014-06-17T18:33:53Z
BUG: parser_source as a filename with multibyte characters in Windows(non utf-8 filesystem)
diff --git "a/pandas/io/tests/data/\346\227\245\346\234\254\350\252\236\343\203\225\343\202\241\343\202\244\343\203\253\345\220\215\343\203\206\343\202\271\343\203\210_read_csv_in_win_filesystem.csv" "b/pandas/io/tests/data/\346\227\245\346\234\254\350\252\236\343\203\225\343\202\241\343\202\244\343\203\253\345\220\215\343\203\206\343\202\271\343\203\210_read_csv_in_win_filesystem.csv" new file mode 100644 index 0000000000000..bea9471eab7a5 --- /dev/null +++ "b/pandas/io/tests/data/\346\227\245\346\234\254\350\252\236\343\203\225\343\202\241\343\202\244\343\203\253\345\220\215\343\203\206\343\202\271\343\203\210_read_csv_in_win_filesystem.csv" @@ -0,0 +1,3 @@ +A,B,C +100,200,300 +aaa,bbb,ccc diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 612840e82e3ff..55f194c98b6e3 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1822,6 +1822,11 @@ def test_utf16_example(self): result = self.read_table(buf, encoding='utf-16') self.assertEquals(len(result), 50) + def test_read_csv_example_in_windows_filesystem(self): + buf = b'\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe3\x83\x95\xe3\x82\xa1\xe3\x82\xa4\xe3\x83\xab\xe5\x90\x8d\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88_read_csv_in_win_filesystem.csv' + path = tm.get_data_path(buf.decode('utf-8')) + self.read_csv(path) + def test_converters_corner_with_nas(self): # skip aberration observed on Win64 Python 3.2.2 if hash(np.int64(-1)) != -2: diff --git a/pandas/parser.pyx b/pandas/parser.pyx index bb93097debf71..f303298e88273 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -535,7 +535,7 @@ cdef class TextReader: if isinstance(source, basestring): if not isinstance(source, bytes): - source = source.encode('utf-8') + source = source.encode(sys.getfilesystemencoding() or 'utf-8') if self.memory_map: ptr = new_mmap(source)
fopen() in Windows doesn't accept utf-8 encoded filename with multibyte characters, so need to convert it to filesystem encoding. Set 'utf-8' as default in case sys.getfilesystemencoding() return None. sys.getfilesystemencoding() will return 'mbcs' in Windows, and will 'utf-8' or user setting in other systems.
https://api.github.com/repos/pandas-dev/pandas/pulls/6807
2014-04-05T10:36:08Z
2014-04-30T00:16:31Z
null
2014-06-13T15:26:22Z
BUG/API: disallow boolean arithmetic operations
diff --git a/doc/source/release.rst b/doc/source/release.rst index 8bf6a8d7b9488..7188851214f7f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -156,6 +156,8 @@ API Changes - ``to_excel`` now converts ``np.inf`` into a string representation, customizable by the ``inf_rep`` keyword argument (Excel has no native inf representation) (:issue:`6782`) +- Arithmetic ops are now disallowed when passed two bool dtype Series or + DataFrames (:issue:`6762`). Deprecations ~~~~~~~~~~~~ @@ -307,6 +309,9 @@ Bug Fixes - Bug in ``DataFrame.replace()`` where regex metacharacters were being treated as regexs even when ``regex=False`` (:issue:`6777`). - Bug in timedelta ops on 32-bit platforms (:issue:`6808`) +- Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) +- Bug in expressions.py where numexpr would try to evaluate arithmetic ops + (:issue:`6762`). pandas 0.13.1 ------------- diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py index b379da9cd38bc..128aa5bf2b511 100644 --- a/pandas/computation/expressions.py +++ b/pandas/computation/expressions.py @@ -154,6 +154,20 @@ def _where_numexpr(cond, a, b, raise_on_error=False): set_use_numexpr(True) +def _has_bool_dtype(x): + try: + return x.dtype == bool + except AttributeError: + return 'bool' in x.blocks + + +def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/', + '//', '**'))): + if op_str in not_allowed and _has_bool_dtype(a) and _has_bool_dtype(b): + raise NotImplementedError("operator %r not implemented for bool " + "dtypes" % op_str) + + def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, **eval_kwargs): """ evaluate and return the expression of the op on a and b @@ -170,7 +184,7 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, return the results use_numexpr : whether to try to use numexpr (default True) """ - + _bool_arith_check(op_str, a, b) if use_numexpr: return _evaluate(op, op_str, 
a, b, raise_on_error=raise_on_error, **eval_kwargs) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 2b539b3386226..fdea275b7e040 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -2,6 +2,7 @@ # pylint: disable-msg=W0612,E1101 import nose +import re from numpy.random import randn @@ -339,6 +340,24 @@ def testit(): expr.set_numexpr_threads() testit() + def test_bool_ops_raise_on_arithmetic(self): + df = DataFrame({'a': np.random.rand(10) > 0.5, + 'b': np.random.rand(10) > 0.5}) + names = 'add', 'mul', 'sub', 'div', 'truediv', 'floordiv', 'pow' + ops = '+', '*', '-', '/', '/', '//', '**' + msg = 'operator %r not implemented for bool dtypes' + for op, name in zip(ops, names): + if not compat.PY3 or name != 'div': + f = getattr(operator, name) + err_msg = re.escape(msg % op) + + with tm.assertRaisesRegexp(NotImplementedError, err_msg): + f(df, df) + + with tm.assertRaisesRegexp(NotImplementedError, err_msg): + f(df.a, df.b) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 087e094ffbcb8..877e3839ee11f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4761,13 +4761,13 @@ def _check_unary_op(op): _check_unary_op(operator.neg) def test_logical_typeerror(self): - if compat.PY3: - pass - else: + if not compat.PY3: self.assertRaises(TypeError, self.frame.__eq__, 'foo') self.assertRaises(TypeError, self.frame.__lt__, 'foo') self.assertRaises(TypeError, self.frame.__gt__, 'foo') self.assertRaises(TypeError, self.frame.__ne__, 'foo') + else: + raise nose.SkipTest('test_logical_typeerror not tested on PY3') def test_constructor_lists_to_object_dtype(self): # from #1074
closes #6762
https://api.github.com/repos/pandas-dev/pandas/pulls/6803
2014-04-04T19:51:55Z
2014-04-05T20:18:58Z
2014-04-05T20:18:58Z
2014-06-21T09:55:59Z
StataWriter: Replace missing values in string columns by an empty string
diff --git a/doc/source/release.rst b/doc/source/release.rst index 31cd37e4bf467..e280c3c9cc95b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -302,6 +302,8 @@ Bug Fixes - Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) - Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) - Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) +- StataWriter replaces missing values in string columns by empty string (:issue:`6802`) + pandas 0.13.1 ------------- diff --git a/pandas/io/stata.py b/pandas/io/stata.py index fd41961109511..75aaf68b4dd0a 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1319,6 +1319,8 @@ def _write_data_nodates(self): for i, var in enumerate(row): typ = ord(typlist[i]) if typ <= 244: # we've got a string + if var is None or var == np.nan: + var = _pad_bytes('', typ) if len(var) < typ: var = _pad_bytes(var, typ) self._write(var) diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index c5debed6654af..72bea8c458f9e 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -508,6 +508,16 @@ def test_date_export_formats(self): tm.assert_frame_equal(written_and_read_again.set_index('index'), expected) + def test_write_missing_strings(self): + original = DataFrame([["1"], [None]], columns=["foo"]) + expected = DataFrame([["1"], [""]], columns=["foo"]) + expected.index.name = 'index' + with tm.ensure_clean() as path: + original.to_stata(path) + written_and_read_again = self.read_dta(path) + tm.assert_frame_equal(written_and_read_again.set_index('index'), + expected) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
Otherwise writing fails with errors about len() applied to a float or NoneType.
https://api.github.com/repos/pandas-dev/pandas/pulls/6802
2014-04-04T19:09:34Z
2014-04-06T18:32:34Z
2014-04-06T18:32:34Z
2014-06-21T14:30:35Z
BUG: duplicate (getitem) indexing with iloc (GH6766)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index ed8cfb59bc995..a548b5f61754e 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -674,7 +674,8 @@ def setitem(self, indexer, value): values = self._try_coerce_result(values) values = self._try_cast_result(values, dtype) return [make_block(transf(values), self.items, self.ref_items, - ndim=self.ndim, fastpath=True)] + ndim=self.ndim, placement=self._ref_locs, + fastpath=True)] except (ValueError, TypeError) as detail: raise except Exception as detail: @@ -2902,7 +2903,10 @@ def fast_xs(self, loc, copy=False): # non-unique (GH4726) if not items.is_unique: - return self._interleave(items).ravel(), True + result = self._interleave(items) + if self.ndim == 2: + result = result.T + return result[loc], True # unique dtype = _interleaved_dtype(self.blocks) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index fe08fb3a957c2..ea1e07dbf6acc 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -483,6 +483,19 @@ def test_iloc_getitem_dups(self): # no dups in panel (bug?) self.check_result('list int (dups)', 'iloc', [0,1,1,3], 'ix', { 0 : [0,2,2,6], 1 : [0,3,3,9] }, objs = ['series','frame'], typs = ['ints']) + # GH 6766 + df1 = DataFrame([{'A':None, 'B':1},{'A':2, 'B':2}]) + df2 = DataFrame([{'A':3, 'B':3},{'A':4, 'B':4}]) + df = concat([df1, df2], axis=1) + + # cross-sectional indexing + result = df.iloc[0,0] + self.assertTrue(isnull(result)) + + result = df.iloc[0,:] + expected = Series([np.nan,1,3,3],index=['A','B','A','B']) + assert_series_equal(result,expected) + def test_iloc_getitem_array(self): # array like
fix the getitem issues for iloc on #6766
https://api.github.com/repos/pandas-dev/pandas/pulls/6799
2014-04-04T14:03:09Z
2014-04-04T14:42:33Z
2014-04-04T14:42:33Z
2014-06-16T05:11:38Z
DOC: Improve doc-string for `copy` kw on `convert_object()` (GH1234)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cec6ce2cabcdd..8ec4655c0a309 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2093,7 +2093,9 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, strings), non-convertibles get NaN convert_timedeltas : if True, attempt to soft convert timedeltas, if 'coerce', force conversion (and non-convertibles get NaT) - copy : Boolean, if True, return copy, default is True + copy : Boolean, if True, return copy even if no copy is necessary + (e.g. no conversion was done), default is True. + It is meant for internal use, not to be confused with `inplace` kw. Returns -------
PR as suggested by @jreback.
https://api.github.com/repos/pandas-dev/pandas/pulls/6791
2014-04-03T23:19:02Z
2014-04-04T12:33:06Z
2014-04-04T12:33:06Z
2014-06-19T16:29:24Z
BUG: bug in taking all on a multi-index when only level 0 is specified (GH6788)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 39ddc9a7ee22a..e0c5fa573ff69 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1040,6 +1040,10 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): level = 0 _, indexer = labels.reindex(objarr, level=level) + # take all + if indexer is None: + indexer = np.arange(len(labels)) + check = labels.levels[0].get_indexer(objarr) else: level = None diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 3f6ae24756d47..fe08fb3a957c2 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1128,6 +1128,17 @@ def test_loc_multiindex(self): xp = mi_int.ix[4] assert_frame_equal(rs,xp) + # GH6788 + # multi-index indexer is None (meaning take all) + attributes = ['Attribute' + str(i) for i in range(1)] + attribute_values = ['Value' + str(i) for i in range(5)] + + index = MultiIndex.from_product([attributes,attribute_values]) + df = 0.1 * np.random.randn(10, 1 * 5) + 0.5 + df = DataFrame(df, columns=index) + result = df[attributes] + assert_frame_equal(result, df) + def test_series_getitem_multiindex(self): # GH 6018
closes #6788
https://api.github.com/repos/pandas-dev/pandas/pulls/6790
2014-04-03T21:46:40Z
2014-04-03T22:11:49Z
2014-04-03T22:11:49Z
2014-07-16T09:00:21Z
BUG: Bug in setting a tz-aware index directly via .index (GH6785)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 80a833848f8fa..31cd37e4bf467 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -301,6 +301,7 @@ Bug Fixes - Bug in downcasting inference with empty arrays (:issue:`6733`) - Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) - Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) +- Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) pandas 0.13.1 ------------- diff --git a/pandas/core/index.py b/pandas/core/index.py index 3213f288be4b3..bae4a2c455ec6 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -154,7 +154,7 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, if (inferred.startswith('datetime') or tslib.is_timestamp_array(subarr)): from pandas.tseries.index import DatetimeIndex - return DatetimeIndex(data, copy=copy, name=name, **kwargs) + return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) elif inferred == 'period': return PeriodIndex(subarr, name=name, **kwargs) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 1bbcba0e4caad..4e422b452ebf8 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2125,6 +2125,15 @@ def test_set_index_cast_datetimeindex(self): result = df['D'] assert_series_equal(result, expected) + # GH 6785 + # set the index manually + import pytz + df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}]) + expected = df.set_index('ts') + df.index = df['ts'] + df.pop('ts') + assert_frame_equal(df, expected) + def test_set_index_multiindexcolumns(self): columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)]) df = DataFrame(np.random.randn(3, 3), columns=columns)
closes #6785
https://api.github.com/repos/pandas-dev/pandas/pulls/6786
2014-04-03T20:17:23Z
2014-04-03T21:07:11Z
2014-04-03T21:07:11Z
2014-06-30T14:03:57Z
BLD: spring cleaning on Makefile
diff --git a/Makefile b/Makefile index d29e177d39832..9a768932b8bea 100644 --- a/Makefile +++ b/Makefile @@ -1,14 +1,13 @@ tseries: pandas/lib.pyx pandas/tslib.pyx pandas/hashtable.pyx python setup.py build_ext --inplace -.PHONY : clean develop build clean clean_pyc tseries doc +.PHONY : develop build clean clean_pyc tseries doc -clean: clean_pyc - -rm -rf build dist - -find . -name '*.so' -exec rm {} \; +clean: + -python setup.py clean clean_pyc: - -find . -name '*.pyc' -exec rm {} \; -or -name '*.pyo' -exec rm {} \; + -find . -name '*.py[co]' -exec rm {} \; sparse: pandas/src/sparse.pyx python setup.py build_ext --inplace @@ -20,8 +19,7 @@ develop: build -python setup.py develop doc: - -rm -rf doc/build - -rm -rf doc/source/generated + -rm -rf doc/build doc/source/generated cd doc; \ python make.py clean; \ python make.py html diff --git a/doc/source/release.rst b/doc/source/release.rst index 3f3cfe5dd4359..dd5526f4b07a6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -314,6 +314,8 @@ Bug Fixes - Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) - Bug in expressions.py where numexpr would try to evaluate arithmetic ops (:issue:`6762`). +- Bug in Makefile where it didn't remove Cython generated C files with ``make + clean`` (:issue:`6768`) pandas 0.13.1 ------------- diff --git a/setup.py b/setup.py index 6713e52733fd1..f89353e20a625 100755 --- a/setup.py +++ b/setup.py @@ -280,7 +280,7 @@ def initialize_options(self): 'ultrajsondec.c', ] - for root, dirs, files in list(os.walk('pandas')): + for root, dirs, files in os.walk('pandas'): for f in files: if f in self._clean_exclude: continue @@ -297,7 +297,7 @@ def initialize_options(self): if d == '__pycache__': self._clean_trees.append(pjoin(root, d)) - for d in ('build',): + for d in ('build', 'dist'): if os.path.exists(d): self._clean_trees.append(d)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/6784
2014-04-03T18:32:18Z
2014-04-06T16:26:02Z
2014-04-06T16:26:02Z
2014-07-16T09:00:15Z
BUG: fix metacharacter replacement bug in DataFrame.replace()
diff --git a/doc/source/release.rst b/doc/source/release.rst index 31cd37e4bf467..2576982d6976f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -301,7 +301,8 @@ Bug Fixes - Bug in downcasting inference with empty arrays (:issue:`6733`) - Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) - Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) -- Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) +- Bug in ``DataFrame.replace()`` where regex metacharacters were being treated + as regexs even when ``regex=False`` (:issue:`6777`). pandas 0.13.1 ------------- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index a548b5f61754e..e28d4029d4fa0 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1555,7 +1555,7 @@ def replace(self, to_replace, value, inplace=False, filter=None, def _replace_single(self, to_replace, value, inplace=False, filter=None, regex=False): # to_replace is regex compilable - to_rep_re = com.is_re_compilable(to_replace) + to_rep_re = regex and com.is_re_compilable(to_replace) # regex is regex compilable regex_re = com.is_re_compilable(regex) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4e422b452ebf8..087e094ffbcb8 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7857,6 +7857,15 @@ def test_regex_replace_numeric_to_object_conversion(self): assert_frame_equal(res, expec) self.assertEqual(res.a.dtype, np.object_) + def test_replace_regex_metachar(self): + metachars = '[]', '()', '\d', '\w', '\s' + + for metachar in metachars: + df = DataFrame({'a': [metachar, 'else']}) + result = df.replace({'a': {metachar: 'paren'}}) + expected = DataFrame({'a': ['paren', 'else']}) + tm.assert_frame_equal(result, expected) + def test_replace(self): self.tsframe['A'][:5] = nan self.tsframe['A'][-5:] = nan
closes #6777
https://api.github.com/repos/pandas-dev/pandas/pulls/6778
2014-04-03T14:55:05Z
2014-04-04T20:56:16Z
2014-04-04T20:56:16Z
2014-07-16T09:00:14Z
DOC: Issue #6763 - Corrected MERGE example
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3329483a61f5c..8a30b74575ec2 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -123,11 +123,11 @@ >>> merge(A, B, left_on='lkey', right_on='rkey', how='outer') lkey value_x rkey value_y -0 bar 2 bar 6 -1 bar 2 bar 8 -2 baz 3 NaN NaN -3 foo 1 foo 5 -4 foo 4 foo 5 +0 foo 1 foo 5 +1 foo 4 foo 5 +2 bar 2 bar 6 +3 bar 2 bar 8 +4 baz 3 NaN NaN 5 NaN NaN qux 7 Returns
Corrected the example provided in the MERGE function docstring. Closes #6763
https://api.github.com/repos/pandas-dev/pandas/pulls/6776
2014-04-03T12:23:54Z
2014-04-03T12:42:23Z
2014-04-03T12:42:23Z
2014-07-16T09:00:13Z
ENH: rename function now has errors keyword
diff --git a/doc/source/release.rst b/doc/source/release.rst index 80a833848f8fa..d8549886578d9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -154,6 +154,8 @@ API Changes - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) - ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) +- ``rename`` function can now accept ``errors`` keyword to suppress error raised by a passed function, or raise ValueError when any of label in a passed dict is not found in the target axis. + Deprecations ~~~~~~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 23ab8f10116c1..c32e10c358b62 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -199,6 +199,8 @@ API changes - ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) - ``Panel.shift`` now uses ``NDFrame.shift``. It no longer drops the ``nan`` data and retains its original shape. (:issue:`4867`) +- ``rename`` function can now accept ``errors`` keyword to suppress error raised by a passed function, or raise ValueError when any of label in a passed dict is not found in the target axis. From this version, passing a dict with labels which isn't included in the axis results in FutureWarning, and will raise ValueError in future version. + MultiIndexing Using Slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cec6ce2cabcdd..78303198fb9a3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -524,6 +524,10 @@ def swaplevel(self, i, j, axis=0): inplace : boolean, default False Whether to return a new %(klass)s. If True then value of copy is ignored. 
+ errors : {'ignore', 'raise'}, default 'raise' + If 'raise', ValueError is raised when any of label in the dict doesn't exist in target axis. + Also, errors raised by passed function is not suppressed. + If 'ignore', suppress errors and rename only labels which doesn't trigger errors. Returns ------- @@ -538,19 +542,46 @@ def rename(self, *args, **kwargs): copy = kwargs.get('copy', True) inplace = kwargs.get('inplace', False) + # default should be 'raise' in future version + errors = kwargs.get('errors', None) + # errors = kwargs.get('errors', 'raise') + if (com._count_not_none(*axes.values()) == 0): raise TypeError('must pass an index to rename') # renamer function if passed a dict - def _get_rename_function(mapper): + def _get_rename_function(mapper, axis): if isinstance(mapper, (dict, ABCSeries)): + if errors != 'ignore': + # once cast a view to list for python 3 + labels = np.array(list(mapper.keys())) + axis = self._get_axis(axis) + indexer = axis.get_indexer(labels) + mask = indexer == -1 + if mask.any(): + msg = 'labels %s not contained in axis' % labels[mask] + + if errors is None: + # should be removed in future version + warnings.warn("%s will results in ValueError in the future." 
+ "Use 'errors' keyword to suppress/force error" % msg, + FutureWarning) + else: + raise ValueError(msg) + def f(x): if x in mapper: return mapper[x] else: return x else: - f = mapper + def f(x): + try: + return mapper(x) + except Exception: + if errors != 'ignore': + raise + return x return f @@ -562,7 +593,8 @@ def f(x): v = axes.get(self._AXIS_NAMES[axis]) if v is None: continue - f = _get_rename_function(v) + + f = _get_rename_function(v, axis) baxis = self._get_block_manager_axis(axis) result._data = result._data.rename(f, axis=baxis, copy=copy) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 1bbcba0e4caad..b4f8123979563 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -9031,8 +9031,23 @@ def test_rename(self): index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar']) columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz']) renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns) + + with tm.assert_produces_warning(): + # should raise ValueError in future version + renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, + columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}) + new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')]) + new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')]) + self.assert_numpy_array_equal(renamed.index, new_index) + self.assert_numpy_array_equal(renamed.columns, new_columns) + self.assertEquals(renamed.index.names, renamer.index.names) + self.assertEquals(renamed.columns.names, renamer.columns.names) + + self.assertRaises(ValueError, renamer.rename, index={'foo1': 'foo3', 'bar2': 'bar3'}, + columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}, errors='raise') + renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, - columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}) + columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}, errors='ignore') new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')]) 
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')]) self.assert_numpy_array_equal(renamed.index, new_index) @@ -9040,6 +9055,29 @@ def test_rename(self): self.assertEquals(renamed.index.names, renamer.index.names) self.assertEquals(renamed.columns.names, renamer.columns.names) + # error handling + data = {1: {'A': 0, 'B': 1}, '2': {'C':1, 'D': 2}} + df = DataFrame(data) + + # errors = default + with tm.assert_produces_warning(): + # should raise ValueError in future version + renamed = df.rename(columns={'1': 'One', '2': 'Two'}) + self.assertEqual(renamed.columns.tolist(), [1, 'Two']) + self.assertRaises(TypeError, df.rename, columns=lambda x: x + 1) + + # errors = raise + self.assertRaises(ValueError, df.rename, + columns={'1': 'One', '2': 'Two'}, errors='raise') + self.assertRaises(TypeError, df.rename, columns=lambda x: x + 1, errors='raise') + + # errors = ignore + renamed = df.rename(columns={'1': 'One', '2': 'Two'}, errors='ignore') + self.assertEqual(renamed.columns.tolist(), [1, 'Two']) + + renamed = df.rename(columns=lambda x: x + 1, errors='ignore') + self.assertEqual(renamed.columns.tolist(), [2, '2']) + def test_rename_nocopy(self): renamed = self.frame.rename(columns={'C': 'foo'}, copy=False) renamed['foo'] = 1.
Same background as #6736. Currently, - When a function is passed, any error caused by the function results in `rename` error. - When a dict is passed, label which is not included in the axis will be silently skipped. Even though `drop` raises `ValueError` in such a case. ``` >>> df = pd.DataFrame({1: [1, 2], 'B': pd.to_datetime(['2010-01-01', np.nan])}) >>> renamed_func = df.rename(columns=lambda x: x + 1) TypeError: cannot concatenate 'str' and 'int' objects >>> renamed_dict = df.rename(columns={'B':'C', 'D':'E'}) Index([1, u'C'], dtype='object') ``` I think it is nice if `rename` also has `errors` keyword to: - Suppress error raised by the function and `rename` only non-problematic labels (`errors='ignore'`), or raise error whatever derived from the function (`errors='raise'`). - Suppress error if label is not found in the target axis, and `rename` only non-problematic labels (`errors='ignore'`), or raise error if any of label is not included in the target axis (`errors='raise'`). I feel the default should be `errors='raise'` in the future version based on the other functions behavior. This doesn't affect to the current behavior when a function is passed, but affects to when a dict is passed. In this version, `rename` raise `FutureWarning` for future precaution if it is called with non-existing label. And it is possible to force `rename` to raise `ValueError` in such a case by specifying `errors='raise'`. ``` >>> renamed_func = df.rename(columns=lambda x: x + 1, errors='ignore') Index([2, u'B'], dtype='object') >>> renamed_dict = df.rename(columns={'B':'C', 'D':'E', 'F':'G'}, errors='ignore') Index([1, u'C'], dtype='object') >>> renamed_func = df.rename(columns=lambda x: x + 1, errors='raise') TypeError: cannot concatenate 'str' and 'int' objects >>> renamed_dict = df.rename(columns={'B':'C', 'D':'E', 'F':'G'}, errors='raise') ValueError: labels ['D' 'F'] not contained in axis ```
https://api.github.com/repos/pandas-dev/pandas/pulls/6767
2014-04-02T14:58:15Z
2014-04-05T15:58:05Z
null
2015-11-14T05:27:03Z
BUG: Use `reverse` to check for non-lazy
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 3336c3948fac6..143e47baab465 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1808,8 +1808,8 @@ def test_iteritems(self): for idx, val in compat.iteritems(self.ts): self.assertEqual(val, self.ts[idx]) - # assert is lazy (genrators don't define __getslice__, lists do) - self.assertFalse(hasattr(self.series.iteritems(), '__getslice__')) + # assert is lazy (genrators don't define reverse, lists do) + self.assertFalse(hasattr(self.series.iteritems(), 'reverse')) def test_sum(self): self._check_stat_op('sum', np.sum)
As pointed out at https://github.com/pydata/pandas/pull/6761#issuecomment-39291213, `__getslice__` was deprecated in Py3, so the test was faulty. I switched to checking for `reverse`.
https://api.github.com/repos/pandas-dev/pandas/pulls/6765
2014-04-02T13:28:31Z
2014-04-04T12:34:11Z
2014-04-04T12:34:11Z
2017-05-15T21:15:56Z