title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
BUG/TST: transform and filter on non-unique index, closes #4620
diff --git a/doc/source/release.rst b/doc/source/release.rst index 8a9163075fb9c..cd9f688de152d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -762,6 +762,9 @@ Bug Fixes - Make sure that ``head/tail`` are ``iloc`` based, (:issue:`5370`) - Fixed bug for ``PeriodIndex`` string representation if there are 1 or 2 elements. (:issue:`5372`) + - The GroupBy methods ``transform`` and ``filter`` can be used on Series + and DataFrames that have repeated (non-unique) indices. (:issue:`4620`) + pandas 0.12.0 ------------- diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 4beb6ecf1a63b..668c665613c0d 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -566,6 +566,22 @@ def _concat_objects(self, keys, values, not_indexed_same=False): return result + def _apply_filter(self, indices, dropna): + if len(indices) == 0: + indices = [] + else: + indices = np.sort(np.concatenate(indices)) + if dropna: + filtered = self.obj.take(indices) + else: + mask = np.empty(len(self.obj.index), dtype=bool) + mask.fill(False) + mask[indices.astype(int)] = True + # mask fails to broadcast when passed to where; broadcast manually. + mask = np.tile(mask, list(self.obj.shape[1:]) + [1]).T + filtered = self.obj.where(mask) # Fill with NaNs. 
+ return filtered + @Appender(GroupBy.__doc__) def groupby(obj, by, **kwds): @@ -1585,14 +1601,13 @@ def transform(self, func, *args, **kwargs): group = com.ensure_float(group) object.__setattr__(group, 'name', name) res = wrapper(group) - indexer = self.obj.index.get_indexer(group.index) if hasattr(res,'values'): res = res.values # need to do a safe put here, as the dtype may be different # this needs to be an ndarray result = Series(result) - result.loc[indexer] = res + result.iloc[self.indices[name]] = res result = result.values # downcast if we can (and need) @@ -1630,22 +1645,15 @@ def true_and_notnull(x, *args, **kwargs): return b and notnull(b) try: - indexers = [self.obj.index.get_indexer(group.index) \ - if true_and_notnull(group) else [] \ - for _ , group in self] + indices = [self.indices[name] if true_and_notnull(group) else [] + for name, group in self] except ValueError: raise TypeError("the filter must return a boolean result") except TypeError: raise TypeError("the filter must return a boolean result") - if len(indexers) == 0: - filtered = self.obj.take([]) # because np.concatenate would fail - else: - filtered = self.obj.take(np.sort(np.concatenate(indexers))) - if dropna: - return filtered - else: - return filtered.reindex(self.obj.index) # Fill with NaNs. 
+ filtered = self._apply_filter(indices, dropna) + return filtered class NDFrameGroupBy(GroupBy): @@ -2125,7 +2133,7 @@ def filter(self, func, dropna=True, *args, **kwargs): """ from pandas.tools.merge import concat - indexers = [] + indices = [] obj = self._obj_with_exclusions gen = self.grouper.get_iterator(obj, axis=self.axis) @@ -2146,31 +2154,25 @@ def filter(self, func, dropna=True, *args, **kwargs): else: res = path(group) - def add_indexer(): - indexers.append(self.obj.index.get_indexer(group.index)) + def add_indices(): + indices.append(self.indices[name]) # interpret the result of the filter if isinstance(res,(bool,np.bool_)): if res: - add_indexer() + add_indices() else: if getattr(res,'ndim',None) == 1: val = res.ravel()[0] if val and notnull(val): - add_indexer() + add_indices() else: # in theory you could do .all() on the boolean result ? raise TypeError("the filter must return a boolean result") - if len(indexers) == 0: - filtered = self.obj.take([]) # because np.concatenate would fail - else: - filtered = self.obj.take(np.sort(np.concatenate(indexers))) - if dropna: - return filtered - else: - return filtered.reindex(self.obj.index) # Fill with NaNs. 
+ filtered = self._apply_filter(indices, dropna) + return filtered class DataFrameGroupBy(NDFrameGroupBy): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index f71d7ff9d096b..ca74f46122d88 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -7,8 +7,8 @@ from datetime import datetime from numpy import nan -from pandas import bdate_range -from pandas.core.index import Index, MultiIndex +from pandas import bdate_range, Timestamp +from pandas.core.index import Index, MultiIndex, Int64Index from pandas.core.common import rands from pandas.core.api import Categorical, DataFrame from pandas.core.groupby import SpecificationError, DataError @@ -2801,6 +2801,277 @@ def test_filter_maintains_ordering(self): expected = s.iloc[[1, 2, 4, 7]] assert_series_equal(actual, expected) + def test_filter_and_transform_with_non_unique_int_index(self): + # GH4620 + index = [1, 1, 1, 2, 1, 1, 0, 1] + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}, index=index) + grouped_df = df.groupby('tag') + ser = df['pid'] + grouped_ser = ser.groupby(df['tag']) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + expected = df.copy() + expected.iloc[[0, 3, 5, 6]] = np.nan + assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + NA = np.nan + expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid') + # ^ made manually because this can get confusing! 
+ assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index) + assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + assert_series_equal(actual, expected) + + def test_filter_and_transform_with_multiple_non_unique_int_index(self): + # GH4620 + index = [1, 1, 1, 2, 0, 0, 0, 1] + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}, index=index) + grouped_df = df.groupby('tag') + ser = df['pid'] + grouped_ser = ser.groupby(df['tag']) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + expected = df.copy() + expected.iloc[[0, 3, 5, 6]] = np.nan + assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + NA = np.nan + expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid') + # ^ made manually because this can get confusing! 
+ assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index) + assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + assert_series_equal(actual, expected) + + def test_filter_and_transform_with_non_unique_float_index(self): + # GH4620 + index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float) + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}, index=index) + grouped_df = df.groupby('tag') + ser = df['pid'] + grouped_ser = ser.groupby(df['tag']) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + expected = df.copy() + expected.iloc[[0, 3, 5, 6]] = np.nan + assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + NA = np.nan + expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid') + # ^ made manually because this can get confusing! 
+ assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index) + assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + assert_series_equal(actual, expected) + + def test_filter_and_transform_with_non_unique_float_index(self): + # GH4620 + index = np.array([1, 1, 1, 2, 0, 0, 0, 1], dtype=float) + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}, index=index) + grouped_df = df.groupby('tag') + ser = df['pid'] + grouped_ser = ser.groupby(df['tag']) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + expected = df.copy() + expected.iloc[[0, 3, 5, 6]] = np.nan + assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + NA = np.nan + expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid') + # ^ made manually because this can get confusing! 
+ assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index) + assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + assert_series_equal(actual, expected) + + def test_filter_and_transform_with_non_unique_timestamp_index(self): + # GH4620 + t0 = Timestamp('2013-09-30 00:05:00') + t1 = Timestamp('2013-10-30 00:05:00') + t2 = Timestamp('2013-11-30 00:05:00') + index = [t1, t1, t1, t2, t1, t1, t0, t1] + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}, index=index) + grouped_df = df.groupby('tag') + ser = df['pid'] + grouped_ser = ser.groupby(df['tag']) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + expected = df.copy() + expected.iloc[[0, 3, 5, 6]] = np.nan + assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + NA = np.nan + expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid') + # ^ made manually because this can get confusing! 
+ assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index) + assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + assert_series_equal(actual, expected) + + def test_filter_and_transform_with_non_unique_string_index(self): + # GH4620 + index = list('bbbcbbab') + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}, index=index) + grouped_df = df.groupby('tag') + ser = df['pid'] + grouped_ser = ser.groupby(df['tag']) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + expected = df.copy() + expected.iloc[[0, 3, 5, 6]] = np.nan + assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + NA = np.nan + expected = Series([NA,1,1,NA,2,NA,NA,3], index, name='pid') + # ^ made manually because this can get confusing! 
+ assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index) + assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + assert_series_equal(actual, expected) + + def test_index_label_overlaps_location(self): + # checking we don't have any label/location confusion in the + # the wake of GH5375 + df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1]) + g = df.groupby(list('ababb')) + actual = g.filter(lambda x: len(x) > 2) + expected = df.iloc[[1, 3, 4]] + assert_frame_equal(actual, expected) + + ser = df[0] + g = ser.groupby(list('ababb')) + actual = g.filter(lambda x: len(x) > 2) + expected = ser.take([1, 3, 4]) + assert_series_equal(actual, expected) + + # ... and again, with a generic Index of floats + df.index = df.index.astype(float) + g = df.groupby(list('ababb')) + actual = g.filter(lambda x: len(x) > 2) + expected = df.iloc[[1, 3, 4]] + assert_frame_equal(actual, expected) + + ser = df[0] + g = ser.groupby(list('ababb')) + actual = g.filter(lambda x: len(x) > 2) + expected = ser.take([1, 3, 4]) + assert_series_equal(actual, expected) + def test_groupby_whitelist(self): from string import ascii_lowercase letters = np.array(list(ascii_lowercase))
closes #4620 This is a relatively minor change. Previously, `filter` on `SeriesGroupBy` and `DataFrameGroupby` and `transform` on only `SeriesGroupBy` referred to the index of each group's object. This cannot work with repeated indexes, because some repeated indexes occur between more than one group. Instead, use `{Series, DataFrame}GroupBy.indices` array, an array of locations, not labels. _This was marked for 0.14, but I would really like to start using it._
https://api.github.com/repos/pandas-dev/pandas/pulls/5375
2013-10-29T19:08:50Z
2013-11-01T18:34:36Z
2013-11-01T18:34:35Z
2014-06-18T17:10:16Z
Escape special characters in to_latex() output
diff --git a/doc/source/release.rst b/doc/source/release.rst index a3792ae74b023..f58620020d254 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -87,6 +87,7 @@ Improvements to existing features - perf improvments in ``dtypes/ftypes`` methods (:issue:`5968`) - perf improvments in indexing with object dtypes (:issue:`5968`) - improved dtype inference for ``timedelta`` like passed to constructors (:issue:`5458`,:issue:`5689`) + - escape special characters when writing to latex (:issue: `5374`) .. _release.bug_fixes-0.13.1: diff --git a/pandas/core/format.py b/pandas/core/format.py index 24b0554755ead..fce0ef6a27889 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -468,8 +468,15 @@ def write(buf, frame, column_format, strcols): for i, row in enumerate(zip(*strcols)): if i == nlevels: buf.write('\\midrule\n') # End of header - crow = [(x.replace('_', '\\_') + crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first + .replace('_', '\\_') .replace('%', '\\%') + .replace('$', '\\$') + .replace('#', '\\#') + .replace('{', '\\{') + .replace('}', '\\}') + .replace('~', '\\textasciitilde') + .replace('^', '\\textasciicircum') .replace('&', '\\&') if x else '{}') for x in row] buf.write(' & '.join(crow)) buf.write(' \\\\\n') diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index a9855c4e73c6e..d0c783725f8bb 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1654,6 +1654,30 @@ def test_to_latex(self): \end{tabular} """ self.assertEqual(withoutindex_result, withoutindex_expected) + + def test_to_latex_escape_special_chars(self): + special_characters = ['&','%','$','#','_', + '{','}','~','^','\\'] + df = DataFrame(data=special_characters) + observed = df.to_latex() + expected = r"""\begin{tabular}{ll} +\toprule +{} & 0 \\ +\midrule +0 & \& \\ +1 & \% \\ +2 & \$ \\ +3 & \# \\ +4 & \_ \\ +5 & \{ \\ +6 & \} \\ +7 & \textasciitilde \\ +8 & \textasciicircum \\ +9 & \textbackslash \\ 
+\bottomrule +\end{tabular} +""" + self.assertEqual(observed, expected) class TestSeriesFormatting(tm.TestCase): _multiprocess_can_split_ = True
Some characters have special meaning in latex: & % $ # _ { } ~ ^ . When a dataframe contains a special character (as part of an index label, for example), to_latex() spits out invalid latex.
https://api.github.com/repos/pandas-dev/pandas/pulls/5374
2013-10-29T14:12:05Z
2014-01-21T21:06:58Z
2014-01-21T21:06:58Z
2014-06-18T14:06:06Z
BUG: Make sure that head/tail are iloc based, (GH5370)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 77d86b8a7a9f1..0a901d1ff044c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -756,6 +756,7 @@ Bug Fixes - Test suite no longer leaves around temporary files when testing graphics. (:issue:`5347`) (thanks for catching this @yarikoptic!) - Fixed html tests on win32. (:issue:`4580`) + - Make sure that ``head/tail`` are ``iloc`` based, (:issue:`5370`) pandas 0.12.0 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2361c6920985b..0a5306de9bbb5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3085,16 +3085,6 @@ def last_valid_index(self): """ return self.index[self.count(1) > 0][-1] - def head(self, n=5): - """Returns first n rows of DataFrame - """ - return self[:n] - - def tail(self, n=5): - """Returns last n rows of DataFrame - """ - return self[-n:] - #---------------------------------------------------------------------- # Data reshaping diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b230df7483760..b5e526e42a547 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1461,6 +1461,24 @@ def filter(self, items=None, like=None, regex=None, axis=None): else: raise TypeError('Must pass either `items`, `like`, or `regex`') + def head(self, n=5): + """ + Returns first n rows + """ + l = len(self) + if abs(n) > l: + n = l if n > 0 else -l + return self.iloc[:n] + + def tail(self, n=5): + """ + Returns last n rows + """ + l = len(self) + if abs(n) > l: + n = l if n > 0 else -l + return self.iloc[-n:] + #---------------------------------------------------------------------- # Attribute access diff --git a/pandas/core/panel.py b/pandas/core/panel.py index d4ba7dd4e708a..6b50bfb76a3ea 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -581,6 +581,12 @@ def conform(self, frame, axis='items'): axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes)) + def head(self, n=5): + raise 
NotImplementedError + + def tail(self, n=5): + raise NotImplementedError + def _needs_reindex_multi(self, axes, method, level): # only allowing multi-index on Panel (and not > dims) return method is None and not self._is_mixed_type and self._AXIS_LEN <= 3 and com._count_not_none(*axes.values()) == 3 diff --git a/pandas/core/series.py b/pandas/core/series.py index 699dc9b31464e..d3cc53d0bc9fc 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1058,16 +1058,6 @@ def to_sparse(self, kind='block', fill_value=None): return SparseSeries(self, kind=kind, fill_value=fill_value).__finalize__(self) - def head(self, n=5): - """Returns first n rows of Series - """ - return self[:n] - - def tail(self, n=5): - """Returns last n rows of Series - """ - return self[-n:] - #---------------------------------------------------------------------- # Statistics, overridden ndarray methods diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 12b960ad376ff..b73c7cdbb8f87 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4261,6 +4261,12 @@ def test_head_tail(self): assert_frame_equal(self.frame.head(), self.frame[:5]) assert_frame_equal(self.frame.tail(), self.frame[-5:]) + # with a float index + df = self.frame.copy() + df.index = np.arange(len(self.frame)) + 0.1 + assert_frame_equal(df.head(), df.iloc[:5]) + assert_frame_equal(df.tail(), df.iloc[-5:]) + def test_insert(self): df = DataFrame(np.random.randn(5, 3), index=np.arange(5), columns=['c', 'b', 'a']) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index fdeb61f09cadb..cf9b2d174faea 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -317,6 +317,39 @@ def test_metadata_propagation(self): except (ValueError): pass + def test_head_tail(self): + # GH5370 + + o = self._construct(shape=10) + + # check all index types + for index in [ tm.makeFloatIndex, tm.makeIntIndex, + tm.makeStringIndex, tm.makeUnicodeIndex, + 
tm.makeDateIndex, tm.makePeriodIndex ]: + axis = o._get_axis_name(0) + setattr(o,axis,index(len(getattr(o,axis)))) + + # Panel + dims + try: + o.head() + except (NotImplementedError): + raise nose.SkipTest('not implemented on {0}'.format(o.__class__.__name__)) + + self._compare(o.head(), o.iloc[:5]) + self._compare(o.tail(), o.iloc[-5:]) + + # 0-len + self._compare(o.head(0), o.iloc[:0]) + self._compare(o.tail(0), o.iloc[0:]) + + # bounded + self._compare(o.head(len(o)+1), o) + self._compare(o.tail(len(o)+1), o) + + # neg index + self._compare(o.head(-3), o.head(7)) + self._compare(o.tail(-3), o.tail(7)) + class TestSeries(unittest.TestCase, Generic): _typ = Series _comparator = lambda self, x, y: assert_series_equal(x,y)
closes #5370
https://api.github.com/repos/pandas-dev/pandas/pulls/5373
2013-10-29T12:43:57Z
2013-10-29T18:33:10Z
2013-10-29T18:33:10Z
2014-07-01T05:46:01Z
TST: Better handle np.array_equal() edge cases
diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx index b324c6652d58f..404b97879e2be 100644 --- a/pandas/src/testing.pyx +++ b/pandas/src/testing.pyx @@ -82,10 +82,12 @@ cpdef assert_almost_equal(a, b, bint check_less_precise=False): assert na == nb, ( "Length of two iterators not the same: %r != %r" % (na, nb) ) - if (isinstance(a, np.ndarray) and - isinstance(b, np.ndarray) and - np.array_equal(a, b)): - return True + if isinstance(a, np.ndarray) and isinstance(b, np.ndarray): + try: + if np.array_equal(a, b): + return True + except: + pass else: for i in xrange(na): assert_almost_equal(a[i], b[i], check_less_precise) diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py index fa295838d47e9..8431d91a8fff6 100644 --- a/pandas/tests/test_testing.py +++ b/pandas/tests/test_testing.py @@ -48,6 +48,12 @@ def test_assert_almost_equal_numbers_with_mixed(self): self._assert_not_almost_equal_both(1, [1,]) self._assert_not_almost_equal_both(1, object()) + def test_assert_almost_equal_edge_case_ndarrays(self): + self._assert_almost_equal_both(np.array([], dtype='M8[ns]'), + np.array([], dtype='float64')) + self._assert_almost_equal_both(np.array([], dtype=str), + np.array([], dtype='int64')) + def test_assert_almost_equal_dicts(self): self._assert_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 2}) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 413b5f8426d71..f40a8e1a5a9d6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -411,9 +411,9 @@ def assert_series_equal(left, right, check_dtype=True, check_less_precise=False): if check_series_type: assert_isinstance(left, type(right)) - assert_almost_equal(left.values, right.values, check_less_precise) if check_dtype: assert_attr_equal('dtype', left, right) + assert_almost_equal(left.values, right.values, check_less_precise) if check_less_precise: assert_almost_equal( left.index.values, right.index.values, check_less_precise)
https://api.github.com/repos/pandas-dev/pandas/pulls/5371
2013-10-29T03:45:56Z
2013-10-29T22:51:12Z
2013-10-29T22:51:12Z
2014-07-16T08:37:47Z
ENH: HDFStore.flush() to optionally perform fsync (GH5364)
diff --git a/doc/source/io.rst b/doc/source/io.rst index 90bb762f1a1ba..9fd2ea1eb83ec 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2745,6 +2745,9 @@ Notes & Caveats need to serialize these operations in a single thread in a single process. You will corrupt your data otherwise. See the issue (:`2397`) for more information. + - If you use locks to manage write access between multiple processes, you + may want to use :py:func:`~os.fsync` before releasing write locks. For + convenience you can use ``store.flush(fsync=True)`` to do this for you. - ``PyTables`` only supports fixed-width string columns in ``tables``. The sizes of a string based indexing column (e.g. *columns* or *minor_axis*) are determined as the maximum size diff --git a/doc/source/release.rst b/doc/source/release.rst index 49de8dddd7210..eaf1b601b2d3c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -275,6 +275,8 @@ API Changes - store `datetime.date` objects as ordinals rather then timetuples to avoid timezone issues (:issue:`2852`), thanks @tavistmorph and @numpand - ``numexpr`` 2.2.2 fixes incompatiblity in PyTables 2.4 (:issue:`4908`) + - ``flush`` now accepts an ``fsync`` parameter, which defaults to ``False`` + (:issue:`5364`) - ``JSON`` - added ``date_unit`` parameter to specify resolution of timestamps. diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 05528d5c0d407..5919589978903 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -10,6 +10,7 @@ import copy import itertools import warnings +import os import numpy as np from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index, @@ -525,12 +526,26 @@ def is_open(self): return False return bool(self._handle.isopen) - def flush(self): + def flush(self, fsync=False): """ - Force all buffered modifications to be written to disk + Force all buffered modifications to be written to disk. 
+ + Parameters + ---------- + fsync : bool (default False) + call ``os.fsync()`` on the file handle to force writing to disk. + + Notes + ----- + Without ``fsync=True``, flushing may not guarantee that the OS writes + to disk. With fsync, the operation will block until the OS claims the + file has been written; however, other caching layers may still + interfere. """ if self._handle is not None: self._handle.flush() + if fsync: + os.fsync(self._handle.fileno()) def get(self, key): """ @@ -4072,5 +4087,4 @@ def timeit(key, df, fn=None, remove=True, **kwargs): store.close() if remove: - import os os.remove(fn) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index fe60352845316..a08073bd7bd35 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -465,6 +465,7 @@ def test_flush(self): with ensure_clean(self.path) as store: store['a'] = tm.makeTimeSeries() store.flush() + store.flush(fsync=True) def test_get(self):
Pull request for #5364. The `fsync=False` parameter was added to `flush` given this offers utility for non-distribution use cases, such as a user just wanting to ensure their data is regularly flushed to disk. I could not see any mocking library in use in the Pandas tests, so I skipped adding one just to verify `os.fsync` actually gets called.
https://api.github.com/repos/pandas-dev/pandas/pulls/5369
2013-10-28T23:59:11Z
2013-10-29T20:28:40Z
2013-10-29T20:28:40Z
2014-06-16T19:17:25Z
BUG: downcasting is now more robust (related GH5174)
diff --git a/pandas/core/common.py b/pandas/core/common.py index d9e8f4164adb4..d05c3dbafdee6 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1022,6 +1022,7 @@ def _possibly_downcast_to_dtype(result, dtype): if np.isscalar(result) or not len(result): return result + trans = lambda x: x if isinstance(dtype, compat.string_types): if dtype == 'infer': inferred_type = lib.infer_dtype(_ensure_object(result.ravel())) @@ -1037,6 +1038,7 @@ def _possibly_downcast_to_dtype(result, dtype): # try to upcast here elif inferred_type == 'floating': dtype = 'int64' + trans = lambda x: x.round() else: dtype = 'object' @@ -1058,7 +1060,7 @@ def _possibly_downcast_to_dtype(result, dtype): # do a test on the first element, if it fails then we are done r = result.ravel() arr = np.array([ r[0] ]) - if not np.allclose(arr,arr.astype(dtype)): + if not np.allclose(arr,trans(arr).astype(dtype)): return result # a comparable, e.g. a Decimal may slip in here @@ -1066,7 +1068,7 @@ def _possibly_downcast_to_dtype(result, dtype): return result if issubclass(result.dtype.type, (np.object_,np.number)) and notnull(result).all(): - new_result = result.astype(dtype) + new_result = trans(result).astype(dtype) try: if np.allclose(new_result,result): return new_result diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 68195fb3d6ec5..3fd40062e1459 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -118,6 +118,22 @@ def test_isnull_datetime(): assert(mask[0]) assert(not mask[1:].any()) +def test_downcast_conv(): + # test downcasting + + arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]) + result = com._possibly_downcast_to_dtype(arr, 'infer') + assert (np.array_equal(result, arr)) + + arr = np.array([8., 8., 8., 8., 8.9999999999995]) + result = com._possibly_downcast_to_dtype(arr, 'infer') + expected = np.array([8, 8, 8, 8, 9]) + assert (np.array_equal(result, expected)) + + arr = np.array([8., 8., 8., 8., 9.0000000000005]) + 
result = com._possibly_downcast_to_dtype(arr, 'infer') + expected = np.array([8, 8, 8, 8, 9]) + assert (np.array_equal(result, expected)) def test_datetimeindex_from_empty_datetime64_array(): for unit in [ 'ms', 'us', 'ns' ]: diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index a6e0dea2c4e22..fdeb61f09cadb 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -478,9 +478,9 @@ def test_nan_str_index(self): def test_interp_quad(self): _skip_if_no_scipy() sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) - result = sq.interpolate(method='quadratic', downcast=False) - expected = Series([1., 4, 9, 16], index=[1, 2, 3, 4]) - assert_series_equal(result, expected, check_less_precise=True) + result = sq.interpolate(method='quadratic') + expected = Series([1, 4, 9, 16], index=[1, 2, 3, 4]) + assert_series_equal(result, expected) def test_interp_scipy_basic(self): _skip_if_no_scipy() @@ -659,7 +659,7 @@ def test_interp_alt_scipy(self): expected = df.copy() expected['A'].iloc[2] = 3 expected['A'].iloc[5] = 6 - assert_frame_equal(result, expected) + assert_frame_equal(result, expected.astype(np.int64)) result = df.interpolate(method='krogh') expectedk = df.copy()
related #5174
https://api.github.com/repos/pandas-dev/pandas/pulls/5368
2013-10-28T22:58:08Z
2013-10-28T23:55:55Z
2013-10-28T23:55:55Z
2014-07-16T08:37:41Z
fix: to_datetime returns KeyError: 'p' when %p specified in format string (GH5361)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index b8144c2b5eab9..7fbb908265476 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -816,62 +816,6 @@ def test_dayfirst(self): #### to_datetime('01-13-2012', dayfirst=True) #### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True)) - def test_to_datetime_format(self): - values = ['1/1/2000', '1/2/2000', '1/3/2000'] - - results1 = [ Timestamp('20000101'), Timestamp('20000201'), - Timestamp('20000301') ] - results2 = [ Timestamp('20000101'), Timestamp('20000102'), - Timestamp('20000103') ] - for vals, expecteds in [ (values, (Index(results1), Index(results2))), - (Series(values),(Series(results1), Series(results2))), - (values[0], (results1[0], results2[0])), - (values[1], (results1[1], results2[1])), - (values[2], (results1[2], results2[2])) ]: - - for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']): - result = to_datetime(vals, format=fmt) - expected = expecteds[i] - - if isinstance(expected, Series): - assert_series_equal(result, Series(expected)) - elif isinstance(expected, Timestamp): - self.assert_(result == expected) - else: - self.assert_(result.equals(expected)) - - def test_to_datetime_format_YYYYMMDD(self): - s = Series([19801222,19801222] + [19810105]*5) - expected = Series([ Timestamp(x) for x in s.apply(str) ]) - - result = to_datetime(s,format='%Y%m%d') - assert_series_equal(result, expected) - - result = to_datetime(s.apply(str),format='%Y%m%d') - assert_series_equal(result, expected) - - # with NaT - expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5) - expected[2] = np.nan - s[2] = np.nan - - result = to_datetime(s,format='%Y%m%d') - assert_series_equal(result, expected) - - # string with NaT - s = s.apply(str) - s[2] = 'nat' - result = to_datetime(s,format='%Y%m%d') - assert_series_equal(result, expected) - - - def 
test_to_datetime_format_microsecond(self): - val = '01-Apr-2011 00:00:01.978' - format = '%d-%b-%Y %H:%M:%S.%f' - result = to_datetime(val, format=format) - exp = dt.datetime.strptime(val, format) - self.assert_(result == exp) - def test_to_datetime_on_datetime64_series(self): # #2699 s = Series(date_range('1/1/2000', periods=10)) @@ -3053,6 +2997,82 @@ def test_date_range_fy5252(self): self.assertEqual(dr[0], Timestamp('2013-01-31')) self.assertEqual(dr[1], Timestamp('2014-01-30')) +class TimeConversionFormats(unittest.TestCase): + def test_to_datetime_format(self): + values = ['1/1/2000', '1/2/2000', '1/3/2000'] + + results1 = [ Timestamp('20000101'), Timestamp('20000201'), + Timestamp('20000301') ] + results2 = [ Timestamp('20000101'), Timestamp('20000102'), + Timestamp('20000103') ] + for vals, expecteds in [ (values, (Index(results1), Index(results2))), + (Series(values),(Series(results1), Series(results2))), + (values[0], (results1[0], results2[0])), + (values[1], (results1[1], results2[1])), + (values[2], (results1[2], results2[2])) ]: + + for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']): + result = to_datetime(vals, format=fmt) + expected = expecteds[i] + + if isinstance(expected, Series): + assert_series_equal(result, Series(expected)) + elif isinstance(expected, Timestamp): + self.assert_(result == expected) + else: + self.assert_(result.equals(expected)) + + def test_to_datetime_format_YYYYMMDD(self): + s = Series([19801222,19801222] + [19810105]*5) + expected = Series([ Timestamp(x) for x in s.apply(str) ]) + + result = to_datetime(s,format='%Y%m%d') + assert_series_equal(result, expected) + + result = to_datetime(s.apply(str),format='%Y%m%d') + assert_series_equal(result, expected) + + # with NaT + expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5) + expected[2] = np.nan + s[2] = np.nan + + result = to_datetime(s,format='%Y%m%d') + assert_series_equal(result, expected) + + # string with NaT + s = s.apply(str) 
+ s[2] = 'nat' + result = to_datetime(s,format='%Y%m%d') + assert_series_equal(result, expected) + + + def test_to_datetime_format_microsecond(self): + val = '01-Apr-2011 00:00:01.978' + format = '%d-%b-%Y %H:%M:%S.%f' + result = to_datetime(val, format=format) + exp = dt.datetime.strptime(val, format) + self.assert_(result == exp) + + def test_to_datetime_format_time(self): + data = [ + ['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')], + ['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')], + ['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#, + #['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')], + #['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')], + #['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')] + ] + for s, format, dt in data: + self.assertEqual(to_datetime(s, format=format), dt) + + def test_to_datetime_format_weeks(self): + data = [ + ['2009324', '%Y%W%w', Timestamp('2009-08-13')], + ['2013020', '%Y%U%w', Timestamp('2013-01-13')] + ] + for s, format, dt in data: + self.assertEqual(to_datetime(s, format=format), dt) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 11b86db8b8d92..a8b6ec2487a77 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1247,7 +1247,8 @@ def array_strptime(ndarray[object] values, object fmt, coerce=False): 'j': 14, 'U': 15, 'W': 16, - 'Z': 17 + 'Z': 17, + 'p': 18 # just an additional key, works only with I } cdef int parse_code
closes https://github.com/pydata/pandas/issues/5361
https://api.github.com/repos/pandas-dev/pandas/pulls/5366
2013-10-28T21:04:33Z
2014-01-24T22:16:24Z
null
2014-06-20T18:09:45Z
TST: interpolate precision inference
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index b3e216526d0f6..a6e0dea2c4e22 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -478,9 +478,9 @@ def test_nan_str_index(self): def test_interp_quad(self): _skip_if_no_scipy() sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) - result = sq.interpolate(method='quadratic') - expected = Series([1, 4, 9, 16], index=[1, 2, 3, 4]) - assert_series_equal(result, expected) + result = sq.interpolate(method='quadratic', downcast=False) + expected = Series([1., 4, 9, 16], index=[1, 2, 3, 4]) + assert_series_equal(result, expected, check_less_precise=True) def test_interp_scipy_basic(self): _skip_if_no_scipy()
closes #5174
https://api.github.com/repos/pandas-dev/pandas/pulls/5362
2013-10-28T18:26:15Z
2013-10-28T19:52:51Z
2013-10-28T19:52:51Z
2016-11-03T12:37:33Z
TST/CLN: Only print on test failures.
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index 1f7df9894a97d..46f4355a5a1f3 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -635,8 +635,11 @@ def test_corr_sanity(self): for i in range(10): df = DataFrame(np.random.rand(30,2)) res = mom.rolling_corr(df[0],df[1],5,center=True) - print( res) - self.assertTrue(all([np.abs(np.nan_to_num(x)) <=1 for x in res])) + try: + self.assertTrue(all([np.abs(np.nan_to_num(x)) <=1 for x in res])) + except: + print(res) + def test_flex_binary_frame(self): def _check(method):
Don't pollute output.
https://api.github.com/repos/pandas-dev/pandas/pulls/5360
2013-10-28T03:46:45Z
2013-10-28T04:00:35Z
2013-10-28T04:00:35Z
2014-07-16T08:37:37Z
TST: Make sure files are removed in test_graphics
diff --git a/doc/source/release.rst b/doc/source/release.rst index f23852885668a..49de8dddd7210 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -753,6 +753,8 @@ Bug Fixes - Fix return value/type signature of ``initObjToJSON()`` to be compatible with numpy's ``import_array()`` (:issue:`5334`, :issue:`5326`) - Bug when renaming then set_index on a DataFrame (:issue:`5344`) + - Test suite no longer leaves around temporary files when testing graphics. (:issue:`5347`) + (thanks for catching this @yarikoptic!) pandas 0.12.0 ------------- diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index be18f0bd5cf89..8f48c2d5951f2 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1163,7 +1163,7 @@ def _check_plot_works(f, *args, **kwargs): else: assert_is_valid_plot_return_object(ret) - with ensure_clean() as path: + with ensure_clean(return_filelike=True) as path: plt.savefig(path) finally: tm.close(fig) diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index a5e249b77fa52..233c9f249ab38 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -943,7 +943,7 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): except Exception: pass - with ensure_clean() as path: + with ensure_clean(return_filelike=True) as path: plt.savefig(path) finally: plt.close(fig) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index be6f593da2043..413b5f8426d71 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -306,18 +306,39 @@ def set_trace(): @contextmanager -def ensure_clean(filename=None): - # if we are not passed a filename, generate a temporary - if filename is None: - filename = tempfile.mkstemp()[1] +def ensure_clean(filename=None, return_filelike=False): + """Gets a temporary path and agrees to remove on close. 
- try: - yield filename - finally: + Parameters + ---------- + filename : str (optional) + if None, creates a temporary file which is then removed when out of + scope. + return_filelike: bool (default False) + if True, returns a file-like which is *always* cleaned. Necessary for + savefig and other functions which want to append extensions. Ignores + filename if True. + """ + + if return_filelike: + f = tempfile.TemporaryFile() try: - os.remove(filename) - except: - pass + yield f + finally: + f.close() + + else: + # if we are not passed a filename, generate a temporary + if filename is None: + filename = tempfile.mkstemp()[1] + + try: + yield filename + finally: + try: + os.remove(filename) + except Exception as e: + print(e) def get_data_path(f=''):
plt.savefig() appends an extension by default, which messes up ensure_clean(). Added option to ensure_clean() to just return file-like. Closes #5347.
https://api.github.com/repos/pandas-dev/pandas/pulls/5359
2013-10-28T03:10:35Z
2013-10-28T03:26:02Z
2013-10-28T03:26:02Z
2014-07-16T08:37:36Z
BUG/ENH: Allow drop to accept axis name
diff --git a/doc/source/release.rst b/doc/source/release.rst index c544396e4c73b..5ca602cc1cc0f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -189,6 +189,8 @@ Improvements to existing features right-open fashion using the ``closed`` parameter (:issue:`4579`) - Python csv parser now supports usecols (:issue:`4335`) - Added support for Google Analytics v3 API segment IDs that also supports v2 IDs. (:issue:`5271`) + - ``NDFrame.drop()`` now accepts names as well as integers for the axis + argument. (:issue:`5354`) API Changes ~~~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3259522e20926..704c41819ae71 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1093,7 +1093,7 @@ def drop(self, labels, axis=0, level=None): Parameters ---------- labels : single label or list-like - axis : int + axis : int or axis name level : int or name, default None For MultiIndex @@ -1101,6 +1101,7 @@ def drop(self, labels, axis=0, level=None): ------- dropped : type of caller """ + axis = self._get_axis_number(axis) axis_name = self._get_axis_name(axis) axis, axis_ = self._get_axis(axis), axis diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 2d214a2abe867..6c7877a8d6e4a 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6734,6 +6734,25 @@ def test_drop_col_still_multiindex(self): del df[('a', '', '')] assert(isinstance(df.columns, MultiIndex)) + def test_drop(self): + simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}) + assert_frame_equal(simple.drop("A", axis=1), simple[['B']]) + assert_frame_equal(simple.drop(["A", "B"], axis='columns'), + simple[[]]) + assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :]) + assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :]) + + #non-unique - wheee! 
+ nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')), + columns=['a', 'a', 'b']) + assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']]) + assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a']) + + nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X'])) + nu_df.columns = list('abc') + assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :]) + assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :]) + def test_fillna(self): self.tsframe['A'][:5] = nan self.tsframe['A'][-5:] = nan diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index d2da403907280..39ceba7469f36 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1624,6 +1624,43 @@ def test_dropna(self): exp = p.ix[['a', 'c', 'd']] assert_panel_equal(result, exp) + def test_drop(self): + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + panel = Panel({"One": df, "Two": df}) + + def check_drop(drop_val, axis_number, aliases, expected): + try: + actual = panel.drop(drop_val, axis=axis_number) + assert_panel_equal(actual, expected) + for alias in aliases: + actual = panel.drop(drop_val, axis=alias) + assert_panel_equal(actual, expected) + except AssertionError: + print("Failed with axis_number %d and aliases: %s" % + (axis_number, aliases)) + raise + # Items + expected = Panel({"One": df}) + check_drop('Two', 0, ['items'], expected) + + # Major + exp_df = DataFrame({"A": [2], "B": [4]}, index=[1]) + expected = Panel({"One": exp_df, "Two": exp_df}) + check_drop(0, 1, ['major_axis', 'major'], expected) + + exp_df = DataFrame({"A": [1], "B": [3]}, index=[0]) + expected = Panel({"One": exp_df, "Two": exp_df}) + check_drop([1], 1, ['major_axis', 'major'], expected) + + # Minor + exp_df = df[['B']] + expected = Panel({"One": exp_df, "Two": exp_df}) + check_drop(["A"], 2, ['minor_axis', 'minor'], expected) + + exp_df = df[['A']] + expected = Panel({"One": exp_df, "Two": exp_df}) + check_drop("B", 2, ['minor_axis', 'minor'], expected) + def 
test_update(self): pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6abfce31c4870..ad6fa68f063e1 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1429,14 +1429,14 @@ def test_drop(self): expected = Series([1],index=['one']) result = s.drop(['two']) assert_series_equal(result,expected) - result = s.drop('two') + result = s.drop('two', axis='rows') assert_series_equal(result,expected) # non-unique # GH 5248 s = Series([1,1,2],index=['one','two','one']) expected = Series([1,2],index=['one','one']) - result = s.drop(['two']) + result = s.drop(['two'], axis=0) assert_series_equal(result,expected) result = s.drop('two') assert_series_equal(result,expected) @@ -1452,6 +1452,9 @@ def test_drop(self): self.assertRaises(ValueError, s.drop, 'bc') self.assertRaises(ValueError, s.drop, ('a',)) + # bad axis + self.assertRaises(ValueError, s.drop, 'one', axis='columns') + def test_ix_setitem(self): inds = self.series.index[[3, 4, 7]]
Still need to add tests for DataFrame and Series, but should work. Very simple fix to boot :). Finally closes #5044
https://api.github.com/repos/pandas-dev/pandas/pulls/5354
2013-10-28T00:09:50Z
2013-10-28T01:14:38Z
2013-10-28T01:14:38Z
2014-06-14T15:19:38Z
CLN: Add axis kwarg to Series wrapper for compat
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 7fad806e43af3..0c647bb6ee7eb 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -663,7 +663,9 @@ def _flex_method_SERIES(op, name, str_rep=None, default_axis=None, """ % name @Appender(doc) - def f(self, other, level=None, fill_value=None): + def flex_wrapper(self, other, level=None, fill_value=None, axis=0): + # validate axis + self._get_axis_number(axis) if isinstance(other, pd.Series): return self._binop(other, op, level=level, fill_value=fill_value) elif isinstance(other, (pa.Array, pd.Series, list, tuple)): @@ -675,8 +677,8 @@ def f(self, other, level=None, fill_value=None): return self._constructor(op(self.values, other), self.index).__finalize__(self) - f.__name__ = name - return f + flex_wrapper.__name__ = name + return flex_wrapper series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES, radd_func=_radd_compat, diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index ed1df4f0bd8d6..6abfce31c4870 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3241,21 +3241,31 @@ def _check_fill(meth, op, a, b, fill_value=0): a = Series([nan, 1., 2., 3., nan], index=np.arange(5)) b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6)) - ops = [Series.add, Series.sub, Series.mul, Series.pow, - Series.truediv, Series.div] - equivs = [operator.add, operator.sub, operator.mul, operator.pow, - operator.truediv] + pairings = [] + for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']: + fv = 0 + lop = getattr(Series, op) + lequiv = getattr(operator, op) + rop = getattr(Series, 'r' + op) + # bind op at definition time... 
+ requiv = lambda x, y, op=op: getattr(operator, op)(y, x) + pairings.append((lop, lequiv, fv)) + pairings.append((rop, requiv, fv)) + if compat.PY3: - equivs.append(operator.truediv) + pairings.append((Series.div, operator.truediv, 1)) + pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1)) else: - equivs.append(operator.div) - fillvals = [0, 0, 1, 1] + pairings.append((Series.div, operator.div, 1)) + pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1)) - for op, equiv_op, fv in zip(ops, equivs, fillvals): + for op, equiv_op, fv in pairings: result = op(a, b) exp = equiv_op(a, b) assert_series_equal(result, exp) _check_fill(op, equiv_op, a, b, fill_value=fv) + # should accept axis=0 or axis='rows' + op(a, b, axis=0) def test_combine_first(self): values = tm.makeIntIndex(20).values.astype(float)
Now flex methods can take axis=0/axis='rows'/axis='index'. Also updated tests to cover full range of Series flex ops and check that they all accept an axis argument.
https://api.github.com/repos/pandas-dev/pandas/pulls/5352
2013-10-27T22:27:17Z
2013-10-28T00:48:46Z
2013-10-28T00:48:46Z
2014-07-16T08:37:31Z
DOC: Panel *xs() docstring indicates wrong default for copy
diff --git a/pandas/core/panel.py b/pandas/core/panel.py index f35070c634aa1..9235b119d0d17 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -702,7 +702,7 @@ def major_xs(self, key, copy=True): ---------- key : object Major axis label - copy : boolean, default False + copy : boolean, default True Copy data Returns @@ -720,7 +720,7 @@ def minor_xs(self, key, copy=True): ---------- key : object Minor axis label - copy : boolean, default False + copy : boolean, default True Copy data Returns @@ -739,6 +739,8 @@ def xs(self, key, axis=1, copy=True): key : object Label axis : {'items', 'major', 'minor}, default 1/'major' + copy : boolean, default True + Copy data Returns -------
Also the copy param in Panel.xs was undocumented. Intentional? Worth a line in release.rst?
https://api.github.com/repos/pandas-dev/pandas/pulls/5350
2013-10-27T20:17:41Z
2013-11-21T14:06:31Z
2013-11-21T14:06:31Z
2016-11-03T12:37:32Z
BUG: Bug when renaming then set_index on a DataFrame (GH5344)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b74b23029a2ac..d4764dbe81a9a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -642,6 +642,7 @@ Bug Fixes related (:issue:`5312`) - Fix return value/type signature of ``initObjToJSON()`` to be compatible with numpy's ``import_array()`` (:issue:`5334`, :issue:`5326`) + - Bug when renaming then set_index on a DataFrame (:issue:`5344`) pandas 0.12.0 ------------- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index c327458e00a07..62aa95d270924 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1992,6 +1992,9 @@ def _set_ref_locs(self, labels=None, do_refs=False): self._ref_locs = rl return rl + elif do_refs: + self._reset_ref_locs() + # return our cached _ref_locs (or will compute again # when we recreate the block manager if needed return getattr(self, '_ref_locs', None) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6aa4322234d7f..704493308f960 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -8551,6 +8551,19 @@ def test_rename_inplace(self): self.assert_('foo' in frame) self.assert_(id(frame['foo']) != c_id) + def test_rename_bug(self): + # GH 5344 + # rename set ref_locs, and set_index was not resetting + df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]}) + df = df.rename(columns={0 : 'a'}) + df = df.rename(columns={1 : 'b'}) + df = df.set_index(['a','b']) + df.columns = ['2001-01-01'] + expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')], + names=['a','b']), + columns=['2001-01-01']) + assert_frame_equal(df,expected) + #---------------------------------------------------------------------- # Time series related def test_diff(self):
closes #5344
https://api.github.com/repos/pandas-dev/pandas/pulls/5345
2013-10-27T00:36:05Z
2013-10-27T00:45:07Z
2013-10-27T00:45:07Z
2014-06-17T11:24:24Z
ENH: allow in-line expression assignment with df.eval
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index e59cb6ac30964..4e9e62a2f0e3e 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -441,18 +441,27 @@ The ``DataFrame.eval`` method (Experimental) In addition to the top level :func:`~pandas.eval` function you can also evaluate an expression in the "context" of a ``DataFrame``. - .. ipython:: python df = DataFrame(randn(5, 2), columns=['a', 'b']) df.eval('a + b') - Any expression that is a valid :func:`~pandas.eval` expression is also a valid ``DataFrame.eval`` expression, with the added benefit that *you don't have to prefix the name of the* ``DataFrame`` *to the column you're interested in evaluating*. +In addition, you can perform in-line assignment of columns within an expression. +This can allow for *formulaic evaluation*. Only a signle assignement is permitted. +It can be a new column name or an existing column name. It must be a string-like. + +.. ipython:: python + + df = DataFrame(dict(a = range(5), b = range(5,10))) + df.eval('c=a+b') + df.eval('d=a+b+c') + df.eval('a=1') + df Local Variables ~~~~~~~~~~~~~~~ diff --git a/doc/source/release.rst b/doc/source/release.rst index b74b23029a2ac..cfb47873863b8 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -72,7 +72,8 @@ Experimental Features ``numexpr`` behind the scenes. This results in large speedups for complicated expressions involving large DataFrames/Series. - :class:`~pandas.DataFrame` has a new :meth:`~pandas.DataFrame.eval` that - evaluates an expression in the context of the ``DataFrame``. + evaluates an expression in the context of the ``DataFrame``; allows + inline expression assignment - A :meth:`~pandas.DataFrame.query` method has been added that allows you to select elements of a ``DataFrame`` using a natural query syntax nearly identical to Python syntax. 
diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py index 36b1e2bc96090..163477b258e15 100644 --- a/pandas/computation/eval.py +++ b/pandas/computation/eval.py @@ -113,7 +113,8 @@ def _convert_expression(expr): def eval(expr, parser='pandas', engine='numexpr', truediv=True, - local_dict=None, global_dict=None, resolvers=None, level=2): + local_dict=None, global_dict=None, resolvers=None, level=2, + target=None): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, @@ -169,6 +170,8 @@ def eval(expr, parser='pandas', engine='numexpr', truediv=True, level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. + target : a target object for assignment, optional, default is None + essentially this is a passed in resolver Returns ------- @@ -194,7 +197,7 @@ def eval(expr, parser='pandas', engine='numexpr', truediv=True, # get our (possibly passed-in) scope env = _ensure_scope(global_dict=global_dict, local_dict=local_dict, - resolvers=resolvers, level=level) + resolvers=resolvers, level=level, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) @@ -203,4 +206,10 @@ def eval(expr, parser='pandas', engine='numexpr', truediv=True, eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() + + # assign if needed + if env.target is not None and parsed_expr.assigner is not None: + env.target[parsed_expr.assigner] = ret + return None + return ret diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py index ba2dffa9e71b8..64bceee118fd1 100644 --- a/pandas/computation/expr.py +++ b/pandas/computation/expr.py @@ -21,13 +21,14 @@ _arith_ops_syms, _unary_ops_syms, is_term) from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div 
+from pandas.computation.ops import UndefinedVariableError def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None, - **kwargs): + target=None, **kwargs): """Ensure that we are grabbing the correct scope.""" return Scope(gbls=global_dict, lcls=local_dict, level=level, - resolvers=resolvers) + resolvers=resolvers, target=target) def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys): @@ -88,13 +89,14 @@ class Scope(StringMixin): resolver_keys : frozenset """ __slots__ = ('globals', 'locals', 'resolvers', '_global_resolvers', - 'resolver_keys', '_resolver', 'level', 'ntemps') + 'resolver_keys', '_resolver', 'level', 'ntemps', 'target') - def __init__(self, gbls=None, lcls=None, level=1, resolvers=None): + def __init__(self, gbls=None, lcls=None, level=1, resolvers=None, target=None): self.level = level self.resolvers = tuple(resolvers or []) self.globals = dict() self.locals = dict() + self.target = target self.ntemps = 1 # number of temporary variables in this scope if isinstance(lcls, Scope): @@ -102,6 +104,8 @@ def __init__(self, gbls=None, lcls=None, level=1, resolvers=None): self.locals.update(ld.locals.copy()) self.globals.update(ld.globals.copy()) self.resolvers += ld.resolvers + if ld.target is not None: + self.target = ld.target self.update(ld.level) frame = sys._getframe(level) @@ -130,9 +134,10 @@ def __init__(self, gbls=None, lcls=None, level=1, resolvers=None): def __unicode__(self): return com.pprint_thing("locals: {0}\nglobals: {0}\nresolvers: " - "{0}".format(list(self.locals.keys()), - list(self.globals.keys()), - list(self.resolver_keys))) + "{0}\ntarget: {0}".format(list(self.locals.keys()), + list(self.globals.keys()), + list(self.resolver_keys), + self.target)) def __getitem__(self, key): return self.resolve(key, globally=False) @@ -417,6 +422,7 @@ def __init__(self, env, engine, parser, preparser=_preparse): self.engine = engine self.parser = parser self.preparser = preparser + self.assigner = None def 
visit(self, node, **kwargs): if isinstance(node, string_types): @@ -575,9 +581,33 @@ def visit_Slice(self, node, **kwargs): return slice(lower, upper, step) def visit_Assign(self, node, **kwargs): - cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], - comparators=[node.value]) - return self.visit(cmpr) + """ + support a single assignment node, like + + c = a + b + + set the assigner at the top level, must be a Name node which + might or might not exist in the resolvers + + """ + + if len(node.targets) != 1: + raise SyntaxError('can only assign a single expression') + if not isinstance(node.targets[0], ast.Name): + raise SyntaxError('left hand side of an assignment must be a single name') + if self.env.target is None: + raise ValueError('cannot assign without a target object') + + try: + assigner = self.visit(node.targets[0], **kwargs) + except (UndefinedVariableError): + assigner = node.targets[0].id + + self.assigner = getattr(assigner,'name',assigner) + if self.assigner is None: + raise SyntaxError('left hand side of an assignment must be a single resolvable name') + + return self.visit(node.value, **kwargs) def visit_Attribute(self, node, **kwargs): attr = node.attr @@ -669,7 +699,7 @@ def visitor(x, y): return reduce(visitor, operands) -_python_not_supported = frozenset(['Assign', 'Dict', 'Call', 'BoolOp', +_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn']) _numexpr_supported_calls = frozenset(_reductions + _mathops) @@ -712,6 +742,10 @@ def __init__(self, expr, engine='numexpr', parser='pandas', env=None, self.terms = self.parse() self.truediv = truediv + @property + def assigner(self): + return getattr(self._visitor,'assigner',None) + def __call__(self): self.env.locals['truediv'] = self.truediv return self.terms(self.env) diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py index 9ffae5edd93bc..eb675d6230c8c 100644 --- a/pandas/computation/pytables.py +++ b/pandas/computation/pytables.py @@ -389,6 
+389,11 @@ def visit_USub(self, node, **kwargs): def visit_Index(self, node, **kwargs): return self.visit(node.value).value + def visit_Assign(self, node, **kwargs): + cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], + comparators=[node.value]) + return self.visit(cmpr) + def visit_Subscript(self, node, **kwargs): value = self.visit(node.value) slobj = self.visit(node.slice) diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py index aa5c0cc5d50f6..b8de54ade31db 100644 --- a/pandas/computation/tests/test_eval.py +++ b/pandas/computation/tests/test_eval.py @@ -24,6 +24,7 @@ from pandas.computation.ops import (_binary_ops_dict, _unary_ops_dict, _special_case_arith_ops_syms, _arith_ops_syms, _bool_ops_syms) +from pandas.computation.common import NameResolutionError import pandas.computation.expr as expr import pandas.util.testing as tm from pandas.util.testing import (assert_frame_equal, randbool, @@ -1151,9 +1152,65 @@ def test_assignment_fails(self): df = DataFrame(np.random.randn(5, 3), columns=list('abc')) df2 = DataFrame(np.random.randn(5, 3)) expr1 = 'df = df2' - self.assertRaises(NotImplementedError, self.eval, expr1, + self.assertRaises(ValueError, self.eval, expr1, local_dict={'df': df, 'df2': df2}) + def test_assignment_column(self): + skip_if_no_ne('numexpr') + df = DataFrame(np.random.randn(5, 2), columns=list('ab')) + orig_df = df.copy() + + # multiple assignees + self.assertRaises(SyntaxError, df.eval, 'd c = a + b') + + # invalid assignees + self.assertRaises(SyntaxError, df.eval, 'd,c = a + b') + self.assertRaises(SyntaxError, df.eval, 'Timestamp("20131001") = a + b') + + # single assignment - existing variable + expected = orig_df.copy() + expected['a'] = expected['a'] + expected['b'] + df = orig_df.copy() + df.eval('a = a + b') + assert_frame_equal(df,expected) + + # single assignment - new variable + expected = orig_df.copy() + expected['c'] = expected['a'] + expected['b'] + df = orig_df.copy() + 
df.eval('c = a + b') + assert_frame_equal(df,expected) + + # with a local name overlap + def f(): + df = orig_df.copy() + a = 1 + df.eval('a = 1 + b') + return df + + df = f() + expected = orig_df.copy() + expected['a'] = 1 + expected['b'] + assert_frame_equal(df,expected) + + df = orig_df.copy() + def f(): + a = 1 + df.eval('a=a+b') + self.assertRaises(NameResolutionError, f) + + # multiple assignment + df = orig_df.copy() + df.eval('c = a + b') + self.assertRaises(SyntaxError, df.eval, 'c = a = b') + + # explicit targets + df = orig_df.copy() + self.eval('c = df.a + df.b', local_dict={'df' : df}, target=df) + expected = orig_df.copy() + expected['c'] = expected['a'] + expected['b'] + assert_frame_equal(df,expected) + def test_basic_period_index_boolean_expression(self): df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i') diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b485d51514162..a91180ac43561 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1797,12 +1797,14 @@ def eval(self, expr, **kwargs): >>> from pandas import DataFrame >>> df = DataFrame(randn(10, 2), columns=list('ab')) >>> df.eval('a + b') + >>> df.eval('c=a + b') """ resolvers = kwargs.pop('resolvers', None) if resolvers is None: index_resolvers = self._get_resolvers() resolvers = [self, index_resolvers] kwargs['local_dict'] = _ensure_scope(resolvers=resolvers, **kwargs) + kwargs['target'] = self return _eval(expr, **kwargs) def _slice(self, slobj, axis=0, raise_on_error=False, typ=None):
This was a relatively easy extension of eval to allow in-line creation/assignment. Allows one to basically use formulas to do things (pandas conquers excel!!!!) - [x] docs - [x] tests non-frame ``` In [11]: df = DataFrame(dict(a = range(5), b = range(5,10))) In [12]: df Out[12]: a b 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 In [13]: df.eval('c=a+b') In [14]: df.eval('d=a+b+c') In [15]: df.eval('a=1') In [16]: df Out[16]: a b c d 0 1 5 5 10 1 1 6 7 14 2 1 7 9 18 3 1 8 11 22 4 1 9 13 26 ``` You can do this (this could maybe have a bit better syntax though) ``` In [31]: df = DataFrame(dict(a = range(5), b = range(5,10))) In [32]: formulas = Series(['c=a+b','d=a*b'],index=['a','b']) In [33]: df.apply(lambda x: df.eval(formulas[x.name])) Out[33]: a None b None dtype: object In [34]: df Out[34]: a b c d 0 0 5 5 0 1 1 6 7 6 2 2 7 9 14 3 3 8 11 24 4 4 9 13 36 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5343
2013-10-26T22:34:45Z
2013-10-27T22:12:25Z
2013-10-27T22:12:25Z
2014-06-17T03:29:49Z
API: provide compat for Timestamp now/today/utcnow class methods (GH5339)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b74b23029a2ac..6c8ae847254f2 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -321,6 +321,7 @@ API Changes (:issue:`4501`) - Support non-unique axes in a Panel via indexing operations (:issue:`4960`) - ``.truncate`` will raise a ``ValueError`` if invalid before and afters dates are given (:issue:`5242`) + - ``Timestamp`` now supports ``now/today/utcnow`` class methods (:issue:`5339`) Internal Refactoring ~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index dee0587aaaa02..b8144c2b5eab9 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2516,6 +2516,18 @@ def test_string_index_series_name_converted(self): class TestTimestamp(unittest.TestCase): + def test_class_ops(self): + _skip_if_no_pytz() + import pytz + + def compare(x,y): + self.assert_(int(Timestamp(x).value/1e9) == int(Timestamp(y).value/1e9)) + + compare(Timestamp.now(),datetime.now()) + compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC'))) + compare(Timestamp.utcnow(),datetime.utcnow()) + compare(Timestamp.today(),datetime.today()) + def test_basics_nanos(self): val = np.int64(946684800000000000).view('M8[ns]') stamp = Timestamp(val.view('i8') + 500) @@ -3031,10 +3043,10 @@ def test_frame_apply_dont_convert_datetime64(self): df = df.applymap(lambda x: x + BDay()) self.assertTrue(df.x1.dtype == 'M8[ns]') - + def test_date_range_fy5252(self): - dr = date_range(start="2013-01-01", - periods=2, + dr = date_range(start="2013-01-01", + periods=2, freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest")) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c487202c4c0f9..11b86db8b8d92 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -140,6 +140,22 @@ class Timestamp(_Timestamp): note: by definition there cannot be any tz info on the ordinal itself """ return 
cls(datetime.fromordinal(ordinal),offset=offset,tz=tz) + @classmethod + def now(cls, tz=None): + """ compat now with datetime """ + if isinstance(tz, basestring): + tz = pytz.timezone(tz) + return cls(datetime.now(tz)) + + @classmethod + def today(cls): + """ compat today with datetime """ + return cls(datetime.today()) + + @classmethod + def utcnow(cls): + return cls.now('UTC') + def __new__(cls, object ts_input, object offset=None, tz=None, unit=None): cdef _TSObject ts cdef _Timestamp ts_base
closes #5339
https://api.github.com/repos/pandas-dev/pandas/pulls/5342
2013-10-26T19:39:45Z
2013-10-27T21:59:55Z
2013-10-27T21:59:55Z
2014-07-16T08:37:21Z
BUG/CLN: Cleanup sanitize column and abstract broadcasting
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b485d51514162..f55c9ec08d084 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1915,71 +1915,51 @@ def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new # blocks) are always copied - # dont' need further processing on an equal index - if isinstance(value, Index) and (not len(self.index) or value.equals(self.index)): - value = value.values.copy() - elif isinstance(value, Series) or _is_sequence(value): + if isinstance(value, (Series, DataFrame)): is_frame = isinstance(value, DataFrame) - if isinstance(value, Series) or is_frame: - if value.index.equals(self.index) or not len(self.index): - # copy the values - value = value.values.copy() - else: - - # GH 4107 - try: - value = value.reindex(self.index).values - except: - raise TypeError('incompatible index of inserted column ' - 'with frame index') - - if is_frame: - value = value.T + if value.index.equals(self.index) or not len(self.index): + # copy the values + value = value.values.copy() else: - if len(value) != len(self.index): - raise ValueError('Length of values does not match ' - 'length of index') - if not isinstance(value, np.ndarray): - if isinstance(value, list) and len(value) > 0: - value = com._possibly_convert_platform(value) - else: - value = com._asarray_tuplesafe(value) - elif isinstance(value, PeriodIndex): - value = value.asobject - elif value.ndim == 2: - value = value.copy().T + # GH 4107 + try: + value = value.reindex(self.index).values + except: + raise TypeError('incompatible index of inserted column ' + 'with frame index') + + if is_frame: + value = value.T + elif isinstance(value, Index) or _is_sequence(value): + if len(value) != len(self.index): + raise ValueError('Length of values does not match ' + 'length of index') + + if not isinstance(value, (np.ndarray, Index)): + if isinstance(value, list) and len(value) > 0: + value = 
com._possibly_convert_platform(value) else: - value = value.copy() + value = com._asarray_tuplesafe(value) + elif isinstance(value, PeriodIndex): + value = value.asobject + elif value.ndim == 2: + value = value.copy().T + else: + value = value.copy() + else: + # upcast the scalar + dtype, value = _infer_dtype_from_scalar(value) + value = np.repeat(value, len(self.index)).astype(dtype) + value = com._possibly_cast_to_datetime(value, dtype) - # Broadcasting funtimes - if key in self.columns and value.ndim == 1: + # broadcast across multiple columns if necessary + if key in self.columns and value.ndim == 1: + if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) - else: - if key in self.columns: - existing_piece = self[key] - # upcast the scalar - dtype, value = _infer_dtype_from_scalar(value) - - # transpose hack - if isinstance(existing_piece, DataFrame): - shape = (len(existing_piece.columns), len(self.index)) - value = np.repeat(value, np.prod(shape)).reshape(shape) - else: - value = np.repeat(value, len(self.index)) - - value = value.astype(dtype) - - else: - # upcast the scalar - dtype, value = _infer_dtype_from_scalar(value) - value = np.array( - np.repeat(value, len(self.index)), dtype=dtype) - - value = com._possibly_cast_to_datetime(value, dtype) return np.atleast_2d(np.asarray(value)) @property diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6aa4322234d7f..d4a252e0995f3 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3205,6 +3205,17 @@ def check(result, expected=None): expected = pd.concat([ s, s, s],keys=df.columns,axis=1) check(result,expected) + # check column dups with index equal and not equal to df's index + df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'], + columns=['A', 'B', 'A']) + for index in [df.index, pd.Index(list('edcba'))]: 
+ this_df = df.copy() + expected_ser = pd.Series(index.values, index=this_df.index) + expected_df = DataFrame.from_items([('A', expected_ser), + ('B', this_df['B']), + ('A', expected_ser)]) + this_df['A'] = index + check(this_df, expected_df) def test_column_dups_indexing(self):
Also fixes small bug from #5321
https://api.github.com/repos/pandas-dev/pandas/pulls/5341
2013-10-26T18:09:21Z
2013-10-26T23:40:01Z
2013-10-26T23:40:01Z
2014-07-16T08:37:19Z
More informative exception when trying to use ``MS`` as period frequency
diff --git a/doc/source/release.rst b/doc/source/release.rst index a2015a3b361ac..f1f685850284c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -633,6 +633,7 @@ Bug Fixes - Fixed issue with ``drop`` and a non-unique index on Series (:issue:`5248`) - Fixed seg fault in C parser caused by passing more names than columns in the file. (:issue:`5156`) + - More informative exception when trying to use ``MS`` as period frequency (:issue:`5332`) pandas 0.12.0 ------------- diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index cfe874484231b..a7e315af5c1cc 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -337,7 +337,7 @@ def get_base_alias(freqstr): """ return _base_and_stride(freqstr)[0] -_dont_uppercase = ['MS', 'ms'] +_dont_uppercase = set(('MS', 'ms')) def get_offset(name): @@ -497,7 +497,7 @@ def _period_alias_dictionary(): H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"] T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"] S_aliases = ["S", "SEC", "SECOND", "SECONDLY"] - L_aliases = ["L", "MS", "MILLISECOND", "MILLISECONDLY"] + L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"] U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"] N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"] @@ -615,10 +615,13 @@ def _period_group(freqstr): def _period_str_to_code(freqstr): # hack freqstr = _rule_aliases.get(freqstr, freqstr) - freqstr = _rule_aliases.get(freqstr.lower(), freqstr) + + if freqstr not in _dont_uppercase: + freqstr = _rule_aliases.get(freqstr.lower(), freqstr) try: - freqstr = freqstr.upper() + if freqstr not in _dont_uppercase: + freqstr = freqstr.upper() return _period_code_map[freqstr] except KeyError: try: diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 312a88bcbc5a9..84b62e7931156 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -13,7 +13,7 @@ from numpy.ma.testutils import assert_equal 
from pandas import Timestamp -from pandas.tseries.frequencies import MONTHS, DAYS +from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map from pandas.tseries.period import Period, PeriodIndex, period_range from pandas.tseries.index import DatetimeIndex, date_range, Index from pandas.tseries.tools import to_datetime @@ -488,7 +488,14 @@ def test_constructor_infer_freq(self): p = Period('2007-01-01 07:10:15.123400') self.assert_(p.freq == 'U') - + + def test_asfreq_MS(self): + initial = Period("2013") + + self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M')) + self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S") + tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS') + self.assertTrue(_period_code_map.get("MS") is None) def noWrap(item): return item
Closes #5332
https://api.github.com/repos/pandas-dev/pandas/pulls/5340
2013-10-26T17:45:23Z
2014-04-09T03:01:05Z
null
2014-07-16T08:37:16Z
Docs for error_bad_lines, and warn_bad_lines options to pd.read_*
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c10cb84de34fd..84cb8567fbbcb 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -133,6 +133,14 @@ tupleize_cols: boolean, default False Leave a list of tuples on columns as is (default is to convert to a Multi Index on the columns) +error_bad_lines: boolean, default True + Lines with too many fields (e.g. a csv line with too many commas) will by + default cause an exception to be raised, and no DataFrame will be returned. + If False, then these "bad lines" will dropped from the DataFrame that is + returned. (Only valid with C parser). +warn_bad_lines: boolean, default True + If error_bad_lines is False, and warn_bad_lines is True, a warning for each + "bad line" will be output. (Only valid with C parser). Returns -------
Hi, Added some docs for error_bad_lines & warn_bad_lines, as currently there aren't any found here: http://pandas.pydata.org/pandas-docs/dev/generated/pandas.io.parsers.read_csv.html
https://api.github.com/repos/pandas-dev/pandas/pulls/5337
2013-10-26T02:55:02Z
2014-01-01T03:14:28Z
2014-01-01T03:14:28Z
2014-07-16T08:37:14Z
DOC: Added/fixed some docstrings, esp Panel flex
diff --git a/doc/source/_templates/autosummary/class.rst b/doc/source/_templates/autosummary/class.rst index e9af7e8df8bab..a9c9bd2b6507f 100644 --- a/doc/source/_templates/autosummary/class.rst +++ b/doc/source/_templates/autosummary/class.rst @@ -3,7 +3,7 @@ {% block methods %} {% if methods %} -.. +.. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages. .. autosummary:: :toctree: diff --git a/doc/source/release.rst b/doc/source/release.rst index 5ca602cc1cc0f..6227fedda11d4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -7,7 +7,7 @@ import os import csv - from StringIO import StringIO + from pandas.compat import StringIO import pandas as pd ExcelWriter = pd.ExcelWriter @@ -58,32 +58,35 @@ New features is evaluated, respecttively. See scipy docs. (:issue:`4298`) - Added ``isin`` method to DataFrame (:issue:`4211`) - Clipboard functionality now works with PySide (:issue:`4282`) - - New ``extract`` string method returns regex matches more conveniently (:issue:`4685`) + - New ``extract`` string method returns regex matches more conveniently + (:issue:`4685`) - Auto-detect field widths in read_fwf when unspecified (:issue:`4488`) - - ``to_csv()`` now outputs datetime objects according to a specified format string - via the ``date_format`` keyword (:issue:`4313`) + - ``to_csv()`` now outputs datetime objects according to a specified format + string via the ``date_format`` keyword (:issue:`4313`) - Added ``LastWeekOfMonth`` DateOffset (:issue:`4637`) - Added ``FY5253``, and ``FY5253Quarter`` DateOffsets (:issue:`4511`) Experimental Features ~~~~~~~~~~~~~~~~~~~~~ - - The new :func:`~pandas.eval` function implements expression evaluation using - ``numexpr`` behind the scenes. This results in large speedups for complicated - expressions involving large DataFrames/Series. 
+ - The new :func:`~pandas.eval` function implements expression evaluation + using ``numexpr`` behind the scenes. This results in large speedups for + complicated expressions involving large DataFrames/Series. - :class:`~pandas.DataFrame` has a new :meth:`~pandas.DataFrame.eval` that evaluates an expression in the context of the ``DataFrame``; allows inline expression assignment - A :meth:`~pandas.DataFrame.query` method has been added that allows - you to select elements of a ``DataFrame`` using a natural query syntax nearly - identical to Python syntax. + you to select elements of a ``DataFrame`` using a natural query syntax + nearly identical to Python syntax. - ``pd.eval`` and friends now evaluate operations involving ``datetime64`` objects in Python space because ``numexpr`` cannot handle ``NaT`` values (:issue:`4897`). - - Add msgpack support via ``pd.read_msgpack()`` and ``pd.to_msgpack()`` / ``df.to_msgpack()`` for serialization - of arbitrary pandas (and python objects) in a lightweight portable binary format (:issue:`686`) + - Add msgpack support via ``pd.read_msgpack()`` and ``pd.to_msgpack()`` / + ``df.to_msgpack()`` for serialization of arbitrary pandas (and python + objects) in a lightweight portable binary format (:issue:`686`) - Added PySide support for the qtpandas DataFrameModel and DataFrameWidget. - - Added :mod:`pandas.io.gbq` for reading from (and writing to) Google BigQuery into a DataFrame. (:issue:`4140`) + - Added :mod:`pandas.io.gbq` for reading from (and writing to) Google + BigQuery into a DataFrame. (:issue:`4140`) Improvements to existing features ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -93,7 +96,8 @@ Improvements to existing features - ``read_excel`` now supports an integer in its ``sheetname`` argument giving the index of the sheet to read in (:issue:`4301`). 
- ``get_dummies`` works with NaN (:issue:`4446`) - - Added a test for ``read_clipboard()`` and ``to_clipboard()`` (:issue:`4282`) + - Added a test for ``read_clipboard()`` and ``to_clipboard()`` + (:issue:`4282`) - Added bins argument to ``value_counts`` (:issue:`3945`), also sort and ascending, now available in Series method as well as top-level function. - Text parser now treats anything that reads like inf ("inf", "Inf", "-Inf", @@ -120,21 +124,21 @@ Improvements to existing features - A Series of dtype ``timedelta64[ns]`` can now be divided by another ``timedelta64[ns]`` object to yield a ``float64`` dtyped Series. This is frequency conversion; astyping is also supported. - - Timedelta64 support ``fillna/ffill/bfill`` with an integer interpreted as seconds, - or a ``timedelta`` (:issue:`3371`) + - Timedelta64 support ``fillna/ffill/bfill`` with an integer interpreted as + seconds, or a ``timedelta`` (:issue:`3371`) - Box numeric ops on ``timedelta`` Series (:issue:`4984`) - Datetime64 support ``ffill/bfill`` - Performance improvements with ``__getitem__`` on ``DataFrames`` with when the key is a column - - Support for using a ``DatetimeIndex/PeriodsIndex`` directly in a datelike calculation - e.g. s-s.index (:issue:`4629`) + - Support for using a ``DatetimeIndex/PeriodsIndex`` directly in a datelike + calculation e.g. s-s.index (:issue:`4629`) - Better/cleaned up exceptions in core/common, io/excel and core/format (:issue:`4721`, :issue:`3954`), as well as cleaned up test cases in tests/test_frame, tests/test_multilevel (:issue:`4732`). - Performance improvement of timesesies plotting with PeriodIndex and added test to vbench (:issue:`4705` and :issue:`4722`) - - Add ``axis`` and ``level`` keywords to ``where``, so that the ``other`` argument - can now be an alignable pandas object. + - Add ``axis`` and ``level`` keywords to ``where``, so that the ``other`` + argument can now be an alignable pandas object. 
- ``to_datetime`` with a format of '%Y%m%d' now parses much faster - It's now easier to hook new Excel writers into pandas (just subclass ``ExcelWriter`` and register your engine). You can specify an ``engine`` in @@ -149,8 +153,8 @@ Improvements to existing features - allow DataFrame constructor to accept more list-like objects, e.g. list of ``collections.Sequence`` and ``array.Array`` objects (:issue:`3783`, :issue:`4297`, :issue:`4851`), thanks @lgautier - - DataFrame constructor now accepts a numpy masked record array (:issue:`3478`), - thanks @jnothman + - DataFrame constructor now accepts a numpy masked record array + (:issue:`3478`), thanks @jnothman - ``__getitem__`` with ``tuple`` key (e.g., ``[:, 2]``) on ``Series`` without ``MultiIndex`` raises ``ValueError`` (:issue:`4759`, :issue:`4837`) - ``read_json`` now raises a (more informative) ``ValueError`` when the dict @@ -160,8 +164,8 @@ Improvements to existing features (:issue:`3441`, :issue:`4933`) - ``pandas`` is now tested with two different versions of ``statsmodels`` (0.4.3 and 0.5.0) (:issue:`4981`). - - Better string representations of ``MultiIndex`` (including ability to roundtrip - via ``repr``). (:issue:`3347`, :issue:`4935`) + - Better string representations of ``MultiIndex`` (including ability to + roundtrip via ``repr``). (:issue:`3347`, :issue:`4935`) - Both ExcelFile and read_excel to accept an xlrd.Book for the io (formerly path_or_buf) argument; this requires engine to be set. (:issue:`4961`). @@ -175,22 +179,27 @@ Improvements to existing features structure of unlocalized data (:issue:`4230`) - DatetimeIndex is now in the API documentation - Improve support for converting R datasets to pandas objects (more - informative index for timeseries and numeric, support for factors, dist, and - high-dimensional arrays). + informative index for timeseries and numeric, support for factors, dist, + and high-dimensional arrays). 
- :func:`~pandas.read_html` now supports the ``parse_dates``, ``tupleize_cols`` and ``thousands`` parameters (:issue:`4770`). - - :meth:`~pandas.io.json.json_normalize` is a new method to allow you to create a flat table - from semi-structured JSON data. :ref:`See the docs<io.json_normalize>` (:issue:`1067`) + - :meth:`~pandas.io.json.json_normalize` is a new method to allow you to + create a flat table from semi-structured JSON data. :ref:`See the + docs<io.json_normalize>` (:issue:`1067`) - ``DataFrame.from_records()`` will now accept generators (:issue:`4910`) - - ``DataFrame.interpolate()`` and ``Series.interpolate()`` have been expanded to include - interpolation methods from scipy. (:issue:`4434`, :issue:`1892`) - - ``Series`` now supports a ``to_frame`` method to convert it to a single-column DataFrame (:issue:`5164`) + - ``DataFrame.interpolate()`` and ``Series.interpolate()`` have been expanded + to include interpolation methods from scipy. (:issue:`4434`, :issue:`1892`) + - ``Series`` now supports a ``to_frame`` method to convert it to a + single-column DataFrame (:issue:`5164`) - DatetimeIndex (and date_range) can now be constructed in a left- or right-open fashion using the ``closed`` parameter (:issue:`4579`) - Python csv parser now supports usecols (:issue:`4335`) - - Added support for Google Analytics v3 API segment IDs that also supports v2 IDs. (:issue:`5271`) + - Added support for Google Analytics v3 API segment IDs that also supports v2 + IDs. (:issue:`5271`) - ``NDFrame.drop()`` now accepts names as well as integers for the axis argument. (:issue:`5354`) + - Added short docstrings to a few methods that were missing them + fixed the + docstrings for Panel flex methods. (:issue:`5336`) API Changes ~~~~~~~~~~~ @@ -212,51 +221,63 @@ API Changes - deprecated ``iterkv``, which will be removed in a future release (was just an alias of iteritems used to get around ``2to3``'s changes). 
(:issue:`4384`, :issue:`4375`, :issue:`4372`) - - ``Series.get`` with negative indexers now returns the same as ``[]`` (:issue:`4390`) - - allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when the single-key - is not currently contained in the index for that axis (:issue:`2578`, :issue:`5226`) + - ``Series.get`` with negative indexers now returns the same as ``[]`` + (:issue:`4390`) + - allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when + the single-key is not currently contained in the index for that axis + (:issue:`2578`, :issue:`5226`) - Default export for ``to_clipboard`` is now csv with a sep of `\t` for compat (:issue:`3368`) - - ``at`` now will enlarge the object inplace (and return the same) (:issue:`2578`) - - ``DataFrame.plot`` will scatter plot x versus y by passing ``kind='scatter'`` (:issue:`2215`) + - ``at`` now will enlarge the object inplace (and return the same) + (:issue:`2578`) + - ``DataFrame.plot`` will scatter plot x versus y by passing + ``kind='scatter'`` (:issue:`2215`) - ``HDFStore`` - ``append_to_multiple`` automatically synchronizes writing rows to multiple tables and adds a ``dropna`` kwarg (:issue:`4698`) - handle a passed ``Series`` in table format (:issue:`4330`) - - added an ``is_open`` property to indicate if the underlying file handle is_open; - a closed store will now report 'CLOSED' when viewing the store (rather than raising an error) - (:issue:`4409`) - - a close of a ``HDFStore`` now will close that instance of the ``HDFStore`` - but will only close the actual file if the ref count (by ``PyTables``) w.r.t. all of the open handles - are 0. Essentially you have a local instance of ``HDFStore`` referenced by a variable. Once you - close it, it will report closed. Other references (to the same file) will continue to operate - until they themselves are closed. 
Performing an action on a closed file will raise - ``ClosedFileError`` - - removed the ``_quiet`` attribute, replace by a ``DuplicateWarning`` if retrieving - duplicate rows from a table (:issue:`4367`) - - removed the ``warn`` argument from ``open``. Instead a ``PossibleDataLossError`` exception will - be raised if you try to use ``mode='w'`` with an OPEN file handle (:issue:`4367`) - - allow a passed locations array or mask as a ``where`` condition (:issue:`4467`) - - add the keyword ``dropna=True`` to ``append`` to change whether ALL nan rows are not written - to the store (default is ``True``, ALL nan rows are NOT written), also settable - via the option ``io.hdf.dropna_table`` (:issue:`4625`) - - the ``format`` keyword now replaces the ``table`` keyword; allowed values are ``fixed(f)|table(t)`` - the ``Storer`` format has been renamed to ``Fixed`` - - a column multi-index will be recreated properly (:issue:`4710`); raise on trying to use a multi-index - with data_columns on the same axis - - ``select_as_coordinates`` will now return an ``Int64Index`` of the resultant selection set + - added an ``is_open`` property to indicate if the underlying file handle + is_open; a closed store will now report 'CLOSED' when viewing the store + (rather than raising an error) (:issue:`4409`) + - a close of a ``HDFStore`` now will close that instance of the + ``HDFStore`` but will only close the actual file if the ref count (by + ``PyTables``) w.r.t. all of the open handles are 0. Essentially you have + a local instance of ``HDFStore`` referenced by a variable. Once you close + it, it will report closed. Other references (to the same file) will + continue to operate until they themselves are closed. Performing an + action on a closed file will raise ``ClosedFileError`` + - removed the ``_quiet`` attribute, replace by a ``DuplicateWarning`` if + retrieving duplicate rows from a table (:issue:`4367`) + - removed the ``warn`` argument from ``open``. 
Instead a + ``PossibleDataLossError`` exception will be raised if you try to use + ``mode='w'`` with an OPEN file handle (:issue:`4367`) + - allow a passed locations array or mask as a ``where`` condition + (:issue:`4467`) + - add the keyword ``dropna=True`` to ``append`` to change whether ALL nan + rows are not written to the store (default is ``True``, ALL nan rows are + NOT written), also settable via the option ``io.hdf.dropna_table`` + (:issue:`4625`) + - the ``format`` keyword now replaces the ``table`` keyword; allowed values + are ``fixed(f)|table(t)`` the ``Storer`` format has been renamed to + ``Fixed`` + - a column multi-index will be recreated properly (:issue:`4710`); raise on + trying to use a multi-index with data_columns on the same axis + - ``select_as_coordinates`` will now return an ``Int64Index`` of the + resultant selection set - support ``timedelta64[ns]`` as a serialization type (:issue:`3577`) - - store `datetime.date` objects as ordinals rather then timetuples to avoid timezone issues (:issue:`2852`), - thanks @tavistmorph and @numpand + - store `datetime.date` objects as ordinals rather then timetuples to avoid + timezone issues (:issue:`2852`), thanks @tavistmorph and @numpand - ``numexpr`` 2.2.2 fixes incompatiblity in PyTables 2.4 (:issue:`4908`) - ``JSON`` - - added ``date_unit`` parameter to specify resolution of timestamps. Options - are seconds, milliseconds, microseconds and nanoseconds. (:issue:`4362`, :issue:`4498`). - - added ``default_handler`` parameter to allow a callable to be passed which will be - responsible for handling otherwise unserialisable objects. (:issue:`5138`) + - added ``date_unit`` parameter to specify resolution of timestamps. + Options are seconds, milliseconds, microseconds and nanoseconds. + (:issue:`4362`, :issue:`4498`). + - added ``default_handler`` parameter to allow a callable to be passed + which will be responsible for handling otherwise unserialisable objects. 
+ (:issue:`5138`) - ``Index`` and ``MultiIndex`` changes (:issue:`4039`): @@ -284,54 +305,71 @@ API Changes changes on ``levels`` and ``labels`` setting on ``MultiIndex``). (:issue:`4859`, :issue:`4909`) - - Infer and downcast dtype if ``downcast='infer'`` is passed to ``fillna/ffill/bfill`` (:issue:`4604`) - - ``__nonzero__`` for all NDFrame objects, will now raise a ``ValueError``, this reverts back to (:issue:`1073`, :issue:`4633`) - behavior. Add ``.bool()`` method to ``NDFrame`` objects to facilitate evaluating of single-element boolean Series + - Infer and downcast dtype if ``downcast='infer'`` is passed to + ``fillna/ffill/bfill`` (:issue:`4604`) + - ``__nonzero__`` for all NDFrame objects, will now raise a ``ValueError``, + this reverts back to (:issue:`1073`, :issue:`4633`) behavior. Add + ``.bool()`` method to ``NDFrame`` objects to facilitate evaluating of + single-element boolean Series - ``DataFrame.update()`` no longer raises a ``DataConflictError``, it now will raise a ``ValueError`` instead (if necessary) (:issue:`4732`) - ``Series.isin()`` and ``DataFrame.isin()`` now raise a ``TypeError`` when passed a string (:issue:`4763`). Pass a ``list`` of one element (containing the string) instead. - - Remove undocumented/unused ``kind`` keyword argument from ``read_excel``, and ``ExcelFile``. (:issue:`4713`, :issue:`4712`) + - Remove undocumented/unused ``kind`` keyword argument from ``read_excel``, + and ``ExcelFile``. (:issue:`4713`, :issue:`4712`) - The ``method`` argument of ``NDFrame.replace()`` is valid again, so that a a list can be passed to ``to_replace`` (:issue:`4743`). - provide automatic dtype conversions on _reduce operations (:issue:`3371`) - - exclude non-numerics if mixed types with datelike in _reduce operations (:issue:`3371`) - - default for ``tupleize_cols`` is now ``False`` for both ``to_csv`` and ``read_csv``. 
Fair warning in 0.12 (:issue:`3604`) - - moved timedeltas support to pandas.tseries.timedeltas.py; add timedeltas string parsing, - add top-level ``to_timedelta`` function - - ``NDFrame`` now is compatible with Python's toplevel ``abs()`` function (:issue:`4821`). - - raise a ``TypeError`` on invalid comparison ops on Series/DataFrame (e.g. integer/datetime) (:issue:`4968`) - - Added a new index type, ``Float64Index``. This will be automatically created when passing floating values in index creation. - This enables a pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar indexing and slicing work exactly the same. - Indexing on other index types are preserved (and positional fallback for ``[],ix``), with the exception, that floating point slicing - on indexes on non ``Float64Index`` will raise a ``TypeError``, e.g. ``Series(range(5))[3.5:4.5]`` (:issue:`263`) + - exclude non-numerics if mixed types with datelike in _reduce operations + (:issue:`3371`) + - default for ``tupleize_cols`` is now ``False`` for both ``to_csv`` and + ``read_csv``. Fair warning in 0.12 (:issue:`3604`) + - moved timedeltas support to pandas.tseries.timedeltas.py; add timedeltas + string parsing, add top-level ``to_timedelta`` function + - ``NDFrame`` now is compatible with Python's toplevel ``abs()`` function + (:issue:`4821`). + - raise a ``TypeError`` on invalid comparison ops on Series/DataFrame (e.g. + integer/datetime) (:issue:`4968`) + - Added a new index type, ``Float64Index``. This will be automatically + created when passing floating values in index creation. This enables a + pure label-based slicing paradigm that makes ``[],ix,loc`` for scalar + indexing and slicing work exactly the same. Indexing on other index types + are preserved (and positional fallback for ``[],ix``), with the exception, + that floating point slicing on indexes on non ``Float64Index`` will raise a + ``TypeError``, e.g. 
``Series(range(5))[3.5:4.5]`` (:issue:`263`) - Make Categorical repr nicer (:issue:`4368`) - Remove deprecated ``Factor`` (:issue:`3650`) - Remove deprecated ``set_printoptions/reset_printoptions`` (:issue:``3046``) - Remove deprecated ``_verbose_info`` (:issue:`3215`) - Begin removing methods that don't make sense on ``GroupBy`` objects (:issue:`4887`). - - Remove deprecated ``read_clipboard/to_clipboard/ExcelFile/ExcelWriter`` from ``pandas.io.parsers`` (:issue:`3717`) + - Remove deprecated ``read_clipboard/to_clipboard/ExcelFile/ExcelWriter`` + from ``pandas.io.parsers`` (:issue:`3717`) - All non-Index NDFrames (``Series``, ``DataFrame``, ``Panel``, ``Panel4D``, ``SparsePanel``, etc.), now support the entire set of arithmetic operators and arithmetic flex methods (add, sub, mul, etc.). ``SparsePanel`` does not support ``pow`` or ``mod`` with non-scalars. (:issue:`3765`) - - Arithemtic func factories are now passed real names (suitable for using with super) (:issue:`5240`) - - Provide numpy compatibility with 1.7 for a calling convention like ``np.prod(pandas_object)`` as numpy - call with additional keyword args (:issue:`4435`) - - Provide __dir__ method (and local context) for tab completion / remove ipython completers code - (:issue:`4501`) + - Arithemtic func factories are now passed real names (suitable for using + with super) (:issue:`5240`) + - Provide numpy compatibility with 1.7 for a calling convention like + ``np.prod(pandas_object)`` as numpy call with additional keyword args + (:issue:`4435`) + - Provide __dir__ method (and local context) for tab completion / remove + ipython completers code (:issue:`4501`) - Support non-unique axes in a Panel via indexing operations (:issue:`4960`) - - ``.truncate`` will raise a ``ValueError`` if invalid before and afters dates are given (:issue:`5242`) - - ``Timestamp`` now supports ``now/today/utcnow`` class methods (:issue:`5339`) + - ``.truncate`` will raise a ``ValueError`` if invalid before and afters + dates 
are given (:issue:`5242`) + - ``Timestamp`` now supports ``now/today/utcnow`` class methods + (:issue:`5339`) Internal Refactoring ~~~~~~~~~~~~~~~~~~~~ -In 0.13.0 there is a major refactor primarily to subclass ``Series`` from ``NDFrame``, -which is the base class currently for ``DataFrame`` and ``Panel``, to unify methods -and behaviors. Series formerly subclassed directly from ``ndarray``. (:issue:`4080`, :issue:`3862`, :issue:`816`) +In 0.13.0 there is a major refactor primarily to subclass ``Series`` from +``NDFrame``, which is the base class currently for ``DataFrame`` and ``Panel``, +to unify methods and behaviors. Series formerly subclassed directly from +``ndarray``. (:issue:`4080`, :issue:`3862`, :issue:`816`) See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - Refactor of series.py/frame.py/panel.py to move common code to generic.py @@ -339,15 +377,19 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - added ``_setup_axes`` to created generic NDFrame structures - moved methods - - ``from_axes,_wrap_array,axes,ix,loc,iloc,shape,empty,swapaxes,transpose,pop`` - - ``__iter__,keys,__contains__,__len__,__neg__,__invert__`` - - ``convert_objects,as_blocks,as_matrix,values`` - - ``__getstate__,__setstate__`` (compat remains in frame/panel) - - ``__getattr__,__setattr__`` - - ``_indexed_same,reindex_like,align,where,mask`` - - ``fillna,replace`` (``Series`` replace is now consistent with ``DataFrame``) - - ``filter`` (also added axis argument to selectively filter on a different axis) - - ``reindex,reindex_axis,take`` + - ``from_axes``, ``_wrap_array``, ``axes``, ``ix``, ``loc``, ``iloc``, + ``shape``, ``empty``, ``swapaxes``, ``transpose``, ``pop`` + - ``__iter__``, ``keys``, ``__contains__``, ``__len__``, ``__neg__``, + ``__invert__`` + - ``convert_objects``, ``as_blocks``, ``as_matrix``, ``values`` + - ``__getstate__``, ``__setstate__`` (compat remains in frame/panel) + - ``__getattr__``, ``__setattr__`` + - ``_indexed_same``, 
``reindex_like``, ``align``, ``where``, ``mask`` + - ``fillna``, ``replace`` (``Series`` replace is now consistent with + ``DataFrame``) + - ``filter`` (also added axis argument to selectively filter on a different + axis) + - ``reindex``, ``reindex_axis``, ``take`` - ``truncate`` (moved to become part of ``NDFrame``) - ``isnull/notnull`` now available on ``NDFrame`` objects @@ -356,58 +398,69 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - ``swapaxes`` on a ``Panel`` with the same axes specified now return a copy - support attribute access for setting - ``filter`` supports same api as original ``DataFrame`` filter - - ``fillna`` refactored to ``core/generic.py``, while > 3ndim is ``NotImplemented`` + - ``fillna`` refactored to ``core/generic.py``, while > 3ndim is + ``NotImplemented`` - Series now inherits from ``NDFrame`` rather than directly from ``ndarray``. There are several minor changes that affect the API. - - numpy functions that do not support the array interface will now - return ``ndarrays`` rather than series, e.g. ``np.diff``, ``np.ones_like``, ``np.where`` + - numpy functions that do not support the array interface will now return + ``ndarrays`` rather than series, e.g. ``np.diff``, ``np.ones_like``, + ``np.where`` - ``Series(0.5)`` would previously return the scalar ``0.5``, this is no longer supported - - ``TimeSeries`` is now an alias for ``Series``. the property ``is_time_series`` - can be used to distinguish (if desired) + - ``TimeSeries`` is now an alias for ``Series``. the property + ``is_time_series`` can be used to distinguish (if desired) - Refactor of Sparse objects to use BlockManager - - Created a new block type in internals, ``SparseBlock``, which can hold multi-dtypes - and is non-consolidatable. 
``SparseSeries`` and ``SparseDataFrame`` now inherit - more methods from there hierarchy (Series/DataFrame), and no longer inherit - from ``SparseArray`` (which instead is the object of the ``SparseBlock``) - - Sparse suite now supports integration with non-sparse data. Non-float sparse - data is supportable (partially implemented) - - Operations on sparse structures within DataFrames should preserve sparseness, - merging type operations will convert to dense (and back to sparse), so might - be somewhat inefficient + - Created a new block type in internals, ``SparseBlock``, which can hold + multi-dtypes and is non-consolidatable. ``SparseSeries`` and + ``SparseDataFrame`` now inherit more methods from there hierarchy + (Series/DataFrame), and no longer inherit from ``SparseArray`` (which + instead is the object of the ``SparseBlock``) + - Sparse suite now supports integration with non-sparse data. Non-float + sparse data is supportable (partially implemented) + - Operations on sparse structures within DataFrames should preserve + sparseness, merging type operations will convert to dense (and back to + sparse), so might be somewhat inefficient - enable setitem on ``SparseSeries`` for boolean/integer/slices - - ``SparsePanels`` implementation is unchanged (e.g. not using BlockManager, needs work) - - - added ``ftypes`` method to Series/DataFame, similar to ``dtypes``, but indicates - if the underlying is sparse/dense (as well as the dtype) - - All ``NDFrame`` objects now have a ``_prop_attributes``, which can be used to indcated various - values to propogate to a new object from an existing (e.g. 
name in ``Series`` will follow - more automatically now) - - Internal type checking is now done via a suite of generated classes, allowing ``isinstance(value, klass)`` - without having to directly import the klass, courtesy of @jtratner - - Bug in Series update where the parent frame is not updating its cache based on - changes (:issue:`4080`, :issue:`5216`) or types (:issue:`3217`), fillna (:issue:`3386`) + - ``SparsePanels`` implementation is unchanged (e.g. not using BlockManager, + needs work) + + - added ``ftypes`` method to Series/DataFame, similar to ``dtypes``, but + indicates if the underlying is sparse/dense (as well as the dtype) + - All ``NDFrame`` objects now have a ``_prop_attributes``, which can be used + to indcated various values to propogate to a new object from an existing + (e.g. name in ``Series`` will follow more automatically now) + - Internal type checking is now done via a suite of generated classes, + allowing ``isinstance(value, klass)`` without having to directly import the + klass, courtesy of @jtratner + - Bug in Series update where the parent frame is not updating its cache based + on changes (:issue:`4080`, :issue:`5216`) or types (:issue:`3217`), fillna + (:issue:`3386`) - Indexing with dtype conversions fixed (:issue:`4463`, :issue:`4204`) - - Refactor ``Series.reindex`` to core/generic.py (:issue:`4604`, :issue:`4618`), allow ``method=`` in reindexing - on a Series to work - - ``Series.copy`` no longer accepts the ``order`` parameter and is now consistent with ``NDFrame`` copy - - Refactor ``rename`` methods to core/generic.py; fixes ``Series.rename`` for (:issue:`4605`), and adds ``rename`` - with the same signature for ``Panel`` - - Series (for index) / Panel (for items) now as attribute access to its elements (:issue:`1903`) + - Refactor ``Series.reindex`` to core/generic.py (:issue:`4604`, + :issue:`4618`), allow ``method=`` in reindexing on a Series to work + - ``Series.copy`` no longer accepts the ``order`` parameter and is 
now + consistent with ``NDFrame`` copy + - Refactor ``rename`` methods to core/generic.py; fixes ``Series.rename`` for + (:issue:`4605`), and adds ``rename`` with the same signature for ``Panel`` + - Series (for index) / Panel (for items) now as attribute access to its + elements (:issue:`1903`) - Refactor ``clip`` methods to core/generic.py (:issue:`4798`) - - Refactor of ``_get_numeric_data/_get_bool_data`` to core/generic.py, allowing Series/Panel functionaility - - Refactor of Series arithmetic with time-like objects (datetime/timedelta/time - etc.) into a separate, cleaned up wrapper class. (:issue:`4613`) + - Refactor of ``_get_numeric_data/_get_bool_data`` to core/generic.py, + allowing Series/Panel functionaility + - Refactor of Series arithmetic with time-like objects + (datetime/timedelta/time etc.) into a separate, cleaned up wrapper class. + (:issue:`4613`) - Complex compat for ``Series`` with ``ndarray``. (:issue:`4819`) - - Removed unnecessary ``rwproperty`` from codebase in favor of builtin property. (:issue:`4843`) - - Refactor object level numeric methods (mean/sum/min/max...) from object level modules to - ``core/generic.py`` (:issue:`4435`). - - Refactor cum objects to core/generic.py (:issue:`4435`), note that these have a more numpy-like - function signature. + - Removed unnecessary ``rwproperty`` from codebase in favor of builtin + property. (:issue:`4843`) + - Refactor object level numeric methods (mean/sum/min/max...) from object + level modules to ``core/generic.py`` (:issue:`4435`). + - Refactor cum objects to core/generic.py (:issue:`4435`), note that these + have a more numpy-like function signature. - :func:`~pandas.read_html` now uses ``TextParser`` to parse HTML data from bs4/lxml (:issue:`4770`). 
- Removed the ``keep_internal`` keyword parameter in @@ -430,17 +483,20 @@ Bug Fixes - ``HDFStore`` - - raising an invalid ``TypeError`` rather than ``ValueError`` when appending - with a different block ordering (:issue:`4096`) + - raising an invalid ``TypeError`` rather than ``ValueError`` when + appending with a different block ordering (:issue:`4096`) - ``read_hdf`` was not respecting as passed ``mode`` (:issue:`4504`) - appending a 0-len table will work correctly (:issue:`4273`) - - ``to_hdf`` was raising when passing both arguments ``append`` and ``table`` (:issue:`4584`) - - reading from a store with duplicate columns across dtypes would raise (:issue:`4767`) - - Fixed a bug where ``ValueError`` wasn't correctly raised when column names - weren't strings (:issue:`4956`) - - A zero length series written in Fixed format not deserializing properly. (:issue:`4708`) - - Fixed bug in tslib.tz_convert(vals, tz1, tz2): it could raise IndexError exception while - trying to access trans[pos + 1] (:issue:`4496`) + - ``to_hdf`` was raising when passing both arguments ``append`` and + ``table`` (:issue:`4584`) + - reading from a store with duplicate columns across dtypes would raise + (:issue:`4767`) + - Fixed a bug where ``ValueError`` wasn't correctly raised when column + names weren't strings (:issue:`4956`) + - A zero length series written in Fixed format not deserializing properly. 
+ (:issue:`4708`) + - Fixed bug in tslib.tz_convert(vals, tz1, tz2): it could raise IndexError + exception while trying to access trans[pos + 1] (:issue:`4496`) - The ``by`` argument now works correctly with the ``layout`` argument (:issue:`4102`, :issue:`4014`) in ``*.hist`` plotting methods - Fixed bug in ``PeriodIndex.map`` where using ``str`` would return the str @@ -454,16 +510,19 @@ Bug Fixes - ``read_html`` tests now work with Python 2.6 (:issue:`4351`) - Fixed bug where ``network`` testing was throwing ``NameError`` because a local variable was undefined (:issue:`4381`) - - In ``to_json``, raise if a passed ``orient`` would cause loss of data because - of a duplicate index (:issue:`4359`) + - In ``to_json``, raise if a passed ``orient`` would cause loss of data + because of a duplicate index (:issue:`4359`) - In ``to_json``, fix date handling so milliseconds are the default timestamp as the docstring says (:issue:`4362`). - - ``as_index`` is no longer ignored when doing groupby apply (:issue:`4648`), (:issue:`3417`) + - ``as_index`` is no longer ignored when doing groupby apply (:issue:`4648`, + :issue:`3417`) - JSON NaT handling fixed, NaTs are now serialised to `null` (:issue:`4498`) - - Fixed JSON handling of escapable characters in JSON object keys (:issue:`4593`) - - Fixed passing ``keep_default_na=False`` when ``na_values=None`` (:issue:`4318`) - - Fixed bug with ``values`` raising an error on a DataFrame with duplicate columns and mixed - dtypes, surfaced in (:issue:`4377`) + - Fixed JSON handling of escapable characters in JSON object keys + (:issue:`4593`) + - Fixed passing ``keep_default_na=False`` when ``na_values=None`` + (:issue:`4318`) + - Fixed bug with ``values`` raising an error on a DataFrame with duplicate + columns and mixed dtypes, surfaced in (:issue:`4377`) - Fixed bug with duplicate columns and type conversion in ``read_json`` when ``orient='split'`` (:issue:`4377`) - Fixed JSON bug where locales with decimal separators other than 
'.' threw @@ -472,8 +531,8 @@ Bug Fixes - Fixed an issue where ``PeriodIndex`` joining with self was returning a new instance rather than the same instance (:issue:`4379`); also adds a test for this for the other index types - - Fixed a bug with all the dtypes being converted to object when using the CSV cparser - with the usecols parameter (:issue:`3192`) + - Fixed a bug with all the dtypes being converted to object when using the + CSV cparser with the usecols parameter (:issue:`3192`) - Fix an issue in merging blocks where the resulting DataFrame had partially set _ref_locs (:issue:`4403`) - Fixed an issue where hist subplots were being overwritten when they were @@ -488,80 +547,107 @@ Bug Fixes (:issue:`4486`) - Fixed an issue where cumsum and cumprod didn't work with bool dtypes (:issue:`4170`, :issue:`4440`) - - Fixed Panel slicing issued in ``xs`` that was returning an incorrect dimmed object - (:issue:`4016`) - - Fix resampling bug where custom reduce function not used if only one group (:issue:`3849`, :issue:`4494`) + - Fixed Panel slicing issued in ``xs`` that was returning an incorrect dimmed + object (:issue:`4016`) + - Fix resampling bug where custom reduce function not used if only one group + (:issue:`3849`, :issue:`4494`) - Fixed Panel assignment with a transposed frame (:issue:`3830`) - - Raise on set indexing with a Panel and a Panel as a value which needs alignment (:issue:`3777`) + - Raise on set indexing with a Panel and a Panel as a value which needs + alignment (:issue:`3777`) - frozenset objects now raise in the ``Series`` constructor (:issue:`4482`, :issue:`4480`) - - Fixed issue with sorting a duplicate multi-index that has multiple dtypes (:issue:`4516`) + - Fixed issue with sorting a duplicate multi-index that has multiple dtypes + (:issue:`4516`) - Fixed bug in ``DataFrame.set_values`` which was causing name attributes to be lost when expanding the index. 
(:issue:`3742`, :issue:`4039`) - Fixed issue where individual ``names``, ``levels`` and ``labels`` could be set on ``MultiIndex`` without validation (:issue:`3714`, :issue:`4039`) - - Fixed (:issue:`3334`) in pivot_table. Margins did not compute if values is the index. - - Fix bug in having a rhs of ``np.timedelta64`` or ``np.offsets.DateOffset`` when operating - with datetimes (:issue:`4532`) - - Fix arithmetic with series/datetimeindex and ``np.timedelta64`` not working the same (:issue:`4134`) - and buggy timedelta in numpy 1.6 (:issue:`4135`) - - Fix bug in ``pd.read_clipboard`` on windows with PY3 (:issue:`4561`); not decoding properly + - Fixed (:issue:`3334`) in pivot_table. Margins did not compute if values is + the index. + - Fix bug in having a rhs of ``np.timedelta64`` or ``np.offsets.DateOffset`` + when operating with datetimes (:issue:`4532`) + - Fix arithmetic with series/datetimeindex and ``np.timedelta64`` not working + the same (:issue:`4134`) and buggy timedelta in numpy 1.6 (:issue:`4135`) + - Fix bug in ``pd.read_clipboard`` on windows with PY3 (:issue:`4561`); not + decoding properly - ``tslib.get_period_field()`` and ``tslib.get_period_field_arr()`` now raise if code argument out of range (:issue:`4519`, :issue:`4520`) - Fix boolean indexing on an empty series loses index names (:issue:`4235`), infer_dtype works with empty arrays. - - Fix reindexing with multiple axes; if an axes match was not replacing the current axes, leading - to a possible lazay frequency inference issue (:issue:`3317`) + - Fix reindexing with multiple axes; if an axes match was not replacing the + current axes, leading to a possible lazay frequency inference issue + (:issue:`3317`) - Fixed issue where ``DataFrame.apply`` was reraising exceptions incorrectly (causing the original stack trace to be truncated). 
- Fix selection with ``ix/loc`` and non_unique selectors (:issue:`4619`) - - Fix assignment with iloc/loc involving a dtype change in an existing column (:issue:`4312`) - have internal setitem_with_indexer in core/indexing to use Block.setitem - - Fixed bug where thousands operator was not handled correctly for floating point numbers - in csv_import (:issue:`4322`) - - Fix an issue with CacheableOffset not properly being used by many DateOffset; this prevented - the DateOffset from being cached (:issue:`4609`) - - Fix boolean comparison with a DataFrame on the lhs, and a list/tuple on the rhs (:issue:`4576`) - - Fix error/dtype conversion with setitem of ``None`` on ``Series/DataFrame`` (:issue:`4667`) - - Fix decoding based on a passed in non-default encoding in ``pd.read_stata`` (:issue:`4626`) - - Fix ``DataFrame.from_records`` with a plain-vanilla ``ndarray``. (:issue:`4727`) + - Fix assignment with iloc/loc involving a dtype change in an existing column + (:issue:`4312`) have internal setitem_with_indexer in core/indexing to use + Block.setitem + - Fixed bug where thousands operator was not handled correctly for floating + point numbers in csv_import (:issue:`4322`) + - Fix an issue with CacheableOffset not properly being used by many + DateOffset; this prevented the DateOffset from being cached (:issue:`4609`) + - Fix boolean comparison with a DataFrame on the lhs, and a list/tuple on the + rhs (:issue:`4576`) + - Fix error/dtype conversion with setitem of ``None`` on ``Series/DataFrame`` + (:issue:`4667`) + - Fix decoding based on a passed in non-default encoding in ``pd.read_stata`` + (:issue:`4626`) + - Fix ``DataFrame.from_records`` with a plain-vanilla ``ndarray``. + (:issue:`4727`) - Fix some inconsistencies with ``Index.rename`` and ``MultiIndex.rename``, etc. (:issue:`4718`, :issue:`4628`) - - Bug in using ``iloc/loc`` with a cross-sectional and duplicate indicies (:issue:`4726`) - - Bug with using ``QUOTE_NONE`` with ``to_csv`` causing ``Exception``. 
(:issue:`4328`) - - Bug with Series indexing not raising an error when the right-hand-side has an incorrect length (:issue:`2702`) - - Bug in multi-indexing with a partial string selection as one part of a MultIndex (:issue:`4758`) - - Bug with reindexing on the index with a non-unique index will now raise ``ValueError`` (:issue:`4746`) - - Bug in setting with ``loc/ix`` a single indexer with a multi-index axis and a numpy array, related to (:issue:`3777`) - - Bug in concatenation with duplicate columns across dtypes not merging with axis=0 (:issue:`4771`, :issue:`4975`) + - Bug in using ``iloc/loc`` with a cross-sectional and duplicate indicies + (:issue:`4726`) + - Bug with using ``QUOTE_NONE`` with ``to_csv`` causing ``Exception``. + (:issue:`4328`) + - Bug with Series indexing not raising an error when the right-hand-side has + an incorrect length (:issue:`2702`) + - Bug in multi-indexing with a partial string selection as one part of a + MultIndex (:issue:`4758`) + - Bug with reindexing on the index with a non-unique index will now raise + ``ValueError`` (:issue:`4746`) + - Bug in setting with ``loc/ix`` a single indexer with a multi-index axis and + a numpy array, related to (:issue:`3777`) + - Bug in concatenation with duplicate columns across dtypes not merging with + axis=0 (:issue:`4771`, :issue:`4975`) - Bug in ``iloc`` with a slice index failing (:issue:`4771`) - - Incorrect error message with no colspecs or width in ``read_fwf``. (:issue:`4774`) - - Fix bugs in indexing in a Series with a duplicate index (:issue:`4548`, :issue:`4550`) + - Incorrect error message with no colspecs or width in ``read_fwf``. + (:issue:`4774`) + - Fix bugs in indexing in a Series with a duplicate index (:issue:`4548`, + :issue:`4550`) - Fixed bug with reading compressed files with ``read_fwf`` in Python 3. 
(:issue:`3963`) - - Fixed an issue with a duplicate index and assignment with a dtype change (:issue:`4686`) + - Fixed an issue with a duplicate index and assignment with a dtype change + (:issue:`4686`) - Fixed bug with reading compressed files in as ``bytes`` rather than ``str`` in Python 3. Simplifies bytes-producing file-handling in Python 3 (:issue:`3963`, :issue:`4785`). - Fixed an issue related to ticklocs/ticklabels with log scale bar plots across different versions of matplotlib (:issue:`4789`) - - Suppressed DeprecationWarning associated with internal calls issued by repr() (:issue:`4391`) - - Fixed an issue with a duplicate index and duplicate selector with ``.loc`` (:issue:`4825`) + - Suppressed DeprecationWarning associated with internal calls issued by + repr() (:issue:`4391`) + - Fixed an issue with a duplicate index and duplicate selector with ``.loc`` + (:issue:`4825`) - Fixed an issue with ``DataFrame.sort_index`` where, when sorting by a single column and passing a list for ``ascending``, the argument for ``ascending`` was being interpreted as ``True`` (:issue:`4839`, :issue:`4846`) - - Fixed ``Panel.tshift`` not working. Added `freq` support to ``Panel.shift`` (:issue:`4853`) + - Fixed ``Panel.tshift`` not working. Added `freq` support to ``Panel.shift`` + (:issue:`4853`) - Fix an issue in TextFileReader w/ Python engine (i.e. PythonParser) with thousands != "," (:issue:`4596`) - Bug in getitem with a duplicate index when using where (:issue:`4879`) - Fix Type inference code coerces float column into datetime (:issue:`4601`) - - Fixed ``_ensure_numeric`` does not check for complex numbers (:issue:`4902`) + - Fixed ``_ensure_numeric`` does not check for complex numbers + (:issue:`4902`) - Fixed a bug in ``Series.hist`` where two figures were being created when the ``by`` argument was passed (:issue:`4112`, :issue:`4113`). 
- Fixed a bug in ``convert_objects`` for > 2 ndims (:issue:`4937`) - - Fixed a bug in DataFrame/Panel cache insertion and subsequent indexing (:issue:`4939`) - - Fixed string methods for ``FrozenNDArray`` and ``FrozenList`` (:issue:`4929`) + - Fixed a bug in DataFrame/Panel cache insertion and subsequent indexing + (:issue:`4939`) + - Fixed string methods for ``FrozenNDArray`` and ``FrozenList`` + (:issue:`4929`) - Fixed a bug with setting invalid or out-of-range values in indexing enlargement scenarios (:issue:`4940`) - Tests for fillna on empty Series (:issue:`4346`), thanks @immerrr @@ -570,22 +656,28 @@ Bug Fixes - Fixed skiprows option in Python parser for read_csv (:issue:`4382`) - Fixed bug preventing ``cut`` from working with ``np.inf`` levels without explicitly passing labels (:issue:`3415`) - - Fixed wrong check for overlapping in ``DatetimeIndex.union`` (:issue:`4564`) - - Fixed conflict between thousands separator and date parser in csv_parser (:issue:`4678`) - - Fix appending when dtypes are not the same (error showing mixing float/np.datetime64) (:issue:`4993`) + - Fixed wrong check for overlapping in ``DatetimeIndex.union`` + (:issue:`4564`) + - Fixed conflict between thousands separator and date parser in csv_parser + (:issue:`4678`) + - Fix appending when dtypes are not the same (error showing mixing + float/np.datetime64) (:issue:`4993`) - Fix repr for DateOffset. No longer show duplicate entries in kwds. Removed unused offset fields. (:issue:`4638`) - - Fixed wrong index name during read_csv if using usecols. Applies to c parser only. (:issue:`4201`) + - Fixed wrong index name during read_csv if using usecols. Applies to c + parser only. (:issue:`4201`) - ``Timestamp`` objects can now appear in the left hand side of a comparison operation with a ``Series`` or ``DataFrame`` object (:issue:`4982`). 
- Fix a bug when indexing with ``np.nan`` via ``iloc/loc`` (:issue:`5016`) - - Fixed a bug where low memory c parser could create different types in different - chunks of the same file. Now coerces to numerical type or raises warning. (:issue:`3866`) - - Fix a bug where reshaping a ``Series`` to its own shape raised ``TypeError`` (:issue:`4554`) - and other reshaping issues. + - Fixed a bug where low memory c parser could create different types in + different chunks of the same file. Now coerces to numerical type or raises + warning. (:issue:`3866`) + - Fix a bug where reshaping a ``Series`` to its own shape raised + ``TypeError`` (:issue:`4554`) and other reshaping issues. - Bug in setting with ``ix/loc`` and a mixed int/string index (:issue:`4544`) - Make sure series-series boolean comparions are label based (:issue:`4947`) - - Bug in multi-level indexing with a Timestamp partial indexer (:issue:`4294`) + - Bug in multi-level indexing with a Timestamp partial indexer + (:issue:`4294`) - Tests/fix for multi-index construction of an all-nan frame (:issue:`4078`) - Fixed a bug where :func:`~pandas.read_html` wasn't correctly inferring values of tables with commas (:issue:`5029`) @@ -602,10 +694,12 @@ Bug Fixes (:issue:`5102`). - Fixed a bug where ``groupby.plot()`` and friends were duplicating figures multiple times (:issue:`5102`). - - Provide automatic conversion of ``object`` dtypes on fillna, related (:issue:`5103`) + - Provide automatic conversion of ``object`` dtypes on fillna, related + (:issue:`5103`) - Fixed a bug where default options were being overwritten in the option parser cleaning (:issue:`5121`). 
- - Treat a list/ndarray identically for ``iloc`` indexing with list-like (:issue:`5006`) + - Treat a list/ndarray identically for ``iloc`` indexing with list-like + (:issue:`5006`) - Fix ``MultiIndex.get_level_values()`` with missing values (:issue:`5074`) - Fix bound checking for Timestamp() with datetime64 input (:issue:`4065`) - Fix a bug where ``TestReadHtml`` wasn't calling the correct ``read_html()`` @@ -618,13 +712,16 @@ Bug Fixes context manager. - Fixed segfault on ``isnull(MultiIndex)`` (now raises an error instead) (:issue:`5123`, :issue:`5125`) - - Allow duplicate indices when performing operations that align (:issue:`5185`) - - Compound dtypes in a constructor raise ``NotImplementedError`` (:issue:`5191`) + - Allow duplicate indices when performing operations that align + (:issue:`5185`) + - Compound dtypes in a constructor raise ``NotImplementedError`` + (:issue:`5191`) - Bug in comparing duplicate frames (:issue:`4421`) related - Bug in describe on duplicate frames - - Bug in ``to_datetime`` with a format and ``coerce=True`` not raising (:issue:`5195`) - - Bug in ``loc`` setting with multiple indexers and a rhs of a Series that needs - broadcasting (:issue:`5206`) + - Bug in ``to_datetime`` with a format and ``coerce=True`` not raising + (:issue:`5195`) + - Bug in ``loc`` setting with multiple indexers and a rhs of a Series that + needs broadcasting (:issue:`5206`) - Fixed bug where inplace setting of levels or labels on ``MultiIndex`` would not clear cached ``values`` property and therefore return wrong ``values``. (:issue:`5215`) @@ -638,9 +735,10 @@ Bug Fixes - Fixed seg fault in C parser caused by passing more names than columns in the file. 
(:issue:`5156`) - Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`) - - C and Python Parser can now handle the more common multi-index column format - which doesn't have a row for index names (:issue:`4702`) - - Bug when trying to use an out-of-bounds date as an object dtype (:issue:`5312`) + - C and Python Parser can now handle the more common multi-index column + format which doesn't have a row for index names (:issue:`4702`) + - Bug when trying to use an out-of-bounds date as an object dtype + (:issue:`5312`) - Bug when trying to display an embedded PandasObject (:issue:`5324`) - Allows operating of Timestamps to return a datetime if the result is out-of-bounds related (:issue:`5312`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 704c41819ae71..a48488f57e833 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -345,16 +345,19 @@ def _stat_axis(self): @property def shape(self): + "tuple of axis dimensions" return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property def axes(self): - """ we do it this way because if we have reversed axes, then - the block manager shows then reversed """ + "index(es) of the NDFrame" + # we do it this way because if we have reversed axes, then + # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] @property def ndim(self): + "Number of axes / array dimensions" return self._data.ndim def _expand_axes(self, key): @@ -598,11 +601,18 @@ def __iter__(self): # can we get a better explanation of this? def keys(self): - """ return the info axis names """ + """Get the 'info axis' (see Indexing for more) + + This is index for Series, columns for DataFrame and major_axis for + Panel.""" return self._info_axis - # what does info axis actually mean? def iteritems(self): + """Iterate over (label, values) on info axis + + This is index for Series, columns for DataFrame, major_axis for Panel, + and so on. 
+ """ for h in self._info_axis: yield h, self[h] @@ -610,6 +620,7 @@ def iteritems(self): # Now unnecessary. Sidenote: don't want to deprecate this for a while, # otherwise libraries that use 2to3 will have issues. def iterkv(self, *args, **kwargs): + "iteritems alias used to get around 2to3. Deprecated" warnings.warn("iterkv is deprecated and will be removed in a future " "release, use ``iteritems`` instead.", DeprecationWarning) return self.iteritems(*args, **kwargs) @@ -624,6 +635,7 @@ def __contains__(self, key): @property def empty(self): + "True if NDFrame is entirely empty [no items]" return not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS) def __nonzero__(self): @@ -664,6 +676,7 @@ def __array_wrap__(self, result): return self._constructor(result, **d).__finalize__(self) def to_dense(self): + "Return dense representation of NDFrame (as opposed to sparse)" # compat return self @@ -857,12 +870,14 @@ def to_pickle(self, path): return to_pickle(self, path) def save(self, path): # TODO remove in 0.14 + "Deprecated. Use to_pickle instead" import warnings from pandas.io.pickle import to_pickle warnings.warn("save is deprecated, use to_pickle", FutureWarning) return to_pickle(self, path) def load(self, path): # TODO remove in 0.14 + "Deprecated. Use read_pickle instead." 
import warnings from pandas.io.pickle import read_pickle warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning) @@ -1562,6 +1577,7 @@ def as_matrix(self, columns=None): @property def values(self): + "Numpy representation of NDFrame" return self.as_matrix() @property @@ -1611,6 +1627,7 @@ def as_blocks(self, columns=None): @property def blocks(self): + "Internal property, property synonym for as_blocks()" return self.as_blocks() def astype(self, dtype, copy=True, raise_on_error=True): @@ -1777,10 +1794,12 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, return self._constructor(new_data).__finalize__(self) def ffill(self, axis=0, inplace=False, limit=None, downcast=None): + "Synonym for NDFrame.fillna(method='ffill')" return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill(self, axis=0, inplace=False, limit=None, downcast=None): + "Synonym for NDFrame.fillna(method='bfill')" return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index f35070c634aa1..04ace84cace37 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1238,7 +1238,7 @@ def na_op(x, y): # not sure whether it's worth it at the moment result = com._fill_zeros(result,y,fill_zeros) return result - @Substitution(op) + @Substitution(name) @Appender(_agg_doc) def f(self, other, axis=0): return self._combine(other, na_op, axis=axis)
1. Panel flex docstrings had str representation of functions like `<builtin-func add..>` (not pretty). Fixed to be name 2. Added some docstrings to miscellaneous properties and clarified meaning of info_axis/keys. 3. Wrap 0.13's release.rst at 80 characters per line. I'm happy to revert (3) above...just was bothering me (most of the doc breaks at 80) Key change is here - http://jtratner.github.io/example-pandas-docs/html-minor-doc-fixup-10-25/api.html#id17
https://api.github.com/repos/pandas-dev/pandas/pulls/5336
2013-10-26T02:27:57Z
2013-10-28T01:18:49Z
2013-10-28T01:18:49Z
2014-06-13T13:53:33Z
BUG: empty series not printing name in repr (#4651)
diff --git a/doc/source/release.rst b/doc/source/release.rst index d6a98157c76d3..6e10bd651d90a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -768,7 +768,7 @@ Bug Fixes elements. (:issue:`5372`) - The GroupBy methods ``transform`` and ``filter`` can be used on Series and DataFrames that have repeated (non-unique) indices. (:issue:`4620`) - + - Fix empty series not printing name in repr (:issue:`4651`) pandas 0.12.0 ------------- diff --git a/pandas/core/series.py b/pandas/core/series.py index d3cc53d0bc9fc..798183a29c48b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -844,8 +844,11 @@ def __unicode__(self): length=len(self) > 50, name=True, dtype=True) + elif self.name is None: + result = u('Series([], dtype: %s)') % (self.dtype) else: - result = u('Series([], dtype: %s)') % self.dtype + result = u('Series([], name: %s, dtype: %s)') % (self.name, + self.dtype) return result def _tidy_repr(self, max_vals=20): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4405fcc778886..fd9fb0ef0d79a 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1580,6 +1580,13 @@ def test_repr(self): self.assertFalse("\r" in repr(ser)) self.assertFalse("a\n" in repr(ser)) + # with empty series (#4651) + s = Series([], dtype=np.int64, name='foo') + self.assertEqual(repr(s), 'Series([], name: foo, dtype: int64)') + + s = Series([], dtype=np.int64, name=None) + self.assertEqual(repr(s), 'Series([], dtype: int64)') + def test_tidy_repr(self): a = Series([u("\u05d0")] * 1000) a.name = 'title1'
Implemented as suggested in #4651
https://api.github.com/repos/pandas-dev/pandas/pulls/5335
2013-10-26T00:42:38Z
2013-11-02T20:58:19Z
2013-11-02T20:58:19Z
2014-06-23T09:41:29Z
CLN: Fix return type for initObjToJSON()
diff --git a/doc/source/release.rst b/doc/source/release.rst index b35f8298c815a..b74b23029a2ac 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -640,6 +640,8 @@ Bug Fixes - Bug when trying to display an embedded PandasObject (:issue:`5324`) - Allows operating of Timestamps to return a datetime if the result is out-of-bounds related (:issue:`5312`) + - Fix return value/type signature of ``initObjToJSON()`` to be compatible + with numpy's ``import_array()`` (:issue:`5334`, :issue:`5326`) pandas 0.12.0 ------------- diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c index 8bcef1d0adc00..13d403cdb2b7b 100644 --- a/pandas/src/ujson/python/objToJSON.c +++ b/pandas/src/ujson/python/objToJSON.c @@ -145,10 +145,11 @@ enum PANDAS_FORMAT //#define PRINTMARK() fprintf(stderr, "%s: MARK(%d)\n", __FILE__, __LINE__) #define PRINTMARK() -#if (PY_VERSION_HEX < 0x03000000) -void initObjToJSON(void) +// import_array() compat +#if (PY_VERSION_HEX >= 0x03000000) +void *initObjToJSON(void) #else -int initObjToJSON(void) +void initObjToJSON(void) #endif { PyObject *mod_pandas; @@ -176,8 +177,9 @@ int initObjToJSON(void) Py_DECREF(mod_tslib); } - /* Initialise numpy API */ + /* Initialise numpy API and use 2/3 compatible return */ import_array(); + return NUMPY_IMPORT_ARRAY_RETVAL; } static void *PyIntToINT32(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen)
Make it so that it always returns the same thing as numpy, so that it matches the right signature whether in or not in the error condition. Closes #5326.
https://api.github.com/repos/pandas-dev/pandas/pulls/5334
2013-10-26T00:17:03Z
2013-10-26T14:13:31Z
2013-10-26T14:13:31Z
2014-06-25T20:51:28Z
ENH: Add option to highlight NaN cells
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 13f7a3dbe7d4a..e5e1dac73f070 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -181,6 +181,15 @@ Setting this to None/False restores the values to their initial value. """ + +pc_highlight_nan_doc = """ +: bool + + When True, NaN elements in the HTML represenation of Series or + DataFrames are displayed with a yellow background. + +""" + style_backup = dict() def mpl_style_cb(key): import sys @@ -245,6 +254,8 @@ def mpl_style_cb(key): validator=is_instance_factory([type(None), int])) # redirected to width, make defval identical cf.register_option('line_width', get_default_val('display.width'), pc_line_width_doc) + cf.register_option('highlight_nan', False, pc_highlight_nan_doc, + validator=is_bool) cf.deprecate_option('display.line_width', msg=pc_line_width_deprecation_warning, diff --git a/pandas/core/format.py b/pandas/core/format.py index 2355ae16874ce..158a2975f77fe 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -456,11 +456,12 @@ def _format_col(self, i): na_rep=self.na_rep, space=self.col_space) - def to_html(self, classes=None): + def to_html(self, classes=None, highlight_nan=False): """ Render a DataFrame to a html table. 
""" - html_renderer = HTMLFormatter(self, classes=classes) + html_renderer = HTMLFormatter(self, classes=classes, + highlight_nan=highlight_nan) if hasattr(self.buf, 'write'): html_renderer.write_result(self.buf) elif isinstance(self.buf, compat.string_types): @@ -558,9 +559,10 @@ class HTMLFormatter(TableFormatter): indent_delta = 2 - def __init__(self, formatter, classes=None): + def __init__(self, formatter, classes=None, highlight_nan=False): self.fmt = formatter self.classes = classes + self.highlight_nan = highlight_nan self.frame = self.fmt.frame self.columns = formatter.columns @@ -581,6 +583,9 @@ def write_th(self, s, indent=0, tags=None): return self._write_cell(s, kind='th', indent=indent, tags=tags) def write_td(self, s, indent=0, tags=None): + tags = (tags or "") + tags += ' style="background-color:yellow"' if \ + self.highlight_nan and s == self.fmt.na_rep else "" return self._write_cell(s, kind='td', indent=indent, tags=tags) def _write_cell(self, s, kind='td', indent=0, tags=None): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b485d51514162..9165fbfa96083 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -485,9 +485,10 @@ def _repr_html_(self): ignore_width=ipnbh) if fits_horizontal and fits_vertical: + hn = get_option("display.highlight_nan") return ('<div style="max-height:1000px;' 'max-width:1500px;overflow:auto;">\n' + - self.to_html() + '\n</div>') + self.to_html(highlight_nan=hn) + '\n</div>') else: buf = StringIO(u("")) max_info_rows = get_option('display.max_info_rows') @@ -1289,7 +1290,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, force_unicode=None, bold_rows=True, - classes=None, escape=True): + classes=None, escape=True, highlight_nan=False): """ to_html-specific options @@ -1299,6 +1300,8 @@ def to_html(self, buf=None, columns=None, col_space=None, 
colSpace=None, CSS class(es) to apply to the resulting html table escape : boolean, default True Convert the characters <, >, and & to HTML-safe sequences. + highlight_nan: boolean, default False + Display NaN cells with yellow background Render a DataFrame as an HTML table. """ @@ -1322,7 +1325,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=header, index=index, bold_rows=bold_rows, escape=escape) - formatter.to_html(classes=classes) + formatter.to_html(classes=classes, highlight_nan=highlight_nan) if buf is None: return formatter.buf.getvalue() diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index d9bf8adb71298..3d3f6a0409932 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -1246,6 +1246,13 @@ def test_to_html_with_no_bold(self): ashtml = x.to_html(bold_rows=False) assert('<strong>' not in ashtml[ashtml.find('</thead>')]) + + def test_to_html_with_highlight_nan(self): + x = DataFrame({'c1': [1, 2], 'c2': [3, nan]}) + ashtml = x.to_html(highlight_nan=True) + assert('style="background-color:yellow"' in ashtml) + + def test_to_html_columns_arg(self): result = self.frame.to_html(columns=['A']) self.assert_('<th>B</th>' not in result)
Useful for quickly inspecting data in an ipynb - Add display.highlight_nan option (default True) - Add argument to DataFrame.to_html and HTMLFormatter.__init__ - Add test
https://api.github.com/repos/pandas-dev/pandas/pulls/5330
2013-10-25T19:53:01Z
2013-11-21T13:59:41Z
null
2014-06-14T01:25:10Z
BUG: repr/str of storers in HDFStore was calling an incorrect method
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 999f0751abe99..05528d5c0d407 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1914,9 +1914,6 @@ def __unicode__(self): return "%-12.12s (shape->%s)" % (self.pandas_type, s) return self.pandas_type - def __str__(self): - return self.__repr__() - def set_object_info(self): """ set my pandas type & version """ self.attrs.pandas_type = str(self.pandas_kind) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 730a9d907f496..fe60352845316 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -313,6 +313,16 @@ def test_repr(self): repr(store) str(store) + # storers + with ensure_clean(self.path) as store: + + df = tm.makeDataFrame() + store.append('df',df) + + s = store.get_storer('df') + repr(s) + str(s) + def test_contains(self): with ensure_clean(self.path) as store:
https://api.github.com/repos/pandas-dev/pandas/pulls/5328
2013-10-25T19:11:25Z
2013-10-25T19:23:46Z
2013-10-25T19:23:46Z
2014-07-16T08:37:00Z
TST/BUG: allow invalid Timestamps to pass thru as datetimes when operating with offsets
diff --git a/doc/source/release.rst b/doc/source/release.rst index af59137a194b0..b35f8298c815a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -638,6 +638,8 @@ Bug Fixes which doesn't have a row for index names (:issue:`4702`) - Bug when trying to use an out-of-bounds date as an object dtype (:issue:`5312`) - Bug when trying to display an embedded PandasObject (:issue:`5324`) + - Allows operating of Timestamps to return a datetime if the result is out-of-bounds + related (:issue:`5312`) pandas 0.12.0 ------------- diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 07efbcfdcd7ba..8830d66b245ef 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -8,17 +8,32 @@ # import after tools, dateutil check from dateutil.relativedelta import relativedelta, weekday import pandas.tslib as tslib -from pandas.tslib import Timestamp +from pandas.tslib import Timestamp, OutOfBoundsDatetime + from pandas import _np_version_under1p7 __all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay', 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd', 'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd', 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd', - 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253', + 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253', 'Week', 'WeekOfMonth', 'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano'] +# convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru +def as_timestamp(obj): + try: + return Timestamp(obj) + except (OutOfBoundsDatetime): + pass + return obj + +def as_datetime(obj): + f = getattr(obj,'to_pydatetime',None) + if f is not None: + obj = f() + return obj + #---------------------------------------------------------------------- # DateOffset @@ -87,6 +102,7 @@ def __init__(self, n=1, **kwds): self._offset = timedelta(1) def apply(self, other): + other = as_datetime(other) if len(self.kwds) > 0: if self.n > 0: for i in range(self.n): @@ -94,9 +110,9 
@@ def apply(self, other): else: for i in range(-self.n): other = other - self._offset - return Timestamp(other) + return as_timestamp(other) else: - return Timestamp(other + timedelta(self.n)) + return as_timestamp(other + timedelta(self.n)) def isAnchored(self): return (self.n == 1) @@ -394,7 +410,7 @@ def apply(self, other): if self.offset: result = result + self.offset - return Timestamp(result) + return as_timestamp(result) elif isinstance(other, (timedelta, Tick)): return BDay(self.n, offset=self.offset + other, @@ -540,11 +556,11 @@ def apply(self, other): n = self.n _, days_in_month = tslib.monthrange(other.year, other.month) if other.day != days_in_month: - other = other + relativedelta(months=-1, day=31) + other = as_datetime(other) + relativedelta(months=-1, day=31) if n <= 0: n = n + 1 - other = other + relativedelta(months=n, day=31) - return Timestamp(other) + other = as_datetime(other) + relativedelta(months=n, day=31) + return as_timestamp(other) @classmethod def onOffset(cls, dt): @@ -563,8 +579,8 @@ def apply(self, other): if other.day > 1 and n <= 0: # then roll forward if n<=0 n += 1 - other = other + relativedelta(months=n, day=1) - return Timestamp(other) + other = as_datetime(other) + relativedelta(months=n, day=1) + return as_timestamp(other) @classmethod def onOffset(cls, dt): @@ -592,7 +608,7 @@ def apply(self, other): n = n - 1 elif n <= 0 and other.day > lastBDay: n = n + 1 - other = other + relativedelta(months=n, day=31) + other = as_datetime(other) + relativedelta(months=n, day=31) if other.weekday() > 4: other = other - BDay() @@ -614,14 +630,14 @@ def apply(self, other): # as if rolled forward already n += 1 elif other.day < first and n > 0: - other = other + timedelta(days=first - other.day) + other = as_datetime(other) + timedelta(days=first - other.day) n -= 1 - other = other + relativedelta(months=n) + other = as_datetime(other) + relativedelta(months=n) wkday, _ = tslib.monthrange(other.year, other.month) first = 
_get_firstbday(wkday) result = datetime(other.year, other.month, first) - return result + return as_timestamp(result) @classmethod def onOffset(cls, dt): @@ -663,24 +679,26 @@ def isAnchored(self): def apply(self, other): if self.weekday is None: - return other + self.n * self._inc + return as_datetime(other) + self.n * self._inc if self.n > 0: k = self.n otherDay = other.weekday() if otherDay != self.weekday: - other = other + timedelta((self.weekday - otherDay) % 7) + other = as_datetime(other) + timedelta((self.weekday - otherDay) % 7) k = k - 1 + other = as_datetime(other) for i in range(k): other = other + self._inc else: k = self.n otherDay = other.weekday() if otherDay != self.weekday: - other = other + timedelta((self.weekday - otherDay) % 7) + other = as_datetime(other) + timedelta((self.weekday - otherDay) % 7) + other = as_datetime(other) for i in range(-k): other = other - self._inc - return Timestamp(other) + return as_timestamp(other) def onOffset(self, dt): return dt.weekday() == self.weekday @@ -709,7 +727,7 @@ class WeekDay(object): THU = 3 FRI = 4 SAT = 5 - SUN = 6 + SUN = 6 _int_to_weekday = { WeekDay.MON: 'MON', @@ -776,8 +794,8 @@ def apply(self, other): else: months = self.n + 1 - return self.getOffsetOfMonth(other + relativedelta(months=months, - day=1)) + return self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, + day=1)) def getOffsetOfMonth(self, dt): w = Week(weekday=self.weekday) @@ -809,7 +827,7 @@ def _from_name(cls, suffix=None): week = int(suffix[0]) - 1 weekday = _weekday_to_int[suffix[1:]] return cls(week=week, weekday=weekday) - + class LastWeekOfMonth(CacheableOffset, DateOffset): """ Describes monthly dates in last week of month like "the last Tuesday of each month" @@ -855,16 +873,16 @@ def apply(self, other): else: months = self.n + 1 - return self.getOffsetOfMonth(other + relativedelta(months=months, day=1)) + return self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, day=1)) def 
getOffsetOfMonth(self, dt): m = MonthEnd() d = datetime(dt.year, dt.month, 1) eom = m.rollforward(d) - + w = Week(weekday=self.weekday) - + return w.rollback(eom) def onOffset(self, dt): @@ -948,7 +966,7 @@ def apply(self, other): elif n <= 0 and other.day > lastBDay and monthsToGo == 0: n = n + 1 - other = other + relativedelta(months=monthsToGo + 3 * n, day=31) + other = as_datetime(other) + relativedelta(months=monthsToGo + 3 * n, day=31) if other.weekday() > 4: other = other - BDay() @@ -962,22 +980,22 @@ def onOffset(self, dt): class FY5253(CacheableOffset, DateOffset): """ Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar. - - It is used by companies that desire that their + + It is used by companies that desire that their fiscal year always end on the same day of the week. - - It is a method of managing accounting periods. - It is a common calendar structure for some industries, + + It is a method of managing accounting periods. + It is a common calendar structure for some industries, such as retail, manufacturing and parking industry. - - For more information see: + + For more information see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar - - - The year may either: + + + The year may either: - end on the last X day of the Y month. - end on the last X day closest to the last day of the Y month. - + X is a specific day of the week. 
Y is a certain month of the year @@ -996,41 +1014,41 @@ class FY5253(CacheableOffset, DateOffset): variation : str {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth" """ - + _prefix = 'RE' _suffix_prefix_last = 'L' _suffix_prefix_nearest = 'N' - + def __init__(self, n=1, **kwds): self.n = n self.startingMonth = kwds['startingMonth'] self.weekday = kwds["weekday"] - + self.variation = kwds["variation"] - + self.kwds = kwds - + if self.n == 0: raise ValueError('N cannot be 0') - + if self.variation not in ["nearest", "last"]: raise ValueError('%s is not a valid variation' % self.variation) - + if self.variation == "nearest": self._rd_forward = relativedelta(weekday=weekday(self.weekday)) self._rd_backward = relativedelta(weekday=weekday(self.weekday)(-1)) else: - self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday) - + self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday) + def isAnchored(self): return self.n == 1 \ and self.startingMonth is not None \ - and self.weekday is not None - + and self.weekday is not None + def onOffset(self, dt): year_end = self.get_year_end(dt) return year_end == dt - + def apply(self, other): n = self.n if n > 0: @@ -1039,10 +1057,10 @@ def apply(self, other): other = year_end n -= 1 elif other > year_end: - other = self.get_year_end(other + relativedelta(years=1)) + other = self.get_year_end(as_datetime(other) + relativedelta(years=1)) n -= 1 - - return self.get_year_end(other + relativedelta(years=n)) + + return self.get_year_end(as_datetime(other) + relativedelta(years=n)) else: n = -n year_end = self.get_year_end(other) @@ -1050,10 +1068,10 @@ def apply(self, other): other = year_end n -= 1 elif other < year_end: - other = self.get_year_end(other + relativedelta(years=-1)) + other = self.get_year_end(as_datetime(other) + relativedelta(years=-1)) n -= 1 - - return self.get_year_end(other + relativedelta(years=-n)) + + return self.get_year_end(as_datetime(other) + relativedelta(years=-n)) def 
get_year_end(self, dt): if self.variation == "nearest": @@ -1065,7 +1083,7 @@ def get_target_month_end(self, dt): target_month = datetime(year=dt.year, month=self.startingMonth, day=1) next_month_first_of = target_month + relativedelta(months=+1) return next_month_first_of + relativedelta(days=-1) - + def _get_year_end_nearest(self, dt): target_date = self.get_target_month_end(dt) if target_date.weekday() == self.weekday: @@ -1073,21 +1091,21 @@ def _get_year_end_nearest(self, dt): else: forward = target_date + self._rd_forward backward = target_date + self._rd_backward - + if forward - target_date < target_date - backward: return forward else: return backward - + def _get_year_end_last(self, dt): current_year = datetime(year=dt.year, month=self.startingMonth, day=1) return current_year + self._offset_lwom - + @property def rule_code(self): suffix = self.get_rule_code_suffix() return "%s-%s" % (self._get_prefix(), suffix) - + def _get_prefix(self): return self._prefix @@ -1096,12 +1114,12 @@ def _get_suffix_prefix(self): return self._suffix_prefix_nearest else: return self._suffix_prefix_last - + def get_rule_code_suffix(self): return '%s-%s-%s' % (self._get_suffix_prefix(), \ _int_to_month[self.startingMonth], \ _int_to_weekday[self.weekday]) - + @classmethod def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code): if varion_code == "N": @@ -1110,42 +1128,42 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code): variation = "last" else: raise ValueError("Unable to parse varion_code: %s" % (varion_code,)) - + startingMonth = _month_to_int[startingMonth_code] weekday = _weekday_to_int[weekday_code] - + return { - "weekday":weekday, + "weekday":weekday, "startingMonth":startingMonth, "variation":variation, } - + @classmethod def _from_name(cls, *args): return cls(**cls._parse_suffix(*args)) - + class FY5253Quarter(CacheableOffset, DateOffset): """ DateOffset increments between business quarter dates for 52-53 week fiscal year (also 
known as a 4-4-5 calendar). - - It is used by companies that desire that their + + It is used by companies that desire that their fiscal year always end on the same day of the week. - - It is a method of managing accounting periods. - It is a common calendar structure for some industries, + + It is a method of managing accounting periods. + It is a common calendar structure for some industries, such as retail, manufacturing and parking industry. - - For more information see: + + For more information see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar - - The year may either: + + The year may either: - end on the last X day of the Y month. - end on the last X day closest to the last day of the Y month. - + X is a specific day of the week. Y is a certain month of the year - + startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ... startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ... startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ... @@ -1162,35 +1180,36 @@ class FY5253Quarter(CacheableOffset, DateOffset): 5: Saturdays 6: Sundays startingMonth : The month in which fiscal years end. {1, 2, ... 12} - qtr_with_extra_week : The quarter number that has the leap + qtr_with_extra_week : The quarter number that has the leap or 14 week when needed. 
{1,2,3,4} variation : str - {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth" + {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth" """ - + _prefix = 'REQ' - + def __init__(self, n=1, **kwds): self.n = n - + self.qtr_with_extra_week = kwds["qtr_with_extra_week"] self.kwds = kwds if self.n == 0: raise ValueError('N cannot be 0') - + self._offset = FY5253( \ startingMonth=kwds['startingMonth'], \ weekday=kwds["weekday"], variation=kwds["variation"]) - + def isAnchored(self): return self.n == 1 and self._offset.isAnchored() def apply(self, other): + other = as_datetime(other) n = self.n - + if n > 0: while n > 0: if not self._offset.onOffset(other): @@ -1199,14 +1218,14 @@ def apply(self, other): else: start = other qtr_lens = self.get_weeks(other + self._offset) - + for weeks in qtr_lens: start += relativedelta(weeks=weeks) if start > other: other = start n -= 1 break - + else: n = -n while n > 0: @@ -1216,7 +1235,7 @@ def apply(self, other): else: end = other qtr_lens = self.get_weeks(other) - + for weeks in reversed(qtr_lens): end -= relativedelta(weeks=weeks) if end < other: @@ -1225,53 +1244,53 @@ def apply(self, other): break return other - + def get_weeks(self, dt): ret = [13] * 4 year_has_extra_week = self.year_has_extra_week(dt) - + if year_has_extra_week: ret[self.qtr_with_extra_week-1] = 14 - + return ret - + def year_has_extra_week(self, dt): if self._offset.onOffset(dt): prev_year_end = dt - self._offset next_year_end = dt - else: + else: next_year_end = dt + self._offset prev_year_end = dt - self._offset - + week_in_year = (next_year_end - prev_year_end).days/7 - return week_in_year == 53 - + return week_in_year == 53 + def onOffset(self, dt): if self._offset.onOffset(dt): return True - + next_year_end = dt - self._offset qtr_lens = self.get_weeks(dt) - + current = next_year_end for qtr_len in qtr_lens[0:4]: current += relativedelta(weeks=qtr_len) if dt == current: return True return False - + @property def rule_code(self): suffix = 
self._offset.get_rule_code_suffix() return "%s-%s" %(self._prefix, "%s-%d" % (suffix, self.qtr_with_extra_week)) - + @classmethod def _from_name(cls, *args): return cls(**dict(FY5253._parse_suffix(*args[:-1]), qtr_with_extra_week=int(args[-1]))) - + _int_to_month = { 1: 'JAN', 2: 'FEB', @@ -1300,6 +1319,7 @@ class BQuarterBegin(CacheableOffset, QuarterOffset): def apply(self, other): n = self.n + other = as_datetime(other) wkday, _ = tslib.monthrange(other.year, other.month) @@ -1324,7 +1344,7 @@ def apply(self, other): result = datetime(other.year, other.month, first, other.hour, other.minute, other.second, other.microsecond) - return result + return as_timestamp(result) class QuarterEnd(CacheableOffset, QuarterOffset): @@ -1348,6 +1368,7 @@ def isAnchored(self): def apply(self, other): n = self.n + other = as_datetime(other) wkday, days_in_month = tslib.monthrange(other.year, other.month) @@ -1360,7 +1381,7 @@ def apply(self, other): other = other + relativedelta(months=monthsToGo + 3 * n, day=31) - return Timestamp(other) + return as_timestamp(other) def onOffset(self, dt): modMonth = (dt.month - self.startingMonth) % 3 @@ -1378,6 +1399,7 @@ def isAnchored(self): def apply(self, other): n = self.n + other = as_datetime(other) wkday, days_in_month = tslib.monthrange(other.year, other.month) @@ -1392,7 +1414,7 @@ def apply(self, other): n = n + 1 other = other + relativedelta(months=3 * n - monthsSince, day=1) - return Timestamp(other) + return as_timestamp(other) class YearOffset(DateOffset): @@ -1426,6 +1448,7 @@ class BYearEnd(CacheableOffset, YearOffset): def apply(self, other): n = self.n + other = as_datetime(other) wkday, days_in_month = tslib.monthrange(other.year, self.month) lastBDay = (days_in_month - @@ -1462,6 +1485,7 @@ class BYearBegin(CacheableOffset, YearOffset): def apply(self, other): n = self.n + other = as_datetime(other) wkday, days_in_month = tslib.monthrange(other.year, self.month) @@ -1482,7 +1506,7 @@ def apply(self, other): other = other 
+ relativedelta(years=years) wkday, days_in_month = tslib.monthrange(other.year, self.month) first = _get_firstbday(wkday) - return datetime(other.year, self.month, first) + return as_timestamp(datetime(other.year, self.month, first)) class YearEnd(CacheableOffset, YearOffset): @@ -1534,7 +1558,7 @@ def _rollf(date): # n == 0, roll forward result = _rollf(result) - return Timestamp(result) + return as_timestamp(result) def onOffset(self, dt): wkday, days_in_month = tslib.monthrange(dt.year, self.month) @@ -1581,7 +1605,7 @@ def _rollf(date): # n == 0, roll forward result = _rollf(result) - return Timestamp(result) + return as_timestamp(result) def onOffset(self, dt): return dt.month == self.month and dt.day == 1 @@ -1848,7 +1872,7 @@ def generate_range(start=None, end=None, periods=None, Day, # 'D' WeekOfMonth, # 'WOM' FY5253, - FY5253Quarter, + FY5253Quarter, ]) if not _np_version_under1p7: diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 7ebe6c0cfb728..f66f57cc45409 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -21,7 +21,7 @@ from pandas.tseries.tools import parse_time_string import pandas.tseries.offsets as offsets -from pandas.tslib import monthrange +from pandas.tslib import monthrange, OutOfBoundsDatetime from pandas.lib import Timestamp from pandas.util.testing import assertRaisesRegexp import pandas.util.testing as tm @@ -95,8 +95,26 @@ def test_to_m8(): ### DateOffset Tests ##### +class TestBase(unittest.TestCase): + _offset = None -class TestDateOffset(unittest.TestCase): + def test_apply_out_of_range(self): + if self._offset is None: + raise nose.SkipTest("_offset not defined") + + # try to create an out-of-bounds result timestamp; if we can't create the offset + # skip + try: + offset = self._offset(10000) + + result = Timestamp('20080101') + offset + self.assert_(isinstance(result, datetime)) + except (OutOfBoundsDatetime): + raise + except (ValueError, 
KeyError): + raise nose.SkipTest("cannot create out_of_range offset") + +class TestDateOffset(TestBase): _multiprocess_can_split_ = True def setUp(self): @@ -137,8 +155,9 @@ def test_eq(self): self.assert_(not (offset1 == offset2)) -class TestBusinessDay(unittest.TestCase): +class TestBusinessDay(TestBase): _multiprocess_can_split_ = True + _offset = BDay def setUp(self): self.d = datetime(2008, 1, 1) @@ -310,8 +329,9 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestCustomBusinessDay(unittest.TestCase): +class TestCustomBusinessDay(TestBase): _multiprocess_can_split_ = True + _offset = CDay def setUp(self): self.d = datetime(2008, 1, 1) @@ -531,7 +551,9 @@ def assertOnOffset(offset, date, expected): (expected, actual, offset, date)) -class TestWeek(unittest.TestCase): +class TestWeek(TestBase): + _offset = Week + def test_repr(self): self.assertEqual(repr(Week(weekday=0)), "<Week: weekday=0>") self.assertEqual(repr(Week(n=-1, weekday=0)), "<-1 * Week: weekday=0>") @@ -600,7 +622,8 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestWeekOfMonth(unittest.TestCase): +class TestWeekOfMonth(TestBase): + _offset = WeekOfMonth def test_constructor(self): assertRaisesRegexp(ValueError, "^N cannot be 0", WeekOfMonth, n=0, week=1, weekday=1) @@ -678,11 +701,13 @@ def test_onOffset(self): offset = WeekOfMonth(week=week, weekday=weekday) self.assert_(offset.onOffset(date) == expected) -class TestLastWeekOfMonth(unittest.TestCase): +class TestLastWeekOfMonth(TestBase): + _offset = LastWeekOfMonth + def test_constructor(self): assertRaisesRegexp(ValueError, "^N cannot be 0", \ LastWeekOfMonth, n=0, weekday=1) - + assertRaisesRegexp(ValueError, "^Day", LastWeekOfMonth, n=1, weekday=-1) assertRaisesRegexp(ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7) @@ -691,40 +716,40 @@ def test_offset(self): last_sat = datetime(2013,8,31) next_sat = datetime(2013,9,28) offset_sat = LastWeekOfMonth(n=1, 
weekday=5) - - one_day_before = (last_sat + timedelta(days=-1)) + + one_day_before = (last_sat + timedelta(days=-1)) self.assert_(one_day_before + offset_sat == last_sat) - + one_day_after = (last_sat + timedelta(days=+1)) self.assert_(one_day_after + offset_sat == next_sat) - + #Test On that day self.assert_(last_sat + offset_sat == next_sat) - + #### Thursday - + offset_thur = LastWeekOfMonth(n=1, weekday=3) last_thurs = datetime(2013,1,31) next_thurs = datetime(2013,2,28) - + one_day_before = last_thurs + timedelta(days=-1) self.assert_(one_day_before + offset_thur == last_thurs) - + one_day_after = last_thurs + timedelta(days=+1) self.assert_(one_day_after + offset_thur == next_thurs) - + # Test on that day self.assert_(last_thurs + offset_thur == next_thurs) - + three_before = last_thurs + timedelta(days=-3) self.assert_(three_before + offset_thur == last_thurs) - + two_after = last_thurs + timedelta(days=+2) self.assert_(two_after + offset_thur == next_thurs) - + offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN) self.assert_(datetime(2013,7,31) + offset_sunday == datetime(2013,8,25)) - + def test_onOffset(self): test_cases = [ (WeekDay.SUN, datetime(2013, 1, 27), True), @@ -733,7 +758,7 @@ def test_onOffset(self): (WeekDay.SUN, datetime(2013, 2, 25), False), #Not a SUN (WeekDay.MON, datetime(2013, 2, 25), True), (WeekDay.SAT, datetime(2013, 11, 30), True), - + (WeekDay.SAT, datetime(2006, 8, 26), True), (WeekDay.SAT, datetime(2007, 8, 25), True), (WeekDay.SAT, datetime(2008, 8, 30), True), @@ -742,13 +767,15 @@ def test_onOffset(self): (WeekDay.SAT, datetime(2011, 8, 27), True), (WeekDay.SAT, datetime(2019, 8, 31), True), ] - + for weekday, date, expected in test_cases: offset = LastWeekOfMonth(weekday=weekday) self.assert_(offset.onOffset(date) == expected, date) -class TestBMonthBegin(unittest.TestCase): +class TestBMonthBegin(TestBase): + _offset = BMonthBegin + def test_offset(self): tests = [] @@ -808,7 +835,8 @@ def 
test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestBMonthEnd(unittest.TestCase): +class TestBMonthEnd(TestBase): + _offset = BMonthEnd def test_offset(self): tests = [] @@ -870,7 +898,8 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) -class TestMonthBegin(unittest.TestCase): +class TestMonthBegin(TestBase): + _offset = MonthBegin def test_offset(self): tests = [] @@ -910,7 +939,8 @@ def test_offset(self): assertEq(offset, base, expected) -class TestMonthEnd(unittest.TestCase): +class TestMonthEnd(TestBase): + _offset = MonthEnd def test_offset(self): tests = [] @@ -977,7 +1007,8 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBQuarterBegin(unittest.TestCase): +class TestBQuarterBegin(TestBase): + _offset = BQuarterBegin def test_repr(self): self.assertEqual(repr(BQuarterBegin()),"<BusinessQuarterBegin: startingMonth=3>") @@ -1067,7 +1098,8 @@ def test_offset(self): self.assertEqual(datetime(2007, 4, 3) + offset, datetime(2007, 4, 2)) -class TestBQuarterEnd(unittest.TestCase): +class TestBQuarterEnd(TestBase): + _offset = BQuarterEnd def test_repr(self): self.assertEqual(repr(BQuarterEnd()),"<BusinessQuarterEnd: startingMonth=3>") @@ -1185,13 +1217,14 @@ def makeFY5253NearestEndMonth(*args, **kwds): def makeFY5253LastOfMonth(*args, **kwds): return FY5253(*args, variation="last", **kwds) - -class TestFY5253LastOfMonth(unittest.TestCase): + +class TestFY5253LastOfMonth(TestBase): + def test_onOffset(self): offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8, weekday=WeekDay.SAT) offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9, weekday=WeekDay.SAT) - + tests = [ #From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end) (offset_lom_sat_aug, datetime(2006, 8, 26), True), @@ -1208,43 +1241,43 @@ def test_onOffset(self): (offset_lom_sat_aug, datetime(2017, 8, 26), True), 
(offset_lom_sat_aug, datetime(2018, 8, 25), True), (offset_lom_sat_aug, datetime(2019, 8, 31), True), - + (offset_lom_sat_aug, datetime(2006, 8, 27), False), (offset_lom_sat_aug, datetime(2007, 8, 28), False), (offset_lom_sat_aug, datetime(2008, 8, 31), False), (offset_lom_sat_aug, datetime(2009, 8, 30), False), (offset_lom_sat_aug, datetime(2010, 8, 29), False), (offset_lom_sat_aug, datetime(2011, 8, 28), False), - + (offset_lom_sat_aug, datetime(2006, 8, 25), False), (offset_lom_sat_aug, datetime(2007, 8, 24), False), (offset_lom_sat_aug, datetime(2008, 8, 29), False), (offset_lom_sat_aug, datetime(2009, 8, 28), False), (offset_lom_sat_aug, datetime(2010, 8, 27), False), (offset_lom_sat_aug, datetime(2011, 8, 26), False), - (offset_lom_sat_aug, datetime(2019, 8, 30), False), + (offset_lom_sat_aug, datetime(2019, 8, 30), False), #From GMCR (see for example: http://yahoo.brand.edgar-online.com/Default.aspx?companyid=3184&formtypeID=7) - (offset_lom_sat_sep, datetime(2010, 9, 25), True), - (offset_lom_sat_sep, datetime(2011, 9, 24), True), - (offset_lom_sat_sep, datetime(2012, 9, 29), True), - + (offset_lom_sat_sep, datetime(2010, 9, 25), True), + (offset_lom_sat_sep, datetime(2011, 9, 24), True), + (offset_lom_sat_sep, datetime(2012, 9, 29), True), + ] for offset, date, expected in tests: assertOnOffset(offset, date, expected) - + def test_apply(self): offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8, weekday=WeekDay.SAT) offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8, weekday=WeekDay.SAT) - - date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25), - datetime(2008, 8, 30), datetime(2009, 8, 29), - datetime(2010, 8, 28), datetime(2011, 8, 27), - datetime(2012, 8, 25), datetime(2013, 8, 31), - datetime(2014, 8, 30), datetime(2015, 8, 29), + + date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25), + datetime(2008, 8, 30), datetime(2009, 8, 29), + datetime(2010, 8, 28), datetime(2011, 8, 27), + datetime(2012, 8, 
25), datetime(2013, 8, 31), + datetime(2014, 8, 30), datetime(2015, 8, 29), datetime(2016, 8, 27)] - + tests = [ (offset_lom_aug_sat, date_seq_lom_aug_sat), (offset_lom_aug_sat_1, date_seq_lom_aug_sat), @@ -1258,22 +1291,23 @@ def test_apply(self): for datum in data[1:]: current = current + offset self.assertEqual(current, datum) - -class TestFY5253NearestEndMonth(unittest.TestCase): + +class TestFY5253NearestEndMonth(TestBase): + def test_get_target_month_end(self): self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,8,31)) self.assertEqual(makeFY5253NearestEndMonth(startingMonth=12, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,12,31)) self.assertEqual(makeFY5253NearestEndMonth(startingMonth=2, weekday=WeekDay.SAT).get_target_month_end(datetime(2013,1,1)), datetime(2013,2,28)) - + def test_get_year_end(self): self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT).get_year_end(datetime(2013,1,1)), datetime(2013,8,31)) self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SUN).get_year_end(datetime(2013,1,1)), datetime(2013,9,1)) self.assertEqual(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.FRI).get_year_end(datetime(2013,1,1)), datetime(2013,8,30)) - + def test_onOffset(self): offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.SAT) offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8, weekday=WeekDay.THU) - + tests = [ # From Wikipedia (see: http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Saturday_nearest_the_end_of_month) # 2006-09-02 2006 September 2 @@ -1296,19 +1330,19 @@ def test_onOffset(self): (offset_lom_aug_sat, datetime(2009, 8, 29), True), (offset_lom_aug_sat, datetime(2010, 8, 28), True), (offset_lom_aug_sat, datetime(2011, 9, 3), True), - + (offset_lom_aug_sat, datetime(2016, 9, 3), True), (offset_lom_aug_sat, 
datetime(2017, 9, 2), True), - (offset_lom_aug_sat, datetime(2018, 9, 1), True), + (offset_lom_aug_sat, datetime(2018, 9, 1), True), (offset_lom_aug_sat, datetime(2019, 8, 31), True), - + (offset_lom_aug_sat, datetime(2006, 8, 27), False), (offset_lom_aug_sat, datetime(2007, 8, 28), False), (offset_lom_aug_sat, datetime(2008, 8, 31), False), (offset_lom_aug_sat, datetime(2009, 8, 30), False), (offset_lom_aug_sat, datetime(2010, 8, 29), False), (offset_lom_aug_sat, datetime(2011, 8, 28), False), - + (offset_lom_aug_sat, datetime(2006, 8, 25), False), (offset_lom_aug_sat, datetime(2007, 8, 24), False), (offset_lom_aug_sat, datetime(2008, 8, 29), False), @@ -1316,19 +1350,19 @@ def test_onOffset(self): (offset_lom_aug_sat, datetime(2010, 8, 27), False), (offset_lom_aug_sat, datetime(2011, 8, 26), False), (offset_lom_aug_sat, datetime(2019, 8, 30), False), - + #From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7 (offset_lom_aug_thu, datetime(2012, 8, 30), True), (offset_lom_aug_thu, datetime(2011, 9, 1), True), - + ] for offset, date, expected in tests: assertOnOffset(offset, date, expected) - + def test_apply(self): date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1), datetime(2008, 8, 30), datetime(2009, 8, 29), datetime(2010, 8, 28), datetime(2011, 9, 3)] - + tests = [ (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat), (makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT), date_seq_nem_8_sat), @@ -1343,68 +1377,68 @@ def test_apply(self): current = current + offset self.assertEqual(current, datum) -class TestFY5253LastOfMonthQuarter(unittest.TestCase): +class TestFY5253LastOfMonthQuarter(TestBase): def test_isAnchored(self): self.assert_(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored()) self.assert_(makeFY5253LastOfMonthQuarter(weekday=WeekDay.SAT, startingMonth=3, qtr_with_extra_week=4).isAnchored()) self.assert_(not 
makeFY5253LastOfMonthQuarter(2, startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4).isAnchored()) - + def test_equality(self): self.assertEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4)) self.assertNotEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4)) self.assertNotEqual(makeFY5253LastOfMonthQuarter(startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4), makeFY5253LastOfMonthQuarter(startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4)) - + def test_offset(self): offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4) offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4) offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4) - + offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4) offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4) - - GMCR = [datetime(2010, 3, 27), - datetime(2010, 6, 26), - datetime(2010, 9, 25), - datetime(2010, 12, 25), - datetime(2011, 3, 26), - datetime(2011, 6, 25), - datetime(2011, 9, 24), - datetime(2011, 12, 24), - datetime(2012, 3, 24), - datetime(2012, 6, 23), - datetime(2012, 9, 29), - datetime(2012, 12, 29), - datetime(2013, 3, 30), + + GMCR = [datetime(2010, 3, 27), + datetime(2010, 6, 26), + datetime(2010, 9, 25), + datetime(2010, 12, 25), + datetime(2011, 3, 26), + datetime(2011, 6, 25), + datetime(2011, 9, 24), + datetime(2011, 12, 24), + datetime(2012, 3, 24), + datetime(2012, 6, 23), + datetime(2012, 9, 29), + datetime(2012, 12, 29), + datetime(2013, 3, 30), datetime(2013, 6, 29)] - - 
assertEq(offset, base=GMCR[0], expected=GMCR[1]) + + assertEq(offset, base=GMCR[0], expected=GMCR[1]) assertEq(offset, base=GMCR[0] + relativedelta(days=-1), expected=GMCR[0]) - assertEq(offset, base=GMCR[1], expected=GMCR[2]) - + assertEq(offset, base=GMCR[1], expected=GMCR[2]) + assertEq(offset2, base=GMCR[0], expected=GMCR[2]) assertEq(offset4, base=GMCR[0], expected=GMCR[4]) - + assertEq(offset_neg1, base=GMCR[-1], expected=GMCR[-2]) assertEq(offset_neg1, base=GMCR[-1] + relativedelta(days=+1), expected=GMCR[-1]) assertEq(offset_neg2, base=GMCR[-1], expected=GMCR[-3]) - + date = GMCR[0] + relativedelta(days=-1) for expected in GMCR: assertEq(offset, date, expected) date = date + offset - + date = GMCR[-1] + relativedelta(days=+1) for expected in reversed(GMCR): assertEq(offset_neg1, date, expected) date = date + offset_neg1 - + def test_onOffset(self): lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4) lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4) - + tests = [ #From Wikipedia (lomq_aug_sat_4, datetime(2006, 8, 26), True), @@ -1414,78 +1448,78 @@ def test_onOffset(self): (lomq_aug_sat_4, datetime(2010, 8, 28), True), (lomq_aug_sat_4, datetime(2011, 8, 27), True), (lomq_aug_sat_4, datetime(2019, 8, 31), True), - + (lomq_aug_sat_4, datetime(2006, 8, 27), False), (lomq_aug_sat_4, datetime(2007, 8, 28), False), (lomq_aug_sat_4, datetime(2008, 8, 31), False), (lomq_aug_sat_4, datetime(2009, 8, 30), False), (lomq_aug_sat_4, datetime(2010, 8, 29), False), (lomq_aug_sat_4, datetime(2011, 8, 28), False), - + (lomq_aug_sat_4, datetime(2006, 8, 25), False), (lomq_aug_sat_4, datetime(2007, 8, 24), False), (lomq_aug_sat_4, datetime(2008, 8, 29), False), (lomq_aug_sat_4, datetime(2009, 8, 28), False), (lomq_aug_sat_4, datetime(2010, 8, 27), False), (lomq_aug_sat_4, datetime(2011, 8, 26), False), - (lomq_aug_sat_4, datetime(2019, 8, 30), False), + 
(lomq_aug_sat_4, datetime(2019, 8, 30), False), #From GMCR - (lomq_sep_sat_4, datetime(2010, 9, 25), True), - (lomq_sep_sat_4, datetime(2011, 9, 24), True), - (lomq_sep_sat_4, datetime(2012, 9, 29), True), - - (lomq_sep_sat_4, datetime(2013, 6, 29), True), - (lomq_sep_sat_4, datetime(2012, 6, 23), True), - (lomq_sep_sat_4, datetime(2012, 6, 30), False), - - (lomq_sep_sat_4, datetime(2013, 3, 30), True), - (lomq_sep_sat_4, datetime(2012, 3, 24), True), - - (lomq_sep_sat_4, datetime(2012, 12, 29), True), - (lomq_sep_sat_4, datetime(2011, 12, 24), True), - + (lomq_sep_sat_4, datetime(2010, 9, 25), True), + (lomq_sep_sat_4, datetime(2011, 9, 24), True), + (lomq_sep_sat_4, datetime(2012, 9, 29), True), + + (lomq_sep_sat_4, datetime(2013, 6, 29), True), + (lomq_sep_sat_4, datetime(2012, 6, 23), True), + (lomq_sep_sat_4, datetime(2012, 6, 30), False), + + (lomq_sep_sat_4, datetime(2013, 3, 30), True), + (lomq_sep_sat_4, datetime(2012, 3, 24), True), + + (lomq_sep_sat_4, datetime(2012, 12, 29), True), + (lomq_sep_sat_4, datetime(2011, 12, 24), True), + #INTC (extra week in Q1) #See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844 (makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2011, 4, 2), True), - + #see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7 (makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2012, 12, 29), True), (makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2011, 12, 31), True), (makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1), datetime(2010, 12, 25), True), - + ] for offset, date, expected in tests: assertOnOffset(offset, date, expected) - + def test_year_has_extra_week(self): #End of long Q1 self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, 
qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2))) - + #Start of long Q1 self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26))) - + #End of year before year with long Q1 self.assertFalse(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25))) - - for year in [x for x in range(1994, 2011+1) if x not in [2011, 2005, 2000, 1994]]: + + for year in [x for x in range(1994, 2011+1) if x not in [2011, 2005, 2000, 1994]]: self.assertFalse(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(year, 4, 2))) - + #Other long years self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2))) self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2))) self.assertTrue(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2))) - + def test_get_weeks(self): self.assertEqual(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).get_weeks(datetime(2011, 4, 2)), [14, 13, 13, 13]) self.assertEqual(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=4).get_weeks(datetime(2011, 4, 2)), [13, 13, 13, 14]) self.assertEqual(makeFY5253LastOfMonthQuarter(1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1).get_weeks(datetime(2010, 12, 25)), [13, 13, 13, 13]) -class TestFY5253NearestEndMonthQuarter(unittest.TestCase): - +class TestFY5253NearestEndMonthQuarter(TestBase): + def test_onOffset(self): - + offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, 
weekday=WeekDay.SAT, qtr_with_extra_week=4) offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4) tests = [ @@ -1496,19 +1530,19 @@ def test_onOffset(self): (offset_nem_sat_aug_4, datetime(2009, 8, 29), True), (offset_nem_sat_aug_4, datetime(2010, 8, 28), True), (offset_nem_sat_aug_4, datetime(2011, 9, 3), True), - + (offset_nem_sat_aug_4, datetime(2016, 9, 3), True), (offset_nem_sat_aug_4, datetime(2017, 9, 2), True), - (offset_nem_sat_aug_4, datetime(2018, 9, 1), True), + (offset_nem_sat_aug_4, datetime(2018, 9, 1), True), (offset_nem_sat_aug_4, datetime(2019, 8, 31), True), - + (offset_nem_sat_aug_4, datetime(2006, 8, 27), False), (offset_nem_sat_aug_4, datetime(2007, 8, 28), False), (offset_nem_sat_aug_4, datetime(2008, 8, 31), False), (offset_nem_sat_aug_4, datetime(2009, 8, 30), False), (offset_nem_sat_aug_4, datetime(2010, 8, 29), False), (offset_nem_sat_aug_4, datetime(2011, 8, 28), False), - + (offset_nem_sat_aug_4, datetime(2006, 8, 25), False), (offset_nem_sat_aug_4, datetime(2007, 8, 24), False), (offset_nem_sat_aug_4, datetime(2008, 8, 29), False), @@ -1516,11 +1550,11 @@ def test_onOffset(self): (offset_nem_sat_aug_4, datetime(2010, 8, 27), False), (offset_nem_sat_aug_4, datetime(2011, 8, 26), False), (offset_nem_sat_aug_4, datetime(2019, 8, 30), False), - + #From Micron, see: http://google.brand.edgar-online.com/?sym=MU&formtypeID=7 (offset_nem_thu_aug_4, datetime(2012, 8, 30), True), (offset_nem_thu_aug_4, datetime(2011, 9, 1), True), - + #See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13 (offset_nem_thu_aug_4, datetime(2013, 5, 30), True), (offset_nem_thu_aug_4, datetime(2013, 2, 28), True), @@ -1528,7 +1562,7 @@ def test_onOffset(self): (offset_nem_thu_aug_4, datetime(2012, 5, 31), True), (offset_nem_thu_aug_4, datetime(2007, 3, 1), True), (offset_nem_thu_aug_4, datetime(1994, 3, 3), True), - + ] for offset, date, expected in tests: @@ -1536,18 +1570,19 @@ def 
test_onOffset(self): def test_offset(self): offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4) - + MU = [datetime(2012, 5, 31), datetime(2012, 8, 30), datetime(2012, 11, 29), datetime(2013, 2, 28), datetime(2013, 5, 30)] - + date = MU[0] + relativedelta(days=-1) for expected in MU: assertEq(offset, date, expected) date = date + offset - + assertEq(offset, datetime(2012, 5, 31), datetime(2012, 8, 30)) assertEq(offset, datetime(2012, 5, 30), datetime(2012, 5, 31)) - -class TestQuarterBegin(unittest.TestCase): + +class TestQuarterBegin(TestBase): + def test_repr(self): self.assertEqual(repr(QuarterBegin()), "<QuarterBegin: startingMonth=3>") self.assertEqual(repr(QuarterBegin(startingMonth=3)), "<QuarterBegin: startingMonth=3>") @@ -1621,7 +1656,9 @@ def test_offset(self): self.assertEqual(datetime(2010, 2, 1) + offset, datetime(2010, 1, 1)) -class TestQuarterEnd(unittest.TestCase): +class TestQuarterEnd(TestBase): + _offset = QuarterEnd + def test_repr(self): self.assertEqual(repr(QuarterEnd()), "<QuarterEnd: startingMonth=3>") self.assertEqual(repr(QuarterEnd(startingMonth=3)), "<QuarterEnd: startingMonth=3>") @@ -1757,7 +1794,8 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBYearBegin(unittest.TestCase): +class TestBYearBegin(TestBase): + _offset = BYearBegin def test_misspecified(self): self.assertRaises(ValueError, BYearBegin, month=13) @@ -1804,7 +1842,8 @@ def test_offset(self): assertEq(offset, base, expected) -class TestYearBegin(unittest.TestCase): +class TestYearBegin(TestBase): + _offset = YearBegin def test_misspecified(self): self.assertRaises(ValueError, YearBegin, month=13) @@ -1876,7 +1915,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestBYearEndLagged(unittest.TestCase): +class TestBYearEndLagged(TestBase): def test_bad_month_fail(self): self.assertRaises(Exception, BYearEnd, month=13) @@ -1917,7 +1956,8 @@ def test_onOffset(self): 
assertOnOffset(offset, date, expected) -class TestBYearEnd(unittest.TestCase): +class TestBYearEnd(TestBase): + _offset = BYearEnd def test_offset(self): tests = [] @@ -1965,7 +2005,8 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestYearEnd(unittest.TestCase): +class TestYearEnd(TestBase): + _offset = YearEnd def test_misspecified(self): self.assertRaises(ValueError, YearEnd, month=13) @@ -2016,7 +2057,7 @@ def test_onOffset(self): assertOnOffset(offset, date, expected) -class TestYearEndDiffMonth(unittest.TestCase): +class TestYearEndDiffMonth(TestBase): def test_offset(self): tests = [] @@ -2193,10 +2234,10 @@ def test_compare_ticks(): assert(kls(3) != kls(4)) -class TestOffsetNames(unittest.TestCase): +class TestOffsetNames(unittest.TestCase): def test_get_offset_name(self): assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2)) - + assert get_offset_name(BDay()) == 'B' assert get_offset_name(BMonthEnd()) == 'BM' assert get_offset_name(Week(weekday=0)) == 'W-MON' @@ -2229,7 +2270,7 @@ def test_get_offset(): offset = get_offset(name) assert offset == expected, ("Expected %r to yield %r (actual: %r)" % (name, expected, offset)) - + def test_parse_time_string(): (date, parsed, reso) = parse_time_string('4Q1984') @@ -2337,9 +2378,9 @@ def get_all_subclasses(cls): class TestCaching(unittest.TestCase): no_simple_ctr = [WeekOfMonth, FY5253, - FY5253Quarter, + FY5253Quarter, LastWeekOfMonth] - + def test_should_cache_month_end(self): self.assertTrue(MonthEnd()._should_cache())
closes #5312
https://api.github.com/repos/pandas-dev/pandas/pulls/5327
2013-10-25T17:44:31Z
2013-10-26T00:31:29Z
2013-10-26T00:31:29Z
2014-06-27T23:14:58Z
BUG: bug when trying to display an embedded PandasObject (GH5324)
diff --git a/doc/source/release.rst b/doc/source/release.rst index e92d18827ae2a..af59137a194b0 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -637,6 +637,7 @@ Bug Fixes - C and Python Parser can now handle the more common multi-index column format which doesn't have a row for index names (:issue:`4702`) - Bug when trying to use an out-of-bounds date as an object dtype (:issue:`5312`) + - Bug when trying to display an embedded PandasObject (:issue:`5324`) pandas 0.12.0 ------------- diff --git a/pandas/core/format.py b/pandas/core/format.py index 2355ae16874ce..75069297360d6 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -3,6 +3,7 @@ import sys +from pandas.core.base import PandasObject from pandas.core.common import adjoin, isnull, notnull from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat @@ -1486,6 +1487,8 @@ def _format(x): if x is None: return 'None' return self.na_rep + elif isinstance(x, PandasObject): + return '%s' % x else: # object dtype return '%s' % formatter(x) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 37f78156d0fc2..6aa4322234d7f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2356,6 +2356,21 @@ def test_constructor_error_msgs(self): with assertRaisesRegexp(ValueError, 'If using all scalar values, you must must pass an index'): DataFrame({'a': False, 'b': True}) + def test_constructor_with_embedded_frames(self): + + # embedded data frames + df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]}) + df2 = DataFrame([df1, df1+10]) + + df2.dtypes + str(df2) + + result = df2.loc[0,0] + assert_frame_equal(result,df1) + + result = df2.loc[1,0] + assert_frame_equal(result,df1+10) + def test_insert_error_msmgs(self): # GH 4107, more descriptive error message
closes #5324 There was a bug where this wasn't display at all, so this PR fixes Do we care about formatting _embedded_ pandas objects? (as we don't recommend this anyhow) ``` In [15]: df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]}) In [19]: df2.dtypes Out[19]: 0 object dtype: object In [16]: df1 Out[16]: a b 0 1 3 1 2 4 2 3 5 In [17]: df2 = DataFrame([df1,df1+10]) In [18]: df2 Out[18]: 0 0 a b 0 1 3 1 2 4 2 3 5 1 a b 0 11 13 1 12 14 2 13 15 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5325
2013-10-25T16:02:46Z
2013-10-25T19:24:09Z
2013-10-25T19:24:09Z
2014-06-20T16:31:35Z
ENH: add more postgres support - started adding some oracle code
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index ceea902649690..ada036c040a01 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -237,7 +237,7 @@ def _alchemy_connect_sqlite(path): return create_engine('sqlite:///%s' % path).connect() -# I don't think this is used... +# used in _cur_write_oracle def sequence2dict(seq): """Helper function for cx_Oracle. @@ -329,10 +329,18 @@ def _get_data_and_columns(self, *args, **kwargs): def _has_table(self, name, flavor='sqlite'): # Note: engine overrides this, to use engine.has_table + schema = None + if flavor=='postgres': + if '.' in name: + (schema, name) = name.split('.') + else: + schema = 'public' flavor_map = { 'sqlite': ("SELECT name FROM sqlite_master " "WHERE type='table' AND name='%s';") % name, - 'mysql': "SHOW TABLES LIKE '%s'" % name} + 'mysql': "SHOW TABLES LIKE '%s'" % name, + 'postgres': "SELECT tablename FROM pg_tables WHERE schemaname= '%s' and tablename='%s'" % (schema, name), + 'oracle': "select table_name from user_tables where table_name='%s'" % name.upper()} query = flavor_map.get(flavor, None) if query is None: raise NotImplementedError @@ -356,7 +364,10 @@ def _create_schema(frame, name, flavor, keys=None): elif flavor == 'mysql': columns = ',\n '.join('`%s` %s' % x for x in column_types) elif flavor == 'postgres': - columns = ',\n '.join('%s %s' % x for x in column_types) + columns = ',\n '.join('"%s" %s' % x for x in column_types) + elif flavor== 'oracle': + columns = ',\n '.join('"%s" %s' % x for x in column_types) + else: raise ValueError("Don't have a template for that database flavor.") @@ -408,33 +419,40 @@ def _safe_fetch(cur): def _get_sqltype(pytype, flavor): sqltype = {'mysql': 'VARCHAR (63)', 'sqlite': 'TEXT', - 'postgres': 'text'} + 'postgres': 'TEXT', + 'oracle': 'VARCHAR2'} if issubclass(pytype, np.floating): sqltype['mysql'] = 'FLOAT' sqltype['sqlite'] = 'REAL' - sqltype['postgres'] = 'real' + sqltype['postgres'] = 'NUMERIC' + sqltype['oracle'] = 'NUMBER' if 
issubclass(pytype, np.integer): # TODO: Refine integer size. sqltype['mysql'] = 'BIGINT' sqltype['sqlite'] = 'INTEGER' - sqltype['postgres'] = 'integer' + sqltype['postgres'] = 'BIGINT' + sqltype['oracle'] = 'NUMBER' if issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64 is also a subclass of np.number. sqltype['mysql'] = 'DATETIME' sqltype['sqlite'] = 'TIMESTAMP' - sqltype['postgres'] = 'timestamp' + sqltype['postgres'] = 'TIMESTAMP' + sqltype['oracle'] = 'DATE' if pytype is datetime.date: sqltype['mysql'] = 'DATE' sqltype['sqlite'] = 'TIMESTAMP' - sqltype['postgres'] = 'date' + sqltype['postgres'] = 'DATE' + sqltype['oracle'] = 'DATE' if issubclass(pytype, np.bool_): + sqltype['mysql'] = 'BOOL' sqltype['sqlite'] = 'INTEGER' - sqltype['postgres'] = 'boolean' + sqltype['postgres'] = 'BOOLEAN' + sqltype['oracle'] = 'INTEGER' return sqltype[flavor] @@ -552,7 +570,9 @@ def _write(self, frame, name, flavor='sqlite'): # Replace spaces in DataFrame column names with _. 
safe_names = [s.replace(' ', '_').strip() for s in frame.columns] flavor_picker = {'sqlite': self._cur_write_sqlite, - 'mysql': self._cur_write_mysql} + 'mysql': self._cur_write_mysql, + 'postgres': self._cur_write_postgres, + 'oracle': self._cur_write_oracle} func = flavor_picker.get(flavor, None) if func is None: @@ -590,6 +610,23 @@ def _cur_write_mysql(frame, table, names, cur): data = [tuple(x) for x in frame.values.tolist()] cur.executemany(insert_query, data) + @staticmethod + def _cur_write_postgres(frame, table, names, cur): + bracketed_names = ['"' + column.lower() +'"' for column in names] + col_names = ','.join(bracketed_names) + wildcards = ','.join(["%s"] * len(names)) + insert_query = "INSERT INTO %s (%s) VALUES (%s)" % (table, col_names, wildcards) + data = [tuple(x) for x in frame.values] + cur.executemany(insert_query, data) + + @staticmethod + def _cur_write_oracle(frame, table, names, cur): + bracketed_names = ['"' + column.lower() +'"' for column in names] + col_names = ','.join(bracketed_names) + wildcards = ','.join(["%s"] * len(names)) + insert_query = "INSERT INTO %s (%s) VALUES (%s)" % (table, col_names, wildcards) + data = [sequence2dict(x) for x in frame.values] + cur.executemany(insert_query, data) class PandasSQLWithCon(PandasSQL): def __init__(self, con): diff --git a/pandas/io/sql_legacy.py b/pandas/io/sql_legacy.py index 91cb2ec77af08..c0e3537fa9e15 100644 --- a/pandas/io/sql_legacy.py +++ b/pandas/io/sql_legacy.py @@ -2,13 +2,18 @@ Collection of query wrappers / abstractions to both facilitate data retrieval and to reduce dependency on DB-specific API. 
""" +from __future__ import print_function from datetime import datetime, date +from pandas.compat import range, lzip, map, zip +import pandas.compat as compat import numpy as np import traceback +import cStringIO +import csv from pandas.core.datetools import format as date_format -from pandas.core.api import DataFrame, isnull +from pandas.core.api import DataFrame #------------------------------------------------------------------------------ # Helper execution function @@ -51,7 +56,7 @@ def execute(sql, con, retry=True, cur=None, params=None): except Exception: # pragma: no cover pass - print ('Error on sql %s' % sql) + print('Error on sql %s' % sql) raise @@ -61,7 +66,7 @@ def _safe_fetch(cur): if not isinstance(result, list): result = list(result) return result - except Exception, e: # pragma: no cover + except Exception as e: # pragma: no cover excName = e.__class__.__name__ if excName == 'OperationalError': return [] @@ -94,7 +99,7 @@ def tquery(sql, con=None, cur=None, retry=True): except Exception as e: excName = e.__class__.__name__ if excName == 'OperationalError': # pragma: no cover - print ('Failed to commit, may need to restart interpreter') + print('Failed to commit, may need to restart interpreter') else: raise @@ -104,7 +109,7 @@ def tquery(sql, con=None, cur=None, retry=True): if result and len(result[0]) == 1: # python 3 compat - result = list(list(zip(*result))[0]) + result = list(lzip(*result)[0]) elif result is None: # pragma: no cover result = [] @@ -128,7 +133,7 @@ def uquery(sql, con=None, cur=None, retry=True, params=None): traceback.print_exc() if retry: - print ('Looks like your connection failed, reconnecting...') + print('Looks like your connection failed, reconnecting...') return uquery(sql, con, retry=False) return result @@ -172,6 +177,7 @@ def read_frame(sql, con, index_col=None, coerce_float=True, params=None): frame_query = read_frame read_sql = read_frame + def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', 
**kwargs): """ Write records stored in a DataFrame to a SQL database. @@ -193,17 +199,17 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): warnings.warn("append is deprecated, use if_exists instead", FutureWarning) if kwargs['append']: - if_exists='append' + if_exists = 'append' else: - if_exists='fail' + if_exists = 'fail' exists = table_exists(name, con, flavor) if if_exists == 'fail' and exists: - raise ValueError, "Table '%s' already exists." % name + raise ValueError("Table '%s' already exists." % name) #create or drop-recreate if necessary create = None if exists and if_exists == 'replace': - create = "DROP TABLE %s" % name + create = "DROP TABLE %s; %s" % (name, get_schema(frame, name, flavor)) elif not exists: create = get_schema(frame, name, flavor) @@ -215,8 +221,9 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): cur = con.cursor() # Replace spaces in DataFrame column names with _. safe_names = [s.replace(' ', '_').strip() for s in frame.columns] - flavor_picker = {'sqlite' : _write_sqlite, - 'mysql' : _write_mysql} + flavor_picker = {'sqlite': _write_sqlite, + 'mysql': _write_mysql, + 'postgres': _write_postgres} func = flavor_picker.get(flavor, None) if func is None: @@ -225,6 +232,7 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): cur.close() con.commit() + def _write_sqlite(frame, table, names, cur): bracketed_names = ['[' + column + ']' for column in names] col_names = ','.join(bracketed_names) @@ -232,12 +240,13 @@ def _write_sqlite(frame, table, names, cur): insert_query = 'INSERT INTO %s (%s) VALUES (%s)' % ( table, col_names, wildcards) # pandas types are badly handled if there is only 1 column ( Issue #3628 ) - if not len(frame.columns )==1 : + if not len(frame.columns) == 1: data = [tuple(x) for x in frame.values] - else : + else: data = [tuple(x) for x in frame.values.tolist()] cur.executemany(insert_query, data) + def _write_mysql(frame, 
table, names, cur): bracketed_names = ['`' + column + '`' for column in names] col_names = ','.join(bracketed_names) @@ -247,57 +256,90 @@ def _write_mysql(frame, table, names, cur): data = [tuple(x) for x in frame.values] cur.executemany(insert_query, data) +def _write_postgres(frame, table, names, cur): + output = cStringIO.StringIO() + writer = csv.writer(output, delimiter='\t', quoting=csv.QUOTE_MINIMAL) + + for col in frame.dtypes.index: + dt = frame.dtypes[col] + if str(dt.type)=="<type 'numpy.object_'>": + frame[col] = frame[col].apply(lambda x: x.__str__().replace('\n', '')) + + [writer.writerow(tuple(x)) for x in frame.values] + + output.seek(0) + cur.copy_from(output, table, columns=names) + def table_exists(name, con, flavor): + schema = None + if flavor=='postgres': + if '.' in name: + (schema, name) = name.split('.') + else: + schema = 'public' flavor_map = { 'sqlite': ("SELECT name FROM sqlite_master " "WHERE type='table' AND name='%s';") % name, - 'mysql' : "SHOW TABLES LIKE '%s'" % name} + 'mysql': "SHOW TABLES LIKE '%s'" % name, + 'postgres': "select tablename FROM pg_tables WHERE schemaname = '%s' and tablename='%s'" % (schema, name) + } query = flavor_map.get(flavor, None) if query is None: raise NotImplementedError return len(tquery(query, con)) > 0 + def get_sqltype(pytype, flavor): sqltype = {'mysql': 'VARCHAR (63)', - 'sqlite': 'TEXT'} + 'sqlite': 'TEXT', + 'postgres': 'TEXT'} if issubclass(pytype, np.floating): sqltype['mysql'] = 'FLOAT' sqltype['sqlite'] = 'REAL' + sqltype['postgres'] = 'NUMERIC' if issubclass(pytype, np.integer): #TODO: Refine integer size. sqltype['mysql'] = 'BIGINT' sqltype['sqlite'] = 'INTEGER' + sqltype['postgres'] = 'BIGINT' if issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64 is also a subclass of np.number. 
sqltype['mysql'] = 'DATETIME' sqltype['sqlite'] = 'TIMESTAMP' + sqltype['postgres'] = 'TIMESTAMP' if pytype is datetime.date: sqltype['mysql'] = 'DATE' sqltype['sqlite'] = 'TIMESTAMP' + sqltype['postgres'] = 'TIMESTAMP' if issubclass(pytype, np.bool_): sqltype['sqlite'] = 'INTEGER' + sqltype['postgres'] = 'BOOLEAN' return sqltype[flavor] + def get_schema(frame, name, flavor, keys=None): "Return a CREATE TABLE statement to suit the contents of a DataFrame." lookup_type = lambda dtype: get_sqltype(dtype.type, flavor) # Replace spaces in DataFrame column names with _. - safe_columns = [s.replace(' ', '_').strip() for s in frame.dtypes.index] - column_types = zip(safe_columns, map(lookup_type, frame.dtypes)) + # Also force lowercase, postgresql can be case sensitive + safe_columns = [s.replace(' ', '_').strip().lower() for s in frame.dtypes.index] + column_types = lzip(safe_columns, map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns = ',\n '.join('[%s] %s' % x for x in column_types) + elif flavor == 'postgres': + columns = ',\n '.join('"%s" %s' % x for x in column_types) else: columns = ',\n '.join('`%s` %s' % x for x in column_types) keystr = '' if keys is not None: - if isinstance(keys, basestring): + if isinstance(keys, compat.string_types): keys = (keys,) keystr = ', PRIMARY KEY (%s)' % ','.join(keys) template = """CREATE TABLE %(name)s ( @@ -308,6 +350,7 @@ def get_schema(frame, name, flavor, keys=None): 'keystr': keystr} return create_statement + def sequence2dict(seq): """Helper function for cx_Oracle. @@ -320,6 +363,6 @@ def sequence2dict(seq): http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/ """ d = {} - for k,v in zip(range(1, 1 + len(seq)), seq): + for k, v in zip(range(1, 1 + len(seq)), seq): d[str(k)] = v return d
https://api.github.com/repos/pandas-dev/pandas/pulls/5323
2013-10-25T15:05:39Z
2014-02-16T22:49:57Z
null
2014-06-24T15:17:24Z
BUG: when trying to use an out-of-bounds date as an object dtype (GH5312)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b3fa90ed6f624..e92d18827ae2a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -636,6 +636,7 @@ Bug Fixes - Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`) - C and Python Parser can now handle the more common multi-index column format which doesn't have a row for index names (:issue:`4702`) + - Bug when trying to use an out-of-bounds date as an object dtype (:issue:`5312`) pandas 0.12.0 ------------- diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 607499a2d35fe..c327458e00a07 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1789,14 +1789,15 @@ def make_block(values, items, ref_items, klass=None, ndim=None, dtype=None, fast if np.prod(values.shape): flat = values.ravel() inferred_type = lib.infer_dtype(flat) - if inferred_type == 'datetime': + if inferred_type in ['datetime','datetime64']: # we have an object array that has been inferred as datetime, so # convert it try: values = tslib.array_to_datetime( flat).reshape(values.shape) - klass = DatetimeBlock + if issubclass(values.dtype.type, np.datetime64): + klass = DatetimeBlock except: # it already object, so leave it pass diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4802cfaaa6ab5..ed1df4f0bd8d6 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -539,6 +539,13 @@ def test_constructor_dtype_datetime64(self): self.assertRaises( TypeError, lambda x: Series(dates, dtype='datetime64')) + # invalid dates can be help as object + result = Series([datetime(2,1,1)]) + self.assert_(result[0] == datetime(2,1,1,0,0)) + + result = Series([datetime(3000,1,1)]) + self.assert_(result[0] == datetime(3000,1,1,0,0)) + def test_constructor_dict(self): d = {'a': 0., 'b': 1., 'c': 2.} result = Series(d, index=['b', 'c', 'd', 'a'])
closes 1st issue in #5312
https://api.github.com/repos/pandas-dev/pandas/pulls/5322
2013-10-25T14:24:13Z
2013-10-25T14:44:17Z
2013-10-25T14:44:17Z
2014-07-16T08:36:48Z
PERF: vbench for time-series index assignment in frame (GH5320)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5e33532587506..eb98bcfa00ecb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1914,7 +1914,11 @@ def insert(self, loc, column, value, allow_duplicates=False): def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new # blocks) are always copied - if _is_sequence(value): + + # dont' need further processing on an equal index + if isinstance(value, Index) and (not len(self.index) or value.equals(self.index)): + value = value.values.copy() + elif isinstance(value, Series) or _is_sequence(value): is_frame = isinstance(value, DataFrame) if isinstance(value, Series) or is_frame: if value.index.equals(self.index) or not len(self.index): diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 67c0aa227f886..3567ee2b09f99 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -120,6 +120,21 @@ def j(): frame_getitem_single_column2 = Benchmark('j()', setup, start_date=datetime(2010, 6, 1)) +#---------------------------------------------------------------------- +# assignment + +setup = common_setup + """ +idx = date_range('1/1/2000', periods=100000, freq='D') +df = DataFrame(randn(100000, 1),columns=['A'],index=idx) +def f(x): + x = x.copy() + x['date'] = x.index +""" + +frame_assign_timeseries_index = Benchmark('f(df)', setup, + start_date=datetime(2013, 10, 1)) + + #---------------------------------------------------------------------- # to_string diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index a990a9873cea0..0850499f42480 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -229,12 +229,12 @@ def date_range(start=None, end=None, periods=None, freq=None): # tz_localize with infer argument. This is an attempt to emulate the results # of read_csv with duplicated data. 
Not passing infer_dst will fail setup = common_setup + """ -dst_rng = date_range('10/29/2000 1:00:00', +dst_rng = date_range('10/29/2000 1:00:00', '10/29/2000 1:59:59', freq='S') index = date_range('10/29/2000', '10/29/2000 00:59:59', freq='S') index = index.append(dst_rng) index = index.append(dst_rng) -index = index.append(date_range('10/29/2000 2:00:00', +index = index.append(date_range('10/29/2000 2:00:00', '10/29/2000 3:00:00', freq='S')) """
PERF: direct index assignment in a frame was doing lots of work closes #5320 ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_assign_timeseries_index | 0.7157 | 398.8634 | 0.0018 | ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5321
2013-10-25T13:30:56Z
2013-10-25T13:44:35Z
2013-10-25T13:44:35Z
2014-07-16T08:36:46Z
TST: Fixed int32/int64 problem in csv parser tests
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 66730f255eb1d..84736f16e7cba 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1274,8 +1274,8 @@ def test_header_multiindex_common_format(self): tm.assert_frame_equal(df.reset_index(drop=True),result) # malformed case 1 - expected = DataFrame(np.array([[ 2, 3, 4, 5, 6], - [ 8, 9, 10, 11, 12]]), + expected = DataFrame(np.array([[2, 3, 4, 5, 6], + [8, 9, 10, 11, 12]], dtype='int64'), index=Index([1, 7]), columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]], labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]], @@ -1290,8 +1290,8 @@ def test_header_multiindex_common_format(self): tm.assert_frame_equal(expected,result) # malformed case 2 - expected = DataFrame(np.array([[ 2, 3, 4, 5, 6], - [ 8, 9, 10, 11, 12]]), + expected = DataFrame(np.array([[2, 3, 4, 5, 6], + [8, 9, 10, 11, 12]], dtype='int64'), index=Index([1, 7]), columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]], labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]], @@ -1307,7 +1307,7 @@ def test_header_multiindex_common_format(self): # mi on columns and index (malformed) expected = DataFrame(np.array([[ 3, 4, 5, 6], - [ 9, 10, 11, 12]]), + [ 9, 10, 11, 12]], dtype='int64'), index=MultiIndex(levels=[[1, 7], [2, 8]], labels=[[0, 1], [0, 1]]), columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
Fixes #5316 I think this was just an issue with 32 bit OSes.
https://api.github.com/repos/pandas-dev/pandas/pulls/5318
2013-10-25T03:30:59Z
2013-10-25T13:45:12Z
2013-10-25T13:45:12Z
2014-07-16T08:36:42Z
ENH: add basic postgresql support to io.sql
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index e269d14f72712..e39c3a9582964 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -207,7 +207,7 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): #create or drop-recreate if necessary create = None if exists and if_exists == 'replace': - create = "DROP TABLE %s" % name + create = "DROP TABLE %s; %s" % (name, get_schema(frame, name, flavor)) elif not exists: create = get_schema(frame, name, flavor) @@ -220,7 +220,8 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): # Replace spaces in DataFrame column names with _. safe_names = [s.replace(' ', '_').strip() for s in frame.columns] flavor_picker = {'sqlite': _write_sqlite, - 'mysql': _write_mysql} + 'mysql': _write_mysql, + 'postgresql': _write_postgresql} func = flavor_picker.get(flavor, None) if func is None: @@ -253,12 +254,26 @@ def _write_mysql(frame, table, names, cur): data = [tuple(x) for x in frame.values] cur.executemany(insert_query, data) +def _write_postgresql(frame, table, names, cur): + bracketed_names = ['"' + column.lower() +'"' for column in names] + col_names = ','.join(bracketed_names) + wildcards = ','.join(["%s"] * len(names)) + insert_query = "INSERT INTO %s (%s) VALUES (%s)" % (table, col_names, wildcards) + data = [tuple(x) for x in frame.values] + cur.executemany(insert_query, data) def table_exists(name, con, flavor): + if flavor=='postgresql': + if '.' 
in name: + (schema, name) = name.split('.') + else: + schema = 'public' flavor_map = { 'sqlite': ("SELECT name FROM sqlite_master " "WHERE type='table' AND name='%s';") % name, - 'mysql': "SHOW TABLES LIKE '%s'" % name} + 'mysql': "SHOW TABLES LIKE '%s'" % name, + 'postgresql': "select tablename FROM pg_tables WHERE schemaname = '%s' and tablename='%s'" % (schema, name) + } query = flavor_map.get(flavor, None) if query is None: raise NotImplementedError @@ -267,28 +282,34 @@ def table_exists(name, con, flavor): def get_sqltype(pytype, flavor): sqltype = {'mysql': 'VARCHAR (63)', - 'sqlite': 'TEXT'} + 'sqlite': 'TEXT', + 'postgresql': 'TEXT'} if issubclass(pytype, np.floating): sqltype['mysql'] = 'FLOAT' sqltype['sqlite'] = 'REAL' + sqltype['postgresql'] = 'NUMERIC' if issubclass(pytype, np.integer): #TODO: Refine integer size. sqltype['mysql'] = 'BIGINT' sqltype['sqlite'] = 'INTEGER' + sqltype['postgresql'] = 'BIGINT' if issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64 is also a subclass of np.number. sqltype['mysql'] = 'DATETIME' sqltype['sqlite'] = 'TIMESTAMP' + sqltype['postgresql'] = 'TIMESTAMP' if pytype is datetime.date: sqltype['mysql'] = 'DATE' sqltype['sqlite'] = 'TIMESTAMP' + sqltype['postgresql'] = 'TIMESTAMP' if issubclass(pytype, np.bool_): sqltype['sqlite'] = 'INTEGER' + sqltype['postgresql'] = 'BOOLEAN' return sqltype[flavor] @@ -297,10 +318,13 @@ def get_schema(frame, name, flavor, keys=None): "Return a CREATE TABLE statement to suit the contents of a DataFrame." lookup_type = lambda dtype: get_sqltype(dtype.type, flavor) # Replace spaces in DataFrame column names with _. 
- safe_columns = [s.replace(' ', '_').strip() for s in frame.dtypes.index] + # Also force lowercase, postgresql can be case sensitive + safe_columns = [s.replace(' ', '_').strip().lower() for s in frame.dtypes.index] column_types = lzip(safe_columns, map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns = ',\n '.join('[%s] %s' % x for x in column_types) + elif flavor == 'postgresql': + columns = ',\n '.join('"%s" %s' % x for x in column_types) else: columns = ',\n '.join('`%s` %s' % x for x in column_types)
I added in basic postresql support
https://api.github.com/repos/pandas-dev/pandas/pulls/5315
2013-10-24T20:40:40Z
2014-01-26T12:21:13Z
null
2014-07-07T14:18:40Z
TST: changes AssertionErrors in core/generic/_construct_axes_from_arguments to Type/Value Errors
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9666fe42cc822..3259522e20926 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -233,7 +233,7 @@ def _construct_axes_from_arguments(self, args, kwargs, require_all=False): if alias is not None: if a in kwargs: if alias in kwargs: - raise Exception( + raise TypeError( "arguments are multually exclusive for [%s,%s]" % (a, alias)) continue if alias in kwargs: @@ -246,8 +246,8 @@ def _construct_axes_from_arguments(self, args, kwargs, require_all=False): kwargs[a] = args.pop(0) except (IndexError): if require_all: - raise AssertionError( - "not enough arguments specified!") + raise TypeError( + "not enough/duplicate arguments specified!") axes = dict([(a, kwargs.get(a)) for a in self._AXIS_ORDERS]) return axes, kwargs diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 07b33266d88a1..d2da403907280 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1248,14 +1248,12 @@ def test_transpose(self): expected = self.panel.swapaxes('items', 'minor') assert_panel_equal(result, expected) - ## test bad aliases - # test ambiguous aliases - self.assertRaises(AssertionError, self.panel.transpose, 'minor', - maj='major', majo='items') - - # test invalid kwargs - self.assertRaises(AssertionError, self.panel.transpose, 'minor', - maj='major', minor='items') + # duplicate axes + with tm.assertRaisesRegexp(TypeError, 'not enough/duplicate arguments'): + self.panel.transpose('minor', maj='major', minor='items') + + with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'): + self.panel.transpose('minor', 'major', major='minor', minor='items') result = self.panel.transpose(2, 1, 0) assert_panel_equal(result, expected)
closes #5051
https://api.github.com/repos/pandas-dev/pandas/pulls/5311
2013-10-24T12:32:48Z
2013-10-24T18:18:37Z
2013-10-24T18:18:36Z
2014-07-16T08:36:40Z
ENH: Make 'rows' an axis alias for 'index'
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5e33532587506..b611f87b0a557 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4358,8 +4358,8 @@ def combineMult(self, other): return self.mul(other, fill_value=1.) -DataFrame._setup_axes( - ['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True) +DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, + axes_are_reversed=True, aliases={'rows': 0}) DataFrame._add_numeric_operations() _EMPTY_SERIES = Series([]) diff --git a/pandas/core/series.py b/pandas/core/series.py index 11033893b0b93..51a5241e51ded 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2381,7 +2381,8 @@ def to_period(self, freq=None, copy=True): new_index = self.index.to_period(freq=freq) return self._constructor(new_values, index=new_index).__finalize__(self) -Series._setup_axes(['index'], info_axis=0, stat_axis=0) +Series._setup_axes(['index'], info_axis=0, stat_axis=0, + aliases={'rows': 0}) Series._add_numeric_operations() _INDEX_TYPES = ndarray, Index, list, tuple diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index f6db680d30061..16ba262beb5fc 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -1930,11 +1930,13 @@ def test_get_axis(self): self.assertEquals(f._get_axis_number(0), 0) self.assertEquals(f._get_axis_number(1), 1) self.assertEquals(f._get_axis_number('index'), 0) + self.assertEquals(f._get_axis_number('rows'), 0) self.assertEquals(f._get_axis_number('columns'), 1) self.assertEquals(f._get_axis_name(0), 'index') self.assertEquals(f._get_axis_name(1), 'columns') self.assertEquals(f._get_axis_name('index'), 'index') + self.assertEquals(f._get_axis_name('rows'), 'index') self.assertEquals(f._get_axis_name('columns'), 'columns') self.assert_(f._get_axis(0) is f.index) @@ -8400,6 +8402,8 @@ def create(): expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0]) result = df.where(df>0,df[0],axis='index') 
assert_frame_equal(result, expected) + result = df.where(df>0,df[0],axis='rows') + assert_frame_equal(result, expected) # frame df = create() diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6988c81b181de..09f6668e05196 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3566,6 +3566,13 @@ def test_dropna_empty(self): # invalid axis self.assertRaises(ValueError, s.dropna, axis=1) + def test_axis_alias(self): + s = Series([1, 2, np.nan]) + assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index')) + self.assertEqual(s.dropna().sum('rows'), 3) + self.assertEqual(s._get_axis_number('rows'), 0) + self.assertEqual(s._get_axis_name('rows'), 'index') + def test_drop_duplicates(self): s = Series([1, 2, 3, 3])
Think this covers it - think I have an appropriate number of cases too. I don't actually want to put this in the docs, as it's just for convenience. Thoughts on that?
https://api.github.com/repos/pandas-dev/pandas/pulls/5309
2013-10-24T02:03:08Z
2013-10-25T09:59:02Z
2013-10-25T09:59:02Z
2014-07-16T08:36:38Z
Revert print_skipped.py changes for now
diff --git a/.travis.yml b/.travis.yml index 818278eebf5b5..477e81a956553 100644 --- a/.travis.yml +++ b/.travis.yml @@ -48,3 +48,4 @@ script: after_script: - ci/print_versions.py + - ci/print_skipped.py /tmp/nosetests.xml diff --git a/ci/install.sh b/ci/install.sh index 528d669ae693c..edd6d0690d3c8 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -55,9 +55,6 @@ if [ -n "$LOCALE_OVERRIDE" ]; then time sudo locale-gen "$LOCALE_OVERRIDE" fi -# show-skipped is working at this particular commit -show_skipped_commit=fa4ff84e53c09247753a155b428c1bf2c69cb6c3 -time pip install git+git://github.com/cpcloud/nose-show-skipped.git@$show_skipped_commit time pip install $PIP_ARGS -r ci/requirements-${wheel_box}.txt # we need these for numpy diff --git a/ci/print_skipped.py b/ci/print_skipped.py new file mode 100755 index 0000000000000..9fb05df64bcea --- /dev/null +++ b/ci/print_skipped.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +import sys +import math +import xml.etree.ElementTree as et + + +def parse_results(filename): + tree = et.parse(filename) + root = tree.getroot() + skipped = [] + + current_class = old_class = '' + i = 1 + assert i - 1 == len(skipped) + for el in root.findall('testcase'): + cn = el.attrib['classname'] + for sk in el.findall('skipped'): + old_class = current_class + current_class = cn + name = '{classname}.{name}'.format(classname=current_class, + name=el.attrib['name']) + msg = sk.attrib['message'] + out = '' + if old_class != current_class: + ndigits = int(math.log(i, 10) + 1) + out += ('-' * (len(name + msg) + 4 + ndigits) + '\n') # 4 for : + space + # + space + out += '#{i} {name}: {msg}'.format(i=i, name=name, msg=msg) + skipped.append(out) + i += 1 + assert i - 1 == len(skipped) + assert i - 1 == len(skipped) + assert len(skipped) == int(root.attrib['skip']) + return '\n'.join(skipped) + + +def main(args): + print('SKIPPED TESTS:') + print(parse_results(args.filename)) + return 0 + + +def parse_args(): + import argparse + parser = 
argparse.ArgumentParser() + parser.add_argument('filename', help='XUnit file to parse') + return parser.parse_args() + + +if __name__ == '__main__': + sys.exit(main(parse_args())) diff --git a/ci/script.sh b/ci/script.sh index 67dadde2b20fb..361ad41901f37 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -9,5 +9,5 @@ if [ -n "$LOCALE_OVERRIDE" ]; then python -c "$pycmd" fi -echo nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --show-skipped -nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --show-skipped +echo nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml +nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
goes back to @cpcloud's original plugin that echo'd separately. Just until @cpcloud has time to implement this for nose-skipped. Just makes it much easier to read through Travis builds (especially for new users) if the print_skipped stuff is hidden at first glance.
https://api.github.com/repos/pandas-dev/pandas/pulls/5308
2013-10-24T01:42:57Z
2013-10-24T11:32:14Z
2013-10-24T11:32:14Z
2014-07-16T08:36:37Z
ENH: allow astype conversions for timedeltas to other timedelta freqs (still returns a float series), related to GH4521
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7dc6876d35124..5db612059685b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -118,7 +118,7 @@ Improvements to existing features by an integer series (:issue`4521`) - A Series of dtype ``timedelta64[ns]`` can now be divided by another ``timedelta64[ns]`` object to yield a ``float64`` dtyped Series. This - is frequency conversion. + is frequency conversion; astyping is also supported. - Timedelta64 support ``fillna/ffill/bfill`` with an integer interpreted as seconds, or a ``timedelta`` (:issue:`3371`) - Box numeric ops on ``timedelta`` Series (:issue:`4984`) diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 875ba2de93956..83e26d83a9363 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1285,8 +1285,8 @@ It can also construct Series. **frequency conversion** -Timedeltas can be converted to other 'frequencies' by dividing by another timedelta. -These operations yield ``float64`` dtyped Series. +Timedeltas can be converted to other 'frequencies' by dividing by another timedelta, +or by astyping to a specific timedelta type. These operations yield ``float64`` dtyped Series. .. ipython:: python @@ -1297,9 +1297,11 @@ These operations yield ``float64`` dtyped Series. # to days td / np.timedelta64(1,'D') + td.astype('timedelta64[D]') # to seconds td / np.timedelta64(1,'s') + td.astype('timedelta64[s]') Dividing or multiplying a ``timedelta64[ns]`` Series by an integer or integer Series yields another ``timedelta64[ns]`` dtypes Series. diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 02f231170ab97..de94d08602750 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -357,7 +357,7 @@ Enhancements to_timedelta(np.arange(5),unit='d') A Series of dtype ``timedelta64[ns]`` can now be divided by another - ``timedelta64[ns]`` object to yield a ``float64`` dtyped Series. 
This + ``timedelta64[ns]`` object, or astyped to yield a ``float64`` dtyped Series. This is frequency conversion. See :ref:`the docs<timeseries.timedeltas_convert>` for the docs. .. ipython:: python @@ -370,9 +370,11 @@ Enhancements # to days td / np.timedelta64(1,'D') + td.astype('timedelta64[D]') # to seconds td / np.timedelta64(1,'s') + td.astype('timedelta64[s]') Dividing or multiplying a ``timedelta64[ns]`` Series by an integer or integer Series diff --git a/pandas/core/common.py b/pandas/core/common.py index bacd759ee4fd5..d9e8f4164adb4 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2043,7 +2043,16 @@ def _astype_nansafe(arr, dtype, copy=True): # in py3, timedelta64[ns] are int64 elif (compat.PY3 and dtype not in [_INT64_DTYPE,_TD_DTYPE]) or (not compat.PY3 and dtype != _TD_DTYPE): + + # allow frequency conversions + if dtype.kind == 'm': + mask = isnull(arr) + result = arr.astype(dtype).astype(np.float64) + result[mask] = np.nan + return result + raise TypeError("cannot astype a timedelta from [%s] to [%s]" % (arr.dtype,dtype)) + return arr.astype(_TD_DTYPE) elif (np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer)): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 9292dba651421..6988c81b181de 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2128,9 +2128,9 @@ def test_constructor_dtype_timedelta64(self): for i in range(3)] + [np.nan ], dtype='m8[ns]' ) self.assert_(td.dtype == 'timedelta64[ns]') - # invalid astypes - for t in ['s', 'D', 'us', 'ms']: - self.assertRaises(TypeError, td.astype, 'm8[%s]' % t) + # these are frequency conversion astypes + #for t in ['s', 'D', 'us', 'ms']: + # self.assertRaises(TypeError, td.astype, 'm8[%s]' % t) # valid astype td.astype('int64') @@ -2371,10 +2371,18 @@ def test_timedelta64_conversions(self): for m in [1, 3, 10]: for unit in ['D','h','m','s','ms','us','ns']: + + # op expected = s1.apply(lambda x: x / 
np.timedelta64(m,unit)) result = s1 / np.timedelta64(m,unit) assert_series_equal(result, expected) + if m == 1 and unit != 'ns': + + # astype + result = s1.astype("timedelta64[{0}]".format(unit)) + assert_series_equal(result, expected) + # reverse op expected = s1.apply(lambda x: np.timedelta64(m,unit) / x) result = np.timedelta64(m,unit) / s1
related #4521 ``` In [4]: td = pd.to_timedelta([0,1,2],unit='D') In [5]: td Out[5]: 0 00:00:00 1 1 days, 00:00:00 2 2 days, 00:00:00 dtype: timedelta64[ns] In [6]: td / np.timedelta64(1,'s') Out[6]: 0 0 1 86400 2 172800 dtype: float64 ``` This is new (used to raise) ``` In [7]: td.astype('timedelta64[s]') Out[7]: 0 0 1 86400 2 172800 dtype: float64 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5305
2013-10-23T23:55:58Z
2013-10-24T00:13:07Z
2013-10-24T00:13:07Z
2014-07-16T08:36:35Z
BUG: Fix Series.isin with date/time-like dtypes (GH5021)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5db612059685b..f161ead7f7ecc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -633,6 +633,7 @@ Bug Fixes - Fixed issue with ``drop`` and a non-unique index on Series (:issue:`5248`) - Fixed seg fault in C parser caused by passing more names than columns in the file. (:issue:`5156`) + - Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`) pandas 0.12.0 ------------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 11033893b0b93..4a288f4c283b2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2077,8 +2077,20 @@ def isin(self, values): raise TypeError("only list-like objects are allowed to be passed" " to Series.isin(), you passed a " "{0!r}".format(type(values).__name__)) + + # may need i8 conversion for proper membership testing + comps = _values_from_object(self) + if com.is_datetime64_dtype(self): + from pandas.tseries.tools import to_datetime + values = Series(to_datetime(values)).values.view('i8') + comps = comps.view('i8') + elif com.is_timedelta64_dtype(self): + from pandas.tseries.timedeltas import to_timedelta + values = Series(to_timedelta(values)).values.view('i8') + comps = comps.view('i8') + value_set = set(values) - result = lib.ismember(_values_from_object(self), value_set) + result = lib.ismember(comps, value_set) return self._constructor(result, index=self.index).__finalize__(self) def between(self, left, right, inclusive=True): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6988c81b181de..9b7d1008329c5 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4840,7 +4840,7 @@ def test_isin(self): assert_series_equal(result, expected) def test_isin_with_string_scalar(self): - #GH4763 + # GH4763 s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C']) with tm.assertRaises(TypeError): s.isin('a') @@ -4849,6 +4849,38 @@ def test_isin_with_string_scalar(self): s = Series(['aaa', 
'b', 'c']) s.isin('aaa') + def test_isin_with_i8(self): + # GH 5021 + + expected = Series([True,True,False,False,False]) + expected2 = Series([False,True,False,False,False]) + + # datetime64[ns] + s = Series(date_range('jan-01-2013','jan-05-2013')) + + result = s.isin(s[0:2]) + assert_series_equal(result, expected) + + result = s.isin(s[0:2].values) + assert_series_equal(result, expected) + + # fails on dtype conversion in the first place + if not _np_version_under1p7: + result = s.isin(s[0:2].values.astype('datetime64[D]')) + assert_series_equal(result, expected) + + result = s.isin([s[1]]) + assert_series_equal(result, expected2) + + result = s.isin([np.datetime64(s[1])]) + assert_series_equal(result, expected2) + + # timedelta64[ns] + if not _np_version_under1p7: + s = Series(pd.to_timedelta(lrange(5),unit='d')) + result = s.isin(s[0:2]) + assert_series_equal(result, expected) + #------------------------------------------------------------------------------ # TimeSeries-specific def test_cummethods_bool(self):
closes #5021 ``` In [6]: s = Series(date_range('jan-01-2013','jan-05-2013')) In [7]: s Out[7]: 0 2013-01-01 00:00:00 1 2013-01-02 00:00:00 2 2013-01-03 00:00:00 3 2013-01-04 00:00:00 4 2013-01-05 00:00:00 dtype: datetime64[ns] In [8]: s.isin(s[0:2]) Out[8]: 0 True 1 True 2 False 3 False 4 False dtype: bool In [9]: s = Series(pd.to_timedelta(list(range(5)),unit='d')) In [10]: s Out[10]: 0 00:00:00 1 1 days, 00:00:00 2 2 days, 00:00:00 3 3 days, 00:00:00 4 4 days, 00:00:00 dtype: timedelta64[ns] In [11]: s.isin(s[0:2]) Out[11]: 0 True 1 True 2 False 3 False 4 False dtype: bool ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5303
2013-10-23T21:03:57Z
2013-10-24T10:21:38Z
2013-10-24T10:21:38Z
2014-06-25T23:10:50Z
BUG: setting on a frame without an index silently was failing, related (GH5226)
diff --git a/doc/source/release.rst b/doc/source/release.rst index a2015a3b361ac..7dc6876d35124 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -210,8 +210,8 @@ API Changes an alias of iteritems used to get around ``2to3``'s changes). (:issue:`4384`, :issue:`4375`, :issue:`4372`) - ``Series.get`` with negative indexers now returns the same as ``[]`` (:issue:`4390`) - - allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when the single-key is not currently contained in - the index for that axis (:issue:`2578`, :issue:`5226`) + - allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when the single-key + is not currently contained in the index for that axis (:issue:`2578`, :issue:`5226`) - Default export for ``to_clipboard`` is now csv with a sep of `\t` for compat (:issue:`3368`) - ``at`` now will enlarge the object inplace (and return the same) (:issue:`2578`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 80ee9cd34779d..bacd759ee4fd5 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1632,7 +1632,6 @@ def _default_index(n): def ensure_float(arr): if issubclass(arr.dtype.type, (np.integer, np.bool_)): arr = arr.astype(float) - return arr diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e6ad5bf550f7f..5e33532587506 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1573,7 +1573,13 @@ def _ixs(self, i, axis=0, copy=False): if isinstance(label, Index): return self.take(i, axis=1, convert=True) + # if the values returned are not the same length + # as the index (iow a not found value), iget returns + # a 0-len ndarray. 
This is effectively catching + # a numpy error (as numpy should really raise) values = self._data.iget(i) + if not len(values): + values = np.array([np.nan]*len(self.index),dtype=object) return self._constructor_sliced.from_array( values, index=self.index, name=label, fastpath=True) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index f9aeb1f726ff7..45e6a54721bd2 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -300,12 +300,18 @@ def nanmin(values, axis=None, skipna=True): apply_ax = axis if axis is not None else 0 result = np.apply_along_axis(builtins.min, apply_ax, values) else: - result = builtins.min(values) + try: + result = builtins.min(values) + except: + result = np.nan else: if ((axis is not None and values.shape[axis] == 0) or values.size == 0): - result = com.ensure_float(values.sum(axis)) - result.fill(np.nan) + try: + result = com.ensure_float(values.sum(axis)) + result.fill(np.nan) + except: + result = np.nan else: result = values.min(axis) @@ -324,12 +330,18 @@ def nanmax(values, axis=None, skipna=True): apply_ax = axis if axis is not None else 0 result = np.apply_along_axis(builtins.max, apply_ax, values) else: - result = builtins.max(values) + try: + result = builtins.max(values) + except: + result = np.nan else: if ((axis is not None and values.shape[axis] == 0) or values.size == 0): - result = com.ensure_float(values.sum(axis)) - result.fill(np.nan) + try: + result = com.ensure_float(values.sum(axis)) + result.fill(np.nan) + except: + result = np.nan else: result = values.max(axis) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 7745c2f2a083b..2cb26804ea4be 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1592,6 +1592,26 @@ def f(): df.loc[3] = [6,7] assert_frame_equal(df,DataFrame([[6,7]],index=[3],columns=['A','B'])) + # no label overlap + df = DataFrame(columns=['A','B']) + df.loc[0] = Series(1,index=range(4)) + 
assert_frame_equal(df,DataFrame(columns=['A','B'],index=[0])) + + # no index to start + expected = DataFrame({ 0 : Series(1,index=range(4)) },columns=['A','B',0]) + + df = DataFrame(columns=['A','B']) + df[0] = Series(1,index=range(4)) + df.dtypes + str(df) + assert_frame_equal(df,expected) + + df = DataFrame(columns=['A','B']) + df.loc[:,0] = Series(1,index=range(4)) + df.dtypes + str(df) + assert_frame_equal(df,expected) + def test_cache_updating(self): # GH 4939, make sure to update the cache on setitem
related to #5226 the following would previously silently fail: ``` In [3]: df = DataFrame(columns=['A','B']) In [4]: df[0] = Series(1,index=range(4)) In [5]: df Out[5]: A B 0 0 NaN NaN 1 1 NaN NaN 1 2 NaN NaN 1 3 NaN NaN 1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5301
2013-10-23T14:51:25Z
2013-10-23T15:30:29Z
2013-10-23T15:30:29Z
2014-06-23T20:59:54Z
BUG: parser can handle a common_format multi-column index (no row index cols), (GH4702)
diff --git a/doc/source/io.rst b/doc/source/io.rst index 6840717854dea..90bb762f1a1ba 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -890,6 +890,22 @@ of tupleizing columns, specify ``tupleize_cols=True``. print(open('mi.csv').read()) pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1]) +Starting in 0.13.0, ``read_csv`` will be able to interpret a more common format +of multi-columns indices. + +.. ipython:: python + :suppress: + + data = ",a,a,a,b,c,c\n,q,r,s,t,u,v\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12" + fh = open('mi2.csv','w') + fh.write(data) + fh.close() + +.. ipython:: python + + print(open('mi2.csv').read()) + pd.read_csv('mi2.csv',header=[0,1],index_col=0) + Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*. @@ -898,6 +914,7 @@ with ``df.to_csv(..., index=False``), then any ``names`` on the columns index wi import os os.remove('mi.csv') + os.remove('mi2.csv') .. _io.sniff: @@ -1069,7 +1086,7 @@ Note ``NaN``'s, ``NaT``'s and ``None`` will be converted to ``null`` and ``datet Orient Options ++++++++++++++ -There are a number of different options for the format of the resulting JSON +There are a number of different options for the format of the resulting JSON file / string. Consider the following DataFrame and Series: .. ipython:: python @@ -1080,7 +1097,7 @@ file / string. Consider the following DataFrame and Series: sjo = Series(dict(x=15, y=16, z=17), name='D') sjo -**Column oriented** (the default for ``DataFrame``) serialises the data as +**Column oriented** (the default for ``DataFrame``) serialises the data as nested JSON objects with column labels acting as the primary index: .. 
ipython:: python @@ -1113,7 +1130,7 @@ values only, column and index labels are not included: dfjo.to_json(orient="values") # Not available for Series -**Split oriented** serialises to a JSON object containing separate entries for +**Split oriented** serialises to a JSON object containing separate entries for values, index and columns. Name is also included for ``Series``: .. ipython:: python @@ -1123,7 +1140,7 @@ values, index and columns. Name is also included for ``Series``: .. note:: - Any orient option that encodes to a JSON object will not preserve the ordering of + Any orient option that encodes to a JSON object will not preserve the ordering of index and column labels during round-trip serialisation. If you wish to preserve label ordering use the `split` option as it uses ordered containers. @@ -1351,7 +1368,7 @@ The Numpy Parameter If ``numpy=True`` is passed to ``read_json`` an attempt will be made to sniff an appropriate dtype during deserialisation and to subsequently decode directly -to numpy arrays, bypassing the need for intermediate Python objects. +to numpy arrays, bypassing the need for intermediate Python objects. This can provide speedups if you are deserialising a large amount of numeric data: @@ -1375,7 +1392,7 @@ data: The speedup is less noticable for smaller datasets: .. ipython:: python - + jsonfloats = dffloats.head(100).to_json() .. ipython:: python @@ -1399,7 +1416,7 @@ The speedup is less noticable for smaller datasets: - labels are ordered. Labels are only read from the first container, it is assumed that each subsequent row / column has been encoded in the same order. This should be satisfied if the - data was encoded using ``to_json`` but may not be the case if the JSON + data was encoded using ``to_json`` but may not be the case if the JSON is from another source. .. 
ipython:: python diff --git a/doc/source/release.rst b/doc/source/release.rst index f161ead7f7ecc..b3fa90ed6f624 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -634,6 +634,8 @@ Bug Fixes - Fixed seg fault in C parser caused by passing more names than columns in the file. (:issue:`5156`) - Fix ``Series.isin`` with date/time-like dtypes (:issue:`5021`) + - C and Python Parser can now handle the more common multi-index column format + which doesn't have a row for index names (:issue:`4702`) pandas 0.12.0 ------------- diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index e9e82824326a7..c10cb84de34fd 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -569,7 +569,6 @@ def _clean_options(self, options, engine): skiprows = set() if skiprows is None else set(skiprows) # put stuff back - result['index_col'] = index_col result['names'] = names result['converters'] = converters result['na_values'] = na_values @@ -641,7 +640,7 @@ def __init__(self, kwds): self.orig_names = None self.prefix = kwds.pop('prefix', None) - self.index_col = kwds.pop('index_col', None) + self.index_col = kwds.get('index_col', None) self.index_names = None self.col_names = None @@ -1455,6 +1454,7 @@ def _convert_data(self, data): def _infer_columns(self): names = self.names num_original_columns = 0 + clear_buffer = True if self.header is not None: header = self.header @@ -1473,6 +1473,7 @@ def _infer_columns(self): while self.pos <= hr: line = self._next_line() + unnamed_count = 0 this_columns = [] for i, c in enumerate(line): if c == '': @@ -1480,6 +1481,7 @@ def _infer_columns(self): this_columns.append('Unnamed: %d_level_%d' % (i, level)) else: this_columns.append('Unnamed: %d' % i) + unnamed_count += 1 else: this_columns.append(c) @@ -1490,12 +1492,25 @@ def _infer_columns(self): if cur_count > 0: this_columns[i] = '%s.%d' % (col, cur_count) counts[col] = cur_count + 1 + elif have_mi_columns: + + # if we have grabbed an extra line, but its not in our 
format + # so save in the buffer, and create an blank extra line for the rest of the + # parsing code + if hr == header[-1]: + lc = len(this_columns) + ic = len(self.index_col) if self.index_col is not None else 0 + if lc != unnamed_count and lc-ic > unnamed_count: + clear_buffer = False + this_columns = [ None ] * lc + self.buf = [ self.buf[-1] ] columns.append(this_columns) if len(columns) == 1: num_original_columns = len(this_columns) - self._clear_buffer() + if clear_buffer: + self._clear_buffer() if names is not None: if (self.usecols is not None and len(names) != len(self.usecols)) \ diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 99a6c630e6ac4..66730f255eb1d 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1215,29 +1215,113 @@ def test_header_multi_index(self): R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2 """ - df = read_csv(StringIO(data), header=[0, 2, 3, 4], index_col=[0, 1], tupleize_cols=False) + df = self.read_csv(StringIO(data), header=[0, 2, 3, 4], index_col=[0, 1], tupleize_cols=False) tm.assert_frame_equal(df, expected) # skipping lines in the header - df = read_csv(StringIO(data), header=[0, 2, 3, 4], index_col=[0, 1], tupleize_cols=False) + df = self.read_csv(StringIO(data), header=[0, 2, 3, 4], index_col=[0, 1], tupleize_cols=False) tm.assert_frame_equal(df, expected) #### invalid options #### # no as_recarray - self.assertRaises(ValueError, read_csv, StringIO(data), header=[0,1,2,3], + self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3], index_col=[0,1], as_recarray=True, tupleize_cols=False) # names - self.assertRaises(ValueError, read_csv, StringIO(data), header=[0,1,2,3], + self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3], index_col=[0,1], names=['foo','bar'], tupleize_cols=False) # usecols - self.assertRaises(ValueError, read_csv, StringIO(data), header=[0,1,2,3], + self.assertRaises(ValueError, self.read_csv, StringIO(data), 
header=[0,1,2,3], index_col=[0,1], usecols=['foo','bar'], tupleize_cols=False) # non-numeric index_col - self.assertRaises(ValueError, read_csv, StringIO(data), header=[0,1,2,3], + self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0,1,2,3], index_col=['foo','bar'], tupleize_cols=False) + def test_header_multiindex_common_format(self): + + df = DataFrame([[1,2,3,4,5,6],[7,8,9,10,11,12]], + index=['one','two'], + columns=MultiIndex.from_tuples([('a','q'),('a','r'),('a','s'), + ('b','t'),('c','u'),('c','v')])) + + # to_csv + data = """,a,a,a,b,c,c +,q,r,s,t,u,v +,,,,,, +one,1,2,3,4,5,6 +two,7,8,9,10,11,12""" + + result = self.read_csv(StringIO(data),header=[0,1],index_col=0) + tm.assert_frame_equal(df,result) + + # common + data = """,a,a,a,b,c,c +,q,r,s,t,u,v +one,1,2,3,4,5,6 +two,7,8,9,10,11,12""" + + result = self.read_csv(StringIO(data),header=[0,1],index_col=0) + tm.assert_frame_equal(df,result) + + # common, no index_col + data = """a,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = self.read_csv(StringIO(data),header=[0,1],index_col=None) + tm.assert_frame_equal(df.reset_index(drop=True),result) + + # malformed case 1 + expected = DataFrame(np.array([[ 2, 3, 4, 5, 6], + [ 8, 9, 10, 11, 12]]), + index=Index([1, 7]), + columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]], + labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]], + names=[u('a'), u('q')])) + + data = """a,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = self.read_csv(StringIO(data),header=[0,1],index_col=0) + tm.assert_frame_equal(expected,result) + + # malformed case 2 + expected = DataFrame(np.array([[ 2, 3, 4, 5, 6], + [ 8, 9, 10, 11, 12]]), + index=Index([1, 7]), + columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]], + labels=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]], + names=[None, u('q')])) + + data = """,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = 
self.read_csv(StringIO(data),header=[0,1],index_col=0) + tm.assert_frame_equal(expected,result) + + # mi on columns and index (malformed) + expected = DataFrame(np.array([[ 3, 4, 5, 6], + [ 9, 10, 11, 12]]), + index=MultiIndex(levels=[[1, 7], [2, 8]], + labels=[[0, 1], [0, 1]]), + columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]], + labels=[[0, 1, 2, 2], [0, 1, 2, 3]], + names=[None, u('q')])) + + data = """,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = self.read_csv(StringIO(data),header=[0,1],index_col=[0, 1]) + tm.assert_frame_equal(expected,result) + def test_pass_names_with_index(self): lines = self.data1.split('\n') no_header = '\n'.join(lines[1:]) diff --git a/pandas/parser.pyx b/pandas/parser.pyx index 06a1ddfdae025..36b4b91023a73 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -250,6 +250,7 @@ cdef class TextReader: object memory_map object as_recarray object header, orig_header, names, header_start, header_end + object index_col object low_memory object skiprows object compact_ints, use_unsigned @@ -266,6 +267,7 @@ cdef class TextReader: header=0, header_start=0, header_end=0, + index_col=None, names=None, memory_map=False, @@ -439,6 +441,8 @@ cdef class TextReader: # XXX self.noconvert = set() + self.index_col = index_col + #---------------------------------------- # header stuff @@ -574,7 +578,7 @@ cdef class TextReader: # header is now a list of lists, so field_count should use header[0] cdef: - size_t i, start, data_line, field_count, passed_count, hr + size_t i, start, data_line, field_count, passed_count, hr, unnamed_count char *word object name int status @@ -606,6 +610,7 @@ cdef class TextReader: # TODO: Py3 vs. 
Py2 counts = {} + unnamed_count = 0 for i in range(field_count): word = self.parser.words[start + i] @@ -623,6 +628,7 @@ cdef class TextReader: name = 'Unnamed: %d_level_%d' % (i,level) else: name = 'Unnamed: %d' % i + unnamed_count += 1 count = counts.get(name, 0) if count > 0 and self.mangle_dupe_cols and not self.has_mi_columns: @@ -631,6 +637,19 @@ cdef class TextReader: this_header.append(name) counts[name] = count + 1 + if self.has_mi_columns: + + # if we have grabbed an extra line, but its not in our format + # so save in the buffer, and create an blank extra line for the rest of the + # parsing code + if hr == self.header[-1]: + lc = len(this_header) + ic = len(self.index_col) if self.index_col is not None else 0 + if lc != unnamed_count and lc-ic > unnamed_count: + hr -= 1 + self.parser_start -= 1 + this_header = [ None ] * lc + data_line = hr + 1 header.append(this_header) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index f6db680d30061..7ae537b0b94df 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5348,15 +5348,19 @@ def test_to_csv_moar(self): def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None, dupe_col=False): + kwargs = dict(parse_dates=False) if cnlvl: - header = lrange(cnlvl) + if rnlvl is not None: + kwargs['index_col'] = lrange(rnlvl) + kwargs['header'] = lrange(cnlvl) with ensure_clean(path) as path: df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False) - recons = DataFrame.from_csv(path,header=lrange(cnlvl),tupleize_cols=False,parse_dates=False) + recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs) else: + kwargs['header'] = 0 with ensure_clean(path) as path: df.to_csv(path,encoding='utf8',chunksize=chunksize) - recons = DataFrame.from_csv(path,header=0,parse_dates=False) + recons = DataFrame.from_csv(path,**kwargs) def _to_uni(x): if not isinstance(x, compat.text_type): @@ -5366,7 +5370,7 @@ def _to_uni(x): # read_Csv disambiguates the columns by 
# labeling them dupe.1,dupe.2, etc'. monkey patch columns recons.columns = df.columns - if rnlvl: + if rnlvl and not cnlvl: delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)] ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl) recons.index = ix @@ -5417,7 +5421,7 @@ def _to_uni(x): recons.columns = np.array(recons.columns,dtype=c_dtype ) df.columns = np.array(df.columns,dtype=c_dtype ) - assert_frame_equal(df, recons,check_names=False,check_less_precise=True) + assert_frame_equal(df,recons,check_names=False,check_less_precise=True) N = 100 chunksize=1000 @@ -5476,7 +5480,7 @@ def make_dtnat_arr(n,nnat=None): base = int((chunksize// ncols or 1) or 1) for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2, base-1,base,base+1]: - print( nrows,ncols) + #print( nrows,ncols) _do_test(mkdf(nrows, ncols),path) for nrows in [10,N-2,N-1,N,N+1,N+2]: @@ -5498,7 +5502,7 @@ def make_dtnat_arr(n,nnat=None): base = int(chunksize//ncols) for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2, base-1,base,base+1]: - print(nrows, ncols) + #print(nrows, ncols) _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2) _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2) _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2), @@ -5615,11 +5619,8 @@ def _make_frame(names=None): # dup column names? 
df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4) df.to_csv(path,tupleize_cols=False) - result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False) - result.columns = ['R2','A','B','C'] - new_result = result.reset_index().set_index(['R0','R1','R2']) - new_result.columns = df.columns - assert_frame_equal(df,new_result) + result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False) + assert_frame_equal(df,result) # writing with no index df = _make_frame() @@ -9881,7 +9882,7 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, if not ('max' in name or 'min' in name or 'count' in name): df = DataFrame({'b': date_range('1/1/2001', periods=2)}) _f = getattr(df, name) - print(df) + #print(df) self.assertFalse(len(_f())) df['a'] = lrange(len(df)) @@ -11786,7 +11787,7 @@ def to_series(mi, level): if isinstance(v, Index): assert v.is_(expected[k]) elif isinstance(v, Series): - print(k) + #print(k) tm.assert_series_equal(v, expected[k]) else: raise AssertionError("object must be a Series or Index")
closes #4702, should help with #5254 Will handle the format generated by `to_csv` (which has a an 'extra' line for the index names) and the more 'common' format ``` In [4]: data = """,a,a,a,b,c,c ...: ,q,r,s,t,u,v ...: one,1,2,3,4,5,6 ...: two,7,8,9,10,11,12""" In [6]: read_csv(StringIO(data),header=[0,1],index_col=0) Out[6]: a b c q r s t u v one 1 2 3 4 5 6 two 7 8 9 10 11 12 ``` equiv to ``` df = DataFrame([[1,2,3,4,5,6],[7,8,9,10,11,12]], index=['one','two'], columns=MultiIndex.from_tuples([('a','q'),('a','r'),('a','s'), ('b','t'),('c','u'),('c','v')])) ``` no index col ``` In [1]: data = """a,a,a,b,c,c ...: q,r,s,t,u,v ...: 1,2,3,4,5,6 ...: 7,8,9,10,11,12""" In [2]: read_csv(StringIO(data),header=[0,1],index_col=None) Out[2]: a b c q r s t u v 0 1 2 3 4 5 6 1 7 8 9 10 11 12 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5298
2013-10-22T23:40:12Z
2013-10-24T13:39:19Z
2013-10-24T13:39:19Z
2014-06-22T16:42:49Z
DOC: Remove iloc example.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 143f2bd9d8252..9236ef94f85fd 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -590,14 +590,6 @@ a list of items you want to check for. df.isin(values) -You can also describe columns using integer location: - -.. ipython:: python - - values = {0: ['a', 'b']} - - df.isin(values, iloc=True) - Combine DataFrame's ``isin`` with the ``any()`` and ``all()`` methods to quickly select subsets of your data that meet a given criteria. To select a row where each column meets its own criterion:
Forgot to remove the `isin(iloc=True)` example from indexing.rst. https://github.com/pydata/pandas/pull/5199#issuecomment-26786681
https://api.github.com/repos/pandas-dev/pandas/pulls/5296
2013-10-22T13:40:07Z
2013-10-22T13:53:21Z
2013-10-22T13:53:21Z
2016-11-03T12:37:31Z
BLD: 0 13 pickles
diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_AMD64_windows_2.7.3.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_AMD64_windows_2.7.3.pickle new file mode 100644 index 0000000000000..f03ec67f4d7c4 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_AMD64_windows_2.7.3.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_2.6.5.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_2.6.5.pickle new file mode 100644 index 0000000000000..aa2f607fa4302 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_2.6.5.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_2.7.3.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_2.7.3.pickle new file mode 100644 index 0000000000000..5c988a1b0c7ef Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_2.7.3.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_3.2.3.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_3.2.3.pickle new file mode 100644 index 0000000000000..275cc4b951134 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_i686_linux_3.2.3.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_darwin_2.7.5.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_darwin_2.7.5.pickle new file mode 100644 index 0000000000000..0ff7f6c68f34e Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_darwin_2.7.5.pickle differ diff --git a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_2.7.3.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_2.7.3.pickle new file mode 100644 index 0000000000000..0ff7f6c68f34e Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_2.7.3.pickle differ diff --git 
a/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_3.3.0.pickle b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_3.3.0.pickle new file mode 100644 index 0000000000000..5f27dd2ff056d Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.13.0/0.13.0_x86_64_linux_3.3.0.pickle differ diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py index 05e5d68379b09..08f63b0179db2 100644 --- a/pandas/io/tests/generate_legacy_pickles.py +++ b/pandas/io/tests/generate_legacy_pickles.py @@ -1,12 +1,6 @@ """ self-contained to write legacy pickle files """ from __future__ import print_function -# make sure we are < 0.13 compat (in py3) -try: - from pandas.compat import zip, cPickle as pickle -except: - import pickle - def _create_sp_series(): import numpy as np @@ -114,21 +108,35 @@ def write_legacy_pickles(): import sys sys.path.insert(0,'.') - import os + import os, os.path import numpy as np import pandas import pandas.util.testing as tm import platform as pl - print("This script generates a pickle file for the current arch, system, and python version") + # make sure we are < 0.13 compat (in py3) + try: + from pandas.compat import zip, cPickle as pickle + except: + import pickle + + sys_version = version = pandas.__version__ + if len(sys.argv) < 2: + exit("{0} <version> <output_dir>".format(sys.argv[0])) - version = pandas.__version__ + version = str(sys.argv[1]) + output_dir = str(sys.argv[2]) + + print("This script generates a pickle file for the current arch, system, and python version") + print(" system version: {0}".format(sys_version)) + print(" output version: {0}".format(version)) + print(" output dir : {0}".format(output_dir)) # construct a reasonable platform name f = '_'.join([ str(version), str(pl.machine()), str(pl.system().lower()), str(pl.python_version()) ]) pth = '{0}.pickle'.format(f) - fh = open(pth,'wb') + fh = open(os.path.join(output_dir,pth),'wb') 
pickle.dump(create_data(),fh,pickle.HIGHEST_PROTOCOL) fh.close() diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index 167eed95fd5a6..ea769a0515a78 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -80,6 +80,9 @@ def test_read_pickles_0_11_0(self): def test_read_pickles_0_12_0(self): self.read_pickles('0.12.0') + def test_read_pickles_0_13_0(self): + self.read_pickles('0.13.0') + def test_round_trip_current(self): for typ, dv in self.data.items(): diff --git a/setup.py b/setup.py index 635da56d7339f..6ab478bfc2541 100755 --- a/setup.py +++ b/setup.py @@ -555,6 +555,7 @@ def pxd(name): 'tests/data/legacy_pickle/0.10.1/*.pickle', 'tests/data/legacy_pickle/0.11.0/*.pickle', 'tests/data/legacy_pickle/0.12.0/*.pickle', + 'tests/data/legacy_pickle/0.13.0/*.pickle', 'tests/data/*.csv', 'tests/data/*.dta', 'tests/data/*.txt',
closes #4796
https://api.github.com/repos/pandas-dev/pandas/pulls/5295
2013-10-21T23:03:54Z
2013-10-23T12:32:55Z
2013-10-23T12:32:55Z
2014-06-27T17:57:16Z
DOC add get_dummies to reshaping.rst
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 5dedfa1ad144d..c82a5115bc634 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -361,3 +361,44 @@ Alternatively we can specify custom bin-edges: .. ipython:: python cut(ages, bins=[0, 18, 35, 70]) + + +.. _reshaping.dummies: + +Computing indicator / dummy variables +------------------------------------- + +To convert a categorical variable into a "dummy" or "indicator" DataFrame, for example +a column in a DataFrame (a Series) which has ``k`` distinct values, can derive a DataFrame +containing ``k`` columns of 1s and 0s: + +.. ipython:: python + + df = DataFrame({'key': list('bbacab'), 'data1': range(6)}) + + + get_dummies(df['key']) + +Sometimes it's useful to prefix the column names, for example when merging the result +with the original DataFrame: + +.. ipython:: python + + dummies = get_dummies(df['key'], prefix='key') + dummies + + + df[['data']].join(dummies) + +This function is often used along with discretization functions like ``cut``: + +.. ipython:: python + + values = randn(10) + values + + + bins = [0, 0.2, 0.4, 0.6, 0.8, 1] + + + get_dummies(cut(values, bins)) \ No newline at end of file
examples shamelessly from Wes' book closes #4444
https://api.github.com/repos/pandas-dev/pandas/pulls/5293
2013-10-21T21:05:33Z
2013-10-21T22:23:25Z
2013-10-21T22:23:25Z
2014-07-16T08:36:23Z
BUG: DateOffset weekday around DST produces unexpected results. fixes #5175
diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index 1db189fcc74e3..006b44fe02379 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -596,12 +596,13 @@ Bug Fixes - Bug in area plot draws legend with incorrect ``alpha`` when ``stacked=True`` (:issue:`8027`) -- ``Period`` and ``PeriodIndex`` addition/subtraction with ``np.timedelta64`` results in incorrect internal representations (:issue:`7740`) - ``Holiday`` bug in Holiday with no offset or observance (:issue:`7987`) - Bug in ``DataFrame.to_latex`` formatting when columns or index is a ``MultiIndex`` (:issue:`7982`). +- Bug in ``DateOffset`` around Daylight Savings Time produces unexpected results (:issue:`5175`). + diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index cd37f4000e5a2..8bb5584fee7a7 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -152,6 +152,12 @@ def __add__(date): """ _cacheable = False _normalize_cache = True + _kwds_use_relativedelta = ( + 'years', 'months', 'weeks', 'days', + 'year', 'month', 'week', 'day', 'weekday', + 'hour', 'minute', 'second', 'microsecond' + ) + _use_relativedelta = False # default for prior pickles normalize = False @@ -160,21 +166,52 @@ def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) self.normalize = normalize self.kwds = kwds - if len(kwds) > 0: - self._offset = relativedelta(**kwds) + self._offset, self._use_relativedelta = self._determine_offset() + + def _determine_offset(self): + # timedelta is used for sub-daily plural offsets and all singular offsets + # relativedelta is used for plural offsets of daily length or more + # nanosecond(s) are handled by apply_wraps + kwds_no_nanos = dict( + (k, v) for k, v in self.kwds.items() + if k not in ('nanosecond', 'nanoseconds') + ) + use_relativedelta = False + + if len(kwds_no_nanos) > 0: + if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos): + use_relativedelta = True + offset = relativedelta(**kwds_no_nanos) + else: + 
# sub-daily offset - use timedelta (tz-aware) + offset = timedelta(**kwds_no_nanos) else: - self._offset = timedelta(1) + offset = timedelta(1) + return offset, use_relativedelta @apply_wraps def apply(self, other): + if self._use_relativedelta: + other = as_datetime(other) + if len(self.kwds) > 0: + tzinfo = getattr(other, 'tzinfo', None) + if tzinfo is not None and self._use_relativedelta: + # perform calculation in UTC + other = other.replace(tzinfo=None) + if self.n > 0: for i in range(self.n): other = other + self._offset else: for i in range(-self.n): other = other - self._offset - return other + + if tzinfo is not None and self._use_relativedelta: + # bring tz back from UTC calculation + other = tslib._localize_pydatetime(other, tzinfo) + + return as_timestamp(other) else: return other + timedelta(self.n) diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index f6f91760e8ad8..b3764b73b15ac 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -3104,6 +3104,134 @@ def test_str_for_named_is_name(self): self.assertEqual(str(offset), name) +def get_utc_offset_hours(ts): + # take a Timestamp and compute total hours of utc offset + o = ts.utcoffset() + return (o.days * 24 * 3600 + o.seconds) / 3600.0 + + +class TestDST(tm.TestCase): + """ + test DateOffset additions over Daylight Savings Time + """ + # one microsecond before the DST transition + ts_pre_fallback = "2013-11-03 01:59:59.999999" + ts_pre_springfwd = "2013-03-10 01:59:59.999999" + + # test both basic names and dateutil timezones + timezone_utc_offsets = { + 'US/Eastern': dict( + utc_offset_daylight=-4, + utc_offset_standard=-5, + ), + 'dateutil/US/Pacific': dict( + utc_offset_daylight=-7, + utc_offset_standard=-8, + ) + } + valid_date_offsets_singular = [ + 'weekday', 'day', 'hour', 'minute', 'second', 'microsecond' + ] + valid_date_offsets_plural = [ + 'weeks', 'days', + 'hours', 'minutes', 'seconds', + 'milliseconds', 
'microseconds' + ] + + def _test_all_offsets(self, n, **kwds): + valid_offsets = self.valid_date_offsets_plural if n > 1 \ + else self.valid_date_offsets_singular + + for name in valid_offsets: + self._test_offset(offset_name=name, offset_n=n, **kwds) + + def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset): + offset = DateOffset(**{offset_name: offset_n}) + t = tstart + offset + if expected_utc_offset is not None: + self.assertTrue(get_utc_offset_hours(t) == expected_utc_offset) + + if offset_name == 'weeks': + # dates should match + self.assertTrue( + t.date() == + timedelta(days=7 * offset.kwds['weeks']) + tstart.date() + ) + # expect the same day of week, hour of day, minute, second, ... + self.assertTrue( + t.dayofweek == tstart.dayofweek and + t.hour == tstart.hour and + t.minute == tstart.minute and + t.second == tstart.second + ) + elif offset_name == 'days': + # dates should match + self.assertTrue(timedelta(offset.kwds['days']) + tstart.date() == t.date()) + # expect the same hour of day, minute, second, ... 
+ self.assertTrue( + t.hour == tstart.hour and + t.minute == tstart.minute and + t.second == tstart.second + ) + elif offset_name in self.valid_date_offsets_singular: + # expect the signular offset value to match between tstart and t + datepart_offset = getattr(t, offset_name if offset_name != 'weekday' else 'dayofweek') + self.assertTrue(datepart_offset == offset.kwds[offset_name]) + else: + # the offset should be the same as if it was done in UTC + self.assertTrue( + t == (tstart.tz_convert('UTC') + offset).tz_convert('US/Pacific') + ) + + def _make_timestamp(self, string, hrs_offset, tz): + offset_string = '{hrs:02d}00'.format(hrs=hrs_offset) if hrs_offset >= 0 else \ + '-{hrs:02d}00'.format(hrs=-1 * hrs_offset) + return Timestamp(string + offset_string).tz_convert(tz) + + def test_fallback_plural(self): + """test moving from daylight savings to standard time""" + for tz, utc_offsets in self.timezone_utc_offsets.items(): + hrs_pre = utc_offsets['utc_offset_daylight'] + hrs_post = utc_offsets['utc_offset_standard'] + self._test_all_offsets( + n=3, + tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz), + expected_utc_offset=hrs_post + ) + + def test_springforward_plural(self): + """test moving from standard to daylight savings""" + for tz, utc_offsets in self.timezone_utc_offsets.items(): + hrs_pre = utc_offsets['utc_offset_standard'] + hrs_post = utc_offsets['utc_offset_daylight'] + self._test_all_offsets( + n=3, + tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz), + expected_utc_offset=hrs_post + ) + + def test_fallback_singular(self): + # in the case of signular offsets, we dont neccesarily know which utc offset + # the new Timestamp will wind up in (the tz for 1 month may be different from 1 second) + # so we don't specify an expected_utc_offset + for tz, utc_offsets in self.timezone_utc_offsets.items(): + hrs_pre = utc_offsets['utc_offset_standard'] + self._test_all_offsets( + n=1, + tstart=self._make_timestamp(self.ts_pre_fallback, 
hrs_pre, tz), + expected_utc_offset=None + ) + + def test_springforward_singular(self): + for tz, utc_offsets in self.timezone_utc_offsets.items(): + hrs_pre = utc_offsets['utc_offset_standard'] + self._test_all_offsets( + n=1, + tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz), + expected_utc_offset=None + ) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
closes #5175 Wound up working on this in preparation for the DST change next week. Here's a first pass at a solution and a test.
https://api.github.com/repos/pandas-dev/pandas/pulls/5292
2013-10-21T21:04:06Z
2014-09-04T22:18:50Z
2014-09-04T22:18:50Z
2014-09-10T17:46:09Z
DOC: expand JSON docs
diff --git a/doc/source/io.rst b/doc/source/io.rst index 37227edc83fe2..6840717854dea 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1018,7 +1018,7 @@ which, if set to ``True``, will additionally output the length of the Series. JSON ---- -Read and write ``JSON`` format files. +Read and write ``JSON`` format files and strings. .. _io.json: @@ -1066,12 +1066,77 @@ Note ``NaN``'s, ``NaT``'s and ``None`` will be converted to ``null`` and ``datet json = dfj.to_json() json +Orient Options +++++++++++++++ + +There are a number of different options for the format of the resulting JSON +file / string. Consider the following DataFrame and Series: + +.. ipython:: python + + dfjo = DataFrame(dict(A=range(1, 4), B=range(4, 7), C=range(7, 10)), + columns=list('ABC'), index=list('xyz')) + dfjo + sjo = Series(dict(x=15, y=16, z=17), name='D') + sjo + +**Column oriented** (the default for ``DataFrame``) serialises the data as +nested JSON objects with column labels acting as the primary index: + +.. ipython:: python + + dfjo.to_json(orient="columns") + # Not available for Series + +**Index oriented** (the default for ``Series``) similar to column oriented +but the index labels are now primary: + +.. ipython:: python + + dfjo.to_json(orient="index") + sjo.to_json(orient="index") + +**Record oriented** serialises the data to a JSON array of column -> value records, +index labels are not included. This is useful for passing DataFrame data to plotting +libraries, for example the JavaScript library d3.js: + +.. ipython:: python + + dfjo.to_json(orient="records") + sjo.to_json(orient="records") + +**Value oriented** is a bare-bones option which serialises to nested JSON arrays of +values only, column and index labels are not included: + +.. ipython:: python + + dfjo.to_json(orient="values") + # Not available for Series + +**Split oriented** serialises to a JSON object containing separate entries for +values, index and columns. Name is also included for ``Series``: + +.. 
ipython:: python + + dfjo.to_json(orient="split") + sjo.to_json(orient="split") + +.. note:: + + Any orient option that encodes to a JSON object will not preserve the ordering of + index and column labels during round-trip serialisation. If you wish to preserve + label ordering use the `split` option as it uses ordered containers. + +Date Handling ++++++++++++++ + Writing in iso date format .. ipython:: python dfd = DataFrame(randn(5, 2), columns=list('AB')) dfd['date'] = Timestamp('20130101') + dfd = dfd.sort_index(1, ascending=False) json = dfd.to_json(date_format='iso') json @@ -1082,7 +1147,7 @@ Writing in iso date format, with microseconds json = dfd.to_json(date_format='iso', date_unit='us') json -Actually I prefer epoch timestamps, in seconds +Epoch timestamps, in seconds .. ipython:: python @@ -1101,6 +1166,9 @@ Writing to a file, with a date index and a date column dfj2.to_json('test.json') open('test.json').read() +Fallback Behavior ++++++++++++++++++ + If the JSON serialiser cannot handle the container contents directly it will fallback in the following manner: - if a ``toDict`` method is defined by the unrecognised object then that @@ -1182,7 +1250,7 @@ is ``None``. To explicity force ``Series`` parsing, pass ``typ=series`` - ``convert_dates`` : a list of columns to parse for dates; If True, then try to parse datelike columns, default is True - ``keep_default_dates`` : boolean, default True. If parsing dates, then parse the default datelike columns - ``numpy`` : direct decoding to numpy arrays. default is False; - Note that the JSON ordering **MUST** be the same for each term if ``numpy=True`` + Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True`` - ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. 
Default (``False``) is to use fast but less precise builtin functionality - ``date_unit`` : string, the timestamp unit to detect if converting dates. Default None. By default the timestamp precision will be detected, if this is not desired @@ -1191,6 +1259,13 @@ is ``None``. To explicity force ``Series`` parsing, pass ``typ=series`` The parser will raise one of ``ValueError/TypeError/AssertionError`` if the JSON is not parsable. +If a non-default ``orient`` was used when encoding to JSON be sure to pass the same +option here so that decoding produces sensible results, see `Orient Options`_ for an +overview. + +Data Conversion ++++++++++++++++ + The default of ``convert_axes=True``, ``dtype=True``, and ``convert_dates=True`` will try to parse the axes, and all of the data into appropriate types, including dates. If you need to override specific dtypes, pass a dict to ``dtype``. ``convert_axes`` should only be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') in an axes. @@ -1209,31 +1284,31 @@ be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') Thus there are times where you may want to specify specific dtypes via the ``dtype`` keyword argument. -Reading from a JSON string +Reading from a JSON string: .. ipython:: python pd.read_json(json) -Reading from a file +Reading from a file: .. ipython:: python pd.read_json('test.json') -Don't convert any data (but still convert axes and dates) +Don't convert any data (but still convert axes and dates): .. ipython:: python pd.read_json('test.json', dtype=object).dtypes -Specify how I want to convert data +Specify dtypes for conversion: .. ipython:: python pd.read_json('test.json', dtype={'A' : 'float32', 'bools' : 'int8'}).dtypes -I like my string indicies +Preserve string indicies: .. 
ipython:: python @@ -1250,8 +1325,7 @@ I like my string indicies sij.index sij.columns -My dates have been written in nanoseconds, so they need to be read back in -nanoseconds +Dates written in nanoseconds need to be read back in nanoseconds: .. ipython:: python @@ -1269,6 +1343,65 @@ nanoseconds dfju = pd.read_json(json, date_unit='ns') dfju +The Numpy Parameter ++++++++++++++++++++ + +.. note:: + This supports numeric data only. Index and columns labels may be non-numeric, e.g. strings, dates etc. + +If ``numpy=True`` is passed to ``read_json`` an attempt will be made to sniff +an appropriate dtype during deserialisation and to subsequently decode directly +to numpy arrays, bypassing the need for intermediate Python objects. + +This can provide speedups if you are deserialising a large amount of numeric +data: + +.. ipython:: python + + randfloats = np.random.uniform(-100, 1000, 10000) + randfloats.shape = (1000, 10) + dffloats = DataFrame(randfloats, columns=list('ABCDEFGHIJ')) + + jsonfloats = dffloats.to_json() + +.. ipython:: python + + timeit read_json(jsonfloats) + +.. ipython:: python + + timeit read_json(jsonfloats, numpy=True) + +The speedup is less noticable for smaller datasets: + +.. ipython:: python + + jsonfloats = dffloats.head(100).to_json() + +.. ipython:: python + + timeit read_json(jsonfloats) + +.. ipython:: python + + timeit read_json(jsonfloats, numpy=True) + +.. warning:: + + Direct numpy decoding makes a number of assumptions and may fail or produce + unexpected output if these assumptions are not satisfied: + + - data is numeric. + + - data is uniform. The dtype is sniffed from the first value decoded. + A ``ValueError`` may be raised, or incorrect output may be produced + if this condition is not satisfied. + + - labels are ordered. Labels are only read from the first container, it is assumed + that each subsequent row / column has been encoded in the same order. 
This should be satisfied if the + data was encoded using ``to_json`` but may not be the case if the JSON + is from another source. + .. ipython:: python :suppress: diff --git a/doc/source/release.rst b/doc/source/release.rst index c147a83032761..6eeaa55280e43 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -102,6 +102,8 @@ Improvements to existing features - Significant table writing performance improvements in ``HDFStore`` - JSON date serialisation now performed in low-level C code. - JSON support for encoding datetime.time + - Expanded JSON docs, more info about orient options and the use of the numpy + param when decoding. - Add ``drop_level`` argument to xs (:issue:`4180`) - Can now resample a DataFrame with ohlc (:issue:`2320`) - ``Index.copy()`` and ``MultiIndex.copy()`` now accept keyword arguments to diff --git a/pandas/io/json.py b/pandas/io/json.py index c81064d1c0516..83c503e7419e9 100644 --- a/pandas/io/json.py +++ b/pandas/io/json.py @@ -153,8 +153,9 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, keep_default_dates : boolean, default True. If parsing dates, then parse the default datelike columns numpy : boolean, default False - Direct decoding to numpy arrays. Note that the JSON ordering MUST be - the same for each term if numpy=True. + Direct decoding to numpy arrays. Supports numeric data only, but + non-numeric column and index labels are supported. Note also that the + JSON ordering MUST be the same for each term if numpy=True. precise_float : boolean, default False. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but
closes #4703 Expanded JSON docs to add more detail about the different orient options and the use of the numpy param when decoding. Feedback welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/5287
2013-10-21T00:33:13Z
2013-10-21T00:41:26Z
2013-10-21T00:41:26Z
2014-07-16T08:36:20Z
API: Add equals method to NDFrames.
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index bd2980c2f1c9f..e9cc03c098d03 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -215,6 +215,14 @@ These operations produce a pandas object the same type as the left-hand-side inp that if of dtype ``bool``. These ``boolean`` objects can be used in indexing operations, see :ref:`here<indexing.boolean>` +As of v0.13.1, Series, DataFrames and Panels have an equals method to compare if +two such objects are equal. + +.. ipython:: python + + df.equals(df) + df.equals(df2) + .. _basics.reductions: Boolean Reductions diff --git a/doc/source/release.rst b/doc/source/release.rst index a69c0f8acaa46..78b94f14daf54 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -66,6 +66,7 @@ API Changes timedeltas (:issue:`5458`,:issue:`5689`) - Add ``-NaN`` and ``-nan`` to the default set of NA values (:issue:`5952`). See :ref:`NA Values <io.na_values>`. + - ``NDFrame`` now has an ``equals`` method. (:issue:`5283`) Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.13.1.txt b/doc/source/v0.13.1.txt index 3efe79ce281db..1740d21a08466 100644 --- a/doc/source/v0.13.1.txt +++ b/doc/source/v0.13.1.txt @@ -42,6 +42,21 @@ API changes - Add ``-NaN`` and ``-nan`` to the default set of NA values (:issue:`5952`). See :ref:`NA Values <io.na_values>`. +- Added the ``NDFrame.equals()`` method to compare if two NDFrames are + equal have equal axes, dtypes, and values. Added the + ``array_equivalent`` function to compare if two ndarrays are + equal. NaNs in identical locations are treated as + equal. (:issue:`5283`) + + .. 
ipython:: python + + df = DataFrame({'col':['foo', 0, np.nan]}).sort() + df2 = DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0]) + df.equals(df) + + import pandas.core.common as com + com.array_equivalent(np.array([0, np.nan]), np.array([0, np.nan])) + np.array_equal(np.array([0, np.nan]), np.array([0, np.nan])) Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/common.py b/pandas/core/common.py index 5b585c44ca3b8..c3c038fa0945c 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -277,6 +277,42 @@ def notnull(obj): return -res +def array_equivalent(left, right): + """ + True if two arrays, left and right, have equal non-NaN elements, and NaNs in + corresponding locations. False otherwise. It is assumed that left and right + are NumPy arrays of the same dtype. The behavior of this function + (particularly with respect to NaNs) is not defined if the dtypes are + different. + + Parameters + ---------- + left, right : array_like + Input arrays. + + Returns + ------- + b : bool + Returns True if the arrays are equivalent. + + Examples + -------- + >>> array_equivalent([1, 2, nan], np.array([1, 2, nan])) + True + >>> array_equivalent([1, nan, 2], [1, 2, nan]) + False + """ + if left.shape != right.shape: return False + # NaNs occur only in object arrays, float or complex arrays. 
+ if left.dtype == np.object_: + # If object array, we need to use pd.isnull + return ((left == right) | pd.isnull(left) & pd.isnull(right)).all() + elif not issubclass(left.dtype.type, (np.floating, np.complexfloating)): + # if not a float or complex array, then there are no NaNs + return np.array_equal(left, right) + # For float or complex arrays, using np.isnan is faster than pd.isnull + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + def _iterable_not_string(x): return (isinstance(x, collections.Iterable) and not isinstance(x, compat.string_types)) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bd53e1a35e166..2c03d16fc5cbe 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -608,6 +608,15 @@ def __invert__(self): arr = operator.inv(_values_from_object(self)) return self._wrap_array(arr, self.axes, copy=False) + def equals(self, other): + """ + Determines if two NDFrame objects contain the same elements. NaNs in the + same location are considered equal. 
+ """ + if not isinstance(other, self._constructor): + return False + return self._data.equals(other._data) + #---------------------------------------------------------------------- # Iteration diff --git a/pandas/core/internals.py b/pandas/core/internals.py index dacab4fd6e6c6..bd85391abe8a7 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1121,13 +1121,24 @@ def func(c, v, o): return result_blocks + def equals(self, other): + if self.dtype != other.dtype or self.shape != other.shape: return False + return np.array_equal(self.values, other.values) + class NumericBlock(Block): is_numeric = True _can_hold_na = True -class FloatBlock(NumericBlock): +class FloatOrComplexBlock(NumericBlock): + def equals(self, other): + if self.dtype != other.dtype or self.shape != other.shape: return False + left, right = self.values, other.values + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + + +class FloatBlock(FloatOrComplexBlock): is_float = True _downcast_dtype = 'int64' @@ -1166,8 +1177,7 @@ def should_store(self, value): return (issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype) - -class ComplexBlock(NumericBlock): +class ComplexBlock(FloatOrComplexBlock): is_complex = True def _can_hold_element(self, element): @@ -2563,7 +2573,7 @@ def get_data(self, copy=False, columns=None, **kwargs): return self.combine(blocks) def combine(self, blocks): - """ reutrn a new manager with the blocks """ + """ return a new manager with the blocks """ indexer = np.sort(np.concatenate([b.ref_locs for b in blocks])) new_items = self.items.take(indexer) @@ -3491,6 +3501,16 @@ def item_dtypes(self): raise AssertionError('Some items were not in any block') return result + def equals(self, other): + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + self._consolidate_inplace() + 
other._consolidate_inplace() + return all(block.equals(oblock) for block, oblock in + zip(self.blocks, other.blocks)) class SingleBlockManager(BlockManager): @@ -4004,6 +4024,9 @@ def _merge_blocks(blocks, items, dtype=None, _can_consolidate=True): raise AssertionError("_merge_blocks are invalid!") dtype = blocks[0].dtype + if not items.is_unique: + blocks = sorted(blocks, key=lambda b: b.ref_locs.tolist()) + new_values = _vstack([b.values for b in blocks], dtype) new_items = blocks[0].items.append([b.items for b in blocks[1:]]) new_block = make_block(new_values, new_items, items) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index cdc188e52935b..45c9bf5d8c374 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -5,11 +5,10 @@ from nose.tools import assert_equal import numpy as np from pandas.tslib import iNaT, NaT - from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp from pandas import compat from pandas.compat import range, long, lrange, lmap, u -from pandas.core.common import notnull, isnull +from pandas.core.common import notnull, isnull, array_equivalent import pandas.core.common as com import pandas.util.testing as tm import pandas.core.config as cf @@ -167,6 +166,19 @@ def test_downcast_conv(): result = com._possibly_downcast_to_dtype(arr,'infer') tm.assert_almost_equal(result, expected) + +def test_array_equivalent(): + assert array_equivalent(np.array([np.nan, np.nan]), np.array([np.nan, np.nan])) + assert array_equivalent(np.array([np.nan, 1, np.nan]), np.array([np.nan, 1, np.nan])) + assert array_equivalent(np.array([np.nan, None], dtype='object'), + np.array([np.nan, None], dtype='object')) + assert array_equivalent(np.array([np.nan, 1+1j], dtype='complex'), + np.array([np.nan, 1+1j], dtype='complex')) + assert not array_equivalent(np.array([np.nan, 1+1j], dtype='complex'), + np.array([np.nan, 1+2j], dtype='complex')) + assert not array_equivalent(np.array([np.nan, 1, np.nan]), 
np.array([np.nan, 2, np.nan])) + assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e'])) + def test_datetimeindex_from_empty_datetime64_array(): for unit in [ 'ms', 'us', 'ns' ]: idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit)) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 2d72a596dc769..60c51c09915f3 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -812,7 +812,8 @@ def test_metadata_propagation_indiv(self): self.check_metadata(df,result) # resample - df = DataFrame(np.random.randn(1000,2), index=date_range('20130101',periods=1000,freq='s')) + df = DataFrame(np.random.randn(1000,2), + index=date_range('20130101',periods=1000,freq='s')) result = df.resample('1T') self.check_metadata(df,result) @@ -851,6 +852,80 @@ def test_squeeze(self): p4d = tm.makePanel4D().reindex(labels=['label1'],items=['ItemA']) tm.assert_frame_equal(p4d.squeeze(),p4d.ix['label1','ItemA']) + def test_equals(self): + s1 = pd.Series([1, 2, 3], index=[0, 2, 1]) + s2 = s1.copy() + self.assert_(s1.equals(s2)) + + s1[1] = 99 + self.assert_(not s1.equals(s2)) + + # NaNs compare as equal + s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3]) + s2 = s1.copy() + self.assert_(s1.equals(s2)) + + s2[0] = 9.9 + self.assert_(not s1.equals(s2)) + + idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')]) + s1 = Series([1, 2, np.nan], index=idx) + s2 = s1.copy() + self.assert_(s1.equals(s2)) + + # Add object dtype column with nans + index = np.random.random(10) + df1 = DataFrame(np.random.random(10,), index=index, columns=['floats']) + df1['text'] = 'the sky is so blue. 
we could use more chocolate.'.split() + df1['start'] = date_range('2000-1-1', periods=10, freq='T') + df1['end'] = date_range('2000-1-1', periods=10, freq='D') + df1['diff'] = df1['end'] - df1['start'] + df1['bool'] = (np.arange(10) % 3 == 0) + df1.ix[::2] = nan + df2 = df1.copy() + self.assert_(df1['text'].equals(df2['text'])) + self.assert_(df1['start'].equals(df2['start'])) + self.assert_(df1['end'].equals(df2['end'])) + self.assert_(df1['diff'].equals(df2['diff'])) + self.assert_(df1['bool'].equals(df2['bool'])) + self.assert_(df1.equals(df2)) + self.assert_(not df1.equals(object)) + + # different dtype + different = df1.copy() + different['floats'] = different['floats'].astype('float32') + self.assert_(not df1.equals(different)) + + # different index + different_index = -index + different = df2.set_index(different_index) + self.assert_(not df1.equals(different)) + + # different columns + different = df2.copy() + different.columns = df2.columns[::-1] + self.assert_(not df1.equals(different)) + + # DatetimeIndex + index = pd.date_range('2000-1-1', periods=10, freq='T') + df1 = df1.set_index(index) + df2 = df1.copy() + self.assert_(df1.equals(df2)) + + # MultiIndex + df3 = df1.set_index(['text'], append=True) + df2 = df1.set_index(['text'], append=True) + self.assert_(df3.equals(df2)) + + df2 = df1.set_index(['floats'], append=True) + self.assert_(not df3.equals(df2)) + + # NaN in index + df3 = df1.set_index(['floats'], append=True) + df2 = df1.set_index(['floats'], append=True) + self.assert_(df3.equals(df2)) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 701b240479a62..27860b738d161 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -584,6 +584,27 @@ def test_missing_unicode_key(self): except KeyError: pass # this is the expected exception + def test_equals(self): + # unique items 
+ index = Index(list('abcdef')) + block1 = make_block(np.arange(12).reshape(3,4), list('abc'), index) + block2 = make_block(np.arange(12).reshape(3,4)*10, list('def'), index) + block1.ref_items = block2.ref_items = index + bm1 = BlockManager([block1, block2], [index, np.arange(block1.shape[1])]) + bm2 = BlockManager([block2, block1], [index, np.arange(block1.shape[1])]) + self.assert_(bm1.equals(bm2)) + + # non-unique items + index = Index(list('aaabbb')) + block1 = make_block(np.arange(12).reshape(3,4), list('aaa'), index, + placement=[0,1,2]) + block2 = make_block(np.arange(12).reshape(3,4)*10, list('bbb'), index, + placement=[3,4,5]) + block1.ref_items = block2.ref_items = index + bm1 = BlockManager([block1, block2], [index, np.arange(block1.shape[1])]) + bm2 = BlockManager([block2, block1], [index, np.arange(block1.shape[1])]) + self.assert_(bm1.equals(bm2)) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 7b12467575f78..80e33eb1717da 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -436,6 +436,7 @@ def isiterable(obj): def is_sorted(seq): return assert_almost_equal(seq, np.sort(np.array(seq))) +# This could be refactored to use the NDFrame.equals method def assert_series_equal(left, right, check_dtype=True, check_index_type=False, check_series_type=False, @@ -455,7 +456,7 @@ def assert_series_equal(left, right, check_dtype=True, assert_attr_equal('dtype', left.index, right.index) assert_attr_equal('inferred_type', left.index, right.index) - +# This could be refactored to use the NDFrame.equals method def assert_frame_equal(left, right, check_dtype=True, check_index_type=False, check_column_type=False, diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index c3425389684ae..e658ce75247b4 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -370,3 +370,36 @@ def f(K=100): 
frame_dtypes = Benchmark('df.dtypes', setup, start_date=datetime(2012,1,1)) +#---------------------------------------------------------------------- +# equals +setup = common_setup + """ +def make_pair(name): + df = globals()[name] + df2 = df.copy() + df2.ix[-1,-1] = np.nan + return df, df2 + +def test_equal(name): + df, df2 = pairs[name] + return df.equals(df) + +def test_unequal(name): + df, df2 = pairs[name] + return df.equals(df2) + +float_df = DataFrame(np.random.randn(1000, 1000)) +object_df = DataFrame([['foo']*1000]*1000) +nonunique_cols = object_df.copy() +nonunique_cols.columns = ['A']*len(nonunique_cols.columns) + +pairs = dict([(name,make_pair(name)) + for name in ('float_df', 'object_df', 'nonunique_cols')]) +""" +frame_float_equal = Benchmark('test_equal("float_df")', setup) +frame_object_equal = Benchmark('test_equal("object_df")', setup) +frame_nonunique_equal = Benchmark('test_equal("nonunique_cols")', setup) + +frame_float_unequal = Benchmark('test_unequal("float_df")', setup) +frame_object_unequal = Benchmark('test_unequal("object_df")', setup) +frame_nonunique_unequal = Benchmark('test_unequal("nonunique_cols")', setup) +
Also adds `array_equivalent`, which is similar to `np.array_equal` except that it handles object arrays and treats NaNs in corresponding locations as equal. closes https://github.com/pydata/pandas/issues/5183
https://api.github.com/repos/pandas-dev/pandas/pulls/5283
2013-10-20T17:49:45Z
2014-01-24T21:43:37Z
2014-01-24T21:43:37Z
2014-10-01T22:01:28Z
DOC: add some missing entries to api docs
diff --git a/doc/source/api.rst b/doc/source/api.rst index d73a8d3ad7489..20bfe037f7373 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -132,7 +132,11 @@ Data manipulations .. autosummary:: :toctree: generated/ + melt pivot_table + crosstab + cut + qcut merge concat get_dummies @@ -154,6 +158,9 @@ Top-level dealing with datetimes to_datetime to_timedelta + date_range + bdate_range + period_range Top-level evaluation ~~~~~~~~~~~~~~~~~~~~ @@ -175,12 +182,16 @@ Standard moving window functions rolling_median rolling_var rolling_std + rolling_min + rolling_max rolling_corr + rolling_corr_pairwise rolling_cov rolling_skew rolling_kurt rolling_apply rolling_quantile + rolling_window Standard expanding window functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -194,7 +205,10 @@ Standard expanding window functions expanding_median expanding_var expanding_std + expanding_min + expanding_max expanding_corr + expanding_corr_pairwise expanding_cov expanding_skew expanding_kurt @@ -474,6 +488,8 @@ Serialization / IO / Conversion Series.to_csv Series.to_dict Series.to_frame + Series.to_hdf + Series.to_json Series.to_sparse Series.to_string Series.to_clipboard @@ -733,6 +749,7 @@ Serialization / IO / Conversion DataFrame.to_excel DataFrame.to_json DataFrame.to_html + DataFrame.to_latex DataFrame.to_stata DataFrame.to_records DataFrame.to_sparse @@ -933,6 +950,8 @@ Serialization / IO / Conversion Panel.from_dict Panel.to_pickle Panel.to_excel + Panel.to_hdf + Panel.to_json Panel.to_sparse Panel.to_frame Panel.to_clipboard @@ -1039,24 +1058,26 @@ DatetimeIndex Time/Date Components ~~~~~~~~~~~~~~~~~~~~ - * **year** - * **month** - * **day** - * **hour** - * **minute** - * **second** - * **microsecond** - * **nanosecond** - - * **weekofyear** - * **week**: Same as weekofyear - * **dayofweek**: (0=Monday, 6=Sunday) - * **weekday**: (0=Monday, 6=Sunday) - * **dayofyear** - * **quarter** - - * **date**: Returns date component of Timestamps - * **time**: Returns time component 
of Timestamps + +.. autosummary:: + :toctree: generated/ + + DatetimeIndex.year + DatetimeIndex.month + DatetimeIndex.day + DatetimeIndex.hour + DatetimeIndex.minute + DatetimeIndex.second + DatetimeIndex.microsecond + DatetimeIndex.nanosecond + DatetimeIndex.date + DatetimeIndex.time + DatetimeIndex.dayofyear + DatetimeIndex.weekofyear + DatetimeIndex.week + DatetimeIndex.dayofweek + DatetimeIndex.weekday + DatetimeIndex.quarter Selecting
Thanks to @JanSchulz for his notebook.
https://api.github.com/repos/pandas-dev/pandas/pulls/5282
2013-10-20T16:59:55Z
2013-10-20T21:36:23Z
2013-10-20T21:36:23Z
2014-07-16T08:36:12Z
DOC: Added versionadded for "Setting index metadata"
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index a7e907fdd19cc..143f2bd9d8252 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1879,6 +1879,8 @@ if you compute the levels and labels yourself, please be careful. Setting index metadata (``name(s)``, ``levels``, ``labels``) ------------------------------------------------------------ +.. versionadded:: 0.13.0 + .. _indexing.set_metadata: Indexes are "mostly immutable", but it is possible to set and change their
Closes #5279
https://api.github.com/repos/pandas-dev/pandas/pulls/5281
2013-10-20T16:17:39Z
2013-10-20T16:19:02Z
2013-10-20T16:19:02Z
2014-07-12T14:44:36Z
DOC: changed the term "Vern" to "Version" to be clearer
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 33d1219b3b11f..d211b5b59cddd 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -103,8 +103,8 @@ a set of specialized cython routines that are especially fast when dealing with Here is a sample (using 100 column x 100,000 row ``DataFrames``): .. csv-table:: - :header: "Operation", "0.11.0 (ms)", "Prior Vern (ms)", "Ratio to Prior" - :widths: 30, 30, 30, 30 + :header: "Operation", "0.11.0 (ms)", "Prior Version (ms)", "Ratio to Prior" + :widths: 25, 25, 25, 25 :delim: ; ``df1 > df2``; 13.32; 125.35; 0.1063
Second attempt to change a single word
https://api.github.com/repos/pandas-dev/pandas/pulls/5277
2013-10-20T12:33:05Z
2014-01-01T03:20:48Z
2014-01-01T03:20:48Z
2014-07-10T12:11:16Z
CLN: Remove unused ValuesProperty class.
diff --git a/pandas/src/properties.pyx b/pandas/src/properties.pyx index 28e1ecfefc6a8..a8b39d13db2f6 100644 --- a/pandas/src/properties.pyx +++ b/pandas/src/properties.pyx @@ -57,17 +57,3 @@ cdef class AxisProperty(object): def __set__(self, obj, value): obj._set_axis(self.axis, value) - -cdef class ValuesProperty(object): - - def __get__(self, obj, type): - cdef: - ndarray arr = obj - object base - - base = np.get_array_base(arr) - if base is None or not np.PyArray_CheckExact(base): - arr = arr.view(np.ndarray) - else: - arr = base - return arr
https://api.github.com/repos/pandas-dev/pandas/pulls/5276
2013-10-20T05:18:48Z
2013-10-20T22:46:04Z
2013-10-20T22:46:04Z
2014-07-16T08:36:09Z
Added handling for v3 advanced segment ids which aren't just ints as of July 15th
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7171b48f4097a..ed1834f14fc2e 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -186,6 +186,7 @@ Improvements to existing features - DatetimeIndex (and date_range) can now be constructed in a left- or right-open fashion using the ``closed`` parameter (:issue:`4579`) - Python csv parser now supports usecols (:issue:`4335`) + - Added support for Google Analytics v3 API segment IDs that also supports v2 IDs. (:issue:`5271`) API Changes ~~~~~~~~~~~ diff --git a/pandas/io/ga.py b/pandas/io/ga.py index dcbecd74886ac..78c331cda829c 100644 --- a/pandas/io/ga.py +++ b/pandas/io/ga.py @@ -5,6 +5,7 @@ 4. Download JSON secret file and move into same directory as this file """ from datetime import datetime +import re from pandas import compat import numpy as np from pandas import DataFrame @@ -359,7 +360,10 @@ def format_query(ids, metrics, start_date, end_date=None, dimensions=None, [_maybe_add_arg(qry, n, d) for n, d in zip(names, lst)] if isinstance(segment, compat.string_types): - _maybe_add_arg(qry, 'segment', segment, 'dynamic::ga') + if re.match("^[a-zA-Z0-9]+\-*[a-zA-Z0-9]*$", segment): + _maybe_add_arg(qry, 'segment', segment, 'gaid:') + else: + _maybe_add_arg(qry, 'segment', segment, 'dynamic::ga') elif isinstance(segment, int): _maybe_add_arg(qry, 'segment', segment, 'gaid:') elif segment: diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py index a0f4dc45725a3..166917799ca82 100644 --- a/pandas/io/tests/test_ga.py +++ b/pandas/io/tests/test_ga.py @@ -10,8 +10,9 @@ try: import httplib2 + import pandas.io.ga as ga from pandas.io.ga import GAnalytics, read_ga - from pandas.io.auth import AuthenticationConfigError, reset_token_store + from pandas.io.auth import AuthenticationConfigError, reset_default_token_store from pandas.io import auth except ImportError: raise nose.SkipTest("need httplib2 and auth libs") @@ -25,7 +26,7 @@ def test_remove_token_store(self): with 
open(auth.DEFAULT_TOKEN_FILE, 'w') as fh: fh.write('test') - reset_token_store() + reset_default_token_store() self.assert_(not os.path.exists(auth.DEFAULT_TOKEN_FILE)) @slow @@ -98,6 +99,26 @@ def test_iterator(self): except AuthenticationConfigError: raise nose.SkipTest("authentication error") + def test_v2_advanced_segment_format(self): + advanced_segment_id = 1234567 + query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id) + assert query['segment'] == 'gaid::' + str(advanced_segment_id), "An integer value should be formatted as an advanced segment." + + def test_v2_dynamic_segment_format(self): + dynamic_segment_id = 'medium==referral' + query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=dynamic_segment_id) + assert query['segment'] == 'dynamic::ga:' + str(dynamic_segment_id), "A string value with more than just letters and numbers should be formatted as a dynamic segment." + + def test_v3_advanced_segment_common_format(self): + advanced_segment_id = 'aZwqR234' + query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id) + assert query['segment'] == 'gaid::' + str(advanced_segment_id), "A string value with just letters and numbers should be formatted as an advanced segment." + + def test_v3_advanced_segment_weird_format(self): + advanced_segment_id = 'aZwqR234-s1' + query = ga.format_query('google_profile_id', ['visits'], '2013-09-01', segment=advanced_segment_id) + assert query['segment'] == 'gaid::' + str(advanced_segment_id), "A string value with just letters, numbers, and hyphens should be formatted as an advanced segment." + @slow @with_connectivity_check("http://www.google.com") def test_segment(self):
The old int based advanced segment ids are deprecated. The new segment ids are alphanumeric. I know from experimentation that their form includes alphanumeric and alphanumeric+hyphen. Updated code for both cases. Reference: https://developers.google.com/analytics/devguides/reporting/core/v3/changelog
https://api.github.com/repos/pandas-dev/pandas/pulls/5271
2013-10-19T23:02:49Z
2013-10-22T04:06:16Z
2013-10-22T04:06:16Z
2014-06-14T07:09:34Z
CLN: Fix unhelpful "length mismatch" error message on set_axis
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index b4d5c1814a6bc..3ce9789447c81 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1881,8 +1881,8 @@ def set_axis(self, axis, value, maybe_rename=True, check_axis=True): value = _ensure_index(value) if check_axis and len(value) != len(cur_axis): - raise ValueError('Length mismatch (%d vs %d)' - % (len(value), len(cur_axis))) + raise ValueError('Length mismatch: Expected %d elements, got %d elements' + % (len(cur_axis), len(value))) self.axes[axis] = value self._shape = None @@ -3361,8 +3361,8 @@ def set_axis(self, axis, value): value = _ensure_index(value) if len(value) != len(cur_axis): - raise Exception('Length mismatch (%d vs %d)' - % (len(value), len(cur_axis))) + raise ValueError('Length mismatch: Expected %d elements, got %d elements' + % (len(cur_axis), len(value))) self.axes[axis] = value self._shape = None self._block.set_ref_items(self.items, maybe_rename=True)
closes #5269
https://api.github.com/repos/pandas-dev/pandas/pulls/5270
2013-10-19T21:11:25Z
2013-10-23T12:35:47Z
2013-10-23T12:35:47Z
2014-07-24T02:22:25Z
BUG: Fixed issue #5156: segfault on read_csv
diff --git a/doc/source/release.rst b/doc/source/release.rst index de9743bdc705a..c147a83032761 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -183,6 +183,7 @@ Improvements to existing features - ``Series`` now supports a ``to_frame`` method to convert it to a single-column DataFrame (:issue:`5164`) - DatetimeIndex (and date_range) can now be constructed in a left- or right-open fashion using the ``closed`` parameter (:issue:`4579`) + - Python csv parser now supports usecols (:issue:`4335`) API Changes ~~~~~~~~~~~ @@ -625,6 +626,8 @@ Bug Fixes - Fixed bug in Excel writers where frames with duplicate column names weren't written correctly. (:issue:`5235`) - Fixed issue with ``drop`` and a non-unique index on Series (:issue:`5248`) + - Fixed seg fault in C parser caused by passing more names than columns in + the file. (:issue:`5156`) pandas 0.12.0 ------------- diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index b81feec6ab6f8..99a6c630e6ac4 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -1955,6 +1955,15 @@ def test_integer_overflow_bug(self): result = self.read_csv(StringIO(data), header=None, sep='\s+') self.assertTrue(result[0].dtype == np.float64) + def test_catch_too_many_names(self): + # Issue 5156 + data = """\ +1,2,3 +4,,6 +7,8,9 +10,11,12\n""" + tm.assertRaises(Exception, read_csv, StringIO(data), header=0, names=['a', 'b', 'c', 'd']) + class TestPythonParser(ParserTests, unittest.TestCase): def test_negative_skipfooter_raises(self): diff --git a/pandas/parser.pyx b/pandas/parser.pyx index 8625038c57b23..06a1ddfdae025 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -801,7 +801,6 @@ cdef class TextReader: raise StopIteration self._end_clock('Tokenization') - self._start_clock() columns = self._convert_column_data(rows=rows, footer=footer, @@ -840,11 +839,12 @@ cdef class TextReader: def _convert_column_data(self, rows=None, upcast_na=False, footer=0): cdef: 
- Py_ssize_t i, nused, ncols + Py_ssize_t i, nused kh_str_t *na_hashset = NULL int start, end object name, na_flist bint na_filter = 0 + Py_ssize_t num_cols start = self.parser_start @@ -857,6 +857,22 @@ cdef class TextReader: # if footer > 0: # end -= footer + #print >> sys.stderr, self.table_width + #print >> sys.stderr, self.leading_cols + #print >> sys.stderr, self.parser.lines + #print >> sys.stderr, start + #print >> sys.stderr, end + #print >> sys.stderr, self.header + #print >> sys.stderr, "index" + num_cols = -1 + for i in range(self.parser.lines): + num_cols = (num_cols < self.parser.line_fields[i]) * self.parser.line_fields[i] +\ + (num_cols >= self.parser.line_fields[i]) * num_cols + + if self.table_width - self.leading_cols > num_cols: + raise CParserError("Too many columns specified: expected %s and found %s" % + (self.table_width - self.leading_cols, num_cols)) + results = {} nused = 0 for i in range(self.table_width): @@ -1446,7 +1462,6 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end, if na_filter: for i in range(lines): word = COLITER_NEXT(it) - k = kh_get_str(na_hashset, word) # in the hash table if k != na_hashset.n_buckets: @@ -1828,16 +1843,6 @@ cdef _apply_converter(object f, parser_t *parser, int col, return lib.maybe_convert_objects(result) - # if issubclass(values.dtype.type, (np.number, np.bool_)): - # return values - - # # XXX - # na_values = set(['']) - # try: - # return lib.maybe_convert_numeric(values, na_values, False) - # except Exception: - # na_count = lib.sanitize_objects(values, na_values, False) - # return result def _to_structured_array(dict columns, object names): cdef: diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c index 45b8b9263e9cd..da991ec23c373 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -709,7 +709,6 @@ int tokenize_delimited(parser_t *self, size_t line_limit) if (c == '\n') { END_FIELD(); END_LINE(); - /* self->state = 
START_RECORD; */ } else if (c == '\r') { END_FIELD(); self->state = EAT_CRNL; diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h index 69f627dda554c..4e40d892a8b4a 100644 --- a/pandas/src/parser/tokenizer.h +++ b/pandas/src/parser/tokenizer.h @@ -161,7 +161,7 @@ typedef struct parser_t { int *line_start; // position in words for start of line int *line_fields; // Number of fields in each line - int lines; // Number of (good) lines observedb + int lines; // Number of (good) lines observed int file_lines; // Number of file lines observed (including bad or skipped) int lines_cap; // Vector capacity
closes #5156 Our c parser was not checking array bounds. Example: ``` In [1]: from StringIO import StringIO; import pandas as pd In [2]: data = """\ 1,2,3 4,,6 7,8,9 10,11,12\n""" In [3]: df = pd.read_csv(StringIO(data), header=0, names=['a', 'b', 'c', 'd'], engine='c') CParserError: Too many columns specified: expected 4 and found 3 ``` Previously, this would segfault.
https://api.github.com/repos/pandas-dev/pandas/pulls/5268
2013-10-19T20:05:43Z
2013-10-19T21:13:49Z
2013-10-19T21:13:49Z
2014-07-06T15:02:43Z
BUG: union should not try to sort inplace because platform impls differ as to when sorting occurs for objects that cannot be compared
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py index 0275193031a07..e22b6218a2227 100644 --- a/pandas/computation/tests/test_eval.py +++ b/pandas/computation/tests/test_eval.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +import warnings +import operator from itertools import product from distutils.version import LooseVersion @@ -28,7 +30,7 @@ from pandas.util.testing import (assert_frame_equal, randbool, assertRaisesRegexp, assert_produces_warning, assert_series_equal) -from pandas.compat import PY3, u +from pandas.compat import PY3, u, reduce _series_frame_incompatible = _bool_ops_syms _scalar_skip = 'in', 'not in' @@ -699,6 +701,16 @@ def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs): #------------------------------------- # basic and complex alignment +def _is_datetime(x): + return issubclass(x.dtype.type, np.datetime64) + + +def should_warn(*args): + not_mono = not any(map(operator.attrgetter('is_monotonic'), args)) + only_one_dt = reduce(operator.xor, map(_is_datetime, args)) + return not_mono and only_one_dt + + class TestAlignment(object): index_types = 'i', 'u', 'dt' @@ -719,13 +731,20 @@ def check_basic_frame_alignment(self, engine, parser): tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, self.index_types) - for lr_idx_type, rr_idx_type, c_idx_type in args: - df = mkdf(10, 10, data_gen_f=f, r_idx_type=lr_idx_type, - c_idx_type=c_idx_type) - df2 = mkdf(20, 10, data_gen_f=f, r_idx_type=rr_idx_type, - c_idx_type=c_idx_type) - res = pd.eval('df + df2', engine=engine, parser=parser) - assert_frame_equal(res, df + df2) + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + for lr_idx_type, rr_idx_type, c_idx_type in args: + df = mkdf(10, 10, data_gen_f=f, r_idx_type=lr_idx_type, + c_idx_type=c_idx_type) + df2 = mkdf(20, 10, data_gen_f=f, r_idx_type=rr_idx_type, + c_idx_type=c_idx_type) + # only warns if not monotonic and not sortable + if 
should_warn(df.index, df2.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval('df + df2', engine=engine, parser=parser) + else: + res = pd.eval('df + df2', engine=engine, parser=parser) + assert_frame_equal(res, df + df2) def test_basic_frame_alignment(self): for engine, parser in ENGINES_PARSERS: @@ -754,12 +773,20 @@ def check_medium_complex_frame_alignment(self, engine, parser): args = product(self.lhs_index_types, self.index_types, self.index_types, self.index_types) - for r1, c1, r2, c2 in args: - df = mkdf(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) - df2 = mkdf(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) - df3 = mkdf(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) - res = pd.eval('df + df2 + df3', engine=engine, parser=parser) - assert_frame_equal(res, df + df2 + df3) + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + + for r1, c1, r2, c2 in args: + df = mkdf(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) + df2 = mkdf(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + df3 = mkdf(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + if should_warn(df.index, df2.index, df3.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval('df + df2 + df3', engine=engine, + parser=parser) + else: + res = pd.eval('df + df2 + df3', engine=engine, parser=parser) + assert_frame_equal(res, df + df2 + df3) @slow def test_medium_complex_frame_alignment(self): @@ -775,20 +802,24 @@ def testit(r_idx_type, c_idx_type, index_name): index = getattr(df, index_name) s = Series(np.random.randn(5), index[:5]) - res = pd.eval('df + s', engine=engine, parser=parser) + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval('df + s', engine=engine, parser=parser) + else: + res = pd.eval('df + s', engine=engine, parser=parser) + if r_idx_type == 'dt' or c_idx_type == 'dt': - if engine == 'numexpr': - expected = df.add(s) - else: - expected = df 
+ s + expected = df.add(s) if engine == 'numexpr' else df + s else: expected = df + s assert_frame_equal(res, expected) args = product(self.lhs_index_types, self.index_types, ('index', 'columns')) - for r_idx_type, c_idx_type, index_name in args: - testit(r_idx_type, c_idx_type, index_name) + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + for r_idx_type, c_idx_type, index_name in args: + testit(r_idx_type, c_idx_type, index_name) def test_basic_frame_series_alignment(self): for engine, parser in ENGINES_PARSERS: @@ -802,13 +833,14 @@ def testit(r_idx_type, c_idx_type, index_name): c_idx_type=c_idx_type) index = getattr(df, index_name) s = Series(np.random.randn(5), index[:5]) + if should_warn(s.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval('s + df', engine=engine, parser=parser) + else: + res = pd.eval('s + df', engine=engine, parser=parser) - res = pd.eval('s + df', engine=engine, parser=parser) if r_idx_type == 'dt' or c_idx_type == 'dt': - if engine == 'numexpr': - expected = df.add(s) - else: - expected = s + df + expected = df.add(s) if engine == 'numexpr' else s + df else: expected = s + df assert_frame_equal(res, expected) @@ -820,8 +852,10 @@ def testit(r_idx_type, c_idx_type, index_name): # dt with dt args = product(['dt'], ['dt'], ('index', 'columns')) - for r_idx_type, c_idx_type, index_name in args: - testit(r_idx_type, c_idx_type, index_name) + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + for r_idx_type, c_idx_type, index_name in args: + testit(r_idx_type, c_idx_type, index_name) def test_basic_series_frame_alignment(self): for engine, parser in ENGINES_PARSERS: @@ -831,20 +865,29 @@ def check_series_frame_commutativity(self, engine, parser): tm.skip_if_no_ne(engine) args = product(self.lhs_index_types, self.index_types, ('+', '*'), ('index', 'columns')) - for r_idx_type, c_idx_type, op, index_name in args: - df = 
mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, - c_idx_type=c_idx_type) - index = getattr(df, index_name) - s = Series(np.random.randn(5), index[:5]) - lhs = 's {0} df'.format(op) - rhs = 'df {0} s'.format(op) - a = pd.eval(lhs, engine=engine, parser=parser) - b = pd.eval(rhs, engine=engine, parser=parser) + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + for r_idx_type, c_idx_type, op, index_name in args: + df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + index = getattr(df, index_name) + s = Series(np.random.randn(5), index[:5]) + + lhs = 's {0} df'.format(op) + rhs = 'df {0} s'.format(op) + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + a = pd.eval(lhs, engine=engine, parser=parser) + with tm.assert_produces_warning(RuntimeWarning): + b = pd.eval(rhs, engine=engine, parser=parser) + else: + a = pd.eval(lhs, engine=engine, parser=parser) + b = pd.eval(rhs, engine=engine, parser=parser) - if r_idx_type != 'dt' and c_idx_type != 'dt': - if engine == 'numexpr': - assert_frame_equal(a, b) + if r_idx_type != 'dt' and c_idx_type != 'dt': + if engine == 'numexpr': + assert_frame_equal(a, b) def test_series_frame_commutativity(self): for engine, parser in ENGINES_PARSERS: @@ -860,34 +903,41 @@ def check_complex_series_frame_alignment(self, engine, parser): m1 = 5 m2 = 2 * m1 - for r1, r2, c1, c2 in args: - index_name = random.choice(['index', 'columns']) - obj_name = random.choice(['df', 'df2']) - - df = mkdf(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) - df2 = mkdf(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) - index = getattr(locals().get(obj_name), index_name) - s = Series(np.random.randn(n), index[:n]) - - if r2 == 'dt' or c2 == 'dt': - if engine == 'numexpr': - expected2 = df2.add(s) + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + for r1, r2, c1, c2 in args: + index_name = 
random.choice(['index', 'columns']) + obj_name = random.choice(['df', 'df2']) + + df = mkdf(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) + df2 = mkdf(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + index = getattr(locals().get(obj_name), index_name) + s = Series(np.random.randn(n), index[:n]) + + if r2 == 'dt' or c2 == 'dt': + if engine == 'numexpr': + expected2 = df2.add(s) + else: + expected2 = df2 + s else: expected2 = df2 + s - else: - expected2 = df2 + s - if r1 == 'dt' or c1 == 'dt': - if engine == 'numexpr': - expected = expected2.add(df) + if r1 == 'dt' or c1 == 'dt': + if engine == 'numexpr': + expected = expected2.add(df) + else: + expected = expected2 + df else: expected = expected2 + df - else: - expected = expected2 + df - res = pd.eval('df2 + s + df', engine=engine, parser=parser) - tm.assert_equal(res.shape, expected.shape) - assert_frame_equal(res, expected) + if should_warn(df2.index, s.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval('df2 + s + df', engine=engine, + parser=parser) + else: + res = pd.eval('df2 + s + df', engine=engine, parser=parser) + tm.assert_equal(res.shape, expected.shape) + assert_frame_equal(res, expected) @slow def test_complex_series_frame_alignment(self): diff --git a/pandas/core/index.py b/pandas/core/index.py index 10e5558e12542..148fa5ecd8dad 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,5 +1,6 @@ # pylint: disable=E1101,E1103,W0232 import datetime +import warnings from functools import partial import warnings from pandas.compat import range, zip, lrange, lzip, u, reduce @@ -997,29 +998,36 @@ def union(self, other): result.extend([x for x in other.values if x not in value_set]) else: indexer = self.get_indexer(other) - indexer = (indexer == -1).nonzero()[0] + indexer, = (indexer == -1).nonzero() if len(indexer) > 0: other_diff = com.take_nd(other.values, indexer, allow_fill=False) result = com._concat_compat((self.values, other_diff)) + try: + 
self.values[0] < other_diff[0] + except TypeError as e: + warnings.warn("%s, sort order is undefined for " + "incomparable objects" % e, RuntimeWarning) + else: result.sort() - except Exception: - pass + else: - # contained in + result = self.values + try: - result = np.sort(self.values) - except TypeError: # pragma: no cover - result = self.values + result = np.sort(result) + except TypeError as e: + warnings.warn("%s, sort order is undefined for " + "incomparable objects" % e, RuntimeWarning) # for subclasses return self._wrap_union_result(other, result) def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None - return type(self)(data=result, name=name) + return self.__class__(data=result, name=name) def intersection(self, other): """ diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 9681a606c7c57..5def2039c5ee8 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -2,6 +2,7 @@ from datetime import datetime, timedelta from pandas.compat import range, lrange, lzip, u, zip +import sys import operator import pickle import re @@ -15,7 +16,6 @@ from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex, InvalidIndexError) from pandas.tseries.index import DatetimeIndex -from pandas.core.frame import DataFrame from pandas.core.series import Series from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp, assert_copy) @@ -844,11 +844,13 @@ def test_slice_keep_name(self): self.assertEqual(idx.name, idx[1:].name) def test_join_self(self): + # instance attributes of the form self.<name>Index indices = 'unicode', 'str', 'date', 'int', 'float' kinds = 'outer', 'inner', 'left', 'right' for index_kind in indices: + res = getattr(self, '{0}Index'.format(index_kind)) + for kind in kinds: - res = getattr(self, '{0}Index'.format(index_kind)) joined = res.join(res, how=kind) self.assertIs(res, joined) @@ -860,6 +862,17 @@ def 
test_indexing_doesnt_change_class(self): self.assertTrue(idx[[0,1]].identical( pd.Index([1, 2], dtype=np.object_))) + def test_outer_join_sort(self): + left_idx = Index(np.random.permutation(15)) + right_idx = tm.makeDateIndex(10) + + with tm.assert_produces_warning(RuntimeWarning): + joined = left_idx.join(right_idx, how='outer') + # right_idx in this case because DatetimeIndex has join precedence over + # Int64Index + expected = right_idx.astype(object).union(left_idx.astype(object)) + tm.assert_index_equal(joined, expected) + class TestFloat64Index(tm.TestCase): _multiprocess_can_split_ = True @@ -2765,6 +2778,7 @@ def test_get_combined_index(): assert(result.equals(Index([]))) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
closes #5039
https://api.github.com/repos/pandas-dev/pandas/pulls/5266
2013-10-19T16:49:01Z
2014-06-03T13:36:59Z
2014-06-03T13:36:59Z
2014-06-15T16:56:32Z
TST: test_fast.sh and test_multi.sh should skip network tests
diff --git a/test_fast.sh b/test_fast.sh index 2ae736e952839..b390705f901ad 100755 --- a/test_fast.sh +++ b/test_fast.sh @@ -1 +1 @@ -nosetests -A "not slow" pandas --with-id $* +nosetests -A "not slow and not network" pandas --with-id $* diff --git a/test_multi.sh b/test_multi.sh index 550f86bdb7b7b..5d77945c66a26 100755 --- a/test_multi.sh +++ b/test_multi.sh @@ -1 +1 @@ -nosetests -A "not slow" pandas --processes=4 $* +nosetests -A "not slow and not network" pandas --processes=4 $*
https://api.github.com/repos/pandas-dev/pandas/pulls/5265
2013-10-19T16:44:20Z
2013-10-19T19:38:22Z
2013-10-19T19:38:22Z
2014-07-16T08:35:57Z
BUG: Pagination in pandas.io.gbq
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt index 60a8b57e72907..751d034ef97f5 100644 --- a/ci/requirements-2.6.txt +++ b/ci/requirements-2.6.txt @@ -4,4 +4,4 @@ python-dateutil==1.5 pytz==2013b http://www.crummy.com/software/BeautifulSoup/bs4/download/4.2/beautifulsoup4-4.2.0.tar.gz html5lib==1.0b2 -bigquery==2.0.15 +bigquery==2.0.17 diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index fe27fe10f7c04..3b786152cd653 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -18,4 +18,4 @@ MySQL-python==1.2.4 scipy==0.10.0 beautifulsoup4==4.2.1 statsmodels==0.5.0 -bigquery==2.0.15 +bigquery==2.0.17 diff --git a/ci/requirements-2.7_LOCALE.txt b/ci/requirements-2.7_LOCALE.txt index f037cbed15160..b18bff6797840 100644 --- a/ci/requirements-2.7_LOCALE.txt +++ b/ci/requirements-2.7_LOCALE.txt @@ -16,4 +16,4 @@ lxml==3.2.1 scipy==0.10.0 beautifulsoup4==4.2.1 statsmodels==0.5.0 -bigquery==2.0.15 +bigquery==2.0.17 diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 931aa732d5286..2d490ec071b4e 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -7,6 +7,8 @@ import csv import logging from datetime import datetime +import pkg_resources +from distutils.version import LooseVersion import pandas as pd import numpy as np @@ -19,6 +21,13 @@ import bigquery_client import gflags as flags _BQ_INSTALLED = True + + _BQ_VERSION = pkg_resources.get_distribution('bigquery').version + if LooseVersion(_BQ_VERSION) >= '2.0.17': + _BQ_VALID_VERSION = True + else: + _BQ_VALID_VERSION = False + except ImportError: _BQ_INSTALLED = False @@ -102,6 +111,9 @@ def _parse_entry(field_value, field_type): field_value = np.datetime64(timestamp) elif field_type == 'BOOLEAN': field_value = field_value == 'true' + # Note that results are unicode, so this will + # fail for non-ASCII characters.. 
this probably + # functions differently in Python 3 else: field_value = str(field_value) return field_value @@ -228,68 +240,76 @@ def _parse_data(client, job, index_col=None, col_order=None): # Iterate over the result rows. # Since Google's API now requires pagination of results, # we do that here. The following is repurposed from - # bigquery_client.py :: Client.ReadTableRows() + # bigquery_client.py :: Client._JobTableReader._ReadOnePage + + # TODO: Enable Reading From Table, see Client._TableTableReader._ReadOnePage # Initially, no page token is set page_token = None - # Most of Google's client API's allow one to set total_rows in case - # the user only wants the first 'n' results from a query. Typically - # they set this to sys.maxint by default, but this caused problems - # during testing - specifically on OS X. It appears that at some - # point in bigquery_client.py, there is an attempt to cast this value - # to an unsigned integer. Depending on the python install, - # sys.maxint may exceed the limitations of unsigned integers. - # - # See: - # https://code.google.com/p/google-bigquery-tools/issues/detail?id=14 - - # This is hardcoded value for 32bit sys.maxint per - # the above note. Theoretically, we could simply use - # 100,000 (or whatever the current max page size is), - # but this is more flexible in the event of an API change - total_rows = 2147483647 - - # Keep track of rows read - row_count = 0 + # This number is the current max results per page + max_rows = bigquery_client._MAX_ROWS_PER_REQUEST + + # How many rows in result set? Initialize to max_rows + total_rows = max_rows + + # This is the starting row for a particular page... 
+ # is ignored if page_token is present, though + # it may be useful if we wish to implement SQL like LIMITs + # with minimums + start_row = 0 # Keep our page DataFrames until the end when we # concatentate them dataframe_list = list() - # Iterate over all rows - while row_count < total_rows: - data = client.apiclient.tabledata().list(maxResults=total_rows - row_count, - pageToken=page_token, - **table_dict).execute() + current_job = job['jobReference'] - # If there are more results than will fit on a page, - # you will recieve a token for the next page. - page_token = data.get('pageToken', None) + # Iterate over all rows + while start_row < total_rows: + # Setup the parameters for getQueryResults() API Call + kwds = dict(current_job) + kwds['maxResults'] = max_rows + # Sets the timeout to 0 because we assume the table is already ready. + # This is because our previous call to Query() is synchronous + # and will block until it's actually done + kwds['timeoutMs'] = 0 + # Use start row if there's no page_token ... in other words, the + # user requested to start somewhere other than the beginning... + # presently this is not a parameter to read_gbq(), but it will be + # added eventually. + if page_token: + kwds['pageToken'] = page_token + else: + kwds['startIndex'] = start_row + data = client.apiclient.jobs().getQueryResults(**kwds).execute() + if not data['jobComplete']: + raise BigqueryError('Job was not completed, or was invalid') # How many rows are there across all pages? 
- total_rows = min(total_rows, int(data['totalRows'])) # Changed to use get(data[rows],0) + # Note: This is presently the only reason we don't just use + # _ReadOnePage() directly + total_rows = int(data['totalRows']) + + page_token = data.get('pageToken', None) raw_page = data.get('rows', []) page_array = _parse_page(raw_page, col_names, col_types, col_dtypes) - row_count += len(page_array) + start_row += len(raw_page) if total_rows > 0: - completed = (100 * row_count) / total_rows - logger.info('Remaining Rows: ' + str(total_rows - row_count) + '(' + str(completed) + '% Complete)') + completed = (100 * start_row) / total_rows + logger.info('Remaining Rows: ' + str(total_rows - start_row) + '(' + str(completed) + '% Complete)') else: logger.info('No Rows') dataframe_list.append(DataFrame(page_array)) - # Handle any exceptions that might have occured - if not page_token and row_count != total_rows: + # Did we get enough rows? Note: gbq.py stopped checking for this + # but we felt it was still a good idea. + if not page_token and not raw_page and start_row != total_rows: raise bigquery_client.BigqueryInterfaceError( - 'PageToken missing for %r' % ( - bigquery_client.ApiClientHelper.TableReference.Create(**table_dict),)) - if not raw_page and row_count != total_rows: - raise bigquery_client.BigqueryInterfaceError( - 'Not enough rows returned by server for %r' % ( - bigquery_client.ApiClientHelper.TableReference.Create(**table_dict),)) + ("Not enough rows returned by server. 
Expected: {0}" + \ + " Rows, But Recieved {1}").format(total_rows, start_row)) # Build final dataframe final_df = concat(dataframe_list, ignore_index=True) @@ -355,6 +375,10 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists= else: raise ImportError('Could not import Google BigQuery Client.') + if not _BQ_VALID_VERSION: + raise ImportError("pandas requires bigquery >= 2.0.17 for Google BigQuery " + "support, current version " + _BQ_VERSION) + ALLOWED_TYPES = ['STRING', 'INTEGER', 'FLOAT', 'BOOLEAN', 'TIMESTAMP', 'RECORD'] if if_exists == 'replace' and schema is None: @@ -456,6 +480,10 @@ def read_gbq(query, project_id = None, destination_table = None, index_col=None, else: raise ImportError('Could not import Google BigQuery Client.') + if not _BQ_VALID_VERSION: + raise ImportError("pandas requires bigquery >= 2.0.17 for Google BigQuery " + "support, current version " + _BQ_VERSION) + query_args = kwargs query_args['project_id'] = project_id query_args['query'] = query diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 89b048d472d5f..f56c1aa042421 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -40,20 +40,21 @@ def GetTableSchema(self,table_dict): # Fake Google BigQuery API Client class FakeApiClient: def __init__(self): - self._tabledata = FakeTableData() + self._fakejobs = FakeJobs() - def tabledata(self): - return self._tabledata + def jobs(self): + return self._fakejobs -class FakeTableData: +class FakeJobs: def __init__(self): - self._list = FakeList() + self._fakequeryresults = FakeResults() - def list(self,maxResults = None, pageToken = None, **table_dict): - return self._list + def getQueryResults(self, job_id=None, project_id=None, + max_results=None, timeout_ms=None, **kwargs): + return self._fakequeryresults -class FakeList: +class FakeResults: def execute(self): return {'rows': [ {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brave'}, {'v': '3'}]}, {'f': [{'v': 
'othello'}, {'v': '1603'}, {'v': 'attended'}, {'v': '1'}]}, @@ -68,7 +69,8 @@ def execute(self): ], 'kind': 'bigquery#tableDataList', 'etag': '"4PTsVxg68bQkQs1RJ1Ndewqkgg4/hoRHzb4qfhJAIa2mEewC-jhs9Bg"', - 'totalRows': '10'} + 'totalRows': '10', + 'jobComplete' : True} #################################################################################### @@ -225,16 +227,16 @@ def test_column_order_plus_index(self): correct_frame_small = DataFrame(correct_frame_small)[col_order] tm.assert_index_equal(result_frame.columns, correct_frame_small.columns) - # @with_connectivity_check - # def test_download_dataset_larger_than_100k_rows(self): - # # Test for known BigQuery bug in datasets larger than 100k rows - # # http://stackoverflow.com/questions/19145587/bq-py-not-paging-results - # if not os.path.exists(self.bq_token): - # raise nose.SkipTest('Skipped because authentication information is not available.') + @with_connectivity_check + def test_download_dataset_larger_than_100k_rows(self): + # Test for known BigQuery bug in datasets larger than 100k rows + # http://stackoverflow.com/questions/19145587/bq-py-not-paging-results + if not os.path.exists(self.bq_token): + raise nose.SkipTest('Skipped because authentication information is not available.') - # client = gbq._authenticate() - # a = gbq.read_gbq("SELECT id, FROM [publicdata:samples.wikipedia] LIMIT 100005") - # self.assertTrue(len(a) == 100005) + client = gbq._authenticate() + a = gbq.read_gbq("SELECT id, FROM [publicdata:samples.wikipedia] LIMIT 100005") + self.assertTrue(len(a) == 100005) @with_connectivity_check def test_download_all_data_types(self):
In light of some last minute API changes in Google BigQuery, I have updated our code to function properly. In particular, this fixes a known bug that limited result sets to 10,000. Hopefully we'll have an entirely new version soon that will make better use of Google's reference code and thus be more future proof. Note that bigquery v2.0.17 is new as of today... 2.0.16 tested fine, but they fixed a few important backend things so we went ahead and made the change mandatory in light of the pandas release candidate. See: closes https://github.com/pydata/pandas/issues/5255
https://api.github.com/repos/pandas-dev/pandas/pulls/5262
2013-10-19T01:16:06Z
2013-10-21T18:00:12Z
2013-10-21T18:00:12Z
2014-07-16T08:35:55Z
DOC: changed the term `Vern` to `Versions` to be clearer
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 33d1219b3b11f..390fab887ed8d 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -103,8 +103,8 @@ a set of specialized cython routines that are especially fast when dealing with Here is a sample (using 100 column x 100,000 row ``DataFrames``): .. csv-table:: - :header: "Operation", "0.11.0 (ms)", "Prior Vern (ms)", "Ratio to Prior" - :widths: 30, 30, 30, 30 + :header: "Operation", "0.11.0 (ms)", "Prior Version (ms)", "Ratio to Prior" + :widths: 25, 25, 25, 25 :delim: ; ``df1 > df2``; 13.32; 125.35; 0.1063 @@ -1040,12 +1040,6 @@ Methods like ``contains``, ``startswith``, and ``endswith`` takes an extra ``cat``,Concatenate strings ``split``,Split strings on delimiter - ``get``,Index into each element (retrieve i-th element) - ``join``,Join strings in each element of the Series with passed separator - ``contains``,Return boolean array if each string contains pattern/regex - ``replace``,Replace occurrences of pattern/regex with some other string - ``repeat``,Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) - ``pad``,"Add whitespace to left, right, or both sides of strings" ``center``,Equivalent to ``pad(side='both')`` ``slice``,Slice each string in the Series ``slice_replace``,Replace slice in each string with passed value
based on this SO question: http://stackoverflow.com/q/19438309/1301710
https://api.github.com/repos/pandas-dev/pandas/pulls/5258
2013-10-18T05:51:55Z
2013-10-20T12:12:22Z
null
2014-07-02T17:29:56Z
DOC/TST: core/generic/drop doc-string and tests
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e0559a27463d4..fc220c7cbefa6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1092,7 +1092,7 @@ def drop(self, labels, axis=0, level=None): Parameters ---------- - labels : list-like + labels : list-like; passing a string or tuple will be treated as a single index label axis : int level : int or name, default None For MultiIndex diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 22bdb66b715ef..9292dba651421 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1440,6 +1440,11 @@ def test_drop(self): result = s.drop('one') assert_series_equal(result,expected) + # single string/tuple-like + s = Series(range(3),index=list('abc')) + self.assertRaises(ValueError, s.drop, 'bc') + self.assertRaises(ValueError, s.drop, ('a',)) + def test_ix_setitem(self): inds = self.series.index[[3, 4, 7]]
https://api.github.com/repos/pandas-dev/pandas/pulls/5253
2013-10-17T14:18:36Z
2013-10-17T14:31:21Z
2013-10-17T14:31:21Z
2014-07-16T08:35:50Z
DOC: core/generic/drop docs from array-like to list-like
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fcbceecf9a19b..e0559a27463d4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1092,7 +1092,7 @@ def drop(self, labels, axis=0, level=None): Parameters ---------- - labels : array-like + labels : list-like axis : int level : int or name, default None For MultiIndex @@ -1103,8 +1103,6 @@ def drop(self, labels, axis=0, level=None): """ axis_name = self._get_axis_name(axis) axis, axis_ = self._get_axis(axis), axis - if not is_list_like(labels): - labels = [ labels ] if axis.is_unique: if level is not None: @@ -1121,6 +1119,7 @@ def drop(self, labels, axis=0, level=None): return dropped else: + labels = com._index_labels_to_array(labels) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex')
related #5248
https://api.github.com/repos/pandas-dev/pandas/pulls/5252
2013-10-17T13:08:01Z
2013-10-17T13:53:10Z
2013-10-17T13:53:10Z
2014-06-25T08:44:44Z
API: allow Series/Panel dropna to accept other args for compat with DataFrame (GH5233/GH5250)
diff --git a/doc/source/release.rst b/doc/source/release.rst index e3eec9690e487..de9743bdc705a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -409,6 +409,8 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` (:issue:`5189`, related :issue:`5004`) - ``MultiIndex`` constructor now validates that passed levels and labels are compatible. (:issue:`5213`, :issue:`5214`) + - Unity ``dropna`` for Series/DataFrame signature (:issue:`5250`), + tests from :issue:`5234`, courtesy of @rockg .. _release.bug_fixes-0.13.0: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d2d5776c4a67d..ece38e18e3688 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2459,8 +2459,6 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None): return result axis = self._get_axis_number(axis) - if axis not in (0, 1): # pragma: no cover - raise AssertionError('axis must be 0 or 1') agg_axis = 1 - axis agg_obj = self diff --git a/pandas/core/panel.py b/pandas/core/panel.py index a86c186e26b53..f35070c634aa1 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -615,7 +615,7 @@ def _reindex_multi(self, axes, copy, fill_value): return Panel(new_values, items=new_items, major_axis=new_major, minor_axis=new_minor) - def dropna(self, axis=0, how='any'): + def dropna(self, axis=0, how='any', **kwargs): """ Drop 2D from panel, holding passed axis constant diff --git a/pandas/core/series.py b/pandas/core/series.py index 526355b0f4dc3..11033893b0b93 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2178,7 +2178,7 @@ def to_csv(self, path, index=True, sep=",", na_rep='', index_label=index_label, mode=mode, nanRep=nanRep, encoding=encoding, date_format=date_format) - def dropna(self): + def dropna(self, axis=0, **kwargs): """ Return Series without null values @@ -2186,6 +2186,7 @@ def dropna(self): ------- valid : Series """ + axis = self._get_axis_number(axis or 0) return remove_na(self) valid = lambda self: self.dropna() 
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 4d8b2578426ec..52d536fc16d37 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -569,11 +569,12 @@ def cumsum(self, axis=0, dtype=None, out=None): return self._constructor(new_array, index=self.index, sparse_index=new_array.sp_index).__finalize__(self) return Series(new_array, index=self.index).__finalize__(self) - def dropna(self): + def dropna(self, axis=0, **kwargs): """ Analogous to Series.dropna. If fill_value=NaN, returns a dense Series """ # TODO: make more efficient + axis = self._get_axis_number(axis or 0) dense_valid = self.to_dense().valid() if isnull(self.fill_value): return dense_valid diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index df2f545c90b92..69a101021f27d 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -379,6 +379,9 @@ def test_series_rhs(self): expected = ols(y=y, x={'x': x}) assert_series_equal(model.beta, expected.beta) + # GH 5233/5250 + assert_series_equal(model.y_predict, model.predict(x=x)) + def test_various_attributes(self): # just make sure everything "works". test correctness elsewhere diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index eb8969db9d15e..22bdb66b715ef 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3550,6 +3550,9 @@ def test_dropna_empty(self): s = Series([]) self.assert_(len(s.dropna()) == 0) + # invalid axis + self.assertRaises(ValueError, s.dropna, axis=1) + def test_drop_duplicates(self): s = Series([1, 2, 3, 3])
closes #5233 closes #5234 closes #5250
https://api.github.com/repos/pandas-dev/pandas/pulls/5251
2013-10-17T12:42:10Z
2013-10-17T13:07:34Z
2013-10-17T13:07:34Z
2014-07-16T08:35:47Z
BUG: fixed issue with drop on a non-unique index with Series (GH5248)
diff --git a/doc/source/release.rst b/doc/source/release.rst index cc9aa4bbc64ff..886a0a62b4068 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -621,6 +621,7 @@ Bug Fixes non-business date. (:issue:`5203`) - Fixed bug in Excel writers where frames with duplicate column names weren't written correctly. (:issue:`5235`) + - Fixed issue with ``drop`` and a non-unique index on Series (:issue:`5248`) pandas 0.12.0 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bfc086b09730e..d2d5776c4a67d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -672,9 +672,9 @@ def to_dict(self, outtype='dict'): raise ValueError("outtype %s not understood" % outtype) def to_gbq(self, destination_table, schema=None, col_order=None, if_exists='fail', **kwargs): - """Write a DataFrame to a Google BigQuery table. - - If the table exists, the DataFrame will be appended. If not, a new table + """Write a DataFrame to a Google BigQuery table. + + If the table exists, the DataFrame will be appended. If not, a new table will be created, in which case the schema will have to be specified. By default, rows will be written in the order they appear in the DataFrame, though the user may specify an alternative order. @@ -2233,33 +2233,6 @@ def rename(self, index=None, columns=None, **kwargs): return super(DataFrame, self).rename(index=index, columns=columns, **kwargs) - def reindex_like(self, other, method=None, copy=True, limit=None, - fill_value=NA): - """ - Reindex DataFrame to match indices of another DataFrame, optionally - with filling logic - - Parameters - ---------- - other : DataFrame - method : string or None - copy : boolean, default True - limit : int, default None - Maximum size gap to forward or backward fill - - Notes - ----- - Like calling s.reindex(index=other.index, columns=other.columns, - method=...) 
- - Returns - ------- - reindexed : DataFrame - """ - return self.reindex(index=other.index, columns=other.columns, - method=method, copy=copy, limit=limit, - fill_value=fill_value) - def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 266253e05ed61..fcbceecf9a19b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1103,6 +1103,8 @@ def drop(self, labels, axis=0, level=None): """ axis_name = self._get_axis_name(axis) axis, axis_ = self._get_axis(axis), axis + if not is_list_like(labels): + labels = [ labels ] if axis.is_unique: if level is not None: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index d32bf166ddea1..0bc0afaf255f2 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -581,9 +581,11 @@ def _multi_take_opportunity(self, tup): return False # just too complicated - for ax in self.obj._data.axes: + for indexer, ax in zip(tup,self.obj._data.axes): if isinstance(ax, MultiIndex): return False + elif com._is_bool_indexer(indexer): + return False return True diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index d74ea8a5d2ffc..3f5eef8c04f7d 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3175,9 +3175,11 @@ def check(result, expected=None): # drop df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a']) - df = df.drop(['a'],axis=1) + result = df.drop(['a'],axis=1) expected = DataFrame([[1],[1],[1]],columns=['bar']) - check(df,expected) + check(result,expected) + result = df.drop('a',axis=1) + check(result,expected) # describe df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64') diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 645533d5629d2..eb8969db9d15e 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1415,6 +1415,31 @@ def test_mask(self): rs = 
s.where(cond, np.nan) assert_series_equal(rs, s.mask(~cond)) + def test_drop(self): + + # unique + s = Series([1,2],index=['one','two']) + expected = Series([1],index=['one']) + result = s.drop(['two']) + assert_series_equal(result,expected) + result = s.drop('two') + assert_series_equal(result,expected) + + # non-unique + # GH 5248 + s = Series([1,1,2],index=['one','two','one']) + expected = Series([1,2],index=['one','one']) + result = s.drop(['two']) + assert_series_equal(result,expected) + result = s.drop('two') + assert_series_equal(result,expected) + + expected = Series([1],index=['two']) + result = s.drop(['one']) + assert_series_equal(result,expected) + result = s.drop('one') + assert_series_equal(result,expected) + def test_ix_setitem(self): inds = self.series.index[[3, 4, 7]]
closes #5248
https://api.github.com/repos/pandas-dev/pandas/pulls/5249
2013-10-17T11:30:50Z
2013-10-17T11:59:24Z
2013-10-17T11:59:24Z
2014-06-25T10:44:46Z
ENH: Add inplace option to drop and dropna
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6227fedda11d4..f23852885668a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -200,6 +200,11 @@ Improvements to existing features argument. (:issue:`5354`) - Added short docstrings to a few methods that were missing them + fixed the docstrings for Panel flex methods. (:issue:`5336`) + - ``NDFrame.drop()``, ``NDFrame.dropna()``, and ``.drop_duplicates()`` all + accept ``inplace`` as a kewyord argument; however, this only means that the + wrapper is updated inplace, a copy is still made internally. + (:issue:`1960`, :issue:`5247`, and related :issue:`2325` [still not + closed]) API Changes ~~~~~~~~~~~ @@ -474,6 +479,9 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - Unity ``dropna`` for Series/DataFrame signature (:issue:`5250`), tests from :issue:`5234`, courtesy of @rockg - Rewrite assert_almost_equal() in cython for performance (:issue:`4398`) + - Added an internal ``_update_inplace`` method to facilitate updating + ``NDFrame`` wrappers on inplace ops (only is for convenience of caller, + doesn't actually prevent copies). (:issue:`5247`) .. _release.bug_fixes-0.13.0: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 690ac7c3e76c9..2361c6920985b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2421,7 +2421,7 @@ def _maybe_cast(values, labels=None): #---------------------------------------------------------------------- # Reindex-based selection methods - def dropna(self, axis=0, how='any', thresh=None, subset=None): + def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Return object with labels on given axis omitted where alternately any or all of the data are missing @@ -2438,6 +2438,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None): subset : array-like Labels along other axis to consider, e.g. 
if you are dropping rows these would be a list of columns to include + inplace : bool, defalt False + If True, do operation inplace and return None. Returns ------- @@ -2448,31 +2450,36 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None): for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) - return result - - axis = self._get_axis_number(axis) - agg_axis = 1 - axis - - agg_obj = self - if subset is not None: - agg_axis_name = self._get_axis_name(agg_axis) - agg_obj = self.reindex(**{agg_axis_name: subset}) + else: + axis = self._get_axis_number(axis) + agg_axis = 1 - axis + + agg_obj = self + if subset is not None: + agg_axis_name = self._get_axis_name(agg_axis) + agg_obj = self.reindex(**{agg_axis_name: subset}) + + count = agg_obj.count(axis=agg_axis) + + if thresh is not None: + mask = count >= thresh + elif how == 'any': + mask = count == len(agg_obj._get_axis(agg_axis)) + elif how == 'all': + mask = count > 0 + else: + if how is not None: + raise ValueError('invalid how option: %s' % how) + else: + raise TypeError('must specify how or thresh') - count = agg_obj.count(axis=agg_axis) + result = self.take(mask.nonzero()[0], axis=axis, convert=False) - if thresh is not None: - mask = count >= thresh - elif how == 'any': - mask = count == len(agg_obj._get_axis(agg_axis)) - elif how == 'all': - mask = count > 0 + if inplace: + self._update_inplace(result) else: - if how is not None: - raise ValueError('invalid how option: %s' % how) - else: - raise TypeError('must specify how or thresh') + return result - return self.take(mask.nonzero()[0], axis=axis, convert=False) def drop_duplicates(self, cols=None, take_last=False, inplace=False): """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a48488f57e833..b230df7483760 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1101,7 +1101,7 @@ def reindex_like(self, other, method=None, copy=True, limit=None): d = 
other._construct_axes_dict(method=method) return self.reindex(**d) - def drop(self, labels, axis=0, level=None): + def drop(self, labels, axis=0, level=None, inplace=False, **kwargs): """ Return new object with labels in requested axis removed @@ -1111,6 +1111,8 @@ def drop(self, labels, axis=0, level=None): axis : int or axis name level : int or name, default None For MultiIndex + inplace : bool, default False + If True, do operation inplace and return None. Returns ------- @@ -1132,7 +1134,7 @@ def drop(self, labels, axis=0, level=None): dropped.axes[axis_].set_names(axis.names, inplace=True) except AttributeError: pass - return dropped + result = dropped else: labels = com._index_labels_to_array(labels) @@ -1147,7 +1149,20 @@ def drop(self, labels, axis=0, level=None): slicer = [slice(None)] * self.ndim slicer[self._get_axis_number(axis_name)] = indexer - return self.ix[tuple(slicer)] + result = self.ix[tuple(slicer)] + + if inplace: + self._update_inplace(result) + else: + return result + + def _update_inplace(self, result): + "replace self internals with result." + # NOTE: This does *not* call __finalize__ and that's an explicit + # decision that we may revisit in the future. + self._reset_cache() + self._data = result._data + self._maybe_update_cacher() def add_prefix(self, prefix): """ diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 04ace84cace37..d4ba7dd4e708a 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -615,7 +615,7 @@ def _reindex_multi(self, axes, copy, fill_value): return Panel(new_values, items=new_items, major_axis=new_major, minor_axis=new_minor) - def dropna(self, axis=0, how='any', **kwargs): + def dropna(self, axis=0, how='any', inplace=False, **kwargs): """ Drop 2D from panel, holding passed axis constant @@ -627,6 +627,8 @@ def dropna(self, axis=0, how='any', **kwargs): how : {'all', 'any'}, default 'any' 'any': one or more values are NA in the DataFrame along the axis. For 'all' they all must be. 
+ inplace : bool, default False + If True, do operation inplace and return None. Returns ------- @@ -648,7 +650,11 @@ def dropna(self, axis=0, how='any', **kwargs): cond = mask == per_slice new_ax = self._get_axis(axis)[cond] - return self.reindex_axis(new_ax, axis=axis) + result = self.reindex_axis(new_ax, axis=axis) + if inplace: + self._update_inplace(result) + else: + return result def _combine(self, other, func, axis=0): if isinstance(other, Panel): diff --git a/pandas/core/series.py b/pandas/core/series.py index 572f0c44d0bc9..699dc9b31464e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1155,7 +1155,7 @@ def nunique(self): """ return len(self.value_counts()) - def drop_duplicates(self, take_last=False): + def drop_duplicates(self, take_last=False, inplace=False): """ Return Series with duplicate values removed @@ -1163,13 +1163,20 @@ def drop_duplicates(self, take_last=False): ---------- take_last : boolean, default False Take the last observed index in a group. Default first + inplace : boolean, default False + If True, performs operation inplace and returns None. Returns ------- deduplicated : Series """ duplicated = self.duplicated(take_last=take_last) - return self[-duplicated] + result = self[-duplicated] + if inplace: + return self._update_inplace(result) + else: + return result + def duplicated(self, take_last=False): """ @@ -2190,18 +2197,25 @@ def to_csv(self, path, index=True, sep=",", na_rep='', index_label=index_label, mode=mode, nanRep=nanRep, encoding=encoding, date_format=date_format) - def dropna(self, axis=0, **kwargs): + def dropna(self, axis=0, inplace=False, **kwargs): """ Return Series without null values Returns ------- valid : Series + inplace : bool (default False) + Do operation in place. 
""" axis = self._get_axis_number(axis or 0) - return remove_na(self) + result = remove_na(self) + if inplace: + self._update_inplace(result) + else: + return result - valid = lambda self: self.dropna() + valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace, + **kwargs) def first_valid_index(self): """ diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 52d536fc16d37..cf4060fa6d871 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -569,13 +569,16 @@ def cumsum(self, axis=0, dtype=None, out=None): return self._constructor(new_array, index=self.index, sparse_index=new_array.sp_index).__finalize__(self) return Series(new_array, index=self.index).__finalize__(self) - def dropna(self, axis=0, **kwargs): + def dropna(self, axis=0, inplace=False, **kwargs): """ Analogous to Series.dropna. If fill_value=NaN, returns a dense Series """ # TODO: make more efficient axis = self._get_axis_number(axis or 0) dense_valid = self.to_dense().valid() + if inplace: + raise NotImplementedError("Cannot perform inplace dropna" + " operations on a SparseSeries") if isnull(self.fill_value): return dense_valid else: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6c7877a8d6e4a..12b960ad376ff 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6464,10 +6464,13 @@ def test_drop_names(self): df.index.name, df.columns.name = 'first', 'second' df_dropped_b = df.drop('b') df_dropped_e = df.drop('e', axis=1) - self.assertEqual(df_dropped_b.index.name, 'first') - self.assertEqual(df_dropped_e.index.name, 'first') - self.assertEqual(df_dropped_b.columns.name, 'second') - self.assertEqual(df_dropped_e.columns.name, 'second') + df_inplace_b, df_inplace_e = df.copy(), df.copy() + df_inplace_b.drop('b', inplace=True) + df_inplace_e.drop('e', axis=1, inplace=True) + for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e): + self.assertEqual(obj.index.name, 'first') + 
self.assertEqual(obj.columns.name, 'second') + self.assertEqual(list(df.columns), ['d', 'e', 'f']) def test_dropEmptyRows(self): N = len(self.frame.index) @@ -6475,12 +6478,21 @@ def test_dropEmptyRows(self): mat[:5] = nan frame = DataFrame({'foo': mat}, index=self.frame.index) + original = Series(mat, index=self.frame.index) + expected = original.dropna() + inplace_frame1, inplace_frame2 = frame.copy(), frame.copy() smaller_frame = frame.dropna(how='all') - self.assert_(np.array_equal(smaller_frame['foo'], mat[5:])) + # check that original was preserved + assert_series_equal(frame['foo'], original) + inplace_frame1.dropna(how='all', inplace=True) + assert_series_equal(smaller_frame['foo'], expected) + assert_series_equal(inplace_frame1['foo'], expected) smaller_frame = frame.dropna(how='all', subset=['foo']) - self.assert_(np.array_equal(smaller_frame['foo'], mat[5:])) + inplace_frame2.dropna(how='all', subset=['foo'], inplace=True) + assert_series_equal(smaller_frame['foo'], expected) + assert_series_equal(inplace_frame2['foo'], expected) def test_dropIncompleteRows(self): N = len(self.frame.index) @@ -6489,12 +6501,21 @@ def test_dropIncompleteRows(self): frame = DataFrame({'foo': mat}, index=self.frame.index) frame['bar'] = 5 + original = Series(mat, index=self.frame.index) + inp_frame1, inp_frame2 = frame.copy(), frame.copy() smaller_frame = frame.dropna() + assert_series_equal(frame['foo'], original) + inp_frame1.dropna(inplace=True) self.assert_(np.array_equal(smaller_frame['foo'], mat[5:])) + self.assert_(np.array_equal(inp_frame1['foo'], mat[5:])) samesize_frame = frame.dropna(subset=['bar']) + assert_series_equal(frame['foo'], original) + self.assert_((frame['bar'] == 5).all()) + inp_frame2.dropna(subset=['bar'], inplace=True) self.assert_(samesize_frame.index.equals(self.frame.index)) + self.assert_(inp_frame2.index.equals(self.frame.index)) def test_dropna(self): df = DataFrame(np.random.randn(6, 4)) @@ -6502,20 +6523,32 @@ def test_dropna(self): 
dropped = df.dropna(axis=1) expected = df.ix[:, [0, 1, 3]] + inp = df.copy() + inp.dropna(axis=1, inplace=True) assert_frame_equal(dropped, expected) + assert_frame_equal(inp, expected) dropped = df.dropna(axis=0) expected = df.ix[lrange(2, 6)] + inp = df.copy() + inp.dropna(axis=0, inplace=True) assert_frame_equal(dropped, expected) + assert_frame_equal(inp, expected) # threshold dropped = df.dropna(axis=1, thresh=5) expected = df.ix[:, [0, 1, 3]] + inp = df.copy() + inp.dropna(axis=1, thresh=5, inplace=True) assert_frame_equal(dropped, expected) + assert_frame_equal(inp, expected) dropped = df.dropna(axis=0, thresh=4) expected = df.ix[lrange(2, 6)] + inp = df.copy() + inp.dropna(axis=0, thresh=4, inplace=True) assert_frame_equal(dropped, expected) + assert_frame_equal(inp, expected) dropped = df.dropna(axis=1, thresh=4) assert_frame_equal(dropped, df) @@ -6525,7 +6558,10 @@ def test_dropna(self): # subset dropped = df.dropna(axis=0, subset=[0, 1, 3]) + inp = df.copy() + inp.dropna(axis=0, subset=[0, 1, 3], inplace=True) assert_frame_equal(dropped, df) + assert_frame_equal(inp, df) # all dropped = df.dropna(axis=1, how='all') @@ -6539,6 +6575,22 @@ def test_dropna(self): # bad input self.assertRaises(ValueError, df.dropna, axis=3) + + def test_drop_and_dropna_caching(self): + # tst that cacher updates + original = Series([1, 2, np.nan]) + expected = Series([1, 2], dtype=original.dtype) + df = pd.DataFrame({'A': original.values.copy()}) + df2 = df.copy() + df['A'].dropna() + assert_series_equal(df['A'], original) + df['A'].dropna(inplace=True) + assert_series_equal(df['A'], expected) + df2['A'].drop([1]) + assert_series_equal(df2['A'], original) + df2['A'].drop([1], inplace=True) + assert_series_equal(df2['A'], original.drop([1])) + def test_dropna_corner(self): # bad input self.assertRaises(ValueError, self.frame.dropna, how='foo') @@ -6549,13 +6601,18 @@ def test_dropna_multiple_axes(self): [4, np.nan, 5, 6], [np.nan, np.nan, np.nan, np.nan], [7, np.nan, 8, 9]]) 
- + cp = df.copy() result = df.dropna(how='all', axis=[0, 1]) result2 = df.dropna(how='all', axis=(0, 1)) expected = df.dropna(how='all').dropna(how='all', axis=1) assert_frame_equal(result, expected) assert_frame_equal(result2, expected) + assert_frame_equal(df, cp) + + inp = df.copy() + inp.dropna(how='all', axis=(0, 1), inplace=True) + assert_frame_equal(inp, expected) def test_drop_duplicates(self): df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar', diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 39ceba7469f36..96f14a09180ed 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1600,6 +1600,9 @@ def test_dropna(self): result = p.dropna(axis=1) exp = p.ix[:, ['a', 'c', 'e'], :] assert_panel_equal(result, exp) + inp = p.copy() + inp.dropna(axis=1, inplace=True) + assert_panel_equal(inp, exp) result = p.dropna(axis=1, how='all') assert_panel_equal(result, p) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index ad6fa68f063e1..4405fcc778886 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3582,6 +3582,8 @@ def test_unique(self): def test_dropna_empty(self): s = Series([]) self.assert_(len(s.dropna()) == 0) + s.dropna(inplace=True) + self.assert_(len(s) == 0) # invalid axis self.assertRaises(ValueError, s.dropna, axis=1) @@ -3607,10 +3609,16 @@ def test_drop_duplicates(self): result = s.drop_duplicates() expected = s[[True, True, True, False]] assert_series_equal(result, expected) + sc = s.copy() + sc.drop_duplicates(inplace=True) + assert_series_equal(sc, expected) result = s.drop_duplicates(take_last=True) expected = s[[True, True, False, True]] assert_series_equal(result, expected) + sc = s.copy() + sc.drop_duplicates(take_last=True, inplace=True) + assert_series_equal(sc, expected) def test_sort(self): ts = self.ts.copy() @@ -5196,6 +5204,10 @@ def test_dropna_preserve_name(self): self.ts[:5] = np.nan result = self.ts.dropna() self.assertEquals(result.name, 
self.ts.name) + name = self.ts.name + ts = self.ts.copy() + ts.dropna(inplace=True) + self.assertEquals(ts.name, name) def test_numpy_unique(self): # it works!
And appropriately invalidates caches as necessary BUT _still_ makes a copy. @jreback and I decided that it shouldn't call `__finalize__` a second time in `_update_inplace` (so if metadata is supposed to change, it's not going to be updated, but subclass can always override it). Closes #1960, related #2325 (doesn't actually fix because still makes a copy). Adds `_update_inplace` method that takes a result and invalidates the internal cache then reindexes. --- @jreback - would you mind taking a look? It's almost there, I think I'm missing one cache reference or something... (or I'm just using reindex wrong).
https://api.github.com/repos/pandas-dev/pandas/pulls/5247
2013-10-17T01:37:45Z
2013-10-28T02:06:38Z
2013-10-28T02:06:38Z
2014-06-25T15:32:58Z
Make doc compiling under windows workable
diff --git a/doc/make.py b/doc/make.py index 532395b41ce95..5eb9da8e0c289 100755 --- a/doc/make.py +++ b/doc/make.py @@ -99,11 +99,35 @@ def clean(): def html(): + # Sphinx fails on embedded unicode on Windows, so replace the offending + # strings before the build and restore them afterwards + import os, io + _bad = r" data = 'word,length\nTr\xe4umen,7\nGr\xfc\xdfe,5'" + _good = """ # Due to problems with sphinx and embedded unicode under windows, + # the umlauts were replaced during doc generation! + data = 'word,length\\nTraumen,7\\nGruse,5'""" + if os.name == 'nt': + with io.open("source/io.rst", 'r', encoding='ascii') as f: + io_doc = f.read() + io_doc = io_doc.replace(_bad, _good) + with io.open("source/io.rst", 'w', encoding='ascii') as f: + f.write(io_doc) check_build() if os.system('sphinx-build -P -b html -d build/doctrees ' 'source build/html'): raise SystemExit("Building HTML failed.") - + if os.name == 'nt': + with io.open("source/io.rst", 'r', encoding='ascii') as f: + io_doc = f.read() + io_doc = io_doc.replace(_good,_bad) + with io.open("source/io.rst", 'w', encoding='ascii') as f: + f.write(io_doc) + # these files are often left over due to "[Error 32] file in use" + try: + os.remove('tmp.sv') + os.remove('store.h5') + except: + pass def latex(): check_build()
The doc generation failed under windows due to problems with sphinx and encoded umlauts in code (see links in https://github.com/pydata/pandas/issues/5142). The workaround is to replace the offending text with one which does not fail (but which makes the example a bit pointless), build the docs and restore the old text. Closes: #5142
https://api.github.com/repos/pandas-dev/pandas/pulls/5245
2013-10-16T22:47:17Z
2013-10-21T21:53:30Z
null
2014-06-18T13:37:40Z
API/CLN: consolidate truncate into NDFrame (panel had a separate method)
diff --git a/doc/source/release.rst b/doc/source/release.rst index af9611bb98fae..d331dc7e164fc 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -311,6 +311,7 @@ API Changes - Provide __dir__ method (and local context) for tab completion / remove ipython completers code (:issue:`4501`) - Support non-unique axes in a Panel via indexing operations (:issue:`4960`) + - ``.truncate`` will raise a ``ValueError`` if invalid before and afters dates are given (:issue:`5242`) Internal Refactoring ~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 16f4118d5d1df..266253e05ed61 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2813,7 +2813,7 @@ def tshift(self, periods=1, freq=None, axis=0, **kwds): return self._constructor(new_data).__finalize__(self) - def truncate(self, before=None, after=None, copy=True): + def truncate(self, before=None, after=None, axis=None, copy=True): """Truncates a sorted NDFrame before and/or after some particular dates. 
@@ -2823,28 +2823,38 @@ def truncate(self, before=None, after=None, copy=True): Truncate before date after : date Truncate after date + axis : the truncation axis, defaults to the stat axis + copy : boolean, default is True, + return a copy of the truncated section Returns ------- truncated : type of caller """ + if axis is None: + axis = self._stat_axis_number + axis = self._get_axis_number(axis) + ax = self._get_axis(axis) + # if we have a date index, convert to dates, otherwise # treat like a slice - if self.index.is_all_dates: + if ax.is_all_dates: from pandas.tseries.tools import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None: if before > after: - raise AssertionError('Truncate: %s must be after %s' % - (after, before)) + raise ValueError('Truncate: %s must be after %s' % + (after, before)) - result = self.ix[before:after] + slicer = [ slice(None, None) ] * self._AXIS_LEN + slicer[axis] = slice(before,after) + result = self.ix[tuple(slicer)] - if isinstance(self.index, MultiIndex): - result.index = self.index.truncate(before, after) + if isinstance(ax, MultiIndex): + setattr(result,self._get_axis_name(axis),ax.truncate(before, after)) if copy: result = result.copy() diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 87e9121b2dffc..a86c186e26b53 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -998,30 +998,6 @@ def shift(self, lags, freq=None, axis='major'): def tshift(self, periods=1, freq=None, axis='major', **kwds): return super(Panel, self).tshift(periods, freq, axis, **kwds) - def truncate(self, before=None, after=None, axis='major'): - """Function truncates a sorted Panel before and/or after some - particular values on the requested axis - - Parameters - ---------- - before : date - Left boundary - after : date - Right boundary - axis : {'major', 'minor', 'items'} - - Returns - ------- - Panel - """ - axis = self._get_axis_name(axis) - index = self._get_axis(axis) 
- - beg_slice, end_slice = index.slice_locs(before, after) - new_index = index[beg_slice:end_slice] - - return self.reindex(**{axis: new_index}) - def join(self, other, how='left', lsuffix='', rsuffix=''): """ Join items with other Panel either on major and minor axes column diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 74bca7de89bcc..86dcf97c8bd3d 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -187,6 +187,15 @@ def _ixs(self, i, axis=0): return self.xs(key, axis=axis) + def _slice(self, slobj, axis=0, raise_on_error=False, typ=None): + """ + for compat as we don't support Block Manager here + """ + axis = self._get_axis_name(axis) + index = self._get_axis(axis) + + return self.reindex(**{axis: index[slobj]}) + def _get_item_cache(self, key): return self._frames[key] diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index fe0f9244c31a3..d74ea8a5d2ffc 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7731,6 +7731,10 @@ def test_truncate(self): truncated = ts.truncate(after=end_missing) assert_frame_equal(truncated, expected) + self.assertRaises(ValueError, ts.truncate, + before=ts.index[-1] - 1, + after=ts.index[0] +1) + def test_truncate_copy(self): index = self.tsframe.index truncated = self.tsframe.truncate(index[5], index[10]) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 3715de6dffeb9..645533d5629d2 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3899,7 +3899,7 @@ def test_truncate(self): truncated = ts.truncate(before=self.ts.index[-1] + offset) assert(len(truncated) == 0) - self.assertRaises(Exception, ts.truncate, + self.assertRaises(ValueError, ts.truncate, before=self.ts.index[-1] + offset, after=self.ts.index[0] - offset)
API: truncate error if before > after is now a ValueError (rather than AssertionError) (related #5242)
https://api.github.com/repos/pandas-dev/pandas/pulls/5244
2013-10-16T21:36:53Z
2013-10-17T11:29:15Z
2013-10-17T11:29:15Z
2014-07-16T08:35:37Z
API: add excel kw to to_clipboard to preserve compat behavior (related GH5070)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8a2ed9926d630..16f4118d5d1df 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -868,14 +868,19 @@ def load(self, path): # TODO remove in 0.14 warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning) return read_pickle(path) - def to_clipboard(self, sep=None, **kwargs): + def to_clipboard(self, excel=None, sep=None, **kwargs): """ Attempt to write text representation of object to the system clipboard This can be pasted into Excel, for example. Parameters ---------- - sep : optional, defaults to comma + excel : boolean, defaults to True + if True, use the provided separator, writing in a csv + format for allowing easy pasting into excel. + if False, write a string representation of the object + to the clipboard + sep : optional, defaults to tab other keywords are passed to to_csv Notes @@ -886,7 +891,7 @@ def to_clipboard(self, sep=None, **kwargs): - OS X: none """ from pandas.io import clipboard - clipboard.to_clipboard(self, sep, **kwargs) + clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs) #---------------------------------------------------------------------- # Fancy Indexing diff --git a/pandas/io/clipboard.py b/pandas/io/clipboard.py index 401a0689fe000..51142c9f52655 100644 --- a/pandas/io/clipboard.py +++ b/pandas/io/clipboard.py @@ -26,11 +26,22 @@ def read_clipboard(**kwargs): # pragma: no cover return read_table(StringIO(text), **kwargs) -def to_clipboard(obj, sep=None, **kwargs): # pragma: no cover +def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover """ Attempt to write text representation of object to the system clipboard The clipboard can be then pasted into Excel for example. + Parameters + ---------- + obj : the object to write to the clipboard + excel : boolean, defaults to True + if True, use the provided separator, writing in a csv + format for allowing easy pasting into excel. 
+ if False, write a string representation of the object + to the clipboard + sep : optional, defaults to tab + other keywords are passed to to_csv + Notes ----- Requirements for your platform @@ -39,12 +50,19 @@ def to_clipboard(obj, sep=None, **kwargs): # pragma: no cover - OS X: """ from pandas.util.clipboard import clipboard_set - try: - if sep is None: - sep = '\t' - buf = StringIO() - obj.to_csv(buf,sep=sep, **kwargs) - clipboard_set(buf.getvalue()) - except: - clipboard_set(str(obj)) + if excel is None: + excel = True + + if excel: + try: + if sep is None: + sep = '\t' + buf = StringIO() + obj.to_csv(buf,sep=sep, **kwargs) + clipboard_set(buf.getvalue()) + return + except: + pass + + clipboard_set(str(obj)) diff --git a/pandas/io/tests/test_clipboard.py b/pandas/io/tests/test_clipboard.py index 90ec2d6fed0ce..45b479ebb589e 100644 --- a/pandas/io/tests/test_clipboard.py +++ b/pandas/io/tests/test_clipboard.py @@ -39,9 +39,9 @@ def setUpClass(cls): def tearDownClass(cls): del cls.data_types, cls.data - def check_round_trip_frame(self, data_type, sep=None): + def check_round_trip_frame(self, data_type, excel=None, sep=None): data = self.data[data_type] - data.to_clipboard(sep=sep) + data.to_clipboard(excel=excel, sep=sep) if sep is not None: result = read_clipboard(sep=sep,index_col=0) else: @@ -52,6 +52,10 @@ def test_round_trip_frame_sep(self): for dt in self.data_types: self.check_round_trip_frame(dt,sep=',') + def test_round_trip_frame_string(self): + for dt in self.data_types: + self.check_round_trip_frame(dt,excel=False) + def test_round_trip_frame(self): for dt in self.data_types: self.check_round_trip_frame(dt)
related #5070
https://api.github.com/repos/pandas-dev/pandas/pulls/5243
2013-10-16T18:29:04Z
2013-10-16T19:54:25Z
2013-10-16T19:54:25Z
2014-07-16T08:35:35Z
Fixed assertion error message
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4dfe0a55fce28..bc5d84b9ff0f5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2828,7 +2828,7 @@ def truncate(self, before=None, after=None, copy=True): if before is not None and after is not None: if before > after: raise AssertionError('Truncate: %s must be after %s' % - (before, after)) + (after, before)) result = self.ix[before:after]
This is a pretty straightforward fix, message was simply inverted.
https://api.github.com/repos/pandas-dev/pandas/pulls/5242
2013-10-16T12:03:03Z
2013-10-16T12:20:16Z
2013-10-16T12:20:16Z
2014-06-21T09:23:33Z
DOC: fix building of gbq docs
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 14e120fdff672..b2c78f38140b4 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -651,7 +651,7 @@ Experimental - ``pandas.io.gbq`` provides a simple way to extract from, and load data into, Google's BigQuery Data Sets by way of pandas DataFrames. BigQuery is a high performance SQL-like database service, useful for performing ad-hoc queries - against extremely large datasets. :ref:`See the docs<io.gbq>` + against extremely large datasets. :ref:`See the docs <io.bigquery>` .. code-block:: python @@ -684,24 +684,24 @@ Experimental df3 = pandas.concat([df2.min(), df2.mean(), df2.max()], axis=1,keys=["Min Tem", "Mean Temp", "Max Temp"]) - The resulting dataframe is: - - ``` - Min Tem Mean Temp Max Temp - MONTH - 1 -53.336667 39.827892 89.770968 - 2 -49.837500 43.685219 93.437932 - 3 -77.926087 48.708355 96.099998 - 4 -82.892858 55.070087 97.317240 - 5 -92.378261 61.428117 102.042856 - 6 -77.703334 65.858888 102.900000 - 7 -87.821428 68.169663 106.510714 - 8 -89.431999 68.614215 105.500000 - 9 -86.611112 63.436935 107.142856 - 10 -78.209677 56.880838 92.103333 - 11 -50.125000 48.861228 94.996428 - 12 -50.332258 42.286879 94.396774 - ``` + The resulting dataframe is:: + + > df3 + Min Tem Mean Temp Max Temp + MONTH + 1 -53.336667 39.827892 89.770968 + 2 -49.837500 43.685219 93.437932 + 3 -77.926087 48.708355 96.099998 + 4 -82.892858 55.070087 97.317240 + 5 -92.378261 61.428117 102.042856 + 6 -77.703334 65.858888 102.900000 + 7 -87.821428 68.169663 106.510714 + 8 -89.431999 68.614215 105.500000 + 9 -86.611112 63.436935 107.142856 + 10 -78.209677 56.880838 92.103333 + 11 -50.125000 48.861228 94.996428 + 12 -50.332258 42.286879 94.396774 + .. warning:: To use this module, you will need a BigQuery account. 
See diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 504d49ddca13a..bfc086b09730e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -672,34 +672,34 @@ def to_dict(self, outtype='dict'): raise ValueError("outtype %s not understood" % outtype) def to_gbq(self, destination_table, schema=None, col_order=None, if_exists='fail', **kwargs): - """ - Write a DataFrame to a Google BigQuery table. If the table exists, - the DataFrame will be appended. If not, a new table will be created, - in which case the schema will have to be specified. By default, + """Write a DataFrame to a Google BigQuery table. + + If the table exists, the DataFrame will be appended. If not, a new table + will be created, in which case the schema will have to be specified. By default, rows will be written in the order they appear in the DataFrame, though the user may specify an alternative order. Parameters --------------- - destination_table: string + destination_table : string name of table to be written, in the form 'dataset.tablename' schema : sequence (optional) list of column types in order for data to be inserted, e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN'] - col_order: sequence (optional) + col_order : sequence (optional) order which columns are to be inserted, e.g. ['primary_key', 'birthday', 'username'] - if_exists: {'fail', 'replace', 'append'} (optional) - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. + if_exists : {'fail', 'replace', 'append'} (optional) + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. 
kwargs are passed to the Client constructor - Raises: + Raises ------ - SchemaMissing: + SchemaMissing : Raised if the 'if_exists' parameter is set to 'replace', but no schema is specified - TableExists: + TableExists : Raised if the specified 'destination_table' exists but the 'if_exists' parameter is set to 'fail' (the default) - InvalidSchema: + InvalidSchema : Raised if the 'schema' parameter does not match the provided DataFrame """ diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index f226af6629aa5..931aa732d5286 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -316,36 +316,36 @@ def _parse_data(client, job, index_col=None, col_order=None): return final_df def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists='fail', **kwargs): - """ - Write a DataFrame to a Google BigQuery table. If the table exists, - the DataFrame will be appended. If not, a new table will be created, - in which case the schema will have to be specified. By default, + """Write a DataFrame to a Google BigQuery table. + + If the table exists, the DataFrame will be appended. If not, a new table + will be created, in which case the schema will have to be specified. By default, rows will be written in the order they appear in the DataFrame, though the user may specify an alternative order. Parameters - --------------- - dataframe: DataFrame + ---------- + dataframe : DataFrame DataFrame to be written - destination_table: string + destination_table : string name of table to be written, in the form 'dataset.tablename' schema : sequence (optional) list of column types in order for data to be inserted, e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN'] - col_order: sequence (optional) + col_order : sequence (optional) order which columns are to be inserted, e.g. ['primary_key', 'birthday', 'username'] - if_exists: {'fail', 'replace', 'append'} (optional) - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. 
- append: If table exists, insert data. Create if does not exist. + if_exists : {'fail', 'replace', 'append'} (optional) + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. kwargs are passed to the Client constructor - Raises: + Raises ------ - SchemaMissing: + SchemaMissing : Raised if the 'if_exists' parameter is set to 'replace', but no schema is specified - TableExists: + TableExists : Raised if the specified 'destination_table' exists but the 'if_exists' parameter is set to 'fail' (the default) - InvalidSchema: + InvalidSchema : Raised if the 'schema' parameter does not match the provided DataFrame """ @@ -416,35 +416,37 @@ def to_gbq(dataframe, destination_table, schema=None, col_order=None, if_exists= job = client.Load(table_reference, csv_file.name, schema=schema, **opts) def read_gbq(query, project_id = None, destination_table = None, index_col=None, col_order=None, **kwargs): - """ + """Load data from Google BigQuery. + The main method a user calls to load data from Google BigQuery into a pandas DataFrame. This is a simple wrapper for Google's bq.py and bigquery_client.py, which we use to get the source data. Because of this, this script respects the user's bq settings file, '~/.bigqueryrc', if it exists. Such a file can be generated using 'bq init'. Further, - additional parameters for the query can be specified as either **kwds in the command, + additional parameters for the query can be specified as either ``**kwds`` in the command, or using FLAGS provided in the 'gflags' module. Particular options can be found in bigquery_client.py. Parameters ---------- - query: str + query : str SQL-Like Query to return data values - project_id: str (optional) + project_id : str (optional) Google BigQuery Account project ID. 
Optional, since it may be located in ~/.bigqueryrc - index_col: str (optional) + index_col : str (optional) Name of result column to use for index in results DataFrame - col_order: list(str) (optional) + col_order : list(str) (optional) List of BigQuery column names in the desired order for results DataFrame - destination_table: string (optional) + destination_table : string (optional) If provided, send the results to the given table. - **kwargs: to be passed to bq.Client.Create(). Particularly: 'trace', 'sync', - 'api', 'api_version' + **kwargs : + To be passed to bq.Client.Create(). Particularly: 'trace', + 'sync', 'api', 'api_version' Returns ------- - df: pandas DataFrame + df: DataFrame DataFrame representing results of query """
This fixes some build warnings in de gbq docs after merging #5179 (``` is markdown syntax + wrong internal reference). And in the same time I corrected some minor numpydoc docstring things.
https://api.github.com/repos/pandas-dev/pandas/pulls/5241
2013-10-16T07:08:09Z
2013-10-16T12:30:54Z
2013-10-16T12:30:54Z
2014-07-16T08:35:32Z
CLN/ENH: Make names getattr'able in core/ops +PEP8.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6ea4e5a3046b2..7a64b1becae51 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -304,6 +304,7 @@ API Changes ``SparsePanel``, etc.), now support the entire set of arithmetic operators and arithmetic flex methods (add, sub, mul, etc.). ``SparsePanel`` does not support ``pow`` or ``mod`` with non-scalars. (:issue:`3765`) + - Arithemtic func factories are now passed real names (suitable for using with super) (:issue:`5240`) - Provide numpy compatibility with 1.7 for a calling convention like ``np.prod(pandas_object)`` as numpy call with additional keyword args (:issue:`4435`) - Provide __dir__ method (and local context) for tab completion / remove ipython completers code diff --git a/pandas/core/ops.py b/pandas/core/ops.py index f8ab35656d99c..7fad806e43af3 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -19,8 +19,12 @@ # Functions that add arithmetic methods to objects, given arithmetic factory # methods + def _create_methods(arith_method, radd_func, comp_method, bool_method, use_numexpr, special=False, default_axis='columns'): + # creates actual methods based upon arithmetic, comp and bool method + # constructors. 
+ # NOTE: Only frame cares about default_axis, specifically: special methods # have default axis None, whereas flex methods have default axis 'columns' # if we're not using numexpr, then don't pass a str_rep @@ -37,42 +41,62 @@ def names(x): else: names = lambda x: x radd_func = radd_func or operator.add - # Inframe, all special methods have default_axis=None, flex methods have default_axis set to the default (columns) + # Inframe, all special methods have default_axis=None, flex methods have + # default_axis set to the default (columns) new_methods = dict( - add=arith_method(operator.add, names('add'), op('+'), default_axis=default_axis), - radd=arith_method(radd_func, names('radd'), op('+'), default_axis=default_axis), - sub=arith_method(operator.sub, names('sub'), op('-'), default_axis=default_axis), - mul=arith_method(operator.mul, names('mul'), op('*'), default_axis=default_axis), + add=arith_method(operator.add, names('add'), op('+'), + default_axis=default_axis), + radd=arith_method(radd_func, names('radd'), op('+'), + default_axis=default_axis), + sub=arith_method(operator.sub, names('sub'), op('-'), + default_axis=default_axis), + mul=arith_method(operator.mul, names('mul'), op('*'), + default_axis=default_axis), truediv=arith_method(operator.truediv, names('truediv'), op('/'), - truediv=True, fill_zeros=np.inf, default_axis=default_axis), + truediv=True, fill_zeros=np.inf, + default_axis=default_axis), floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'), default_axis=default_axis, fill_zeros=np.inf), # Causes a floating point exception in the tests when numexpr # enabled, so for now no speedup mod=arith_method(operator.mod, names('mod'), default_axis=default_axis, fill_zeros=np.nan), - pow=arith_method(operator.pow, names('pow'), op('**'), default_axis=default_axis), + pow=arith_method(operator.pow, names('pow'), op('**'), + default_axis=default_axis), # not entirely sure why this is necessary, but previously was included # so it's here 
to maintain compatibility - rmul=arith_method(operator.mul, names('rmul'), default_axis=default_axis), - rsub=arith_method(lambda x, y: y - x, names('rsub'), default_axis=default_axis), - rtruediv=arith_method(lambda x, y: operator.truediv(y, x), names('rtruediv'), - truediv=True, fill_zeros=np.inf, default_axis=default_axis), - rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), names('rfloordiv'), - default_axis=default_axis, fill_zeros=np.inf), - rpow=arith_method(lambda x, y: y ** x, names('rpow'), default_axis=default_axis), - rmod=arith_method(lambda x, y: y % x, names('rmod'), default_axis=default_axis), + rmul=arith_method(operator.mul, names('rmul'), + default_axis=default_axis), + rsub=arith_method(lambda x, y: y - x, names('rsub'), + default_axis=default_axis), + rtruediv=arith_method(lambda x, y: operator.truediv(y, x), + names('rtruediv'), truediv=True, + fill_zeros=np.inf, default_axis=default_axis), + rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), + names('rfloordiv'), default_axis=default_axis, + fill_zeros=np.inf), + rpow=arith_method(lambda x, y: y ** x, names('rpow'), + default_axis=default_axis), + rmod=arith_method(lambda x, y: y % x, names('rmod'), + default_axis=default_axis), ) if not compat.PY3: new_methods["div"] = arith_method(operator.div, names('div'), op('/'), - truediv=False, fill_zeros=np.inf, default_axis=default_axis) - new_methods["rdiv"] = arith_method(lambda x, y: operator.div(y, x), names('rdiv'), - truediv=False, fill_zeros=np.inf, default_axis=default_axis) + truediv=False, fill_zeros=np.inf, + default_axis=default_axis) + new_methods["rdiv"] = arith_method(lambda x, y: operator.div(y, x), + names('rdiv'), truediv=False, + fill_zeros=np.inf, + default_axis=default_axis) else: - new_methods["div"] = arith_method(operator.truediv, names('div'), op('/'), - truediv=True, fill_zeros=np.inf, default_axis=default_axis) - new_methods["rdiv"] = arith_method(lambda x, y: operator.truediv(y, x), names('rdiv'), - 
truediv=False, fill_zeros=np.inf, default_axis=default_axis) + new_methods["div"] = arith_method(operator.truediv, names('div'), + op('/'), truediv=True, + fill_zeros=np.inf, + default_axis=default_axis) + new_methods["rdiv"] = arith_method(lambda x, y: operator.truediv(y, x), + names('rdiv'), truediv=False, + fill_zeros=np.inf, + default_axis=default_axis) # Comp methods never had a default axis set if comp_method: new_methods.update(dict( @@ -85,13 +109,14 @@ def names(x): )) if bool_method: new_methods.update(dict( - and_=bool_method(operator.and_, names('and_ [&]'), op('&')), - or_=bool_method(operator.or_, names('or_ [|]'), op('|')), - # For some reason ``^`` wasn't used in original. - xor=bool_method(operator.xor, names('xor [^]')), - rand_=bool_method(lambda x, y: operator.and_(y, x), names('rand_[&]')), - ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_ [|]')), - rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor [^]')) + and_=bool_method(operator.and_, names('and_'), op('&')), + or_=bool_method(operator.or_, names('or_'), op('|')), + # For some reason ``^`` wasn't used in original. 
+ xor=bool_method(operator.xor, names('xor')), + rand_=bool_method(lambda x, y: operator.and_(y, x), + names('rand_')), + ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_')), + rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor')) )) new_methods = dict((names(k), v) for k, v in new_methods.items()) @@ -116,6 +141,7 @@ def add_methods(cls, new_methods, force, select, exclude): if force or name not in cls.__dict__: bind_method(cls, name, method) + #---------------------------------------------------------------------- # Arithmetic def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None, @@ -123,7 +149,8 @@ def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None, use_numexpr=True, force=False, select=None, exclude=None): """ - Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. + Adds the full suite of special arithmetic methods (``__add__``, + ``__sub__``, etc.) to the class. Parameters ---------- @@ -137,16 +164,18 @@ def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None, use_numexpr : bool, default True whether to accelerate with numexpr, defaults to True force : bool, default False - if False, checks whether function is defined **on ``cls.__dict__``** before defining - if True, always defines functions on class base + if False, checks whether function is defined **on ``cls.__dict__``** + before defining if True, always defines functions on class base select : iterable of strings (optional) if passed, only sets functions with names in select exclude : iterable of strings (optional) if passed, will not set functions with names in exclude """ radd_func = radd_func or operator.add - # in frame, special methods have default_axis = None, comp methods use 'columns' - new_methods = _create_methods(arith_method, radd_func, comp_method, bool_method, use_numexpr, default_axis=None, + # in frame, special methods have default_axis = None, comp methods 
use + # 'columns' + new_methods = _create_methods(arith_method, radd_func, comp_method, + bool_method, use_numexpr, default_axis=None, special=True) # inplace operators (I feel like these should get passed an `inplace=True` @@ -161,7 +190,8 @@ def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None, if not compat.PY3: new_methods["__idiv__"] = new_methods["__div__"] - add_methods(cls, new_methods=new_methods, force=force, select=select, exclude=exclude) + add_methods(cls, new_methods=new_methods, force=force, select=select, + exclude=exclude) def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, @@ -169,7 +199,8 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, use_numexpr=True, force=False, select=None, exclude=None): """ - Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. + Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) + to the class. Parameters ---------- @@ -177,14 +208,15 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, factory for special arithmetic methods, with op string: f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs) radd_func : function (optional) - Possible replacement for ``lambda x, y: operator.add(y, x)`` for compatibility + Possible replacement for ``lambda x, y: operator.add(y, x)`` for + compatibility flex_comp_method : function, optional, factory for rich comparison - signature: f(op, name, str_rep) use_numexpr : bool, default True whether to accelerate with numexpr, defaults to True force : bool, default False - if False, checks whether function is defined **on ``cls.__dict__``** before defining - if True, always defines functions on class base + if False, checks whether function is defined **on ``cls.__dict__``** + before defining if True, always defines functions on class base select : iterable of strings (optional) if passed, only sets functions with names in select 
exclude : iterable of strings (optional) @@ -205,28 +237,10 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, if k in new_methods: new_methods.pop(k) - add_methods(cls, new_methods=new_methods, force=force, select=select, exclude=exclude) + add_methods(cls, new_methods=new_methods, force=force, select=select, + exclude=exclude) + -def cleanup_name(name): - """cleanup special names - >>> cleanup_name("__rsub__") - sub - >>> cleanup_name("rand_") - and_ - """ - if name[:2] == "__": - name = name[2:-2] - if name[0] == "r": - name = name[1:] - # readd last _ for operator names. - if name == "or": - name = "or_" - elif name == "and": - name = "and_" - return name - - -# direct copy of original Series _TimeOp class _TimeOp(object): """ Wrapper around Series datetime/time/timedelta arithmetic operations. @@ -244,13 +258,13 @@ def __init__(self, left, right, name): rvalues = self._convert_to_array(right, name=name) self.is_timedelta_lhs = com.is_timedelta64_dtype(left) - self.is_datetime_lhs = com.is_datetime64_dtype(left) - self.is_integer_lhs = left.dtype.kind in ['i','u'] - self.is_datetime_rhs = com.is_datetime64_dtype(rvalues) + self.is_datetime_lhs = com.is_datetime64_dtype(left) + self.is_integer_lhs = left.dtype.kind in ['i', 'u'] + self.is_datetime_rhs = com.is_datetime64_dtype(rvalues) self.is_timedelta_rhs = (com.is_timedelta64_dtype(rvalues) or (not self.is_datetime_rhs and pd._np_version_under1p7)) - self.is_integer_rhs = rvalues.dtype.kind in ('i','u') + self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u') self._validate() @@ -262,36 +276,42 @@ def _validate(self): if (self.is_timedelta_lhs and self.is_integer_rhs) or\ (self.is_integer_lhs and self.is_timedelta_rhs): - if self.name not in ('__truediv__','__div__','__mul__'): - raise TypeError("can only operate on a timedelta and an integer for " - "division, but the operator [%s] was passed" % self.name) + if self.name not in ('__truediv__', '__div__', '__mul__'): + raise 
TypeError("can only operate on a timedelta and an " + "integer for division, but the operator [%s]" + "was passed" % self.name) # 2 datetimes elif self.is_datetime_lhs and self.is_datetime_rhs: if self.name != '__sub__': - raise TypeError("can only operate on a datetimes for subtraction, " - "but the operator [%s] was passed" % self.name) - + raise TypeError("can only operate on a datetimes for" + " subtraction, but the operator [%s] was" + " passed" % self.name) # 2 timedeltas elif self.is_timedelta_lhs and self.is_timedelta_rhs: - if self.name not in ('__div__', '__truediv__', '__add__', '__sub__'): + if self.name not in ('__div__', '__truediv__', '__add__', + '__sub__'): raise TypeError("can only operate on a timedeltas for " - "addition, subtraction, and division, but the operator [%s] was passed" % self.name) + "addition, subtraction, and division, but the" + " operator [%s] was passed" % self.name) # datetime and timedelta elif self.is_datetime_lhs and self.is_timedelta_rhs: - if self.name not in ('__add__','__sub__'): - raise TypeError("can only operate on a datetime with a rhs of a timedelta for " - "addition and subtraction, but the operator [%s] was passed" % self.name) + if self.name not in ('__add__', '__sub__'): + raise TypeError("can only operate on a datetime with a rhs of" + " a timedelta for addition and subtraction, " + " but the operator [%s] was passed" % + self.name) elif self.is_timedelta_lhs and self.is_datetime_rhs: if self.name != '__add__': - raise TypeError("can only operate on a timedelta and a datetime for " - "addition, but the operator [%s] was passed" % self.name) + raise TypeError("can only operate on a timedelta and" + " a datetime for addition, but the operator" + " [%s] was passed" % self.name) else: raise TypeError('cannot operate on a series with out a rhs ' 'of a series/ndarray of type datetime64[ns] ' @@ -305,9 +325,10 @@ def _convert_to_array(self, values, name=None): if not is_list_like(values): values = np.array([values]) 
inferred_type = lib.infer_dtype(values) - if inferred_type in ('datetime64','datetime','date','time'): + if inferred_type in ('datetime64', 'datetime', 'date', 'time'): # a datetlike - if not (isinstance(values, (pa.Array, pd.Series)) and com.is_datetime64_dtype(values)): + if not (isinstance(values, (pa.Array, pd.Series)) and + com.is_datetime64_dtype(values)): values = tslib.array_to_datetime(values) elif isinstance(values, pd.DatetimeIndex): values = values.to_series() @@ -320,20 +341,22 @@ def _convert_to_array(self, values, name=None): values = values.astype('timedelta64[ns]') elif isinstance(values, pd.PeriodIndex): values = values.to_timestamp().to_series() - elif name not in ('__truediv__','__div__','__mul__'): + elif name not in ('__truediv__', '__div__', '__mul__'): raise TypeError("incompatible type for a datetime/timedelta " "operation [{0}]".format(name)) elif isinstance(values[0], pd.DateOffset): # handle DateOffsets - os = pa.array([ getattr(v,'delta',None) for v in values ]) + os = pa.array([getattr(v, 'delta', None) for v in values]) mask = isnull(os) if mask.any(): raise TypeError("cannot use a non-absolute DateOffset in " "datetime/timedelta operations [{0}]".format( - ','.join([ com.pprint_thing(v) for v in values[mask] ]))) + ', '.join([com.pprint_thing(v) + for v in values[mask]]))) values = _possibly_cast_to_timedelta(os, coerce=coerce) else: - raise TypeError("incompatible type [{0}] for a datetime/timedelta operation".format(pa.array(values).dtype)) + raise TypeError("incompatible type [{0}] for a datetime/timedelta" + " operation".format(pa.array(values).dtype)) return values @@ -372,8 +395,8 @@ def _convert_for_datetime(self, lvalues, rvalues): if mask is not None: if mask.any(): def f(x): - x = pa.array(x,dtype=self.dtype) - np.putmask(x,mask,self.fill_value) + x = pa.array(x, dtype=self.dtype) + np.putmask(x, mask, self.fill_value) return x self.wrap_results = f self.lvalues = lvalues @@ -391,7 +414,7 @@ def 
maybe_convert_for_time_op(cls, left, right, name): """ # decide if we can do it is_timedelta_lhs = com.is_timedelta64_dtype(left) - is_datetime_lhs = com.is_datetime64_dtype(left) + is_datetime_lhs = com.is_datetime64_dtype(left) if not (is_datetime_lhs or is_timedelta_lhs): return None # rops are allowed. No need for special checks, just strip off @@ -401,7 +424,8 @@ def maybe_convert_for_time_op(cls, left, right, name): return cls(left, right, name) -def _arith_method_SERIES(op, name, str_rep=None, fill_zeros=None, default_axis=None, **eval_kwargs): +def _arith_method_SERIES(op, name, str_rep=None, fill_zeros=None, + default_axis=None, **eval_kwargs): """ Wrapper function for Series arithmetic operations, to avoid code duplication. @@ -412,7 +436,7 @@ def na_op(x, y): raise_on_error=True, **eval_kwargs) except TypeError: if isinstance(y, (pa.Array, pd.Series)): - dtype = np.find_common_type([x.dtype,y.dtype],[]) + dtype = np.find_common_type([x.dtype, y.dtype], []) result = np.empty(x.size, dtype=dtype) mask = notnull(x) & notnull(y) result[mask] = op(x[mask], y[mask]) @@ -471,9 +495,11 @@ def wrapper(left, right, name=name): if hasattr(lvalues, 'values'): lvalues = lvalues.values return left._constructor(wrap_results(na_op(lvalues, rvalues)), - index=left.index, name=left.name, dtype=dtype) + index=left.index, name=left.name, + dtype=dtype) return wrapper + def _comp_method_SERIES(op, name, str_rep=None, masker=False): """ Wrapper function for Series arithmetic operations, to avoid @@ -494,7 +520,7 @@ def na_op(x, y): else: try: - result = getattr(x,name)(y) + result = getattr(x, name)(y) if result is NotImplemented: raise TypeError("invalid type comparison") except (AttributeError): @@ -535,7 +561,8 @@ def wrapper(self, other): # always return a full value series here res = _values_from_object(res) - res = pd.Series(res, index=self.index, name=self.name, dtype='bool') + res = pd.Series(res, index=self.index, name=self.name, + dtype='bool') # mask out the 
invalids if mask.any(): @@ -574,7 +601,8 @@ def na_op(x, y): result = lib.scalar_binop(x, y, op) except: raise TypeError("cannot compare a dtyped [{0}] array with " - "a scalar of type [{1}]".format(x.dtype,type(y).__name__)) + "a scalar of type [{1}]".format( + x.dtype, type(y).__name__)) return result @@ -584,17 +612,18 @@ def wrapper(self, other): other = other.reindex_like(self).fillna(False).astype(bool) return self._constructor(na_op(self.values, other.values), - index=self.index, name=name).fillna(False).astype(bool) + index=self.index, + name=name).fillna(False).astype(bool) elif isinstance(other, pd.DataFrame): return NotImplemented else: # scalars - return self._constructor(na_op(self.values, other), - index=self.index).fillna(False).astype(bool).__finalize__(self) + res = self._constructor(na_op(self.values, other), + index=self.index).fillna(False) + return res.astype(bool).__finalize__(self) return wrapper -# original Series _radd_compat method def _radd_compat(left, right): radd = lambda x, y: y + x # GH #353, NumPy 1.5.1 workaround @@ -685,7 +714,8 @@ def f(self, other, level=None, fill_value=None): """ -def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns', fill_zeros=None, **eval_kwargs): +def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns', + fill_zeros=None, **eval_kwargs): def na_op(x, y): try: result = expressions.evaluate( @@ -693,7 +723,7 @@ def na_op(x, y): except TypeError: xrav = x.ravel() if isinstance(y, (np.ndarray, pd.Series)): - dtype = np.find_common_type([x.dtype,y.dtype],[]) + dtype = np.find_common_type([x.dtype, y.dtype], []) result = np.empty(x.size, dtype=dtype) yrav = y.ravel() mask = notnull(xrav) & notnull(yrav) @@ -718,6 +748,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): return self._combine_series(other, na_op, fill_value, axis, level) elif isinstance(other, (list, tuple)): if axis is not None and self._get_axis_name(axis) == 'index': + # TODO: Get all of these 
to use _constructor_sliced # casted = self._constructor_sliced(other, index=self.index) casted = pd.Series(other, index=self.index) else: @@ -727,22 +758,24 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): elif isinstance(other, np.ndarray): if other.ndim == 1: if axis is not None and self._get_axis_name(axis) == 'index': - # casted = self._constructor_sliced(other, index=self.index) + # casted = self._constructor_sliced(other, + # index=self.index) casted = pd.Series(other, index=self.index) else: - # casted = self._constructor_sliced(other, index=self.columns) + # casted = self._constructor_sliced(other, + # index=self.columns) casted = pd.Series(other, index=self.columns) return self._combine_series(casted, na_op, fill_value, axis, level) elif other.ndim == 2: # casted = self._constructor(other, index=self.index, - # columns=self.columns) + # columns=self.columns) casted = pd.DataFrame(other, index=self.index, - columns=self.columns) + columns=self.columns) return self._combine_frame(casted, na_op, fill_value, level) else: raise ValueError("Incompatible argument shape: %s" % - (other.shape,)) + (other.shape, )) else: return self._combine_const(other, na_op) @@ -805,13 +838,13 @@ def f(self, other, axis=default_axis, level=None): elif other.ndim == 2: casted = pd.DataFrame(other, index=self.index, - columns=self.columns) + columns=self.columns) return self._flex_compare_frame(casted, na_op, str_rep, level) else: raise ValueError("Incompatible argument shape: %s" % - (other.shape,)) + (other.shape, )) else: return self._combine_const(other, na_op) @@ -832,7 +865,8 @@ def f(self, other): # straight boolean comparisions we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. 
- return self._combine_const(other, func, raise_on_error=False).fillna(True).astype(bool) + res = self._combine_const(other, func, raise_on_error=False) + return res.fillna(True).astype(bool) f.__name__ = name @@ -868,12 +902,13 @@ def na_op(x, y): result = com._fill_zeros(result, y, fill_zeros) return result - # work only for scalars + # work only for scalars def f(self, other): if not np.isscalar(other): raise ValueError('Simple arithmetic with %s can only be ' - 'done with scalar values' % self._constructor.__name__) + 'done with scalar values' % + self._constructor.__name__) return self._combine(other, op) f.__name__ = name
Now the name passed to a method can be used to get that method from the NDFrame (previously you had things like `or_ [|]`. better to just have them all be named exactly what they are s.t. subclasses can do `getattr(super(self, MyClass), name)(*args, **kwargs)`.
https://api.github.com/repos/pandas-dev/pandas/pulls/5240
2013-10-16T02:41:28Z
2013-10-16T22:31:48Z
2013-10-16T22:31:48Z
2014-06-14T14:18:48Z
CLN: Add _reset_cache method to PandasObject
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6ea4e5a3046b2..1194e92bb25f8 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -257,6 +257,8 @@ API Changes ('mostly immutable') - ``levels``, ``labels`` and ``names`` are validated upon setting and are either copied or shallow-copied. + - inplace setting of ``levels`` or ``labels`` now correctly invalidates the + cached properties. (:issue:`5238`). - ``__deepcopy__`` now returns a shallow copy (currently: a view) of the data - allowing metadata changes. - ``MultiIndex.astype()`` now only allows ``np.object_``-like dtypes and diff --git a/pandas/core/base.py b/pandas/core/base.py index 2acc045156720..6b9fa78d45406 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -48,16 +48,6 @@ def __repr__(self): """ return str(self) - def _local_dir(self): - """ provide addtional __dir__ for this object """ - return [] - - def __dir__(self): - """ - Provide method name lookup and completion - Only provide 'public' methods - """ - return list(sorted(list(set(dir(type(self)) + self._local_dir())))) class PandasObject(StringMixin): """baseclass for various pandas objects""" @@ -77,6 +67,29 @@ def __unicode__(self): # Should be overwritten by base classes return object.__repr__(self) + def _local_dir(self): + """ provide addtional __dir__ for this object """ + return [] + + def __dir__(self): + """ + Provide method name lookup and completion + Only provide 'public' methods + """ + return list(sorted(list(set(dir(type(self)) + self._local_dir())))) + + def _reset_cache(self, key=None): + """ + Reset cached properties. If ``key`` is passed, only clears that key. 
+ """ + if getattr(self, '_cache', None) is None: + return + if key is None: + self._cache.clear() + else: + self._cache.pop(key, None) + + class FrozenList(PandasObject, list): """ Container that doesn't allow setting item *but* diff --git a/pandas/core/index.py b/pandas/core/index.py index a79670579198b..1f2e823833810 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1929,7 +1929,9 @@ def _set_levels(self, levels, copy=False, validate=True, self._levels = levels if any(names): self._set_names(names) + self._tuples = None + self._reset_cache() if verify_integrity: self._verify_integrity() @@ -1981,6 +1983,7 @@ def _set_labels(self, labels, copy=False, validate=True, self._labels = FrozenList(_ensure_frozen(labs, copy=copy)._shallow_copy() for labs in labels) self._tuples = None + self._reset_cache() if verify_integrity: self._verify_integrity() diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 755d74c9ea0bc..a634cbd0ca2ed 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -2462,6 +2462,16 @@ def test_isnull_behavior(self): with tm.assertRaises(NotImplementedError): pd.isnull(self.index) + def test_level_setting_resets_attributes(self): + ind = MultiIndex.from_arrays([ + ['A', 'A', 'B', 'B', 'B'], + [1, 2, 1, 2, 3]]) + assert ind.is_monotonic + ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], + inplace=True) + # if this fails, probably didn't reset the cache correctly. + assert not ind.is_monotonic + def test_get_combined_index(): from pandas.core.index import _get_combined_index
I think this is the right way to do it. MI needs this for _engine and friends. I think there are other objects that use `cache_readonly` that might want to reset too. I also moved the local dir methods off of StringMixin to PandasObject (where they ought to have been). Fixes #5236 and fixes #5225.
https://api.github.com/repos/pandas-dev/pandas/pulls/5238
2013-10-16T01:06:04Z
2013-10-16T22:32:15Z
2013-10-16T22:32:15Z
2014-06-13T16:31:31Z
BUG/TST: Fix Excel writers with duplicated column names.
diff --git a/doc/source/release.rst b/doc/source/release.rst index 805b8d24d70d9..3ab1f9e05a5e5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -611,6 +611,8 @@ Bug Fixes the original ordering (:issue:`4621`). - Fixed ``Period`` with a business date freq to always roll-forward if on a non-business date. (:issue:`5203`) + - Fixed bug in Excel writers where frames with duplicate column names weren't + written correctly. (:issue: `5235`) pandas 0.12.0 ------------- diff --git a/pandas/core/format.py b/pandas/core/format.py index 4f2d9f214ce6e..2355ae16874ce 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -1371,8 +1371,8 @@ def _format_regular_rows(self): for idx, idxval in enumerate(index_values): yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style) - for colidx, colname in enumerate(self.columns): - series = self.df[colname] + for colidx in range(len(self.columns)): + series = self.df.iloc[:, colidx] for i, val in enumerate(series): yield ExcelCell(self.rowcounter + i, colidx + coloffset, val) @@ -1408,8 +1408,8 @@ def _format_hierarchical_rows(self): indexcolval, header_style) gcolidx += 1 - for colidx, colname in enumerate(self.columns): - series = self.df[colname] + for colidx in range(len(self.columns)): + series = self.df.iloc[:, colidx] for i, val in enumerate(series): yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val) diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index b279c7ffd2892..15130c552c8a8 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -874,6 +874,24 @@ def roundtrip(df, header=True, parser_hdr=0): self.assertEqual(res.shape, (1, 2)) self.assertTrue(res.ix[0, 0] is not np.nan) + def test_duplicated_columns(self): + # Test for issue #5235. + _skip_if_no_xlrd() + ext = self.ext + path = '__tmp_to_excel_duplicated_columns__.' 
+ ext + + with ensure_clean(path) as path: + write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + colnames = ['A', 'B', 'B'] + + write_frame.columns = colnames + write_frame.to_excel(path, 'test1') + + read_frame = read_excel(path, 'test1').astype(np.int64) + read_frame.columns = colnames + + tm.assert_frame_equal(write_frame, read_frame) + class OpenpyxlTests(ExcelWriterBase, unittest.TestCase): ext = 'xlsx'
Test and fix for issue #5235
https://api.github.com/repos/pandas-dev/pandas/pulls/5237
2013-10-15T23:27:24Z
2013-10-16T18:50:14Z
2013-10-16T18:50:14Z
2014-06-18T11:05:10Z
FIX: OLS.predict() - cast input to DataFrame from Series before performing operations
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index 9d22068c1612f..83fb387254eda 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -445,12 +445,12 @@ def predict(self, beta=None, x=None, fill_value=None, orig_x = x else: orig_x = x + if isinstance(x, Series): + x = DataFrame({'x': x}) if fill_value is None and fill_method is None: x = x.dropna(how='any') else: x = x.fillna(value=fill_value, method=fill_method, axis=axis) - if isinstance(x, Series): - x = DataFrame({'x': x}) if self._intercept: x['intercept'] = 1. diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index df2f545c90b92..e487eb3bad5b8 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -378,6 +378,9 @@ def test_series_rhs(self): model = ols(y=y, x=x) expected = ols(y=y, x={'x': x}) assert_series_equal(model.beta, expected.beta) + + # Test predict using a series + assert_series_equal(model.y_predict, model.predict(x=x)) def test_various_attributes(self): # just make sure everything "works". test correctness elsewhere
Closes #5233. Simply cast to a Series to a DataFrame before dropping NAs or filling.
https://api.github.com/repos/pandas-dev/pandas/pulls/5234
2013-10-15T20:14:08Z
2013-10-17T13:07:34Z
null
2014-06-16T18:51:25Z
EHN/FIX: Add na_last parameter to DataFrame.sort. Fixes GH3917
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index fe3fc42992468..3e11552be3612 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1286,14 +1286,14 @@ The ``by`` argument can take a list of column names, e.g.: Series has the method ``order`` (analogous to `R's order function <http://stat.ethz.ch/R-manual/R-patched/library/base/html/order.html>`__) which -sorts by value, with special treatment of NA values via the ``na_last`` +sorts by value, with special treatment of NA values via the ``na_position`` argument: .. ipython:: python s[2] = np.nan s.order() - s.order(na_last=False) + s.order(na_position='first') Some other sorting notes / nuances: diff --git a/doc/source/release.rst b/doc/source/release.rst index 5134130ba7865..ea5af9165b483 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -147,6 +147,8 @@ API Changes - Define and document the order of column vs index names in query/eval (:issue:`6676`) +- ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/common.py b/pandas/core/common.py index dadd21f8fc128..daeb43c7e76ac 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -316,9 +316,9 @@ def array_equivalent(left, right): # NaNs occur only in object arrays, float or complex arrays. 
if issubclass(left.dtype.type, np.object_): return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all() - if not issubclass(left.dtype.type, (np.floating, np.complexfloating)): - return np.array_equal(left, right) - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + if issubclass(left.dtype.type, (np.floating, np.complexfloating)): + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + return np.array_equal(left, right) def _iterable_not_string(x): return (isinstance(x, collections.Iterable) and diff --git a/pandas/core/frame.py b/pandas/core/frame.py old mode 100644 new mode 100755 index a410bb8be8c52..430b309260f8c --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2522,7 +2522,7 @@ def _m8_to_i8(x): # Sorting def sort(self, columns=None, axis=0, ascending=True, - inplace=False): + inplace=False, kind='quicksort', na_position='last'): """ Sort DataFrame either by labels (along either axis) or by the values in column(s) @@ -2540,6 +2540,11 @@ def sort(self, columns=None, axis=0, ascending=True, Sort index/rows versus columns inplace : boolean, default False Sort the DataFrame without creating a new instance + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + This option is only applied when sorting on a single column or label. 
+ na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end Examples -------- @@ -2550,10 +2555,10 @@ def sort(self, columns=None, axis=0, ascending=True, sorted : DataFrame """ return self.sort_index(by=columns, axis=axis, ascending=ascending, - inplace=inplace) + inplace=inplace, kind=kind, na_position=na_position) def sort_index(self, axis=0, by=None, ascending=True, inplace=False, - kind='quicksort'): + kind='quicksort', na_position='last'): """ Sort DataFrame either by labels (along either axis) or by the values in a column @@ -2571,6 +2576,11 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, orders inplace : boolean, default False Sort the DataFrame without creating a new instance + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + This option is only applied when sorting on a single column or label. 
Examples -------- @@ -2580,8 +2590,8 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, ------- sorted : DataFrame """ - from pandas.core.groupby import _lexsort_indexer - + + from pandas.core.groupby import _lexsort_indexer, _nargsort axis = self._get_axis_number(axis) if axis not in [0, 1]: # pragma: no cover raise AssertionError('Axis must be 0 or 1, got %s' % str(axis)) @@ -2597,23 +2607,19 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, if com._is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by' ' (%d)' % (len(ascending), len(by))) - if len(by) > 1: - keys = [] - for x in by: - k = self[x].values - if k.ndim == 2: - raise ValueError('Cannot sort by duplicate column %s' - % str(x)) - keys.append(k) - def trans(v): if com.needs_i8_conversion(v): return v.view('i8') return v - - keys = [trans(self[x].values) for x in by] - indexer = _lexsort_indexer(keys, orders=ascending) + keys = [] + for x in by: + k = self[x].values + if k.ndim == 2: + raise ValueError('Cannot sort by duplicate column %s' % str(x)) + keys.append(trans(k)) + indexer = _lexsort_indexer(keys, orders=ascending, + na_position=na_position) indexer = com._ensure_platform_int(indexer) else: by = by[0] @@ -2630,20 +2636,17 @@ def trans(v): % str(by)) if isinstance(ascending, (tuple, list)): ascending = ascending[0] + indexer = _nargsort(k, kind=kind, ascending=ascending, + na_position=na_position) - if not ascending: - k = k[::-1] - indexer = k.argsort(kind=kind) - if not ascending: - indexer = indexer.max() - indexer[::-1] elif isinstance(labels, MultiIndex): - indexer = _lexsort_indexer(labels.labels, orders=ascending) + indexer = _lexsort_indexer(labels.labels, orders=ascending, + na_position=na_position) indexer = com._ensure_platform_int(indexer) else: - indexer = labels.argsort(kind=kind) - if not ascending: - indexer = indexer[::-1] - + indexer = _nargsort(labels, kind=kind, 
ascending=ascending, + na_position=na_position) + if inplace: if axis == 1: new_data = self._data.reindex_items( diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c6ecba7d11836..208f9f1a8e19a 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -3145,33 +3145,72 @@ def _indexer_from_factorized(labels, shape, compress=True): return indexer -def _lexsort_indexer(keys, orders=None): +def _lexsort_indexer(keys, orders=None, na_position='last'): labels = [] shape = [] - if isinstance(orders, bool): orders = [orders] * len(keys) elif orders is None: orders = [True] * len(keys) for key, order in zip(keys, orders): + key = np.asanyarray(key) rizer = _hash.Factorizer(len(key)) if not key.dtype == np.object_: key = key.astype('O') + # factorize maps nans to na_sentinel=-1 ids = rizer.factorize(key, sort=True) - n = len(rizer.uniques) + mask = (ids == -1) + if order: # ascending + if na_position == 'last': + ids = np.where(mask, n, ids) + elif na_position == 'first': + ids += 1 + else: + raise ValueError('invalid na_position: {!r}'.format(na_position)) + else: # not order means descending + if na_position == 'last': + ids = np.where(mask, n, n-ids-1) + elif na_position == 'first': + ids = np.where(mask, 0, n-ids) + else: + raise ValueError('invalid na_position: {!r}'.format(na_position)) + if mask.any(): + n += 1 shape.append(n) - if not order: - mask = ids == -1 - ids = np.where(mask, -1, n - ids) - labels.append(ids) - return _indexer_from_factorized(labels, shape) +def _nargsort(items, kind='quicksort', ascending=True, na_position='last'): + """ + This is intended to be a drop-in replacement for np.argsort which handles NaNs + It adds ascending and na_position parameters. 
+ GH #6399, #5231 + """ + items = np.asanyarray(items) + idx = np.arange(len(items)) + mask = isnull(items) + non_nans = items[~mask] + non_nan_idx = idx[~mask] + nan_idx = np.nonzero(mask)[0] + if not ascending: + non_nans = non_nans[::-1] + non_nan_idx = non_nan_idx[::-1] + indexer = non_nan_idx[non_nans.argsort(kind=kind)] + if not ascending: + indexer = indexer[::-1] + # Finally, place the NaNs at the end or the beginning according to na_position + if na_position == 'last': + indexer = np.concatenate([indexer, nan_idx]) + elif na_position == 'first': + indexer = np.concatenate([nan_idx, indexer]) + else: + raise ValueError('invalid na_position: {!r}'.format(na_position)) + return indexer + class _KeyMapper(object): diff --git a/pandas/core/index.py b/pandas/core/index.py index 3eab4d0339082..32c1672566da0 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -13,7 +13,7 @@ from pandas.core.base import FrozenList, FrozenNDArray, IndexOpsMixin from pandas.util.decorators import cache_readonly, deprecate -from pandas.core.common import isnull +from pandas.core.common import isnull, array_equivalent import pandas.core.common as com from pandas.core.common import _values_from_object, is_float, is_integer, ABCSeries from pandas.core.config import get_option @@ -800,7 +800,7 @@ def equals(self, other): if type(other) != Index: return other.equals(self) - return np.array_equal(self, other) + return array_equivalent(self, other) def identical(self, other): """Similar to equals, but check that other comparable attributes are @@ -1872,7 +1872,7 @@ def equals(self, other): # return False try: - return np.array_equal(self, other) + return array_equivalent(self, other) except TypeError: # e.g. 
fails in numpy 1.6 with DatetimeIndex #1681 return False @@ -3533,7 +3533,7 @@ def equals(self, other): return True if not isinstance(other, MultiIndex): - return np.array_equal(self.values, _ensure_index(other)) + return array_equivalent(self.values, _ensure_index(other)) if self.nlevels != other.nlevels: return False @@ -3546,7 +3546,7 @@ def equals(self, other): allow_fill=False) ovalues = com.take_nd(other.levels[i].values, other.labels[i], allow_fill=False) - if not np.array_equal(svalues, ovalues): + if not array_equivalent(svalues, ovalues): return False return True diff --git a/pandas/core/series.py b/pandas/core/series.py index 60429630eb7d3..14b4e084cdeae 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1743,24 +1743,32 @@ def rank(self, method='average', na_option='keep', ascending=True, ascending=ascending, pct=pct) return self._constructor(ranks, index=self.index).__finalize__(self) - def order(self, na_last=True, ascending=True, kind='mergesort'): + def order(self, na_last=None, ascending=True, kind='mergesort', na_position='last'): """ Sorts Series object, by value, maintaining index-value link Parameters ---------- - na_last : boolean (optional, default=True) + na_last : boolean (optional, default=True) (DEPRECATED; use na_position) Put NaN's at beginning or end ascending : boolean, default True Sort ascending. Passing False sorts descending kind : {'mergesort', 'quicksort', 'heapsort'}, default 'mergesort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end Returns ------- y : Series """ + if na_last is not None: + warnings.warn(("na_last is deprecated. 
Please use na_position instead"), + FutureWarning) + na_position = 'last' if na_last else 'first' + def _try_kind_sort(arr): # easier to ask forgiveness than permission try: @@ -1784,15 +1792,16 @@ def _try_kind_sort(arr): if not ascending: argsorted = argsorted[::-1] - if na_last: + if na_position == 'last': n = good.sum() sortedIdx[:n] = idx[good][argsorted] sortedIdx[n:] = idx[bad] - else: + elif na_position == 'first': n = bad.sum() sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] - + else: + raise ValueError('invalid na_position: {!r}'.format(na_position)) return self._constructor(arr[sortedIdx], index=self.index[sortedIdx])\ .__finalize__(self) diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx index 10c43478a5352..d4ed7fac5d6b7 100644 --- a/pandas/hashtable.pyx +++ b/pandas/hashtable.pyx @@ -835,20 +835,23 @@ cdef class Factorizer: return self.count def factorize(self, ndarray[object] values, sort=False, na_sentinel=-1): + """ + Factorize values with nans replaced by na_sentinel + >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) + array([ 0, 1, 20]) + """ labels = self.table.get_labels(values, self.uniques, self.count, na_sentinel) - + mask = (labels == na_sentinel) # sort on if sort: if labels.dtype != np.int_: labels = labels.astype(np.int_) - sorter = self.uniques.to_array().argsort() reverse_indexer = np.empty(len(sorter), dtype=np.int_) reverse_indexer.put(sorter, np.arange(len(sorter))) - - labels = reverse_indexer.take(labels) - + labels = reverse_indexer.take(labels, mode='clip') + labels[mask] = na_sentinel self.count = len(self.uniques) return labels diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 2101c732893e3..c1862c4ff91ab 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -9770,6 +9770,121 @@ def test_sort_index(self): with assertRaisesRegexp(ValueError, msg): frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5) + def test_sort_nan(self): + # GH3917 
+ nan = np.nan + df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4], + 'B': [9, nan, 5, 2, 5, 4, 5]}) + + # sort one column only + expected = DataFrame( + {'A': [nan, 1, 1, 2, 4, 6, 8], + 'B': [5, 9, 2, nan, 5, 5, 4]}, + index=[2, 0, 3, 1, 6, 4, 5]) + sorted_df = df.sort(['A'], na_position='first') + assert_frame_equal(sorted_df, expected) + + expected = DataFrame( + {'A': [nan, 8, 6, 4, 2, 1, 1], + 'B': [5, 4, 5, 5, nan, 9, 2]}, + index=[2, 5, 4, 6, 1, 0, 3]) + sorted_df = df.sort(['A'], na_position='first', ascending=False) + assert_frame_equal(sorted_df, expected) + + # na_position='last', order + expected = DataFrame( + {'A': [1, 1, 2, 4, 6, 8, nan], + 'B': [2, 9, nan, 5, 5, 4, 5]}, + index=[3, 0, 1, 6, 4, 5, 2]) + sorted_df = df.sort(['A','B']) + assert_frame_equal(sorted_df, expected) + + # na_position='first', order + expected = DataFrame( + {'A': [nan, 1, 1, 2, 4, 6, 8], + 'B': [5, 2, 9, nan, 5, 5, 4]}, + index=[2, 3, 0, 1, 6, 4, 5]) + sorted_df = df.sort(['A','B'], na_position='first') + assert_frame_equal(sorted_df, expected) + + # na_position='first', not order + expected = DataFrame( + {'A': [nan, 1, 1, 2, 4, 6, 8], + 'B': [5, 9, 2, nan, 5, 5, 4]}, + index=[2, 0, 3, 1, 6, 4, 5]) + sorted_df = df.sort(['A','B'], ascending=[1,0], na_position='first') + assert_frame_equal(sorted_df, expected) + + # na_position='last', not order + expected = DataFrame( + {'A': [8, 6, 4, 2, 1, 1, nan], + 'B': [4, 5, 5, nan, 2, 9, 5]}, + index=[5, 4, 6, 1, 3, 0, 2]) + sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='last') + assert_frame_equal(sorted_df, expected) + + # Test DataFrame with nan label + df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4], + 'B': [9, nan, 5, 2, 5, 4, 5]}, + index = [1, 2, 3, 4, 5, 6, nan]) + + # NaN label, ascending=True, na_position='last' + sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last') + expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4], + 'B': [9, nan, 5, 2, 5, 4, 5]}, + index = [1, 2, 3, 4, 5, 6, nan]) + 
assert_frame_equal(sorted_df, expected) + + # NaN label, ascending=True, na_position='first' + sorted_df = df.sort(na_position='first') + expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8], + 'B': [5, 9, nan, 5, 2, 5, 4]}, + index = [nan, 1, 2, 3, 4, 5, 6]) + assert_frame_equal(sorted_df, expected) + + # NaN label, ascending=False, na_position='last' + sorted_df = df.sort(kind='quicksort', ascending=False) + expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4], + 'B': [4, 5, 2, 5, nan, 9, 5]}, + index = [6, 5, 4, 3, 2, 1, nan]) + assert_frame_equal(sorted_df, expected) + + # NaN label, ascending=False, na_position='first' + sorted_df = df.sort(kind='quicksort', ascending=False, na_position='first') + expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1], + 'B': [5, 4, 5, 2, 5, nan, 9]}, + index = [nan, 6, 5, 4, 3, 2, 1]) + assert_frame_equal(sorted_df, expected) + + def test_stable_descending_sort(self): + # GH #6399 + df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']], + columns=['sort_col', 'order']) + sorted_df = df.sort_index(by='sort_col', kind='mergesort', + ascending=False) + assert_frame_equal(df, sorted_df) + + def test_stable_descending_multicolumn_sort(self): + nan = np.nan + df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4], + 'B': [9, nan, 5, 2, 5, 4, 5]}) + # test stable mergesort + expected = DataFrame( + {'A': [nan, 8, 6, 4, 2, 1, 1], + 'B': [5, 4, 5, 5, nan, 2, 9]}, + index=[2, 5, 4, 6, 1, 3, 0]) + sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='first', + kind='mergesort') + assert_frame_equal(sorted_df, expected) + + expected = DataFrame( + {'A': [nan, 8, 6, 4, 2, 1, 1], + 'B': [5, 4, 5, 5, nan, 9, 2]}, + index=[2, 5, 4, 6, 1, 0, 3]) + sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first', + kind='mergesort') + assert_frame_equal(sorted_df, expected) + def test_sort_index_multicolumn(self): import random A = np.arange(5).repeat(20) @@ -9926,13 +10041,6 @@ def test_frame_column_inplace_sort_exception(self): cp = 
s.copy() cp.sort() # it works! - def test_stable_descending_sort(self): - df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']], - columns=['sort_col', 'order']) - sorted = df.sort_index(by='sort_col', kind='mergesort', - ascending=False) - assert_frame_equal(df, sorted) - def test_combine_first(self): # disjoint head, tail = self.frame[:5], self.frame[5:] diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 506eb348a8113..b14c355f44a1c 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -10,7 +10,8 @@ from pandas.core.index import Index, MultiIndex, Int64Index from pandas.core.common import rands from pandas.core.api import Categorical, DataFrame -from pandas.core.groupby import SpecificationError, DataError +from pandas.core.groupby import (SpecificationError, DataError, + _nargsort, _lexsort_indexer) from pandas.core.series import Series from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, assert_almost_equal, @@ -29,6 +30,7 @@ import pandas.util.testing as tm import pandas as pd +from numpy.testing import assert_equal def commonSetUp(self): self.dateRange = bdate_range('1/1/2005', periods=250) @@ -3831,6 +3833,97 @@ def test_tab_completion(self): ]) self.assertEqual(results, expected) + def test_lexsort_indexer(self): + keys = [[nan]*5 + list(range(100)) + [nan]*5] + # orders=True, na_position='last' + result = _lexsort_indexer(keys, orders=True, na_position='last') + expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) + assert_equal(result, expected) + + # orders=True, na_position='first' + result = _lexsort_indexer(keys, orders=True, na_position='first') + expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) + assert_equal(result, expected) + + # orders=False, na_position='last' + result = _lexsort_indexer(keys, orders=False, na_position='last') + expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 
110)) + assert_equal(result, expected) + + # orders=False, na_position='first' + result = _lexsort_indexer(keys, orders=False, na_position='first') + expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) + assert_equal(result, expected) + + def test_nargsort(self): + # np.argsort(items) places NaNs last + items = [nan]*5 + list(range(100)) + [nan]*5 + # np.argsort(items2) may not place NaNs first + items2 = np.array(items, dtype='O') + + try: + # GH 2785; due to a regression in NumPy1.6.2 + np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i')) + np.argsort(items2, kind='mergesort') + except TypeError as err: + raise nose.SkipTest('requested sort not available for type') + + # mergesort is the most difficult to get right because we want it to be stable. + + # According to numpy/core/tests/test_multiarray, """The number + # of sorted items must be greater than ~50 to check the actual algorithm + # because quick and merge sort fall over to insertion sort for small + # arrays.""" + + + # mergesort, ascending=True, na_position='last' + result = _nargsort( + items, kind='mergesort', ascending=True, na_position='last') + expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) + assert_equal(result, expected) + + # mergesort, ascending=True, na_position='first' + result = _nargsort( + items, kind='mergesort', ascending=True, na_position='first') + expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) + assert_equal(result, expected) + + # mergesort, ascending=False, na_position='last' + result = _nargsort( + items, kind='mergesort', ascending=False, na_position='last') + expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) + assert_equal(result, expected) + + # mergesort, ascending=False, na_position='first' + result = _nargsort( + items, kind='mergesort', ascending=False, na_position='first') + expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) + 
assert_equal(result, expected) + + # mergesort, ascending=True, na_position='last' + result = _nargsort( + items2, kind='mergesort', ascending=True, na_position='last') + expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) + assert_equal(result, expected) + + # mergesort, ascending=True, na_position='first' + result = _nargsort( + items2, kind='mergesort', ascending=True, na_position='first') + expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) + assert_equal(result, expected) + + # mergesort, ascending=False, na_position='last' + result = _nargsort( + items2, kind='mergesort', ascending=False, na_position='last') + expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) + assert_equal(result, expected) + + # mergesort, ascending=False, na_position='first' + result = _nargsort( + items2, kind='mergesort', ascending=False, na_position='first') + expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) + assert_equal(result, expected) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all() diff --git a/pandas/tests/test_hashtable.py b/pandas/tests/test_hashtable.py new file mode 100644 index 0000000000000..be51d50ee6783 --- /dev/null +++ b/pandas/tests/test_hashtable.py @@ -0,0 +1,30 @@ +import numpy as np +import unittest +import nose +import pandas.hashtable as _hash +import pandas as pd + +class TestFactorizer(unittest.TestCase): + def test_factorize_nan(self): + # nan should map to na_sentinel, not reverse_indexer[na_sentinel] + # rizer.factorize should not raise an exception if na_sentinel indexes + # outside of reverse_indexer + key = np.array([1, 2, 1, np.nan], dtype='O') + rizer = _hash.Factorizer(len(key)) + for na_sentinel in (-1, 20): + ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel) + expected = np.array([0, 1, 0, na_sentinel], dtype='int32') + self.assertEqual(len(set(key)), len(set(expected))) + self.assert_(np.array_equal(pd.isnull(key), expected == 
na_sentinel)) + + # nan still maps to na_sentinel when sort=False + key = np.array([0, np.nan, 1], dtype='O') + na_sentinel = -1 + ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) + expected = np.array([ 2, -1, 0], dtype='int32') + self.assertEqual(len(set(key)), len(set(expected))) + self.assert_(np.array_equal(pd.isnull(key), expected == na_sentinel)) + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index a94ca5dfc1075..95b7b6ace4e2d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4007,7 +4007,7 @@ def test_order(self): self.assert_(np.isnan(result[-5:]).all()) self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:])) - result = ts.order(na_last=False) + result = ts.order(na_position='first') self.assert_(np.isnan(result[:5]).all()) self.assert_numpy_array_equal(result[5:], np.sort(vals[5:])) @@ -4020,7 +4020,7 @@ def test_order(self): ordered = ts.order(ascending=False) expected = np.sort(ts.valid().values)[::-1] assert_almost_equal(expected, ordered.valid().values) - ordered = ts.order(ascending=False, na_last=False) + ordered = ts.order(ascending=False, na_position='first') assert_almost_equal(expected, ordered.valid().values) def test_rank(self):
closes #3917 This is an attempt to fix the Nested Sort with NaN bug (https://github.com/pydata/pandas/issues/3917). I've added tests to `test_frame.py` and `test_hashtable.py` to demonstrate the problem. `hashtable.Factorizer.factorize` has been modified to map `nan` to `na_sentinel`. Before it was mapping `nan` to a label which was already being used. This, I believe is the origin of the bug. My first idea was to mimick/reuse code from `Series.order`, since this method already handles `nan`s nicely, and allows the user to choose if nans should be placed at the beginning or the end of the sort via the `na_last` parameter. Although I found a solution using code from `Series.order`, I eventually abandoned this when I realized this patches the problem at too high a level and that it could be handled more generally with a modification of `factorize`. I retained the idea that `df.sort` should have a `na_last` parameter, however. To that end, `groupby._lexsort_indexer` has been modified to handle all possible combinations of `na_last` and `orders` settings. There are four tests (assertions) in `test_frame.py` to exercise the possibilities, one of which demonstrates that `df.sort(['A','B'])` now behaves correctly for the DataFrame shown in GH3917.
https://api.github.com/repos/pandas-dev/pandas/pulls/5231
2013-10-15T17:15:17Z
2014-03-27T22:23:50Z
2014-03-27T22:23:50Z
2014-09-10T00:22:49Z
BUG: allow enlargement to work with empty objects (GH5226)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 36afea7648ab2..ab4bc1a1f0bf9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -206,7 +206,7 @@ API Changes (:issue:`4384`, :issue:`4375`, :issue:`4372`) - ``Series.get`` with negative indexers now returns the same as ``[]`` (:issue:`4390`) - allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when the single-key is not currently contained in - the index for that axis (:issue:`2578`) + the index for that axis (:issue:`2578`, :issue:`5226`) - ``at`` now will enlarge the object inplace (and return the same) (:issue:`2578`) - ``HDFStore`` diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7013ad4f9b02b..504d49ddca13a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1865,6 +1865,15 @@ def _setitem_frame(self, key, value): self.where(-key, value, inplace=True) + def _ensure_valid_index(self, value): + """ + ensure that if we don't have an index, that we can create one from the passed value + """ + if not len(self.index): + if not isinstance(value, Series): + raise ValueError("cannot set a frame with no defined index and a non-series") + self._data.set_axis(1, value.index.copy(), check_axis=False) + def _set_item(self, key, value): """ Add series to DataFrame in specified column. @@ -1875,6 +1884,7 @@ def _set_item(self, key, value): Series/TimeSeries will be conformed to the DataFrame's index to ensure homogeneity. 
""" + self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) @@ -1890,6 +1900,7 @@ def insert(self, loc, column, value, allow_duplicates=False): column : object value : int, Series, or array-like """ + self._ensure_valid_index(value) value = self._sanitize_column(column, value) self._data.insert( loc, column, value, allow_duplicates=allow_duplicates) @@ -1900,7 +1911,7 @@ def _sanitize_column(self, key, value): if _is_sequence(value): is_frame = isinstance(value, DataFrame) if isinstance(value, Series) or is_frame: - if value.index.equals(self.index): + if value.index.equals(self.index) or not len(self.index): # copy the values value = value.values.copy() else: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index fa58d82a3b580..d32bf166ddea1 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -173,9 +173,19 @@ def _setitem_with_indexer(self, indexer, value): if self.ndim > 1 and i == self.obj._info_axis_number: # add the new item, and set the value - new_indexer = _convert_from_missing_indexer_tuple(indexer) + # must have all defined axes if we have a scalar + # or a list-like on the non-info axes if we have a list-like + len_non_info_axes = [ len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i ] + if any([ not l for l in len_non_info_axes ]): + if not is_list_like(value): + raise ValueError("cannot set a frame with no defined index and a scalar") + self.obj[key] = value + return self.obj + self.obj[key] = np.nan - self.obj.loc[new_indexer] = value + + new_indexer = _convert_from_missing_indexer_tuple(indexer, self.obj.axes) + self._setitem_with_indexer(new_indexer, value) return self.obj # reindex the axis @@ -208,12 +218,21 @@ def _setitem_with_indexer(self, indexer, value): else: new_index = _safe_append_to_index(index, indexer) - new_values = np.concatenate([self.obj.values, [value]]) + # this preserves dtype of the value + new_values = Series([value]).values + if 
len(self.obj.values): + new_values = np.concatenate([self.obj.values, new_values]) + self.obj._data = self.obj._constructor(new_values, index=new_index, name=self.obj.name) self.obj._maybe_update_cacher(clear=True) return self.obj elif self.ndim == 2: + + # no columns and scalar + if not len(self.obj.columns): + raise ValueError("cannot set a frame with no defined columns") + index = self.obj._get_axis(0) labels = _safe_append_to_index(index, indexer) self.obj._data = self.obj.reindex_axis(labels,0)._data @@ -410,8 +429,9 @@ def _align_series(self, indexer, ser): new_ix = Index([new_ix]) else: new_ix = Index(new_ix.ravel()) - if ser.index.equals(new_ix): + if ser.index.equals(new_ix) or not len(new_ix): return ser.values.copy() + return ser.reindex(new_ix).values # 2 dims @@ -419,7 +439,7 @@ def _align_series(self, indexer, ser): # reindex along index ax = self.obj.axes[1] - if ser.index.equals(ax): + if ser.index.equals(ax) or not len(ax): return ser.values.copy() return ser.reindex(ax).values @@ -819,6 +839,12 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): # if we are setting and its not a valid location # its an insert which fails by definition if is_setter: + + # always valid + if self.name == 'loc': + return { 'key' : obj } + + # a positional if obj >= len(self.obj) and not isinstance(labels, MultiIndex): raise ValueError("cannot set by positional indexing with enlargement") @@ -1307,11 +1333,11 @@ def _convert_missing_indexer(indexer): return indexer, False -def _convert_from_missing_indexer_tuple(indexer): +def _convert_from_missing_indexer_tuple(indexer, axes): """ create a filtered indexer that doesn't have any missing indexers """ - def get_indexer(_idx): - return _idx['key'] if isinstance(_idx,dict) else _idx - return tuple([ get_indexer(_idx) for _i, _idx in enumerate(indexer) ]) + def get_indexer(_i, _idx): + return axes[_i].get_loc(_idx['key']) if isinstance(_idx,dict) else _idx + return tuple([ get_indexer(_i, _idx) for _i, _idx in 
enumerate(indexer) ]) def _safe_append_to_index(index, key): """ a safe append to an index, if incorrect type, then catch and recreate """ diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index b69496b042274..7745c2f2a083b 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1542,6 +1542,56 @@ def f(): df.ix[100,:] = df.ix[0] self.assertRaises(ValueError, f) + def test_partial_set_empty(self): + + # GH5226 + + # partially set with an empty object + # series + s = Series() + s.loc[1] = 1 + assert_series_equal(s,Series([1],index=[1])) + s.loc[3] = 3 + assert_series_equal(s,Series([1,3],index=[1,3])) + + s = Series() + s.loc[1] = 1. + assert_series_equal(s,Series([1.],index=[1])) + s.loc[3] = 3. + assert_series_equal(s,Series([1.,3.],index=[1,3])) + + s = Series() + s.loc['foo'] = 1 + assert_series_equal(s,Series([1],index=['foo'])) + s.loc['bar'] = 3 + assert_series_equal(s,Series([1,3],index=['foo','bar'])) + s.loc[3] = 4 + assert_series_equal(s,Series([1,3,4],index=['foo','bar',3])) + + # partially set with an empty object + # frame + df = DataFrame() + + def f(): + df.loc[1] = 1 + self.assertRaises(ValueError, f) + def f(): + df.loc[1] = Series([1],index=['foo']) + self.assertRaises(ValueError, f) + def f(): + df.loc[:,1] = 1 + self.assertRaises(ValueError, f) + + df2 = DataFrame() + df2[1] = Series([1],index=['foo']) + df.loc[:,1] = Series([1],index=['foo']) + assert_frame_equal(df,DataFrame([[1]],index=['foo'],columns=[1])) + assert_frame_equal(df,df2) + + df = DataFrame(columns=['A','B']) + df.loc[3] = [6,7] + assert_frame_equal(df,DataFrame([[6,7]],index=[3],columns=['A','B'])) + def test_cache_updating(self): # GH 4939, make sure to update the cache on setitem
closes #5226
https://api.github.com/repos/pandas-dev/pandas/pulls/5227
2013-10-15T02:37:32Z
2013-10-15T13:11:40Z
2013-10-15T13:11:40Z
2014-06-12T20:46:55Z
Redefine match
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 33d1219b3b11f..78b0a54b8893f 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -960,6 +960,9 @@ importantly, these methods exclude missing/NA values automatically. These are accessed via the Series's ``str`` attribute and generally have names matching the equivalent (scalar) build-in string methods: +Splitting and Replacing Strings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. ipython:: python s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) @@ -990,11 +993,12 @@ Methods like ``replace`` and ``findall`` take regular expressions, too: s3 s3.str.replace('^.a|dog', 'XX-XX ', case=False) -The method ``match`` returns the groups in a regular expression in one tuple. -Starting in pandas version 0.13.0, the method ``extract`` is available to -accomplish this more conveniently. +Extracting Substrings +~~~~~~~~~~~~~~~~~~~~~ -Extracting a regular expression with one group returns a Series of strings. +The method ``extract`` (introduced in version 0.13) accepts regular expressions +with match groups. Extracting a regular expression with one group returns +a Series of strings. .. ipython:: python @@ -1016,18 +1020,34 @@ Named groups like .. ipython:: python - Series(['a1', 'b2', 'c3']).str.match('(?P<letter>[ab])(?P<digit>\d)') + Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)') and optional groups like .. ipython:: python - Series(['a1', 'b2', '3']).str.match('(?P<letter>[ab])?(?P<digit>\d)') + Series(['a1', 'b2', '3']).str.extract('(?P<letter>[ab])?(?P<digit>\d)') can also be used. 
-Methods like ``contains``, ``startswith``, and ``endswith`` takes an extra -``na`` arguement so missing values can be considered True or False: +Testing for Strings that Match or Contain a Pattern +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In previous versions, *extracting* match groups was accomplished by ``match``, +which returned a not-so-convenient Series of tuples. Starting in version 0.14, +the default behavior of match will change. It will return a boolean +indexer, analagous to the method ``contains``. + +The distinction between +``match`` and ``contains`` is strictness: ``match`` relies on +strict ``re.match`` while ``contains`` relies on ``re.search``. + +In version 0.13, ``match`` performs its old, deprecated behavior by default, +but the new behavior is availabe through the keyword argument +``as_indexer=True``. + +Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take + an extra ``na`` arguement so missing values can be considered True or False: .. ipython:: python diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 6bf32b2343084..3ff0477678d79 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -102,6 +102,14 @@ Deprecated in 0.13.0 - deprecated ``iterkv``, which will be removed in a future release (this was an alias of iteritems used to bypass ``2to3``'s changes). (:issue:`4384`, :issue:`4375`, :issue:`4372`) +- deprecated the string method ``match``, whose role is now performed more + idiomatically by ``extract``. In a future release, the default behavior + of ``match`` will change to become analogous to ``contains``, which returns + a boolean indexer. (Their + distinction is strictness: ``match`` relies on ``re.match`` while + ``contains`` relies on ``re.serach``.) In this release, the deprecated + behavior is the default, but the new behavior is available through the + keyword argument ``as_indexer=True``. 
Indexing API Changes ~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 2c47911318238..e5e3f9866dc52 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -7,7 +7,7 @@ import pandas.compat as compat import re import pandas.lib as lib - +import warnings def _get_array_list(arr, others): if isinstance(others[0], (list, np.ndarray)): @@ -169,6 +169,10 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan): regex = re.compile(pat, flags=flags) + if regex.groups > 0: + warnings.warn("""This pattern has match groups. To actually get the +groups, use str.extract.""", UserWarning) + f = lambda x: bool(regex.search(x)) return _na_map(f, arr, na) @@ -303,35 +307,70 @@ def rep(x, r): return result -def str_match(arr, pat, flags=0): +def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False): """ - Find groups in each string (from beginning) using passed regular expression + Deprecated: Find groups in each string using passed regular expression. + If as_indexer=True, determine if each string matches a regular expression. Parameters ---------- pat : string - Pattern or regular expression + Character sequence or regular expression + case : boolean, default True + If True, case sensitive flags : int, default 0 (no flags) re module flags, e.g. re.IGNORECASE + na : default NaN, fill value for missing values. + as_indexer : False, by default, gives deprecated behavior better achieved + using str_extract. True return boolean indexer. + Returns ------- - matches : array + matches : boolean array (if as_indexer=True) + matches : array of tuples (if as_indexer=False, default but deprecated) + + Note + ---- + To extract matched groups, which is the deprecated behavior of match, use + str.extract. 
""" + + if not case: + flags |= re.IGNORECASE + regex = re.compile(pat, flags=flags) - def f(x): - m = regex.match(x) - if m: - return m.groups() - else: - return [] + if (not as_indexer) and regex.groups > 0: + # Do this first, to make sure it happens even if the re.compile + # raises below. + warnings.warn("""In future versions of pandas, match will change +to always return a bool indexer.""", UserWarning) + + if as_indexer and regex.groups > 0: + warnings.warn("""This pattern has match groups. To actually get the +groups, use str.extract.""", UserWarning) + + # If not as_indexer and regex.groups == 0, this returns empty lists + # and is basically useless, so we will not warn. + + if (not as_indexer) and regex.groups > 0: + def f(x): + m = regex.match(x) + if m: + return m.groups() + else: + return [] + else: + # This is the new behavior of str_match. + f = lambda x: bool(regex.match(x)) return _na_map(f, arr) + def str_extract(arr, pat, flags=0): """ - Find groups in each string (from beginning) using passed regular expression + Find groups in each string using passed regular expression Parameters ---------- @@ -358,7 +397,7 @@ def str_extract(arr, pat, flags=0): def f(x): if not isinstance(x, compat.string_types): return None - m = regex.match(x) + m = regex.search(x) if m: return m.groups()[0] # may be None else: @@ -368,7 +407,7 @@ def f(x): def f(x): if not isinstance(x, compat.string_types): return empty_row - m = regex.match(x) + m = regex.search(x) if m: return Series(list(m.groups())) # may contain None else: @@ -668,13 +707,13 @@ def wrapper(self): return wrapper -def _pat_wrapper(f, flags=False, na=False): +def _pat_wrapper(f, flags=False, na=False, **kwargs): def wrapper1(self, pat): result = f(self.series, pat) return self._wrap_result(result) - def wrapper2(self, pat, flags=0): - result = f(self.series, pat, flags=flags) + def wrapper2(self, pat, flags=0, **kwargs): + result = f(self.series, pat, flags=flags, **kwargs) return 
self._wrap_result(result) def wrapper3(self, pat, na=np.nan): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 1be9013ce7575..29bdffd86a2c7 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -5,6 +5,7 @@ import operator import re import unittest +import warnings import nose @@ -392,10 +393,14 @@ def test_repeat(self): u('dddddd')]) tm.assert_series_equal(result, exp) - def test_match(self): + def test_deprecated_match(self): + # Old match behavior, deprecated (but still default) in 0.13 values = Series(['fooBAD__barBAD', NA, 'foo']) - result = values.str.match('.*(BAD[_]+).*(BAD)') + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + result = values.str.match('.*(BAD[_]+).*(BAD)') + assert issubclass(w[-1].category, UserWarning) exp = Series([('BAD__', 'BAD'), NA, []]) tm.assert_series_equal(result, exp) @@ -403,7 +408,10 @@ def test_match(self): mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(), 'foo', None, 1, 2.]) - rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)') + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)') + assert issubclass(w[-1].category, UserWarning) xp = [('BAD_', 'BAD'), NA, ('BAD_', 'BAD'), NA, NA, [], NA, NA, NA] tm.assert_isinstance(rs, Series) tm.assert_almost_equal(rs, xp) @@ -411,10 +419,52 @@ def test_match(self): # unicode values = Series([u('fooBAD__barBAD'), NA, u('foo')]) - result = values.str.match('.*(BAD[_]+).*(BAD)') + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + result = values.str.match('.*(BAD[_]+).*(BAD)') + assert issubclass(w[-1].category, UserWarning) exp = Series([(u('BAD__'), u('BAD')), NA, []]) tm.assert_series_equal(result, exp) + def test_match(self): + # New match behavior introduced in 0.13 + values = Series(['fooBAD__barBAD', NA, 'foo']) + with warnings.catch_warnings(record=True) as w: 
+ warnings.simplefilter('always') + result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True) + assert issubclass(w[-1].category, UserWarning) + exp = Series([True, NA, False]) + tm.assert_series_equal(result, exp) + + # If no groups, use new behavior even when as_indexer is False. + # (Old behavior is pretty much useless in this case.) + values = Series(['fooBAD__barBAD', NA, 'foo']) + result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False) + exp = Series([True, NA, False]) + tm.assert_series_equal(result, exp) + + # mixed + mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(), + 'foo', None, 1, 2.]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)', as_indexer=True) + assert issubclass(w[-1].category, UserWarning) + xp = [True, NA, True, NA, NA, False, NA, NA, NA] + tm.assert_isinstance(rs, Series) + tm.assert_almost_equal(rs, xp) + + # unicode + values = Series([u('fooBAD__barBAD'), NA, u('foo')]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True) + assert issubclass(w[-1].category, UserWarning) + exp = Series([True, NA, False]) + tm.assert_series_equal(result, exp) + def test_extract(self): # Contains tests like those in test_match and some others. 
@@ -966,7 +1016,10 @@ def test_match_findall_flags(self): pat = pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})' - result = data.str.match(pat, flags=re.IGNORECASE) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + result = data.str.match(pat, flags=re.IGNORECASE) + assert issubclass(w[-1].category, UserWarning) self.assertEquals(result[0], ('dave', 'google', 'com')) result = data.str.findall(pat, flags=re.IGNORECASE) @@ -975,7 +1028,10 @@ def test_match_findall_flags(self): result = data.str.count(pat, flags=re.IGNORECASE) self.assertEquals(result[0], 1) - result = data.str.contains(pat, flags=re.IGNORECASE) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + result = data.str.contains(pat, flags=re.IGNORECASE) + assert issubclass(w[-1].category, UserWarning) self.assertEquals(result[0], True) def test_encode_decode(self):
closes #5075 How does this look? If the docstring and the tests reflect our consensus, I'll take a stab at the docs. This is the gist of it: Default behavior is unchanged, but issues a warning. ``` In [1]: Series(['aa', 'bb']).str.match('(a)(a)') pandas/core/strings.py:333: UserWarning: This usage of match will be removed in an upcoming version of pandas. Consider using extract instead. UserWarning) Out[1]: 0 (a, a) 1 [] dtype: object ``` New, more useful behavior is available through `as_indexer`. ``` In [2]: Series(['aa', 'bb']).str.match('(a)(a)', as_indexer=True) Out[2]: 0 True 1 False dtype: bool ``` P.S. There's a stray commit in here. Not sure why 88716e4 got lumped in....
https://api.github.com/repos/pandas-dev/pandas/pulls/5224
2013-10-14T21:24:20Z
2013-10-31T02:15:28Z
2013-10-31T02:15:28Z
2014-07-16T08:35:09Z
BUG: Groupby filter maintains ordering, closes #4621
diff --git a/doc/source/release.rst b/doc/source/release.rst index 331a578c5c349..5e5fb929bd0ca 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -600,6 +600,8 @@ Bug Fixes - Fixed bug where inplace setting of levels or labels on ``MultiIndex`` would not clear cached ``values`` property and therefore return wrong ``values``. (:issue:`5215`) + - Fixed bug where filtering a grouped DataFrame or Series did not maintain + the original ordering (:issue:`4621`). pandas 0.12.0 ------------- diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 22857449ead4f..e5447e5f8f58f 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1641,7 +1641,7 @@ def true_and_notnull(x, *args, **kwargs): if len(indexers) == 0: filtered = self.obj.take([]) # because np.concatenate would fail else: - filtered = self.obj.take(np.concatenate(indexers)) + filtered = self.obj.take(np.sort(np.concatenate(indexers))) if dropna: return filtered else: @@ -2166,7 +2166,7 @@ def add_indexer(): if len(indexers) == 0: filtered = self.obj.take([]) # because np.concatenate would fail else: - filtered = self.obj.take(np.concatenate(indexers)) + filtered = self.obj.take(np.sort(np.concatenate(indexers))) if dropna: return filtered else: diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 8f1bc91f7b46e..29f64090ddb11 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2622,14 +2622,12 @@ def test_filter_out_no_groups(self): grouper = s.apply(lambda x: x % 2) grouped = s.groupby(grouper) filtered = grouped.filter(lambda x: x.mean() > 0) - filtered.sort() # was sorted by group - s.sort() # was sorted arbitrarily assert_series_equal(filtered, s) df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()}) grouper = df['A'].apply(lambda x: x % 2) grouped = df.groupby(grouper) filtered = grouped.filter(lambda x: x['A'].mean() > 0) - assert_frame_equal(filtered.sort(), df) + assert_frame_equal(filtered, df) def 
test_filter_condition_raises(self): import pandas as pd @@ -2706,7 +2704,7 @@ def test_filter_against_workaround(self): old_way = df[grouped.floats.\ transform(lambda x: x.mean() > N/20).astype('bool')] new_way = grouped.filter(lambda x: x['floats'].mean() > N/20) - assert_frame_equal(new_way.sort(), old_way.sort()) + assert_frame_equal(new_way, old_way) # Group by floats (rounded); filter on strings. grouper = df.floats.apply(lambda x: np.round(x, -1)) @@ -2715,14 +2713,14 @@ def test_filter_against_workaround(self): transform(lambda x: len(x) < N/10).astype('bool')] new_way = grouped.filter( lambda x: len(x.letters) < N/10) - assert_frame_equal(new_way.sort(), old_way.sort()) + assert_frame_equal(new_way, old_way) # Group by strings; filter on ints. grouped = df.groupby('letters') old_way = df[grouped.ints.\ transform(lambda x: x.mean() > N/20).astype('bool')] new_way = grouped.filter(lambda x: x['ints'].mean() > N/20) - assert_frame_equal(new_way.sort_index(), old_way.sort_index()) + assert_frame_equal(new_way, old_way) def test_filter_using_len(self): # BUG GH4447 @@ -2747,6 +2745,48 @@ def test_filter_using_len(self): expected = s[[]] assert_series_equal(actual, expected) + def test_filter_maintains_ordering(self): + # Simple case: index is sequential. #4621 + df = DataFrame({'pid' : [1,1,1,2,2,3,3,3], + 'tag' : [23,45,62,24,45,34,25,62]}) + s = df['pid'] + grouped = df.groupby('tag') + actual = grouped.filter(lambda x: len(x) > 1) + expected = df.iloc[[1, 2, 4, 7]] + assert_frame_equal(actual, expected) + + grouped = s.groupby(df['tag']) + actual = grouped.filter(lambda x: len(x) > 1) + expected = s.iloc[[1, 2, 4, 7]] + assert_series_equal(actual, expected) + + # Now index is sequentially decreasing. 
+ df.index = np.arange(len(df) - 1, -1, -1) + s = df['pid'] + grouped = df.groupby('tag') + actual = grouped.filter(lambda x: len(x) > 1) + expected = df.iloc[[1, 2, 4, 7]] + assert_frame_equal(actual, expected) + + grouped = s.groupby(df['tag']) + actual = grouped.filter(lambda x: len(x) > 1) + expected = s.iloc[[1, 2, 4, 7]] + assert_series_equal(actual, expected) + + # Index is shuffled. + SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3] + df.index = df.index[SHUFFLED] + s = df['pid'] + grouped = df.groupby('tag') + actual = grouped.filter(lambda x: len(x) > 1) + expected = df.iloc[[1, 2, 4, 7]] + assert_frame_equal(actual, expected) + + grouped = s.groupby(df['tag']) + actual = grouped.filter(lambda x: len(x) > 1) + expected = s.iloc[[1, 2, 4, 7]] + assert_series_equal(actual, expected) + def test_groupby_whitelist(self): from string import ascii_lowercase letters = np.array(list(ascii_lowercase))
Simply adds `np.sort`. Includes tests for SeriesGroupBy and DataFrameGroupBy, using indexes that are sequentially increasing, sequentially decreasing, and shuffled. Also, this PR removes all ad hoc sorting involved in the original filter tests -- now no longer needed. Much better!
https://api.github.com/repos/pandas-dev/pandas/pulls/5222
2013-10-14T20:07:10Z
2013-10-15T00:55:05Z
2013-10-15T00:55:05Z
2014-07-01T06:01:20Z
fix business day calculation
diff --git a/pandas/src/period.c b/pandas/src/period.c index ee3a50f98b8c9..5a744de4c3f7b 100644 --- a/pandas/src/period.c +++ b/pandas/src/period.c @@ -1127,8 +1127,16 @@ npy_int64 get_period_ordinal(int year, int month, int day, { goto onError; } - weeks = days / 7; - return (npy_int64)(days - weeks * 2) - BDAY_OFFSET; + // calculate the current week assuming sunday as last day of a week + weeks = (days - BASE_WEEK_TO_DAY_OFFSET) / DAYS_PER_WEEK; + // calculate the current weekday (in range 1 .. 7) + delta = (days - BASE_WEEK_TO_DAY_OFFSET) % DAYS_PER_WEEK + 1; + // return the number of business days in full weeks plus the business days in the last - possible partial - week + return (npy_int64)(weeks * BUSINESS_DAYS_PER_WEEK) + + (delta <= BUSINESS_DAYS_PER_WEEK + ? delta + : BUSINESS_DAYS_PER_WEEK + 1) + - BDAY_OFFSET; } if (freq_group == FR_WK) diff --git a/pandas/src/period.h b/pandas/src/period.h index e8537680e27e7..55c3722ebaae7 100644 --- a/pandas/src/period.h +++ b/pandas/src/period.h @@ -38,6 +38,9 @@ #define ORD_OFFSET 719163LL // days until 1970-01-01 #define BDAY_OFFSET 513689LL // days until 1970-01-01 #define WEEK_OFFSET 102737LL +#define BASE_WEEK_TO_DAY_OFFSET 1 // difference between day 0 and end of week in days +#define DAYS_PER_WEEK 7 +#define BUSINESS_DAYS_PER_WEEK 5 #define HIGHFREQ_ORIG 0 // ORD_OFFSET * 86400LL // days until 1970-01-01 #define FR_ANN 1000 /* Annual */ diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 55963b01d2779..0fc7101a99856 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -115,6 +115,10 @@ def test_period_constructor(self): # Biz day construction, roll forward if non-weekday i1 = Period('3/10/12', freq='B') + i2 = Period('3/10/12', freq='D') + self.assertEquals(i1, i2.asfreq('B')) + i2 = Period('3/11/12', freq='D') + self.assertEquals(i1, i2.asfreq('B')) i2 = Period('3/12/12', freq='D') self.assertEquals(i1, i2.asfreq('B')) @@ 
-292,7 +296,7 @@ def test_start_time(self): p = Period('2012', freq=f) self.assertEquals(p.start_time, xp) self.assertEquals(Period('2012', freq='B').start_time, - datetime(2011, 12, 30)) + datetime(2012, 1, 2)) self.assertEquals(Period('2012', freq='W').start_time, datetime(2011, 12, 26)) @@ -321,7 +325,7 @@ def _ex(*args): p = Period('2012', freq='H') self.assertEquals(p.end_time, xp) - xp = _ex(2012, 1, 2) + xp = _ex(2012, 1, 3) self.assertEquals(Period('2012', freq='B').end_time, xp) xp = _ex(2012, 1, 2) diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 20138cb8b1eb8..cfc93a22c454b 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -8,7 +8,7 @@ from pandas.core.api import Timestamp -from pandas.tslib import period_asfreq +from pandas.tslib import period_asfreq, period_ordinal from pandas.tseries.frequencies import get_freq @@ -254,6 +254,36 @@ def test_intraday_conversion_factors(self): self.assertEqual(period_asfreq(1, get_freq('U'), get_freq('N'), False), 1000) + def test_period_ordinal_start_values(self): + # information for 1.1.1970 + self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('Y'))) + self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('M'))) + self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('W'))) + self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('D'))) + self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq('B'))) + + def test_period_ordinal_week(self): + self.assertEqual(1, period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0, get_freq('W'))) + self.assertEqual(2, period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0, get_freq('W'))) + + self.assertEqual(2284, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('W'))) + self.assertEqual(2285, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('W'))) + + def test_period_ordinal_business_day(self): + # Thursday + self.assertEqual(11415, 
period_ordinal(2013, 10, 3, 0, 0, 0, 0, 0, get_freq('B'))) + # Friday + self.assertEqual(11416, period_ordinal(2013, 10, 4, 0, 0, 0, 0, 0, get_freq('B'))) + # Saturday + self.assertEqual(11417, period_ordinal(2013, 10, 5, 0, 0, 0, 0, 0, get_freq('B'))) + # Sunday + self.assertEqual(11417, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0, get_freq('B'))) + # Monday + self.assertEqual(11417, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0, get_freq('B'))) + # Tuesday + self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0, get_freq('B'))) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
This will fix the so far broken calculation of business day periods and the corresponding tests reported closes #5203. The expected behaviour found in the present tests is that the date is always rolling forward to the next business day: ``` Friday: Period('2012-3-09', freq='B') => '2012-03-09' Saturday: Period('2012-3-10', freq='B') => '2012-03-12' Sunday: Period('2012-3-11', freq='B') => '2012-03-12' Monday: Period('2012-3-12', freq='B') => '2012-03-12' ``` Can anyone confirm this?
https://api.github.com/repos/pandas-dev/pandas/pulls/5220
2013-10-14T19:48:51Z
2013-10-15T13:28:59Z
2013-10-15T13:28:58Z
2014-06-22T14:38:02Z
TST/PERF: Re-write assert_almost_equal() in cython #4398
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6eeaa55280e43..7171b48f4097a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -414,6 +414,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` compatible. (:issue:`5213`, :issue:`5214`) - Unity ``dropna`` for Series/DataFrame signature (:issue:`5250`), tests from :issue:`5234`, courtesy of @rockg + - Rewrite assert_almost_equal() in cython for performance (:issue:`4398`) .. _release.bug_fixes-0.13.0: diff --git a/pandas/src/testing.pyx b/pandas/src/testing.pyx new file mode 100644 index 0000000000000..b324c6652d58f --- /dev/null +++ b/pandas/src/testing.pyx @@ -0,0 +1,142 @@ +import numpy as np + +from pandas import compat +from pandas.core.common import isnull + +cdef NUMERIC_TYPES = ( + bool, + int, + float, + np.bool, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float16, + np.float32, + np.float64, +) + +cdef bint is_comparable_as_number(obj): + return isinstance(obj, NUMERIC_TYPES) + +cdef bint isiterable(obj): + return hasattr(obj, '__iter__') + +cdef bint has_length(obj): + return hasattr(obj, '__len__') + +cdef bint is_dictlike(obj): + return hasattr(obj, 'keys') and hasattr(obj, '__getitem__') + +cdef bint decimal_almost_equal(double desired, double actual, int decimal): + # Code from + # http://docs.scipy.org/doc/numpy/reference/generated + # /numpy.testing.assert_almost_equal.html + return abs(desired - actual) < (0.5 * 10.0 ** -decimal) + +cpdef assert_dict_equal(a, b, bint compare_keys=True): + assert is_dictlike(a) and is_dictlike(b), ( + "Cannot compare dict objects, one or both is not dict-like" + ) + + a_keys = frozenset(a.keys()) + b_keys = frozenset(b.keys()) + + if compare_keys: + assert a_keys == b_keys + + for k in a_keys: + assert_almost_equal(a[k], b[k]) + + return True + +cpdef assert_almost_equal(a, b, bint check_less_precise=False): + cdef: + int decimal + Py_ssize_t i, na, nb + double 
fa, fb + + if isinstance(a, dict) or isinstance(b, dict): + return assert_dict_equal(a, b) + + if (isinstance(a, compat.string_types) or + isinstance(b, compat.string_types)): + assert a == b, "%r != %r" % (a, b) + return True + + if isiterable(a): + assert isiterable(b), ( + "First object is iterable, second isn't: %r != %r" % (a, b) + ) + assert has_length(a) and has_length(b), ( + "Can't compare objects without length, one or both is invalid: " + "(%r, %r)" % (a, b) + ) + + na, nb = len(a), len(b) + assert na == nb, ( + "Length of two iterators not the same: %r != %r" % (na, nb) + ) + if (isinstance(a, np.ndarray) and + isinstance(b, np.ndarray) and + np.array_equal(a, b)): + return True + else: + for i in xrange(na): + assert_almost_equal(a[i], b[i], check_less_precise) + return True + elif isiterable(b): + assert False, ( + "Second object is iterable, first isn't: %r != %r" % (a, b) + ) + + if isnull(a): + assert isnull(b), ( + "First object is null, second isn't: %r != %r" % (a, b) + ) + return True + elif isnull(b): + assert isnull(a), ( + "First object is not null, second is null: %r != %r" % (a, b) + ) + return True + + if is_comparable_as_number(a): + assert is_comparable_as_number(b), ( + "First object is numeric, second is not: %r != %r" % (a, b) + ) + + decimal = 5 + + # deal with differing dtypes + if check_less_precise: + dtype_a = np.dtype(type(a)) + dtype_b = np.dtype(type(b)) + if dtype_a.kind == 'f' and dtype_b == 'f': + if dtype_a.itemsize <= 4 and dtype_b.itemsize <= 4: + decimal = 3 + + if np.isinf(a): + assert np.isinf(b), "First object is inf, second isn't" + else: + fa, fb = a, b + + # case for zero + if abs(fa) < 1e-5: + if not decimal_almost_equal(fa, fb, decimal): + assert False, ( + '(very low values) expected %.5f but got %.5f' % (b, a) + ) + else: + if not decimal_almost_equal(1, fb / fa, decimal): + assert False, 'expected %.5f but got %.5f' % (b, a) + + else: + assert a == b, "%r != %r" % (a, b) + + return True diff --git 
a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py new file mode 100644 index 0000000000000..fa295838d47e9 --- /dev/null +++ b/pandas/tests/test_testing.py @@ -0,0 +1,123 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import pandas as pd +import unittest +import warnings +import nose +import numpy as np +import sys + +from pandas.util.testing import ( + assert_almost_equal, assertRaisesRegexp, raise_with_traceback +) + +# let's get meta. + +class TestAssertAlmostEqual(unittest.TestCase): + _multiprocess_can_split_ = True + + def _assert_almost_equal_both(self, a, b, **kwargs): + assert_almost_equal(a, b, **kwargs) + assert_almost_equal(b, a, **kwargs) + + def _assert_not_almost_equal_both(self, a, b, **kwargs): + self.assertRaises(AssertionError, assert_almost_equal, a, b, **kwargs) + self.assertRaises(AssertionError, assert_almost_equal, b, a, **kwargs) + + def test_assert_almost_equal_numbers(self): + self._assert_almost_equal_both(1.1, 1.1) + self._assert_almost_equal_both(1.1, 1.100001) + self._assert_almost_equal_both(np.int16(1), 1.000001) + self._assert_almost_equal_both(np.float64(1.1), 1.1) + self._assert_almost_equal_both(np.uint32(5), 5) + + self._assert_not_almost_equal_both(1.1, 1) + self._assert_not_almost_equal_both(1.1, True) + self._assert_not_almost_equal_both(1, 2) + self._assert_not_almost_equal_both(1.0001, np.int16(1)) + + def test_assert_almost_equal_numbers_with_zeros(self): + self._assert_almost_equal_both(0, 0) + self._assert_almost_equal_both(0.000001, 0) + + self._assert_not_almost_equal_both(0.001, 0) + self._assert_not_almost_equal_both(1, 0) + + def test_assert_almost_equal_numbers_with_mixed(self): + self._assert_not_almost_equal_both(1, 'abc') + self._assert_not_almost_equal_both(1, [1,]) + self._assert_not_almost_equal_both(1, object()) + + def test_assert_almost_equal_dicts(self): + self._assert_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 2}) + + self._assert_not_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 
3}) + self._assert_not_almost_equal_both( + {'a': 1, 'b': 2}, {'a': 1, 'b': 2, 'c': 3} + ) + self._assert_not_almost_equal_both({'a': 1}, 1) + self._assert_not_almost_equal_both({'a': 1}, 'abc') + self._assert_not_almost_equal_both({'a': 1}, [1,]) + + def test_assert_almost_equal_dict_like_object(self): + class DictLikeObj(object): + def keys(self): + return ('a',) + + def __getitem__(self, item): + if item == 'a': + return 1 + + self._assert_almost_equal_both({'a': 1}, DictLikeObj()) + + self._assert_not_almost_equal_both({'a': 2}, DictLikeObj()) + + def test_assert_almost_equal_strings(self): + self._assert_almost_equal_both('abc', 'abc') + + self._assert_not_almost_equal_both('abc', 'abcd') + self._assert_not_almost_equal_both('abc', 'abd') + self._assert_not_almost_equal_both('abc', 1) + self._assert_not_almost_equal_both('abc', [1,]) + + def test_assert_almost_equal_iterables(self): + self._assert_almost_equal_both([1, 2, 3], [1, 2, 3]) + self._assert_almost_equal_both(np.array([1, 2, 3]), [1, 2, 3]) + + # Can't compare generators + self._assert_not_almost_equal_both(iter([1, 2, 3]), [1, 2, 3]) + + self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 4]) + self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 3, 4]) + self._assert_not_almost_equal_both([1, 2, 3], 1) + + def test_assert_almost_equal_null(self): + self._assert_almost_equal_both(None, None) + self._assert_almost_equal_both(None, np.NaN) + + self._assert_not_almost_equal_both(None, 0) + self._assert_not_almost_equal_both(np.NaN, 0) + + def test_assert_almost_equal_inf(self): + self._assert_almost_equal_both(np.inf, np.inf) + self._assert_almost_equal_both(np.inf, float("inf")) + + self._assert_not_almost_equal_both(np.inf, 0) + +class TestUtilTesting(unittest.TestCase): + _multiprocess_can_split_ = True + + def test_raise_with_traceback(self): + with assertRaisesRegexp(LookupError, "error_text"): + try: + raise ValueError("THIS IS AN ERROR") + except ValueError as e: + e = LookupError("error_text") 
+ raise_with_traceback(e) + with assertRaisesRegexp(LookupError, "error_text"): + try: + raise ValueError("This is another error") + except ValueError: + e = LookupError("error_text") + _, _, traceback = sys.exc_info() + raise_with_traceback(e, traceback) diff --git a/pandas/tests/test_tests.py b/pandas/tests/test_tests.py deleted file mode 100644 index 1890c2607fc89..0000000000000 --- a/pandas/tests/test_tests.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -import pandas as pd -import unittest -import warnings -import nose -import sys - -from pandas.util.testing import ( - assert_almost_equal, assertRaisesRegexp, raise_with_traceback -) - -# let's get meta. - -class TestUtilTesting(unittest.TestCase): - _multiprocess_can_split_ = True - - def test_assert_almost_equal(self): - # don't die because values are not ndarrays - assert_almost_equal(1.1,1.1,check_less_precise=True) - - def test_raise_with_traceback(self): - with assertRaisesRegexp(LookupError, "error_text"): - try: - raise ValueError("THIS IS AN ERROR") - except ValueError as e: - e = LookupError("error_text") - raise_with_traceback(e) - with assertRaisesRegexp(LookupError, "error_text"): - try: - raise ValueError("This is another error") - except ValueError: - e = LookupError("error_text") - _, _, traceback = sys.exc_info() - raise_with_traceback(e, traceback) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 7a37be30f7bf6..be6f593da2043 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -37,6 +37,8 @@ from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex +from pandas import _testing + from pandas.io.common import urlopen Index = index.Index @@ -50,6 +52,11 @@ K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False +# NOTE: don't pass an NDFrame or index to this function - may not handle it +# well. 
+assert_almost_equal = _testing.assert_almost_equal + +assert_dict_equal = _testing.assert_dict_equal def randbool(size=(), p=0.5): return rand(*size) <= p @@ -374,75 +381,9 @@ def assert_attr_equal(attr, left, right): def isiterable(obj): return hasattr(obj, '__iter__') - -# NOTE: don't pass an NDFrame or index to this function - may not handle it -# well. -def assert_almost_equal(a, b, check_less_precise=False): - if isinstance(a, dict) or isinstance(b, dict): - return assert_dict_equal(a, b) - - if isinstance(a, compat.string_types): - assert a == b, "%r != %r" % (a, b) - return True - - if isiterable(a): - np.testing.assert_(isiterable(b)) - na, nb = len(a), len(b) - assert na == nb, "%s != %s" % (na, nb) - if isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and\ - np.array_equal(a, b): - return True - else: - for i in range(na): - assert_almost_equal(a[i], b[i], check_less_precise) - return True - - err_msg = lambda a, b: 'expected %.5f but got %.5f' % (b, a) - - if isnull(a): - np.testing.assert_(isnull(b)) - return - - if isinstance(a, (bool, float, int, np.float32)): - decimal = 5 - - # deal with differing dtypes - if check_less_precise: - dtype_a = np.dtype(type(a)) - dtype_b = np.dtype(type(b)) - if dtype_a.kind == 'f' and dtype_b == 'f': - if dtype_a.itemsize <= 4 and dtype_b.itemsize <= 4: - decimal = 3 - - if np.isinf(a): - assert np.isinf(b), err_msg(a, b) - - # case for zero - elif abs(a) < 1e-5: - np.testing.assert_almost_equal( - a, b, decimal=decimal, err_msg=err_msg(a, b), verbose=False) - else: - np.testing.assert_almost_equal( - 1, a / b, decimal=decimal, err_msg=err_msg(a, b), verbose=False) - else: - assert a == b, "%s != %s" % (a, b) - - def is_sorted(seq): return assert_almost_equal(seq, np.sort(np.array(seq))) - -def assert_dict_equal(a, b, compare_keys=True): - a_keys = frozenset(a.keys()) - b_keys = frozenset(b.keys()) - - if compare_keys: - assert(a_keys == b_keys) - - for k in a_keys: - assert_almost_equal(a[k], b[k]) - - def 
assert_series_equal(left, right, check_dtype=True, check_index_type=False, check_series_type=False, diff --git a/setup.py b/setup.py index c326d14f552e0..635da56d7339f 100755 --- a/setup.py +++ b/setup.py @@ -304,7 +304,8 @@ class CheckSDist(sdist): 'pandas/index.pyx', 'pandas/algos.pyx', 'pandas/parser.pyx', - 'pandas/src/sparse.pyx'] + 'pandas/src/sparse.pyx', + 'pandas/src/testing.pyx'] def initialize_options(self): sdist.initialize_options(self) @@ -464,6 +465,13 @@ def pxd(name): extensions.extend([sparse_ext]) +testing_ext = Extension('pandas._testing', + sources=[srcpath('testing', suffix=suffix)], + include_dirs=[], + libraries=libraries) + +extensions.extend([testing_ext]) + #---------------------------------------------------------------------- # msgpack stuff here
closes #4398 Add a testing.pyx cython file, and port assert_almost_equal() from python to cython. This also fixes a few minor bugs that were in the python version of assert_almost_equal() and adds more test cases to test_tests.py On my machine this brings a modest gain to the suite of "not slow" tests (160s -> 140s), but on assert_almost_equal() heavy tests, like test_expressions.py, it shows a large improvement (14s -> 4s).
https://api.github.com/repos/pandas-dev/pandas/pulls/5219
2013-10-14T19:34:42Z
2013-10-21T21:35:44Z
2013-10-21T21:35:44Z
2014-06-19T14:48:35Z
BUG: don't try to deal with a dead cache referant (GH5216)
diff --git a/doc/source/release.rst b/doc/source/release.rst index b74b1f9252709..331a578c5c349 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -372,7 +372,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - Internal type checking is now done via a suite of generated classes, allowing ``isinstance(value, klass)`` without having to directly import the klass, courtesy of @jtratner - Bug in Series update where the parent frame is not updating its cache based on - changes (:issue:`4080`) or types (:issue:`3217`), fillna (:issue:`3386`) + changes (:issue:`4080`, :issue:`5216`) or types (:issue:`3217`), fillna (:issue:`3386`) - Indexing with dtype conversions fixed (:issue:`4463`, :issue:`4204`) - Refactor ``Series.reindex`` to core/generic.py (:issue:`4604`, :issue:`4618`), allow ``method=`` in reindexing on a Series to work diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3fca45b00d565..a5da0b4f23c9a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -943,7 +943,13 @@ def _maybe_update_cacher(self, clear=False): if clear, then clear our cache """ cacher = getattr(self,'_cacher',None) if cacher is not None: - cacher[1]()._maybe_cache_changed(cacher[0],self) + try: + cacher[1]()._maybe_cache_changed(cacher[0],self) + except: + + # our referant is dead + del self._cacher + if clear: self._clear_item_cache() diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index c649e73184aa3..b69496b042274 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1557,6 +1557,24 @@ def test_cache_updating(self): self.assert_("A+1" in panel.ix[0].columns) self.assert_("A+1" in panel.ix[1].columns) + # 5216 + # make sure that we don't try to set a dead cache + a = np.random.rand(10, 3) + df = DataFrame(a, columns=['x', 'y', 'z']) + tuples = [(i, j) for i in range(5) for j in range(2)] + index = MultiIndex.from_tuples(tuples) + df.index = index + + # setting via chained 
assignment + df.loc[0]['z'].iloc[0] = 1. + result = df.loc[(0,0),'z'] + self.assert_(result == 1) + + # correct setting + df.loc[(0,0),'z'] = 2 + result = df.loc[(0,0),'z'] + self.assert_(result == 2) + def test_floating_index_doc_example(self): index = Index([1.5, 2, 3, 4.5, 5])
closes #5216
https://api.github.com/repos/pandas-dev/pandas/pulls/5217
2013-10-14T15:04:05Z
2013-10-14T15:20:35Z
2013-10-14T15:20:35Z
2014-06-28T23:03:41Z
BUG/CLN: Clear _tuples on setting MI levels/labels
diff --git a/doc/source/release.rst b/doc/source/release.rst index fe1af472700ac..b74b1f9252709 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -597,6 +597,9 @@ Bug Fixes - Bug in ``to_datetime`` with a format and ``coerce=True`` not raising (:issue:`5195`) - Bug in ``loc`` setting with multiple indexers and a rhs of a Series that needs broadcasting (:issue:`5206`) + - Fixed bug where inplace setting of levels or labels on ``MultiIndex`` would + not clear cached ``values`` property and therefore return wrong ``values``. + (:issue:`5215`) pandas 0.12.0 ------------- diff --git a/pandas/core/index.py b/pandas/core/index.py index 446c57bcf20bf..773ca4acf80df 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1903,8 +1903,9 @@ def _set_levels(self, levels, copy=False, validate=True): for lev in levels) names = self.names self._levels = levels - if len(names): + if any(names): self._set_names(names) + self._tuples = None def set_levels(self, levels, inplace=False): """ @@ -1947,6 +1948,7 @@ def _set_labels(self, labels, copy=False, validate=True): raise ValueError("Length of labels must match length of levels") self._labels = FrozenList(_ensure_frozen(labs, copy=copy)._shallow_copy() for labs in labels) + self._tuples = None def set_labels(self, labels, inplace=False): """ diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index c7c9fd1de0fcd..5b2edc31e1fe9 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1319,6 +1319,41 @@ def test_metadata_immutable(self): with assertRaisesRegexp(TypeError, mutable_regex): names[0] = names[0] + def test_inplace_mutation_resets_values(self): + levels = [['a', 'b', 'c'], [4]] + levels2 = [[1, 2, 3], ['a']] + labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] + mi1 = MultiIndex(levels=levels, labels=labels) + mi2 = MultiIndex(levels=levels2, labels=labels) + vals = mi1.values.copy() + vals2 = mi2.values.copy() + self.assert_(mi1._tuples is not None) + + # make 
sure level setting works + new_vals = mi1.set_levels(levels2).values + assert_almost_equal(vals2, new_vals) + # non-inplace doesn't kill _tuples [implementation detail] + assert_almost_equal(mi1._tuples, vals) + # and values is still same too + assert_almost_equal(mi1.values, vals) + + # inplace should kill _tuples + mi1.set_levels(levels2, inplace=True) + assert_almost_equal(mi1.values, vals2) + + # make sure label setting works too + labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] + exp_values = np.array([(1, 'a')] * 6, dtype=object) + new_values = mi2.set_labels(labels2).values + # not inplace shouldn't change + assert_almost_equal(mi2._tuples, vals2) + # should have correct values + assert_almost_equal(exp_values, new_values) + + # and again setting inplace should kill _tuples, etc + mi2.set_labels(labels2, inplace=True) + assert_almost_equal(mi2.values, new_values) + def test_copy_in_constructor(self): levels = np.array(["a", "b", "c"]) labels = np.array([1, 1, 2, 0, 0, 1, 1]) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index dd59524e90f10..7a37be30f7bf6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -382,14 +382,15 @@ def assert_almost_equal(a, b, check_less_precise=False): return assert_dict_equal(a, b) if isinstance(a, compat.string_types): - assert a == b, "%s != %s" % (a, b) + assert a == b, "%r != %r" % (a, b) return True if isiterable(a): np.testing.assert_(isiterable(b)) na, nb = len(a), len(b) assert na == nb, "%s != %s" % (na, nb) - if np.array_equal(a, b): + if isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and\ + np.array_equal(a, b): return True else: for i in range(na):
Previously, setting levels and labels didn't reset `_tuple`, which meant that it would return wrong values. (and this was ocurring in v0.12 and earlier too).
https://api.github.com/repos/pandas-dev/pandas/pulls/5215
2013-10-14T04:01:33Z
2013-10-14T04:19:20Z
2013-10-14T04:19:20Z
2014-06-22T19:15:39Z
BUG/CLN: MI now checks level & label compatibility
diff --git a/doc/source/release.rst b/doc/source/release.rst index 805b8d24d70d9..6ea4e5a3046b2 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -400,6 +400,8 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` instead they are generated and cached on the fly. The internal representation and handling of DateOffsets has also been clarified. (:issue:`5189`, related :issue:`5004`) + - ``MultiIndex`` constructor now validates that passed levels and labels are + compatible. (:issue:`5213`, :issue:`5214`) .. _release.bug_fixes-0.13.0: diff --git a/pandas/core/index.py b/pandas/core/index.py index 773ca4acf80df..a79670579198b 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1394,7 +1394,7 @@ def _join_level(self, other, level, how='left', return_indexers=False): new_levels[level] = new_level join_index = MultiIndex(levels=new_levels, labels=new_labels, - names=left.names) + names=left.names, verify_integrity=False) left_indexer = np.arange(len(left))[new_lev_labels != -1] else: join_index = left @@ -1856,7 +1856,7 @@ class MultiIndex(Index): rename = Index.set_names def __new__(cls, levels=None, labels=None, sortorder=None, names=None, - copy=False): + copy=False, verify_integrity=True): if levels is None or labels is None: raise TypeError("Must pass both levels and labels") if len(levels) != len(labels): @@ -1886,12 +1886,36 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None, else: subarr.sortorder = sortorder + if verify_integrity: + subarr._verify_integrity() + return subarr + def _verify_integrity(self): + """Raises ValueError if length of levels and labels don't match or any + label would exceed level bounds""" + # NOTE: Currently does not check, among other things, that cached + # nlevels matches nor that sortorder matches actually sortorder. + labels, levels = self.labels, self.levels + if len(levels) != len(labels): + raise ValueError("Length of levels and labels must match. 
NOTE:" + " this index is in an inconsistent state.") + label_length = len(self.labels[0]) + for i, (level, label) in enumerate(zip(levels, labels)): + if len(label) != label_length: + raise ValueError("Unequal label lengths: %s" % ( + [len(lab) for lab in labels])) + if len(label) and label.max() >= len(level): + raise ValueError("On level %d, label max (%d) >= length of" + " level (%d). NOTE: this index is in an" + " inconsistent state" % (i, label.max(), + len(level))) + def _get_levels(self): return self._levels - def _set_levels(self, levels, copy=False, validate=True): + def _set_levels(self, levels, copy=False, validate=True, + verify_integrity=False): # This is NOT part of the levels property because it should be # externally not allowed to set levels. User beware if you change # _levels directly @@ -1907,7 +1931,10 @@ def _set_levels(self, levels, copy=False, validate=True): self._set_names(names) self._tuples = None - def set_levels(self, levels, inplace=False): + if verify_integrity: + self._verify_integrity() + + def set_levels(self, levels, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning new index. 
@@ -1918,6 +1945,8 @@ def set_levels(self, levels, inplace=False): new levels to apply inplace : bool if True, mutates in place + verify_integrity : bool (default True) + if True, checks that levels and labels are compatible Returns ------- @@ -1930,27 +1959,33 @@ def set_levels(self, levels, inplace=False): else: idx = self._shallow_copy() idx._reset_identity() - idx._set_levels(levels) + idx._set_levels(levels, validate=True, + verify_integrity=verify_integrity) if not inplace: return idx # remove me in 0.14 and change to read only property __set_levels = deprecate("setting `levels` directly", - partial(set_levels, inplace=True), + partial(set_levels, inplace=True, + verify_integrity=True), alt_name="set_levels") levels = property(fget=_get_levels, fset=__set_levels) def _get_labels(self): return self._labels - def _set_labels(self, labels, copy=False, validate=True): + def _set_labels(self, labels, copy=False, validate=True, + verify_integrity=False): if validate and len(labels) != self.nlevels: raise ValueError("Length of labels must match length of levels") self._labels = FrozenList(_ensure_frozen(labs, copy=copy)._shallow_copy() for labs in labels) self._tuples = None - def set_labels(self, labels, inplace=False): + if verify_integrity: + self._verify_integrity() + + def set_labels(self, labels, inplace=False, verify_integrity=True): """ Set new labels on MultiIndex. Defaults to returning new index. 
@@ -1961,6 +1996,8 @@ def set_labels(self, labels, inplace=False): new labels to apply inplace : bool if True, mutates in place + verify_integrity : bool (default True) + if True, checks that levels and labels are compatible Returns ------- @@ -1973,13 +2010,14 @@ def set_labels(self, labels, inplace=False): else: idx = self._shallow_copy() idx._reset_identity() - idx._set_labels(labels) + idx._set_labels(labels, verify_integrity=verify_integrity) if not inplace: return idx # remove me in 0.14 and change to readonly property __set_labels = deprecate("setting labels directly", - partial(set_labels, inplace=True), + partial(set_labels, inplace=True, + verify_integrity=True), alt_name="set_labels") labels = property(fget=_get_labels, fset=__set_labels) @@ -2392,7 +2430,8 @@ def from_arrays(cls, arrays, sortorder=None, names=None): names = [c.name for c in cats] return MultiIndex(levels=levels, labels=labels, - sortorder=sortorder, names=names) + sortorder=sortorder, names=names, + verify_integrity=False) @classmethod def from_tuples(cls, tuples, sortorder=None, names=None): @@ -2463,6 +2502,7 @@ def __setstate__(self, state): self._set_labels(labels) self._set_names(names) self.sortorder = sortorder + self._verify_integrity() def __getitem__(self, key): if np.isscalar(key): @@ -2502,7 +2542,7 @@ def take(self, indexer, axis=None): indexer = com._ensure_platform_int(indexer) new_labels = [lab.take(indexer) for lab in self.labels] return MultiIndex(levels=self.levels, labels=new_labels, - names=self.names) + names=self.names, verify_integrity=False) def append(self, other): """ @@ -2618,7 +2658,7 @@ def droplevel(self, level=0): return result else: return MultiIndex(levels=new_levels, labels=new_labels, - names=new_names) + names=new_names, verify_integrity=False) def swaplevel(self, i, j): """ @@ -2645,7 +2685,7 @@ def swaplevel(self, i, j): new_names[i], new_names[j] = new_names[j], new_names[i] return MultiIndex(levels=new_levels, labels=new_labels, - 
names=new_names) + names=new_names, verify_integrity=False) def reorder_levels(self, order): """ @@ -2664,7 +2704,7 @@ def reorder_levels(self, order): new_names = [self.names[i] for i in order] return MultiIndex(levels=new_levels, labels=new_labels, - names=new_names) + names=new_names, verify_integrity=False) def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) @@ -2705,7 +2745,8 @@ def sortlevel(self, level=0, ascending=True): new_labels = [lab.take(indexer) for lab in self.labels] new_index = MultiIndex(labels=new_labels, levels=self.levels, - names=self.names, sortorder=level) + names=self.names, sortorder=level, + verify_integrity=False) return new_index, indexer @@ -3086,7 +3127,8 @@ def truncate(self, before=None, after=None): new_labels = [lab[left:right] for lab in self.labels] new_labels[0] = new_labels[0] - i - return MultiIndex(levels=new_levels, labels=new_labels) + return MultiIndex(levels=new_levels, labels=new_labels, + verify_integrity=False) def equals(self, other): """ @@ -3180,7 +3222,7 @@ def intersection(self, other): if len(uniq_tuples) == 0: return MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, - names=result_names) + names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) @@ -3210,14 +3252,14 @@ def diff(self, other): if self.equals(other): return MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, - names=result_names) + names=result_names, verify_integrity=False) difference = sorted(set(self.values) - set(other.values)) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, - names=result_names) + names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names) @@ -3269,7 +3311,7 @@ def insert(self, loc, item): new_labels.append(np.insert(labels, loc, lev_loc)) return MultiIndex(levels=new_levels, 
labels=new_labels, - names=self.names) + names=self.names, verify_integrity=False) def delete(self, loc): """ @@ -3281,7 +3323,7 @@ def delete(self, loc): """ new_labels = [np.delete(lab, loc) for lab in self.labels] return MultiIndex(levels=self.levels, labels=new_labels, - names=self.names) + names=self.names, verify_integrity=False) get_major_bounds = slice_locs diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1389445b29943..87e9121b2dffc 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -96,7 +96,8 @@ def panel_index(time, panels, names=['time', 'panel']): labels = [time_factor.labels, panel_factor.labels] levels = [time_factor.levels, panel_factor.levels] - return MultiIndex(levels, labels, sortorder=None, names=names) + return MultiIndex(levels, labels, sortorder=None, names=names, + verify_integrity=False) @@ -838,7 +839,7 @@ def to_frame(self, filter_observations=True): index = MultiIndex(levels=[self.major_axis, self.minor_axis], labels=[major_labels, minor_labels], - names=[maj_name, min_name]) + names=[maj_name, min_name], verify_integrity=False) return DataFrame(data, index=index, columns=self.items) diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index a608b40847228..c2c1a2931d4aa 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -218,7 +218,7 @@ def get_new_columns(self): new_labels.append(np.tile(np.arange(stride), width)) return MultiIndex(levels=new_levels, labels=new_labels, - names=new_names) + names=new_names, verify_integrity=False) def get_new_index(self): result_labels = [] @@ -234,7 +234,8 @@ def get_new_index(self): else: new_index = MultiIndex(levels=self.new_index_levels, labels=result_labels, - names=self.new_index_names) + names=self.new_index_names, + verify_integrity=False) return new_index @@ -286,7 +287,8 @@ def _unstack_multiple(data, clocs): dummy_index = MultiIndex(levels=rlevels + [obs_ids], labels=rlabels + [comp_ids], - names=rnames + ['__placeholder__']) + 
names=rnames + ['__placeholder__'], + verify_integrity=False) if isinstance(data, Series): dummy = Series(data.values, index=dummy_index) @@ -320,7 +322,7 @@ def _unstack_multiple(data, clocs): new_labels.append(rec.take(unstcols.labels[-1])) new_columns = MultiIndex(levels=new_levels, labels=new_labels, - names=new_names) + names=new_names, verify_integrity=False) if isinstance(unstacked, Series): unstacked.index = new_columns @@ -505,13 +507,14 @@ def stack(frame, level=-1, dropna=True): new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex(levels=new_levels, labels=new_labels, - names=new_names) + names=new_names, verify_integrity=False) else: ilabels = np.arange(N).repeat(K) clabels = np.tile(np.arange(K), N).ravel() new_index = MultiIndex(levels=[frame.index, frame.columns], labels=[ilabels, clabels], - names=[frame.index.name, frame.columns.name]) + names=[frame.index.name, frame.columns.name], + verify_integrity=False) new_values = frame.values.ravel() if dropna: @@ -590,7 +593,7 @@ def _stack_multi_columns(frame, level=-1, dropna=True): new_names.append(frame.columns.names[level]) new_index = MultiIndex(levels=new_levels, labels=new_labels, - names=new_names) + names=new_names, verify_integrity=False) result = DataFrame(new_data, index=new_index, columns=new_columns) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 5b7297c7be2f4..999f0751abe99 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2209,7 +2209,8 @@ def read_multi_index(self, key): lab = self.read_array(label_key) labels.append(lab) - return MultiIndex(levels=levels, labels=labels, names=names) + return MultiIndex(levels=levels, labels=labels, names=names, + verify_integrity=True) def read_index_node(self, node): data = node[:] diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 88464d683d543..b577f5ba8f5ec 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -767,7 +767,8 @@ def 
stack_sparse_frame(frame): major_labels = np.concatenate(inds_to_concat) stacked_values = np.concatenate(vals_to_concat) index = MultiIndex(levels=[frame.index, frame.columns], - labels=[major_labels, minor_labels]) + labels=[major_labels, minor_labels], + verify_integrity=False) lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=['foo']) diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 65a24dc1bf25f..74bca7de89bcc 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -317,7 +317,8 @@ def to_frame(self, filter_observations=True): minor_labels = inds // N index = MultiIndex(levels=[self.major_axis, self.minor_axis], - labels=[major_labels, minor_labels]) + labels=[major_labels, minor_labels], + verify_integrity=False) df = DataFrame(values, index=index, columns=self.items) return df.sortlevel(level=0) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 5b2edc31e1fe9..755d74c9ea0bc 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -7,6 +7,7 @@ import re import unittest import nose +import warnings import os import numpy as np @@ -1213,7 +1214,7 @@ def setUp(self): self.index_names = ['first', 'second'] self.index = MultiIndex(levels=[major_axis, minor_axis], labels=[major_labels, minor_labels], - names=self.index_names) + names=self.index_names, verify_integrity=False) def test_hash_error(self): with tm.assertRaisesRegexp(TypeError, @@ -1447,11 +1448,38 @@ def test_constructor_no_levels(self): MultiIndex(labels=[]) def test_constructor_mismatched_label_levels(self): - levels = [np.array([1]), np.array([2]), np.array([3])] - labels = ["a"] + labels = [np.array([1]), np.array([2]), np.array([3])] + levels = ["a"] assertRaisesRegexp(ValueError, "Length of levels and labels must be" " the same", MultiIndex, levels=levels, labels=labels) + length_error = re.compile('>= length of level') + label_error = re.compile(r'Unequal label lengths: \[4, 2\]') + + # important to check 
that it's looking at the right thing. + with tm.assertRaisesRegexp(ValueError, length_error): + MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]]) + + with tm.assertRaisesRegexp(ValueError, label_error): + MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]]) + + # external API + with tm.assertRaisesRegexp(ValueError, length_error): + self.index.copy().set_levels([['a'], ['b']]) + + with tm.assertRaisesRegexp(ValueError, label_error): + self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]]) + + # deprecated properties + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + + with tm.assertRaisesRegexp(ValueError, length_error): + self.index.copy().levels = [['a'], ['b']] + + with tm.assertRaisesRegexp(ValueError, label_error): + self.index.copy().labels = [[0, 0, 0, 0], [0, 0]] + def assert_multiindex_copied(self, copy, original): # levels shoudl be (at least, shallow copied) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 2a9e7f8642601..b0a64d282e814 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -488,9 +488,6 @@ def _check(new_mgr,block_type, citems): _check(new_mgr,BoolBlock,['bool']) _check(new_mgr,DatetimeBlock,['dt']) - def test_xs(self): - pass - def test_interleave(self): pass diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 3a99793937096..c76bdea950650 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -1366,7 +1366,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): # also copies names = names + _get_consensus_names(indexes) - return MultiIndex(levels=levels, labels=label_list, names=names) + return MultiIndex(levels=levels, labels=label_list, names=names, + verify_integrity=False) new_index = indexes[0] n = len(new_index) @@ -1402,7 +1403,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): if len(new_names) < len(new_levels): new_names.extend(new_index.names) - return 
MultiIndex(levels=new_levels, labels=new_labels, names=new_names) + return MultiIndex(levels=new_levels, labels=new_labels, names=new_names, + verify_integrity=False) def _should_fill(lname, rname):
Via verify_integrity kwarg. Internals all pass False.I don't think I've missed anything / passed verify_integrity=True where it ought not to be. Fixes #5213. Clearly this adds a (likely small) perf hit - but should only occur from external MI calls that specifically call the constructor with levels and labels (for example, doesn't occur with `from_arrays` or `from_tuples`). For 0.14 I'll have to do it differently, but this should work for 0.13.
https://api.github.com/repos/pandas-dev/pandas/pulls/5214
2013-10-14T02:29:10Z
2013-10-16T02:07:46Z
2013-10-16T02:07:46Z
2014-06-12T18:48:21Z
ENH: Add usecols option to python parser.
diff --git a/doc/source/io.rst b/doc/source/io.rst index e75de91582b49..37227edc83fe2 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -85,11 +85,11 @@ They can take a number of arguments: ways to specify the file format - ``dtype``: A data type name or a dict of column name to data type. If not specified, data types will be inferred. - - ``header``: row number to use as the column names, and the start of the + - ``header``: row number(s) to use as the column names, and the start of the data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns - E.g. [0,1,3]. Interveaning rows that are not specified will be skipped. + E.g. [0,1,3]. Intervening rows that are not specified will be skipped. (E.g. 2 in this example are skipped) - ``skiprows``: A collection of numbers for rows in the file to skip. Can also be an integer to skip the first ``n`` rows @@ -2938,7 +2938,7 @@ into BigQuery and pull it into a DataFrame. .. code-block:: python from pandas.io import gbq - + # Insert your BigQuery Project ID Here # Can be found in the web console, or # using the command line tool `bq ls` @@ -2998,7 +2998,7 @@ To add more rows to this, simply: To use this module, you will need a BigQuery account. See <https://cloud.google.com/products/big-query> for details. - + As of 10/10/13, there is a bug in Google's API preventing result sets from being larger than 100,000 rows. A patch is scheduled for the week of 10/14/13. diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index b2c78f38140b4..603cffcc1b76b 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -505,11 +505,13 @@ Enhancements - :meth:`~pandas.io.json.json_normalize` is a new method to allow you to create a flat table from semi-structured JSON data. 
See :ref:`the docs<io.json_normalize>` (:issue:`1067`) - - Added PySide support for the qtpandas DataFrameModel and DataFrameWidget. +- Python csv parser now supports usecols (:issue:`4335`) + - DataFrame has a new ``interpolate`` method, similar to Series (:issue:`4434`, :issue:`1892`) + .. ipython:: python df = DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8], @@ -654,7 +656,7 @@ Experimental against extremely large datasets. :ref:`See the docs <io.bigquery>` .. code-block:: python - + from pandas.io import gbq # A query to select the average monthly temperatures in the @@ -665,8 +667,8 @@ Experimental query = """SELECT station_number as STATION, month as MONTH, AVG(mean_temp) as MEAN_TEMP FROM publicdata:samples.gsod - WHERE YEAR = 2000 - GROUP BY STATION, MONTH + WHERE YEAR = 2000 + GROUP BY STATION, MONTH ORDER BY STATION, MONTH ASC""" # Fetch the result set for this query @@ -675,7 +677,7 @@ Experimental # To find this, see your dashboard: # https://code.google.com/apis/console/b/0/?noredirect projectid = xxxxxxxxx; - + df = gbq.read_gbq(query, project_id = projectid) # Use pandas to process and reshape the dataset @@ -686,9 +688,9 @@ Experimental The resulting dataframe is:: - > df3 + > df3 Min Tem Mean Temp Max Temp - MONTH + MONTH 1 -53.336667 39.827892 89.770968 2 -49.837500 43.685219 93.437932 3 -77.926087 48.708355 96.099998 diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 76d6a3909f89f..e9e82824326a7 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -52,11 +52,12 @@ dialect : string or csv.Dialect instance, default None If None defaults to Excel dialect. Ignored if sep longer than 1 char See csv.Dialect documentation for more details -header : int, default 0 if names parameter not specified, - Row to use for the column labels of the parsed DataFrame. Specify None if - there is no header row. Can be a list of integers that specify row - locations for a multi-index on the columns E.g. [0,1,3]. 
Interveaning - rows that are not specified (E.g. 2 in this example are skipped) +header : int row number(s) to use as the column names, and the start of the + data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly + pass ``header=0`` to be able to replace existing names. The header can be + a list of integers that specify row locations for a multi-index on the columns + E.g. [0,1,3]. Intervening rows that are not specified will be skipped. + (E.g. 2 in this example are skipped) skiprows : list-like or integer Row numbers to skip (0-indexed) or number of rows to skip (int) at the start of the file @@ -917,22 +918,6 @@ def _do_date_conversions(self, names, data): return names, data - def _exclude_implicit_index(self, alldata): - - if self._implicit_index: - excl_indices = self.index_col - - data = {} - offset = 0 - for i, col in enumerate(self.orig_names): - while i + offset in excl_indices: - offset += 1 - data[col] = alldata[i + offset] - else: - data = dict((k, v) for k, v in zip(self.orig_names, alldata)) - - return data - class CParserWrapper(ParserBase): """ @@ -1173,22 +1158,6 @@ def TextParser(*args, **kwds): return TextFileReader(*args, **kwds) -# delimiter=None, dialect=None, names=None, header=0, -# index_col=None, -# na_values=None, -# na_filter=True, -# thousands=None, -# quotechar='"', -# escapechar=None, -# doublequote=True, -# skipinitialspace=False, -# quoting=csv.QUOTE_MINIMAL, -# comment=None, parse_dates=False, keep_date_col=False, -# date_parser=None, dayfirst=False, -# chunksize=None, skiprows=None, skip_footer=0, converters=None, -# verbose=False, encoding=None, squeeze=False): - - def count_empty_vals(vals): return sum([1 for v in vals if v == '' or v is None]) @@ -1242,10 +1211,6 @@ def __init__(self, f, **kwds): self.buf = [] self.pos = 0 - if kwds['usecols'] is not None: - raise Exception("usecols not supported with engine='python'" - " or multicharacter separators (yet).") - self.encoding = kwds['encoding'] 
self.compression = kwds['compression'] self.skiprows = kwds['skiprows'] @@ -1259,7 +1224,10 @@ def __init__(self, f, **kwds): self.skipinitialspace = kwds['skipinitialspace'] self.lineterminator = kwds['lineterminator'] self.quoting = kwds['quoting'] - self.mangle_dupe_cols = kwds.get('mangle_dupe_cols',True) + self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True) + self.usecols = kwds['usecols'] + + self.names_passed = kwds['names'] or None self.has_index_names = False if 'has_index_names' in kwds: @@ -1283,17 +1251,25 @@ def __init__(self, f, **kwds): f = TextIOWrapper(f, encoding=self.encoding) + # Set self.data to something that can read lines. if hasattr(f, 'readline'): self._make_reader(f) else: self.data = f - self.columns = self._infer_columns() + # Get columns in two steps: infer from data, then + # infer column indices from self.usecols if is is specified. + self._col_indices = None + self.columns, self.num_original_columns = self._infer_columns() - # we are processing a multi index column + # Now self.columns has the set of columns that we will process. + # The original set is stored in self.original_columns. if len(self.columns) > 1: + # we are processing a multi index column self.columns, self.index_names, self.col_names, _ = self._extract_multi_indexer_columns( self.columns, self.index_names, self.col_names) + # Update list of original names to include all indices. 
+ self.num_original_columns = len(self.columns) else: self.columns = self.columns[0] @@ -1304,7 +1280,7 @@ def __init__(self, f, **kwds): # multiple date column thing turning into a real spaghetti factory if not self._has_complex_date_col: (index_names, - self.orig_names, _) = self._get_index_name(self.columns) + self.orig_names, columns_) = self._get_index_name(self.columns) self._name_processed = True if self.index_names is None: self.index_names = index_names @@ -1442,6 +1418,22 @@ def read(self, rows=None): return index, columns, data + def _exclude_implicit_index(self, alldata): + + if self._implicit_index: + excl_indices = self.index_col + + data = {} + offset = 0 + for i, col in enumerate(self.orig_names): + while i + offset in excl_indices: + offset += 1 + data[col] = alldata[i + offset] + else: + data = dict((k, v) for k, v in zip(self.orig_names, alldata)) + + return data + # legacy def get_chunk(self, size=None): if size is None: @@ -1462,7 +1454,7 @@ def _convert_data(self, data): def _infer_columns(self): names = self.names - + num_original_columns = 0 if self.header is not None: header = self.header @@ -1476,10 +1468,7 @@ def _infer_columns(self): columns = [] for level, hr in enumerate(header): - if len(self.buf) > 0: - line = self.buf[0] - else: - line = self._next_line() + line = self._buffered_line() while self.pos <= hr: line = self._next_line() @@ -1488,51 +1477,103 @@ def _infer_columns(self): for i, c in enumerate(line): if c == '': if have_mi_columns: - this_columns.append('Unnamed: %d_level_%d' % (i,level)) + this_columns.append('Unnamed: %d_level_%d' % (i, level)) else: this_columns.append('Unnamed: %d' % i) else: this_columns.append(c) - if not have_mi_columns: - if self.mangle_dupe_cols: - counts = {} - for i, col in enumerate(this_columns): - cur_count = counts.get(col, 0) - if cur_count > 0: - this_columns[i] = '%s.%d' % (col, cur_count) - counts[col] = cur_count + 1 + if not have_mi_columns and self.mangle_dupe_cols: + counts = {} + 
for i, col in enumerate(this_columns): + cur_count = counts.get(col, 0) + if cur_count > 0: + this_columns[i] = '%s.%d' % (col, cur_count) + counts[col] = cur_count + 1 columns.append(this_columns) + if len(columns) == 1: + num_original_columns = len(this_columns) self._clear_buffer() if names is not None: - if len(names) != len(columns[0]): + if (self.usecols is not None and len(names) != len(self.usecols)) \ + or (self.usecols is None and len(names) != len(columns[0])): + raise ValueError('Number of passed names did not match ' - 'number of header fields in the file') + 'number of header fields in the file') if len(columns) > 1: raise TypeError('Cannot pass names with multi-index ' 'columns') - columns = [ names ] - else: - if len(self.buf) > 0: - line = self.buf[0] + if self.usecols is not None: + # Set _use_cols. We don't store columns because they are overwritten. + self._handle_usecols(columns, names) + else: + self._col_indices = None + num_original_columns = len(names) + columns = [names] else: - line = self._next_line() - + columns = self._handle_usecols(columns, columns[0]) + else: + # header is None + line = self._buffered_line() ncols = len(line) + num_original_columns = ncols if not names: if self.prefix: columns = [ ['X%d' % i for i in range(ncols)] ] else: columns = [ lrange(ncols) ] + columns = self._handle_usecols(columns, columns[0]) else: - columns = [ names ] + if self.usecols is None or len(names) == num_original_columns: + columns = self._handle_usecols([names], names) + num_original_columns = len(names) + else: + if self.usecols and len(names) != len(self.usecols): + raise ValueError('Number of passed names did not match ' + 'number of header fields in the file') + # Ignore output but set used columns. 
+ self._handle_usecols([names], names) + columns = [names] + num_original_columns = ncols + return columns, num_original_columns + + def _handle_usecols(self, columns, usecols_key): + """ + Sets self._col_indices + + usecols_key is used if there are string usecols. + """ + if self.usecols is not None: + if any([isinstance(u, string_types) for u in self.usecols]): + if len(columns) > 1: + raise ValueError("If using multiple headers, usecols must be integers.") + col_indices = [] + for u in self.usecols: + if isinstance(u, string_types): + col_indices.append(usecols_key.index(u)) + else: + col_indices.append(u) + else: + col_indices = self.usecols + + columns = [[n for i, n in enumerate(column) if i in col_indices] for column in columns] + self._col_indices = col_indices return columns + def _buffered_line(self): + """ + Return a line from buffer, filling buffer if required. + """ + if len(self.buf) > 0: + return self.buf[0] + else: + return self._next_line() + def _next_line(self): if isinstance(self.data, list): while self.pos in self.skiprows: @@ -1598,6 +1639,17 @@ def _clear_buffer(self): _implicit_index = False def _get_index_name(self, columns): + """ + Try several cases to get lines: + + 0) There are headers on row 0 and row 1 and their + total summed lengths equals the length of the next line. + Treat row 0 as columns and row 1 as indices + 1) Look for implicit index: there are more columns + on row 1 than row 0. If this is true, assume that row + 1 lists index columns and row 0 lists normal columns. + 2) Get index from the columns if it was listed. 
+ """ orig_names = list(columns) columns = list(columns) @@ -1615,29 +1667,34 @@ def _get_index_name(self, columns): implicit_first_cols = 0 if line is not None: # leave it 0, #2442 + # Case 1 if self.index_col is not False: - implicit_first_cols = len(line) - len(columns) + implicit_first_cols = len(line) - self.num_original_columns + # Case 0 if next_line is not None: - if len(next_line) == len(line) + len(columns): + if len(next_line) == len(line) + self.num_original_columns: # column and index names on diff rows - implicit_first_cols = 0 - self.index_col = lrange(len(line)) self.buf = self.buf[1:] for c in reversed(line): columns.insert(0, c) + # Update list of original names to include all indices. + self.num_original_columns = len(next_line) return line, columns, orig_names if implicit_first_cols > 0: + # Case 1 self._implicit_index = True if self.index_col is None: self.index_col = lrange(implicit_first_cols) + index_name = None else: + # Case 2 (index_name, columns, self.index_col) = _clean_index_names(columns, self.index_col) @@ -1646,7 +1703,7 @@ def _get_index_name(self, columns): def _rows_to_cols(self, content): zipped_content = list(lib.to_object_array(content).T) - col_len = len(self.orig_names) + col_len = self.num_original_columns zip_len = len(zipped_content) if self._implicit_index: @@ -1655,6 +1712,7 @@ def _rows_to_cols(self, content): if self.skip_footer < 0: raise ValueError('skip footer cannot be negative') + # Loop through rows to verify lengths are correct. 
if col_len != zip_len and self.index_col is not False: i = 0 for (i, l) in enumerate(content): @@ -1671,6 +1729,11 @@ def _rows_to_cols(self, content): (col_len, row_num + 1, zip_len)) raise ValueError(msg) + if self.usecols: + if self._implicit_index: + zipped_content = [a for i, a in enumerate(zipped_content) if i < len(self.index_col) or i - len(self.index_col) in self._col_indices] + else: + zipped_content = [a for i, a in enumerate(zipped_content) if i in self._col_indices] return zipped_content def _get_lines(self, rows=None): diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index cf0c01c8dff50..b81feec6ab6f8 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -18,7 +18,7 @@ from pandas.compat import( StringIO, BytesIO, PY3, range, long, lrange, lmap, u ) -from pandas.io.common import urlopen, URLError +from pandas.io.common import URLError import pandas.io.parsers as parsers from pandas.io.parsers import (read_csv, read_table, read_fwf, TextFileReader, TextParser) @@ -761,8 +761,6 @@ def test_deep_skiprows(self): condensed_data = self.read_csv(StringIO(condensed_text)) tm.assert_frame_equal(data, condensed_data) - - def test_detect_string_na(self): data = """A,B foo,bar @@ -1217,14 +1215,11 @@ def test_header_multi_index(self): R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2 """ - # basic test with both engines - for engine in ['c','python']: - df = read_csv(StringIO(data), header=[0,2,3,4],index_col=[0,1], tupleize_cols=False, - engine=engine) - tm.assert_frame_equal(df, expected) + df = read_csv(StringIO(data), header=[0, 2, 3, 4], index_col=[0, 1], tupleize_cols=False) + tm.assert_frame_equal(df, expected) # skipping lines in the header - df = read_csv(StringIO(data), header=[0,2,3,4],index_col=[0,1], tupleize_cols=False) + df = read_csv(StringIO(data), header=[0, 2, 3, 4], index_col=[0, 1], tupleize_cols=False) tm.assert_frame_equal(df, expected) #### invalid options #### @@ -1825,9 +1820,6 @@ def 
test_integer_overflow_bug(self): result = self.read_csv(StringIO(data), header=None, sep=' ') self.assertTrue(result[0].dtype == np.float64) - result = self.read_csv(StringIO(data), header=None, sep='\s+') - self.assertTrue(result[0].dtype == np.float64) - def test_int64_min_issues(self): # #2599 data = 'A,B\n0,0\n0,' @@ -1908,6 +1900,61 @@ def test_warn_if_chunks_have_mismatched_type(self): df = self.read_csv(StringIO(data)) self.assertEqual(df.a.dtype, np.object) + def test_usecols(self): + data = """\ +a,b,c +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + + result = self.read_csv(StringIO(data), usecols=(1, 2)) + result2 = self.read_csv(StringIO(data), usecols=('b', 'c')) + exp = self.read_csv(StringIO(data)) + + self.assertEquals(len(result.columns), 2) + self.assertTrue((result['b'] == exp['b']).all()) + self.assertTrue((result['c'] == exp['c']).all()) + + tm.assert_frame_equal(result, result2) + + result = self.read_csv(StringIO(data), usecols=[1, 2], header=0, + names=['foo', 'bar']) + expected = self.read_csv(StringIO(data), usecols=[1, 2]) + expected.columns = ['foo', 'bar'] + tm.assert_frame_equal(result, expected) + + data = """\ +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + result = self.read_csv(StringIO(data), names=['b', 'c'], + header=None, usecols=[1, 2]) + + expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'], + header=None) + expected = expected[['b', 'c']] + tm.assert_frame_equal(result, expected) + + result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'], + header=None, usecols=['b', 'c']) + tm.assert_frame_equal(result2, result) + + # length conflict, passed names and usecols disagree + self.assertRaises(ValueError, self.read_csv, StringIO(data), + names=['a', 'b'], usecols=[1], header=None) + + def test_integer_overflow_bug(self): + # #2601 + data = "65248E10 11\n55555E55 22\n" + + result = self.read_csv(StringIO(data), header=None, sep=' ') + self.assertTrue(result[0].dtype == np.float64) + + result = self.read_csv(StringIO(data), header=None, 
sep='\s+') + self.assertTrue(result[0].dtype == np.float64) + class TestPythonParser(ParserTests, unittest.TestCase): def test_negative_skipfooter_raises(self): @@ -2360,6 +2407,9 @@ def test_parse_dates_empty_string(self): result = pd.read_csv(s, parse_dates=["Date"], na_filter=False) self.assertTrue(result['Date'].isnull()[1]) + def test_usecols(self): + raise nose.SkipTest("Usecols is not supported in C High Memory engine.") + class TestCParserLowMemory(ParserTests, unittest.TestCase): @@ -2406,51 +2456,6 @@ def test_pass_dtype(self): self.assert_(result['one'].dtype == 'u1') self.assert_(result['two'].dtype == 'S1') - def test_usecols(self): - data = """\ -a,b,c -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - - result = self.read_csv(StringIO(data), usecols=(1, 2)) - result2 = self.read_csv(StringIO(data), usecols=('b', 'c')) - exp = self.read_csv(StringIO(data)) - - self.assertEquals(len(result.columns), 2) - self.assertTrue((result['b'] == exp['b']).all()) - self.assertTrue((result['c'] == exp['c']).all()) - - tm.assert_frame_equal(result, result2) - - result = self.read_csv(StringIO(data), usecols=[1, 2], header=0, - names=['foo', 'bar']) - expected = self.read_csv(StringIO(data), usecols=[1, 2]) - expected.columns = ['foo', 'bar'] - tm.assert_frame_equal(result, expected) - - data = """\ -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - result = self.read_csv(StringIO(data), names=['a', 'b', 'c'], - header=None, usecols=[1, 2]) - - expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'], - header=None) - expected = expected[['b', 'c']] - tm.assert_frame_equal(result, expected) - - result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'], - header=None, usecols=['b', 'c']) - tm.assert_frame_equal(result2, result) - - # length conflict, passed names and usecols disagree - self.assertRaises(ValueError, self.read_csv, StringIO(data), - names=['a', 'b'], usecols=[1], header=None) - def test_usecols_dtypes(self): data = """\ 1,2,3 @@ -2496,12 +2501,11 @@ def 
test_usecols_regex_sep(self): # #2733 data = 'a b c\n4 apple bat 5.7\n8 orange cow 10' - self.assertRaises(Exception, self.read_csv, StringIO(data), - sep='\s+', usecols=('a', 'b')) + df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b')) - # expected = DataFrame({'a': ['apple', 'orange'], - # 'b': ['bat', 'cow']}, index=[4, 8]) - # tm.assert_frame_equal(result, expected) + expected = DataFrame({'a': ['apple', 'orange'], + 'b': ['bat', 'cow']}, index=[4, 8]) + tm.assert_frame_equal(df, expected) def test_pure_python_failover(self): data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
Closes #4335 This ticket felt a little bit like whack-a-mole. The key is that names can have two meanings: if it matches length of usecols then it is the name to apply to the (numeric) usecols. If it matches the length of the original columns, it is the set of column names and string values in usecols refer to it.
https://api.github.com/repos/pandas-dev/pandas/pulls/5211
2013-10-13T23:14:00Z
2013-10-17T14:18:58Z
2013-10-17T14:18:58Z
2014-06-24T14:37:52Z
BUG: Bug in loc setting with multiple indexers and a rhs of a Series that needs broadcasting (GH5206)
diff --git a/doc/source/release.rst b/doc/source/release.rst index f899849475df8..2fb3a231660a4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -594,6 +594,8 @@ Bug Fixes - Bug in comparing duplicate frames (:issue:`4421`) related - Bug in describe on duplicate frames - Bug in ``to_datetime`` with a format and ``coerce=True`` not raising (:issue:`5195`) + - Bug in ``loc`` setting with multiple indexers and a rhs of a Series that needs + broadcasting (:issue:`5206`) pandas 0.12.0 ------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 69114166b3406..fa58d82a3b580 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -370,9 +370,11 @@ def _align_series(self, indexer, ser): if isinstance(indexer, tuple): aligners = [ not _is_null_slice(idx) for idx in indexer ] - single_aligner = sum(aligners) == 1 + sum_aligners = sum(aligners) + single_aligner = sum_aligners == 1 is_frame = self.obj.ndim == 2 is_panel = self.obj.ndim >= 3 + obj = self.obj # are we a single alignable value on a non-primary # dim (e.g. panel: 1,2, or frame: 0) ? 
@@ -387,7 +389,15 @@ def _align_series(self, indexer, ser): elif is_panel: single_aligner = single_aligner and (aligners[1] or aligners[2]) - obj = self.obj + # we have a frame, with multiple indexers on both axes; and a series, + # so need to broadcast (see GH5206) + if sum_aligners == self.ndim and all([ com._is_sequence(_) for _ in indexer ]): + + ser = ser.reindex(obj.axes[0][indexer[0].ravel()],copy=True).values + l = len(indexer[1].ravel()) + ser = np.tile(ser,l).reshape(l,-1).T + return ser + for i, idx in enumerate(indexer): ax = obj.axes[i] @@ -398,6 +408,8 @@ def _align_series(self, indexer, ser): new_ix = ax[idx] if not is_list_like(new_ix): new_ix = Index([new_ix]) + else: + new_ix = Index(new_ix.ravel()) if ser.index.equals(new_ix): return ser.values.copy() return ser.reindex(new_ix).values diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 6292c5874772f..c649e73184aa3 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1037,6 +1037,17 @@ def test_multi_assign(self): df2.ix[mask, cols]= dft.ix[mask, cols].values assert_frame_equal(df2,expected) + # broadcasting on the rhs is required + df = DataFrame(dict(A = [1,2,0,0,0],B=[0,0,0,10,11],C=[0,0,0,10,11],D=[3,4,5,6,7])) + + expected = df.copy() + mask = expected['A'] == 0 + for col in ['A','B']: + expected.loc[mask,col] = df['D'] + + df.loc[df['A']==0,['A','B']] = df['D'] + assert_frame_equal(df,expected) + def test_ix_assign_column_mixed(self): # GH #1142 df = DataFrame(tm.getSeriesData())
closes #5206
https://api.github.com/repos/pandas-dev/pandas/pulls/5210
2013-10-13T23:12:23Z
2013-10-13T23:12:39Z
2013-10-13T23:12:39Z
2014-07-16T08:34:49Z
CLN/BUG: Better validation of levels/labels/names
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2fb3a231660a4..fe1af472700ac 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -109,7 +109,8 @@ Improvements to existing features (:issue:`4039`) - Add ``rename`` and ``set_names`` methods to ``Index`` as well as ``set_names``, ``set_levels``, ``set_labels`` to ``MultiIndex``. - (:issue:`4039`) + (:issue:`4039`) with improved validation for all (:issue:`4039`, + :issue:`4794`) - A Series of dtype ``timedelta64[ns]`` can now be divided/multiplied by an integer series (:issue`4521`) - A Series of dtype ``timedelta64[ns]`` can now be divided by another diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index dee3ff20b9538..6bf32b2343084 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -52,7 +52,7 @@ API changes # but setting names is not deprecated. index = index.set_names(["bob", "cranberry"]) - # and all methods take an inplace kwarg + # and all methods take an inplace kwarg - but returns None index.set_names(["bob", "cranberry"], inplace=True) - Infer and downcast dtype if ``downcast='infer'`` is passed to ``fillna/ffill/bfill`` (:issue:`4604`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 98f190360bc33..141eba8783926 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1672,8 +1672,8 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False): subarr = np.array(data, dtype=np.int64, copy=copy) if len(data) > 0: if (subarr != data).any(): - raise TypeError('Unsafe NumPy casting, you must ' - 'explicitly cast') + raise TypeError('Unsafe NumPy casting to integer, you must' + ' explicitly cast') subarr = subarr.view(cls) subarr.name = name @@ -1857,11 +1857,12 @@ class MultiIndex(Index): def __new__(cls, levels=None, labels=None, sortorder=None, names=None, copy=False): + if levels is None or labels is None: + raise TypeError("Must pass both levels and labels") if len(levels) != len(labels): - raise ValueError( 
- 'Length of levels and labels must be the same') + raise ValueError('Length of levels and labels must be the same.') if len(levels) == 0: - raise TypeError('Must pass non-zero number of levels/labels') + raise ValueError('Must pass non-zero number of levels/labels') if len(levels) == 1: if names: name = names[0] @@ -1872,10 +1873,12 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None, # v3, 0.8.0 subarr = np.empty(0, dtype=object).view(cls) - subarr._set_levels(levels, copy=copy) - subarr._set_labels(labels, copy=copy) + # we've already validated levels and labels, so shortcut here + subarr._set_levels(levels, copy=copy, validate=False) + subarr._set_labels(labels, copy=copy, validate=False) if names is not None: + # handles name validation subarr._set_names(names) if sortorder is not None: @@ -1888,12 +1891,14 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None, def _get_levels(self): return self._levels - def _set_levels(self, levels, copy=False): + def _set_levels(self, levels, copy=False, validate=True): # This is NOT part of the levels property because it should be # externally not allowed to set levels. 
User beware if you change # _levels directly - if len(levels) == 0: - raise ValueError("Must set non-zero number of levels.") + if validate and len(levels) == 0: + raise ValueError('Must set non-zero number of levels.') + if validate and len(levels) != len(self._labels): + raise ValueError('Length of levels must match length of labels.') levels = FrozenList(_ensure_index(lev, copy=copy)._shallow_copy() for lev in levels) names = self.names @@ -1917,13 +1922,16 @@ def set_levels(self, levels, inplace=False): ------- new index (of same type and class...etc) """ + if not com.is_list_like(levels) or not com.is_list_like(levels[0]): + raise TypeError("Levels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_levels(levels) - return idx + if not inplace: + return idx # remove me in 0.14 and change to read only property __set_levels = deprecate("setting `levels` directly", @@ -1934,9 +1942,9 @@ def set_levels(self, levels, inplace=False): def _get_labels(self): return self._labels - def _set_labels(self, labels, copy=False): - if len(labels) != self.nlevels: - raise ValueError("Length of levels and labels must be the same.") + def _set_labels(self, labels, copy=False, validate=True): + if validate and len(labels) != self.nlevels: + raise ValueError("Length of labels must match length of levels") self._labels = FrozenList(_ensure_frozen(labs, copy=copy)._shallow_copy() for labs in labels) @@ -1956,13 +1964,16 @@ def set_labels(self, labels, inplace=False): ------- new index (of same type and class...etc) """ + if not com.is_list_like(labels) or not com.is_list_like(labels[0]): + raise TypeError("Labels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_labels(labels) - return idx + if not inplace: + return idx # remove me in 0.14 and change to readonly property __set_labels = deprecate("setting labels directly", @@ -2021,7 +2032,8 @@ def 
__array_finalize__(self, obj): # instance. return - self._set_levels(getattr(obj, 'levels', [])) + # skip the validation on first, rest will catch the errors + self._set_levels(getattr(obj, 'levels', []), validate=False) self._set_labels(getattr(obj, 'labels', [])) self._set_names(getattr(obj, 'names', [])) self.sortorder = getattr(obj, 'sortorder', None) @@ -2083,16 +2095,15 @@ def _convert_slice_indexer(self, key, typ=None): def _get_names(self): return FrozenList(level.name for level in self.levels) - def _set_names(self, values): + def _set_names(self, values, validate=True): """ sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies""" values = list(values) - if len(values) != self.nlevels: - raise ValueError('Length of names (%d) must be same as level ' - '(%d)' % (len(values), self.nlevels)) + if validate and len(values) != self.nlevels: + raise ValueError('Length of names must match length of levels') # set the name for name, level in zip(values, self.levels): level.rename(name, inplace=True) @@ -2446,7 +2457,7 @@ def __setstate__(self, state): np.ndarray.__setstate__(self, nd_state) levels, labels, sortorder, names = own_state - self._set_levels([Index(x) for x in levels]) + self._set_levels([Index(x) for x in levels], validate=False) self._set_labels(labels) self._set_names(names) self.sortorder = sortorder @@ -2473,7 +2484,7 @@ def __getitem__(self, key): new_labels = [lab[key] for lab in self.labels] # an optimization - result._set_levels(self.levels) + result._set_levels(self.levels, validate=False) result._set_labels(new_labels) result.sortorder = sortorder result._set_names(self.names) @@ -3351,17 +3362,12 @@ def _ensure_index(index_like, copy=False): return Index(index_like) -def _ensure_frozen(nd_array_like, copy=False): - if not isinstance(nd_array_like, FrozenNDArray): - arr = np.asarray(nd_array_like, dtype=np.int_) - # have to do this separately so that non-index 
input gets copied - if copy: - arr = arr.copy() - nd_array_like = arr.view(FrozenNDArray) - else: - if copy: - nd_array_like = nd_array_like.copy() - return nd_array_like +def _ensure_frozen(array_like, copy=False): + array_like = np.asanyarray(array_like, dtype=np.int_) + array_like = array_like.view(FrozenNDArray) + if copy: + array_like = array_like.copy() + return array_like def _validate_join_method(method): diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index cd26016acba5c..c7c9fd1de0fcd 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -823,7 +823,12 @@ def test_constructor_corner(self): # preventing casting arr = np.array([1, '2', 3, '4'], dtype=object) - self.assertRaises(TypeError, Int64Index, arr) + with tm.assertRaisesRegexp(TypeError, 'casting'): + Int64Index(arr) + + arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1] + with tm.assertRaisesRegexp(TypeError, 'casting'): + Int64Index(arr_with_floats) def test_hash_error(self): with tm.assertRaisesRegexp(TypeError, @@ -1240,6 +1245,62 @@ def test_set_levels_and_set_labels(self): minor_labels = [(x + 1) % 1 for x in minor_labels] new_labels = [major_labels, minor_labels] + def assert_matching(actual, expected): + # avoid specifying internal representation + # as much as possible + self.assertEqual(len(actual), len(expected)) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp) + assert_almost_equal(act, exp) + + # level changing [w/o mutation] + ind2 = self.index.set_levels(new_levels) + assert_matching(ind2.levels, new_levels) + assert_matching(self.index.levels, levels) + + # level changing [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_levels(new_levels, inplace=True) + self.assert_(inplace_return is None) + assert_matching(ind2.levels, new_levels) + + # label changing [w/o mutation] + ind2 = self.index.set_labels(new_labels) + assert_matching(ind2.labels, new_labels) + 
assert_matching(self.index.labels, labels) + + # label changing [w/ mutation] + ind2 = self.index.copy() + inplace_return = ind2.set_labels(new_labels, inplace=True) + self.assert_(inplace_return is None) + assert_matching(ind2.labels, new_labels) + + def test_set_levels_labels_names_bad_input(self): + levels, labels = self.index.levels, self.index.labels + names = self.index.names + + with tm.assertRaisesRegexp(ValueError, 'Length of levels'): + self.index.set_levels([levels[0]]) + + with tm.assertRaisesRegexp(ValueError, 'Length of labels'): + self.index.set_labels([labels[0]]) + + with tm.assertRaisesRegexp(ValueError, 'Length of names'): + self.index.set_names([names[0]]) + + # shouldn't scalar data error, instead should demand list-like + with tm.assertRaisesRegexp(TypeError, 'list of lists-like'): + self.index.set_levels(levels[0]) + + # shouldn't scalar data error, instead should demand list-like + with tm.assertRaisesRegexp(TypeError, 'list of lists-like'): + self.index.set_labels(labels[0]) + + # shouldn't scalar data error, instead should demand list-like + with tm.assertRaisesRegexp(TypeError, 'list-like'): + self.index.set_names(names[0]) + def test_metadata_immutable(self): levels, labels = self.index.levels, self.index.labels # shouldn't be able to set at either the top level or base level @@ -1342,8 +1403,13 @@ def test_constructor_single_level(self): self.assert_(single_level.name is None) def test_constructor_no_levels(self): - assertRaisesRegexp(TypeError, "non-zero number of levels/labels", + assertRaisesRegexp(ValueError, "non-zero number of levels/labels", MultiIndex, levels=[], labels=[]) + both_re = re.compile('Must pass both levels and labels') + with tm.assertRaisesRegexp(TypeError, both_re): + MultiIndex(levels=[]) + with tm.assertRaisesRegexp(TypeError, both_re): + MultiIndex(labels=[]) def test_constructor_mismatched_label_levels(self): levels = [np.array([1]), np.array([2]), np.array([3])] diff --git a/pandas/tests/test_multilevel.py 
b/pandas/tests/test_multilevel.py index 5ec97344373a2..bd431843a6b20 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -347,8 +347,8 @@ def test_frame_setitem_multi_column(self): # it broadcasts df['B', '1'] = [1, 2, 3] df['A'] = df['B', '1'] - assert_almost_equal(df['A', '1'], df['B', '1']) - assert_almost_equal(df['A', '2'], df['B', '1']) + assert_series_equal(df['A', '1'], df['B', '1']) + assert_series_equal(df['A', '2'], df['B', '1']) def test_getitem_tuple_plus_slice(self): # GH #671 diff --git a/pandas/util/testing.py b/pandas/util/testing.py index dfe81237ee15d..dd59524e90f10 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -332,11 +332,11 @@ def equalContents(arr1, arr2): return frozenset(arr1) == frozenset(arr2) -def assert_isinstance(obj, class_type_or_tuple): +def assert_isinstance(obj, class_type_or_tuple, msg=''): """asserts that obj is an instance of class_type_or_tuple""" assert isinstance(obj, class_type_or_tuple), ( - "Expected object to be of type %r, found %r instead" % ( - type(obj), class_type_or_tuple)) + "%sExpected object to be of type %r, found %r instead" % ( + msg, class_type_or_tuple, type(obj))) def assert_equal(a, b, msg=""): @@ -355,6 +355,8 @@ def assert_equal(a, b, msg=""): def assert_index_equal(left, right): + assert_isinstance(left, Index, '[index] ') + assert_isinstance(right, Index, '[index] ') if not left.equals(right): raise AssertionError("[index] left [{0} {1}], right [{2} {3}]".format(left.dtype, left, @@ -373,6 +375,8 @@ def isiterable(obj): return hasattr(obj, '__iter__') +# NOTE: don't pass an NDFrame or index to this function - may not handle it +# well. 
def assert_almost_equal(a, b, check_less_precise=False): if isinstance(a, dict) or isinstance(b, dict): return assert_dict_equal(a, b) @@ -385,9 +389,6 @@ def assert_almost_equal(a, b, check_less_precise=False): np.testing.assert_(isiterable(b)) na, nb = len(a), len(b) assert na == nb, "%s != %s" % (na, nb) - # TODO: Figure out why I thought this needed instance cheacks... - # if (isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and - # np.array_equal(a, b)): if np.array_equal(a, b): return True else:
Improved validation of levels, labels and names (plus fleshed out test cases). Fixes #4794. Changed an assert_almost_equal to assert_series_equal to better reflect actual return value. Also makes set_levels, set_labels and set_names match behavior of rename to _not_ return self if inplace.
https://api.github.com/repos/pandas-dev/pandas/pulls/5209
2013-10-13T20:30:51Z
2013-10-14T01:43:01Z
2013-10-14T01:43:01Z
2014-07-16T08:34:45Z
DOC: make api docs reflect api usage (#4539)
diff --git a/doc/source/api.rst b/doc/source/api.rst index dfb562bcc3298..2e817e9d19c3f 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -13,8 +13,6 @@ Input/Output Pickling ~~~~~~~~ -.. currentmodule:: pandas.io.pickle - .. autosummary:: :toctree: generated/ @@ -23,8 +21,6 @@ Pickling Flat File ~~~~~~~~~ -.. currentmodule:: pandas.io.parsers - .. autosummary:: :toctree: generated/ @@ -35,8 +31,6 @@ Flat File Clipboard ~~~~~~~~~ -.. currentmodule:: pandas.io.clipboard - .. autosummary:: :toctree: generated/ @@ -45,8 +39,6 @@ Clipboard Excel ~~~~~ -.. currentmodule:: pandas.io.excel - .. autosummary:: :toctree: generated/ @@ -56,8 +48,6 @@ Excel JSON ~~~~ -.. currentmodule:: pandas.io.json - .. autosummary:: :toctree: generated/ @@ -66,8 +56,6 @@ JSON HTML ~~~~ -.. currentmodule:: pandas.io.html - .. autosummary:: :toctree: generated/ @@ -76,8 +64,6 @@ HTML HDFStore: PyTables (HDF5) ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: pandas.io.pytables - .. autosummary:: :toctree: generated/ @@ -89,64 +75,60 @@ HDFStore: PyTables (HDF5) SQL ~~~ -.. currentmodule:: pandas.io.sql .. autosummary:: :toctree: generated/ read_sql + +.. currentmodule:: pandas.io.sql + +.. autosummary:: + :toctree: generated/ + read_frame write_frame +.. currentmodule:: pandas STATA ~~~~~ +.. autosummary:: + :toctree: generated/ + + read_stata + .. currentmodule:: pandas.io.stata .. autosummary:: :toctree: generated/ - read_stata StataReader.data StataReader.data_label StataReader.value_labels StataReader.variable_labels StataWriter.write_file +.. currentmodule:: pandas General functions ----------------- Data manipulations ~~~~~~~~~~~~~~~~~~ -.. currentmodule:: pandas.tools.pivot .. autosummary:: :toctree: generated/ pivot_table - -.. currentmodule:: pandas.tools.merge - -.. autosummary:: - :toctree: generated/ - merge concat - -.. currentmodule:: pandas.core.reshape - -.. autosummary:: - :toctree: generated/ - get_dummies Top-level missing data ~~~~~~~~~~~~~~~~~~~~~~ -.. 
currentmodule:: pandas.core.common - .. autosummary:: :toctree: generated/ @@ -156,30 +138,22 @@ Top-level missing data Top-level dealing with datetimes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: pandas.tseries.tools - .. autosummary:: :toctree: generated/ to_datetime - Top-level evaluation ~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: pandas - .. autosummary:: :toctree: generated/ eval - Standard moving window functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: pandas.stats.moments - .. autosummary:: :toctree: generated/ @@ -199,8 +173,6 @@ Standard moving window functions Standard expanding window functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: pandas.stats.moments - .. autosummary:: :toctree: generated/ @@ -229,8 +201,6 @@ Exponentially-weighted moving window functions ewmcorr ewmcov -.. currentmodule:: pandas - .. _api.series: Series @@ -875,8 +845,6 @@ Serialization / IO / Conversion Panel.to_frame Panel.to_clipboard -.. currentmodule:: pandas.core.index - .. _api.index: Index @@ -961,8 +929,6 @@ Properties Index.is_monotonic Index.is_numeric -.. currentmodule:: pandas.tseries.index - .. _api.datetimeindex: DatetimeIndex @@ -1020,3 +986,226 @@ Conversion DatetimeIndex.to_pydatetime +.. + HACK - see github issue #4539. To ensure old links remain valid, include + here the autosummaries with previous currentmodules as a comment and add + them to a hidden toctree (to avoid warnings): + +.. 
toctree:: + :hidden: + + generated/pandas.core.common.isnull + generated/pandas.core.common.notnull + generated/pandas.core.reshape.get_dummies + generated/pandas.io.clipboard.read_clipboard + generated/pandas.io.excel.ExcelFile.parse + generated/pandas.io.excel.read_excel + generated/pandas.io.html.read_html + generated/pandas.io.json.read_json + generated/pandas.io.parsers.read_csv + generated/pandas.io.parsers.read_fwf + generated/pandas.io.parsers.read_table + generated/pandas.io.pickle.read_pickle + generated/pandas.io.pytables.HDFStore.append + generated/pandas.io.pytables.HDFStore.get + generated/pandas.io.pytables.HDFStore.put + generated/pandas.io.pytables.HDFStore.select + generated/pandas.io.pytables.read_hdf + generated/pandas.io.sql.read_sql + generated/pandas.io.stata.read_stata + generated/pandas.stats.moments.ewma + generated/pandas.stats.moments.ewmcorr + generated/pandas.stats.moments.ewmcov + generated/pandas.stats.moments.ewmstd + generated/pandas.stats.moments.ewmvar + generated/pandas.stats.moments.expanding_apply + generated/pandas.stats.moments.expanding_corr + generated/pandas.stats.moments.expanding_count + generated/pandas.stats.moments.expanding_cov + generated/pandas.stats.moments.expanding_kurt + generated/pandas.stats.moments.expanding_mean + generated/pandas.stats.moments.expanding_median + generated/pandas.stats.moments.expanding_quantile + generated/pandas.stats.moments.expanding_skew + generated/pandas.stats.moments.expanding_std + generated/pandas.stats.moments.expanding_sum + generated/pandas.stats.moments.expanding_var + generated/pandas.stats.moments.rolling_apply + generated/pandas.stats.moments.rolling_corr + generated/pandas.stats.moments.rolling_count + generated/pandas.stats.moments.rolling_cov + generated/pandas.stats.moments.rolling_kurt + generated/pandas.stats.moments.rolling_mean + generated/pandas.stats.moments.rolling_median + generated/pandas.stats.moments.rolling_quantile + 
generated/pandas.stats.moments.rolling_skew + generated/pandas.stats.moments.rolling_std + generated/pandas.stats.moments.rolling_sum + generated/pandas.stats.moments.rolling_var + generated/pandas.tools.merge.concat + generated/pandas.tools.merge.merge + generated/pandas.tools.pivot.pivot_table + generated/pandas.tseries.tools.to_datetime + +.. + .. currentmodule:: pandas.io.pickle + + .. autosummary:: + :toctree: generated/ + + read_pickle + + .. currentmodule:: pandas.io.parsers + + .. autosummary:: + :toctree: generated/ + + read_table + read_csv + read_fwf + + .. currentmodule:: pandas.io.clipboard + + .. autosummary:: + :toctree: generated/ + + read_clipboard + + .. currentmodule:: pandas.io.excel + + .. autosummary:: + :toctree: generated/ + + read_excel + ExcelFile.parse + + .. currentmodule:: pandas.io.json + + .. autosummary:: + :toctree: generated/ + + read_json + + .. currentmodule:: pandas.io.html + + .. autosummary:: + :toctree: generated/ + + read_html + + .. currentmodule:: pandas.io.pytables + + .. autosummary:: + :toctree: generated/ + + read_hdf + HDFStore.put + HDFStore.append + HDFStore.get + HDFStore.select + + .. currentmodule:: pandas.io.sql + + .. autosummary:: + :toctree: generated/ + + read_sql + read_frame + write_frame + + .. currentmodule:: pandas.io.stata + + .. autosummary:: + :toctree: generated/ + + read_stata + StataReader.data + StataReader.data_label + StataReader.value_labels + StataReader.variable_labels + StataWriter.write_file + + .. currentmodule:: pandas.tools.pivot + + .. autosummary:: + :toctree: generated/ + + pivot_table + + .. currentmodule:: pandas.tools.merge + + .. autosummary:: + :toctree: generated/ + + merge + concat + + .. currentmodule:: pandas.core.reshape + + .. autosummary:: + :toctree: generated/ + + get_dummies + + .. currentmodule:: pandas.core.common + + .. autosummary:: + :toctree: generated/ + + isnull + notnull + + .. currentmodule:: pandas.tseries.tools + + .. 
autosummary:: + :toctree: generated/ + + to_datetime + + + .. currentmodule:: pandas.stats.moments + + .. autosummary:: + :toctree: generated/ + + rolling_count + rolling_sum + rolling_mean + rolling_median + rolling_var + rolling_std + rolling_corr + rolling_cov + rolling_skew + rolling_kurt + rolling_apply + rolling_quantile + + + .. currentmodule:: pandas.stats.moments + + .. autosummary:: + :toctree: generated/ + + expanding_count + expanding_sum + expanding_mean + expanding_median + expanding_var + expanding_std + expanding_corr + expanding_cov + expanding_skew + expanding_kurt + expanding_apply + expanding_quantile + + + .. autosummary:: + :toctree: generated/ + + ewma + ewmstd + ewmvar + ewmcorr + ewmcov
Closes #4539. This removes all use of `.. currentmodule:: pandas.some.submodule` (in favor of `.. currentmodule:: pandas`) in api.rst. Rationale is that docs should reflect the user api usage. Only exception are `pandas.io.sql.read_frame/write_frame` and `pandas.io.stata.StataReader` methods, because they are not available in the top-level pandas namespace. All old links will remain valid because they are still generated because I added all those in a comment at the bottom in api.rst (only not for Index, because this was only added very recently in api.rst). The **downside** with this approach is that you now get warnings when building the docs (`document isn't included in any toctree` for these functions). I am not sure how I can prevent this while keeping the old links but not including it visually in api.rst.
https://api.github.com/repos/pandas-dev/pandas/pulls/5208
2013-10-13T19:03:15Z
2013-10-14T06:37:31Z
2013-10-14T06:37:31Z
2015-04-23T21:47:53Z
DOC: Note -> Notes (numpydoc docstring section)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 8938e48eb493b..224fd56c6946d 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1990,8 +1990,8 @@ def transform(self, func, *args, **kwargs): f : function Function to apply to each subframe - Note - ---- + Notes + ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. @@ -2102,8 +2102,8 @@ def filter(self, func, dropna=True, *args, **kwargs): dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. - Note - ---- + Notes + ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. diff --git a/pandas/core/index.py b/pandas/core/index.py index 98f190360bc33..9d6b8a13608f7 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -71,8 +71,8 @@ class Index(FrozenNDArray): name : object Name to be stored in the index - Note - ---- + Notes + ----- An Index instance can **only** contain hashable objects """ # To hand over control to subclasses @@ -1634,8 +1634,8 @@ class Int64Index(Index): name : object Name to be stored in the index - Note - ---- + Notes + ----- An Index instance can **only** contain hashable objects """ @@ -1731,8 +1731,8 @@ class Float64Index(Index): name : object Name to be stored in the index - Note - ---- + Notes + ----- An Index instance can **only** contain hashable objects """ diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 312667930b54d..a608b40847228 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -357,8 +357,8 @@ def pivot_simple(index, columns, values): values : ndarray Values to use for populating new frame's values - Note - ---- + Notes + ----- Obviously, all 3 of the input arguments must have the same length Returns diff --git a/pandas/core/strings.py b/pandas/core/strings.py index a5a9caa4f542d..2c47911318238 100644 --- 
a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -345,8 +345,8 @@ def str_extract(arr, pat, flags=0): extracted groups : Series (one group) or DataFrame (multiple groups) - Note - ---- + Notes + ----- Compare to the string method match, which returns re.match objects. """ regex = re.compile(pat, flags=flags) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index e496bf46cf57a..309b6fbb9a51a 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1354,8 +1354,8 @@ def generate_range(start=None, end=None, periods=None, time_rule : (legacy) name of DateOffset object to be used, optional Corresponds with names expected by tseries.frequencies.get_offset - Note - ---- + Notes + ----- * This method is faster for generating weekdays than dateutil.rrule * At least two of (start, end, periods) must be specified. * If both start and end are specified, the returned dates will
While adding functions/methods to the docs in #5160, I noticed an error in the build output mentioning an unknown 'Note' section. This has to be 'Notes' for numpydoc. So I searched for all occurences and adjusted them.
https://api.github.com/repos/pandas-dev/pandas/pulls/5207
2013-10-13T18:23:00Z
2013-10-14T02:13:48Z
2013-10-14T02:13:48Z
2014-07-13T12:48:43Z
API: rename _prop_attributes to __finalize__ (in NDFrame)
diff --git a/pandas/core/common.py b/pandas/core/common.py index c34dfedc7130c..80ee9cd34779d 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2554,7 +2554,8 @@ def save(obj, path): # TODO remove in 0.13 def _maybe_match_name(a, b): - name = None - if a.name == b.name: - name = a.name - return name + a_name = getattr(a,'name',None) + b_name = getattr(b,'name',None) + if a_name == b_name: + return a_name + return None diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a5da0b4f23c9a..4dfe0a55fce28 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -47,8 +47,7 @@ def _single_replace(self, to_replace, method, inplace, limit): if values.dtype == orig_dtype and inplace: return - result = pd.Series(values, index=self.index, name=self.name, - dtype=self.dtype) + result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self) if inplace: self._data = result._data @@ -72,7 +71,7 @@ class NDFrame(PandasObject): _internal_names = [ '_data', 'name', '_cacher', '_subtyp', '_index', '_default_kind', '_default_fill_value'] _internal_names_set = set(_internal_names) - _prop_attributes = [] + _metadata = [] def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False): @@ -413,7 +412,7 @@ def transpose(self, *args, **kwargs): new_values = self.values.transpose(axes_numbers) if kwargs.get('copy') or (len(args) and args[-1]): new_values = new_values.copy() - return self._constructor(new_values, **new_axes) + return self._constructor(new_values, **new_axes).__finalize__(self) def swapaxes(self, axis1, axis2, copy=True): """ @@ -439,7 +438,7 @@ def swapaxes(self, axis1, axis2, copy=True): if copy: new_values = new_values.copy() - return self._constructor(new_values, *new_axes) + return self._constructor(new_values, *new_axes).__finalize__(self) def pop(self, item): """ @@ -543,7 +542,7 @@ def f(x): self._clear_item_cache() else: - return result._propogate_attributes(self) + return 
result.__finalize__(self) rename.__doc__ = _shared_docs['rename'] @@ -655,14 +654,14 @@ def __abs__(self): def _wrap_array(self, arr, axes, copy=False): d = self._construct_axes_dict_from(self, axes, copy=copy) - return self._constructor(arr, **d) + return self._constructor(arr, **d).__finalize__(self) def __array__(self, dtype=None): return _values_from_object(self) def __array_wrap__(self, result): d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - return self._constructor(result, **d) + return self._constructor(result, **d).__finalize__(self) def to_dense(self): # compat @@ -1024,7 +1023,7 @@ def take(self, indices, axis=0, convert=True): new_data = self._data.reindex_axis(new_items, indexer=indices, axis=0) else: new_data = self._data.take(indices, axis=baxis) - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) # TODO: Check if this was clearer in 0.12 def select(self, crit, axis=0): @@ -1135,7 +1134,7 @@ def add_prefix(self, prefix): with_prefix : type of caller """ new_data = self._data.add_prefix(prefix) - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def add_suffix(self, suffix): """ @@ -1150,7 +1149,7 @@ def add_suffix(self, suffix): with_suffix : type of caller """ new_data = self._data.add_suffix(suffix) - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def sort_index(self, axis=0, ascending=True): """ @@ -1244,7 +1243,8 @@ def reindex(self, *args, **kwargs): return self # perform the reindex on the axes - return self._reindex_axes(axes, level, limit, method, fill_value, copy, takeable=takeable)._propogate_attributes(self) + return self._reindex_axes(axes, level, limit, + method, fill_value, copy, takeable=takeable).__finalize__(self) def _reindex_axes(self, axes, level, limit, method, fill_value, copy, takeable=False): """ perform the reinxed for all the axes """ @@ -1332,7 +1332,7 @@ def reindex_axis(self, labels, 
axis=0, method=None, level=None, copy=True, new_index, indexer = axis_values.reindex(labels, method, level, limit=limit, copy_if_needed=True) return self._reindex_with_indexers({axis: [new_index, indexer]}, method=method, fill_value=fill_value, - limit=limit, copy=copy)._propogate_attributes(self) + limit=limit, copy=copy).__finalize__(self) def _reindex_with_indexers(self, reindexers, method=None, fill_value=np.nan, limit=None, copy=False, allow_dups=False): """ allow_dups indicates an internal call here """ @@ -1370,7 +1370,7 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=np.nan, lim if copy and new_data is self._data: new_data = new_data.copy() - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def _reindex_axis(self, new_index, fill_method, axis, copy): new_data = self._data.reindex_axis(new_index, axis=axis, @@ -1379,7 +1379,7 @@ def _reindex_axis(self, new_index, fill_method, axis, copy): if new_data is self._data and not copy: return self else: - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def filter(self, items=None, like=None, regex=None, axis=None): """ @@ -1421,9 +1421,18 @@ def filter(self, items=None, like=None, regex=None, axis=None): #---------------------------------------------------------------------- # Attribute access - def _propogate_attributes(self, other): - """ propogate attributes from other to self""" - for name in self._prop_attributes: + def __finalize__(self, other, method=None, **kwargs): + """ + propagate metadata from other to self + + Parameters + ---------- + other : the object from which to get the attributes that we are going to propagate + method : optional, a passed method name ; possibily to take different types + of propagation actions based on this + + """ + for name in self._metadata: object.__setattr__(self, name, getattr(other, name, None)) return self @@ -1484,7 +1493,7 @@ def consolidate(self, 
inplace=False): cons_data = self._protect_consolidate(f) if cons_data is self._data: cons_data = cons_data.copy() - return self._constructor(cons_data) + return self._constructor(cons_data).__finalize__(self) @property def _is_mixed_type(self): @@ -1504,10 +1513,10 @@ def _protect_consolidate(self, f): return result def _get_numeric_data(self): - return self._constructor(self._data.get_numeric_data()) + return self._constructor(self._data.get_numeric_data()).__finalize__(self) def _get_bool_data(self): - return self._constructor(self._data.get_bool_data()) + return self._constructor(self._data.get_bool_data()).__finalize__(self) #---------------------------------------------------------------------- # Internal Interface Methods @@ -1584,7 +1593,7 @@ def as_blocks(self, columns=None): for b in self._data.blocks: b = b.reindex_items_from(columns or b.items) bd[str(b.dtype)] = self._constructor( - BlockManager([b], [b.items, self.index])) + BlockManager([b], [b.items, self.index])).__finalize__(self) return bd @property @@ -1608,7 +1617,7 @@ def astype(self, dtype, copy=True, raise_on_error=True): mgr = self._data.astype( dtype, copy=copy, raise_on_error=raise_on_error) - return self._constructor(mgr)._propogate_attributes(self) + return self._constructor(mgr).__finalize__(self) def copy(self, deep=True): """ @@ -1626,7 +1635,7 @@ def copy(self, deep=True): data = self._data if deep: data = data.copy() - return self._constructor(data)._propogate_attributes(self) + return self._constructor(data).__finalize__(self) def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): """ @@ -1642,7 +1651,9 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): ------- converted : asm as input object """ - return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, copy=copy)) + return self._constructor(self._data.convert(convert_dates=convert_dates, + convert_numeric=convert_numeric, + 
copy=copy)).__finalize__(self) #---------------------------------------------------------------------- # Filling NA's @@ -1713,7 +1724,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, # fill in 2d chunks result = dict([ (col,s.fillna(method=method, value=value)) for col, s in compat.iteritems(self) ]) - return self._constructor.from_dict(result) + return self._constructor.from_dict(result).__finalize__(self) # 2d or less method = com._clean_fill_method(method) @@ -1750,7 +1761,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, if inplace: self._data = new_data else: - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def ffill(self, axis=0, inplace=False, limit=None, downcast=None): return self.fillna(method='ffill', axis=axis, inplace=inplace, @@ -1991,7 +2002,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, if inplace: self._data = new_data else: - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def interpolate(self, method='linear', axis=0, limit=None, inplace=False, downcast='infer', **kwargs): @@ -2101,7 +2112,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, else: self._data = new_data else: - res = self._constructor(new_data, index=self.index) + res = self._constructor(new_data).__finalize__(self) if axis == 1: res = res.T return res @@ -2113,13 +2124,13 @@ def isnull(self): """ Return a boolean same-sized object indicating if the values are null """ - return self.__class__(isnull(self),**self._construct_axes_dict())._propogate_attributes(self) + return self.__class__(isnull(self),**self._construct_axes_dict()).__finalize__(self) def notnull(self): """ Return a boolean same-sized object indicating if the values are not null """ - return self.__class__(notnull(self),**self._construct_axes_dict())._propogate_attributes(self) + return 
self.__class__(notnull(self),**self._construct_axes_dict()).__finalize__(self) def clip(self, lower=None, upper=None, out=None): """ @@ -2484,7 +2495,7 @@ def _align_frame(self, other, join='outer', axis=None, level=None, left = left.fillna(axis=fill_axis, method=method, limit=limit) right = right.fillna(axis=fill_axis, method=method, limit=limit) - return left, right + return left.__finalize__(self), right.__finalize__(other) def _align_series(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, @@ -2543,7 +2554,7 @@ def _align_series(self, other, join='outer', axis=None, level=None, right_result.fillna(fill_value, method=method, limit=limit)) else: - return left_result, right_result + return left_result.__finalize__(self), right_result.__finalize__(other) def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, try_cast=False, raise_on_error=True): @@ -2680,7 +2691,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, new_data = self._data.where( other, cond, align=axis is None, raise_on_error=raise_on_error, try_cast=try_cast) - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def mask(self, cond): """ @@ -2728,7 +2739,7 @@ def shift(self, periods=1, freq=None, axis=0, **kwds): else: return self.tshift(periods, freq, **kwds) - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def tshift(self, periods=1, freq=None, axis=0, **kwds): """ @@ -2789,7 +2800,7 @@ def tshift(self, periods=1, freq=None, axis=0, **kwds): new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods, offset) - return self._constructor(new_data) + return self._constructor(new_data).__finalize__(self) def truncate(self, before=None, after=None, copy=True): """Truncates a sorted NDFrame before and/or after some particular @@ -2864,7 +2875,7 @@ def tz_convert(self, tz, axis=0, copy=True): 
new_obj._set_axis(0, new_ax) self._clear_item_cache() - return new_obj + return new_obj.__finalize__(self) def tz_localize(self, tz, axis=0, copy=True, infer_dst=False): """ @@ -2902,7 +2913,7 @@ def tz_localize(self, tz, axis=0, copy=True, infer_dst=False): new_obj._set_axis(0, new_ax) self._clear_item_cache() - return new_obj + return new_obj.__finalize__(self) #---------------------------------------------------------------------- # Numeric Methods @@ -3128,7 +3139,7 @@ def func(self, axis=None, dtype=None, out=None, skipna=True, **kwargs): d = self._construct_axes_dict() d['copy'] = False - return self._constructor(result, **d)._propogate_attributes(self) + return self._constructor(result, **d).__finalize__(self) func.__name__ = name return func diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 17d524978158b..f8ab35656d99c 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -515,7 +515,7 @@ def wrapper(self, other): if len(self) != len(other): raise ValueError('Lengths must match to compare') return self._constructor(na_op(self.values, np.asarray(other)), - index=self.index, name=self.name) + index=self.index).__finalize__(self) else: mask = isnull(self) @@ -590,7 +590,7 @@ def wrapper(self, other): else: # scalars return self._constructor(na_op(self.values, other), - index=self.index, name=self.name).fillna(False).astype(bool) + index=self.index).fillna(False).astype(bool).__finalize__(self) return wrapper @@ -643,8 +643,8 @@ def f(self, other, level=None, fill_value=None): return self._binop(self._constructor(other, self.index), op, level=level, fill_value=fill_value) else: - return self._constructor(op(self.values, other), self.index, - name=self.name) + return self._constructor(op(self.values, other), + self.index).__finalize__(self) f.__name__ = name return f diff --git a/pandas/core/series.py b/pandas/core/series.py index fba3e946de0b0..526355b0f4dc3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -116,7 +116,7 @@ class 
Series(generic.NDFrame): If None, dtype will be inferred copy : boolean, default False, copy input data """ - _prop_attributes = ['name'] + _metadata = ['name'] def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): @@ -334,8 +334,7 @@ def __len__(self): return len(self._data) def view(self, dtype=None): - return self._constructor(self.values.view(dtype), index=self.index, - name=self.name) + return self._constructor(self.values.view(dtype), index=self.index).__finalize__(self) def __array__(self, result=None): """ the array interface, return my values """ @@ -345,8 +344,7 @@ def __array_wrap__(self, result): """ Gets called prior to a ufunc (and after) """ - return self._constructor(result, index=self.index, name=self.name, - copy=False) + return self._constructor(result, index=self.index, copy=False).__finalize__(self) def __contains__(self, key): return key in self.index @@ -473,8 +471,7 @@ def _slice(self, slobj, axis=0, raise_on_error=False, typ=None): if raise_on_error: _check_slice_bounds(slobj, self.values) slobj = self.index._convert_slice_indexer(slobj,typ=typ or 'getitem') - return self._constructor(self.values[slobj], index=self.index[slobj], - name=self.name) + return self._constructor(self.values[slobj], index=self.index[slobj]).__finalize__(self) def __getitem__(self, key): try: @@ -565,12 +562,11 @@ def _get_values_tuple(self, key): # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) - return self._constructor(self.values[indexer], index=new_index, name=self.name) + return self._constructor(self.values[indexer], index=new_index).__finalize__(self) def _get_values(self, indexer): try: - return self._constructor(self._data.get_slice(indexer), - name=self.name, fastpath=True) + return self._constructor(self._data.get_slice(indexer), fastpath=True).__finalize__(self) except Exception: return self.values[indexer] @@ -704,7 +700,7 @@ def repeat(self, reps): """ 
new_index = self.index.repeat(reps) new_values = self.values.repeat(reps) - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) def reshape(self, *args, **kwargs): """ @@ -823,8 +819,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): # set name if it was passed, otherwise, keep the previous name self.name = name or self.name else: - return self._constructor(self.values.copy(), index=new_index, - name=self.name) + return self._constructor(self.values.copy(), index=new_index).__finalize__(self) elif inplace: raise TypeError('Cannot reset_index inplace on a Series ' 'to create a DataFrame') @@ -977,11 +972,11 @@ def iteritems(self): # inversion def __neg__(self): arr = operator.neg(self.values) - return self._constructor(arr, self.index, name=self.name) + return self._constructor(arr, self.index).__finalize__(self) def __invert__(self): arr = operator.inv(self.values) - return self._constructor(arr, self.index, name=self.name) + return self._constructor(arr, self.index).__finalize__(self) #---------------------------------------------------------------------- # unbox reductions @@ -1060,8 +1055,8 @@ def to_sparse(self, kind='block', fill_value=None): sp : SparseSeries """ from pandas.core.sparse import SparseSeries - return SparseSeries(self, kind=kind, fill_value=fill_value, - name=self.name) + return SparseSeries(self, kind=kind, + fill_value=fill_value).__finalize__(self) def head(self, n=5): """Returns first n rows of Series @@ -1101,14 +1096,14 @@ def count(self, level=None): level_index = self.index.levels[level] if len(self) == 0: - return self._constructor(0, index=level_index) + return self._constructor(0, index=level_index).__finalize__(self) # call cython function max_bin = len(level_index) labels = com._ensure_int64(self.index.labels[level]) counts = lib.count_level_1d(mask.view(pa.uint8), labels, max_bin) - return 
self._constructor(counts, index=level_index) + return self._constructor(counts, index=level_index).__finalize__(self) return notnull(_values_from_object(self)).sum() @@ -1191,7 +1186,7 @@ def duplicated(self, take_last=False): """ keys = _ensure_object(self.values) duplicated = lib.duplicated(keys, take_last=take_last) - return self._constructor(duplicated, index=self.index, name=self.name) + return self._constructor(duplicated, index=self.index).__finalize__(self) def idxmin(self, axis=None, out=None, skipna=True): """ @@ -1256,8 +1251,7 @@ def round(self, decimals=0, out=None): """ result = _values_from_object(self).round(decimals, out=out) if out is None: - result = self._constructor( - result, index=self.index, name=self.name) + result = self._constructor(result, index=self.index).__finalize__(self) return result @@ -1351,7 +1345,7 @@ def pretty_name(x): lb), self.median(), self.quantile(ub), self.max()] - return self._constructor(data, index=names) + return self._constructor(data, index=names).__finalize__(self) def corr(self, other, method='pearson', min_periods=None): @@ -1415,7 +1409,7 @@ def diff(self, periods=1): diffed : Series """ result = com.diff(_values_from_object(self), periods) - return self._constructor(result, self.index, name=self.name) + return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self): """ @@ -1460,7 +1454,7 @@ def dot(self, other): if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), - index=other.columns) + index=other.columns).__finalize__(self) elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): @@ -1539,7 +1533,7 @@ def _binop(self, other, func, level=None, fill_value=None): result = func(this_vals, other_vals) name = _maybe_match_name(self, other) - return self._constructor(result, index=new_index, name=name) + return self._constructor(result, index=new_index).__finalize__(self) def combine(self, other, func, fill_value=nan): 
""" @@ -1589,7 +1583,7 @@ def combine_first(self, other): other = other.reindex(new_index, copy=False) name = _maybe_match_name(self, other) rs_vals = com._where_compat(isnull(this), other.values, this.values) - return self._constructor(rs_vals, index=new_index, name=name) + return self._constructor(rs_vals, index=new_index).__finalize__(self) def update(self, other): """ @@ -1673,7 +1667,7 @@ def sort_index(self, ascending=True): ascending=ascending) new_values = self.values.take(indexer) - return self._constructor(new_values, new_labels, name=self.name) + return self._constructor(new_values, index=new_labels).__finalize__(self) def argsort(self, axis=0, kind='quicksort', order=None): """ @@ -1701,11 +1695,11 @@ def argsort(self, axis=0, kind='quicksort', order=None): -1, index=self.index, name=self.name, dtype='int64') notmask = -mask result[notmask] = np.argsort(values[notmask], kind=kind) - return self._constructor(result, index=self.index, name=self.name) + return self._constructor(result, index=self.index).__finalize__(self) else: return self._constructor( np.argsort(values, kind=kind), index=self.index, - name=self.name, dtype='int64') + dtype='int64').__finalize__(self) def rank(self, method='average', na_option='keep', ascending=True): """ @@ -1731,7 +1725,7 @@ def rank(self, method='average', na_option='keep', ascending=True): from pandas.core.algorithms import rank ranks = rank(self.values, method=method, na_option=na_option, ascending=ascending) - return self._constructor(ranks, index=self.index, name=self.name) + return self._constructor(ranks, index=self.index).__finalize__(self) def order(self, na_last=True, ascending=True, kind='mergesort'): """ @@ -1783,8 +1777,8 @@ def _try_kind_sort(arr): sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] - return self._constructor(arr[sortedIdx], index=self.index[sortedIdx], - name=self.name) + return self._constructor(arr[sortedIdx], + index=self.index[sortedIdx]).__finalize__(self) def 
sortlevel(self, level=0, ascending=True): """ @@ -1806,7 +1800,7 @@ def sortlevel(self, level=0, ascending=True): new_index, indexer = self.index.sortlevel(level, ascending=ascending) new_values = self.values.take(indexer) - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) def swaplevel(self, i, j, copy=True): """ @@ -1822,7 +1816,8 @@ def swaplevel(self, i, j, copy=True): swapped : Series """ new_index = self.index.swaplevel(i, j) - return self._constructor(self.values, index=new_index, copy=copy, name=self.name) + return self._constructor(self.values, index=new_index, + copy=copy).__finalize__(self) def reorder_levels(self, order): """ @@ -1934,10 +1929,10 @@ def map_f(values, f): indexer = arg.index.get_indexer(values) new_values = com.take_1d(arg.values, indexer) - return self._constructor(new_values, index=self.index, name=self.name) + return self._constructor(new_values, index=self.index).__finalize__(self) else: mapped = map_f(values, arg) - return self._constructor(mapped, index=self.index, name=self.name) + return self._constructor(mapped, index=self.index).__finalize__(self) def apply(self, func, convert_dtype=True, args=(), **kwds): """ @@ -1980,7 +1975,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): from pandas.core.frame import DataFrame return DataFrame(mapped.tolist(), index=self.index) else: - return self._constructor(mapped, index=self.index, name=self.name) + return self._constructor(mapped, index=self.index).__finalize__(self) def _reduce(self, op, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): @@ -1995,7 +1990,7 @@ def _reindex_indexer(self, new_index, indexer, copy): # be subclass-friendly new_values = com.take_1d(self.get_values(), indexer) - return self._constructor(new_values, new_index, name=self.name) + return self._constructor(new_values, index=new_index) def _needs_reindex_multi(self, axes, method, level): 
""" check if we do need a multi reindex; this is for compat with higher dims """ @@ -2037,7 +2032,7 @@ def take(self, indices, axis=0, convert=True): indices = com._ensure_platform_int(indices) new_index = self.index.take(indices) new_values = self.values.take(indices) - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) def isin(self, values): """ @@ -2084,7 +2079,7 @@ def isin(self, values): "{0!r}".format(type(values).__name__)) value_set = set(values) result = lib.ismember(_values_from_object(self), value_set) - return self._constructor(result, self.index, name=self.name) + return self._constructor(result, index=self.index).__finalize__(self) def between(self, left, right, inclusive=True): """ @@ -2270,11 +2265,11 @@ def asof(self, where): locs = self.index.asof_locs(where, notnull(values)) new_values = com.take_1d(values, locs) - return self._constructor(new_values, index=where, name=self.name) + return self._constructor(new_values, index=where).__finalize__(self) @property def weekday(self): - return self._constructor([d.weekday() for d in self.index], index=self.index) + return self._constructor([d.weekday() for d in self.index], index=self.index).__finalize__(self) def tz_convert(self, tz, copy=True): """ @@ -2296,7 +2291,7 @@ def tz_convert(self, tz, copy=True): if copy: new_values = new_values.copy() - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) def tz_localize(self, tz, copy=True, infer_dst=False): """ @@ -2333,7 +2328,7 @@ def tz_localize(self, tz, copy=True, infer_dst=False): if copy: new_values = new_values.copy() - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) @cache_readonly def str(self): @@ -2361,7 +2356,7 @@ def to_timestamp(self, freq=None, how='start', 
copy=True): new_values = new_values.copy() new_index = self.index.to_timestamp(freq=freq, how=how) - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) def to_period(self, freq=None, copy=True): """ @@ -2383,7 +2378,7 @@ def to_period(self, freq=None, copy=True): if freq is None: freq = self.index.freqstr or self.index.inferred_freq new_index = self.index.to_period(freq=freq) - return self._constructor(new_values, index=new_index, name=self.name) + return self._constructor(new_values, index=new_index).__finalize__(self) Series._setup_axes(['index'], info_axis=0, stat_axis=0) Series._add_numeric_operations() diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index ce29b6974de86..88464d683d543 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -124,7 +124,7 @@ def __init__(self, data=None, index=None, columns=None, @property def _constructor(self): - def wrapper(data, index=None, columns=None, default_fill_value=None, kind=None, fill_value=None, copy=False): + def wrapper(data=None, index=None, columns=None, default_fill_value=None, kind=None, fill_value=None, copy=False): result = SparseDataFrame(data, index=index, columns=columns, default_fill_value=fill_value, default_kind=kind, @@ -205,7 +205,7 @@ def _init_matrix(self, data, index, columns, dtype=None): def __array_wrap__(self, result): return SparseDataFrame(result, index=self.index, columns=self.columns, default_kind=self._default_kind, - default_fill_value=self._default_fill_value) + default_fill_value=self._default_fill_value).__finalize__(self) def __getstate__(self): # pickling @@ -420,7 +420,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): raise NotImplementedError if self.empty and other.empty: - return SparseDataFrame(index=new_index) + return SparseDataFrame(index=new_index).__finalize__(self) new_data = {} new_fill_value = None @@ -452,7 +452,7 @@ def 
_combine_frame(self, other, func, fill_value=None, level=None): index=new_index, columns=new_columns, default_fill_value=new_fill_value, - fill_value=new_fill_value) + fill_value=new_fill_value).__finalize__(self) def _combine_match_index(self, other, func, fill_value=None): new_data = {} @@ -482,7 +482,7 @@ def _combine_match_index(self, other, func, fill_value=None): index=new_index, columns=self.columns, default_fill_value=fill_value, - fill_value=self.default_fill_value) + fill_value=self.default_fill_value).__finalize__(self) def _combine_match_columns(self, other, func, fill_value): # patched version of DataFrame._combine_match_columns to account for @@ -508,7 +508,7 @@ def _combine_match_columns(self, other, func, fill_value): index=self.index, columns=union, default_fill_value=self.default_fill_value, - fill_value=self.default_fill_value) + fill_value=self.default_fill_value).__finalize__(self) def _combine_const(self, other, func): new_data = {} @@ -519,7 +519,7 @@ def _combine_const(self, other, func): index=self.index, columns=self.columns, default_fill_value=self.default_fill_value, - fill_value=self.default_fill_value) + fill_value=self.default_fill_value).__finalize__(self) def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None, takeable=False): @@ -598,7 +598,7 @@ def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit else: new_arrays[col] = self[col] - return self._constructor(new_arrays, index=index, columns=columns) + return SparseDataFrame(new_arrays, index=index, columns=columns).__finalize__(self) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): @@ -656,7 +656,7 @@ def transpose(self): return SparseDataFrame(self.values.T, index=self.columns, columns=self.index, default_fill_value=self._default_fill_value, - default_kind=self._default_kind) + default_kind=self._default_kind).__finalize__(self) T = property(transpose) @Appender(DataFrame.count.__doc__) 
@@ -705,10 +705,10 @@ def apply(self, func, axis=0, broadcast=False, reduce=False): applied = func(v) applied.fill_value = func(applied.fill_value) new_series[k] = applied - return SparseDataFrame(new_series, index=self.index, - columns=self.columns, - default_fill_value=self._default_fill_value, - default_kind=self._default_kind) + return self._constructor(new_series, index=self.index, + columns=self.columns, + default_fill_value=self._default_fill_value, + kind=self._default_kind).__finalize__(self) else: if not broadcast: return self._apply_standard(func, axis, reduce=reduce) diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index eb97eec75be36..4d8b2578426ec 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -289,7 +289,7 @@ def __array_wrap__(self, result): index=self.index, sparse_index=self.sp_index, fill_value=self.fill_value, - copy=False) + copy=False).__finalize__(self) def __array_finalize__(self, obj): """ @@ -368,7 +368,7 @@ def __getitem__(self, key): key = _values_from_object(key) dataSlice = self.values[key] new_index = Index(self.index.view(ndarray)[key]) - return self._constructor(dataSlice, index=new_index, name=self.name) + return self._constructor(dataSlice, index=new_index).__finalize__(self) def _set_with_engine(self, key, value): return self.set_value(key, value) @@ -383,9 +383,9 @@ def abs(self): abs: type of caller """ res_sp_values = np.abs(self.sp_values) - return SparseSeries(res_sp_values, index=self.index, - sparse_index=self.sp_index, - fill_value=self.fill_value) + return self._constructor(res_sp_values, index=self.index, + sparse_index=self.sp_index, + fill_value=self.fill_value) def get(self, label, default=None): """ @@ -501,7 +501,7 @@ def copy(self, deep=True): return self._constructor(new_data, sparse_index=self.sp_index, - fill_value=self.fill_value, name=self.name) + fill_value=self.fill_value).__finalize__(self) def reindex(self, index=None, method=None, copy=True, limit=None): """ @@ -520,7 
+520,8 @@ def reindex(self, index=None, method=None, copy=True, limit=None): return self.copy() else: return self - return self._constructor(self._data.reindex(new_index, method=method, limit=limit, copy=copy), index=new_index, name=self.name) + return self._constructor(self._data.reindex(new_index, method=method, limit=limit, copy=copy), + index=new_index).__finalize__(self) def sparse_reindex(self, new_index): """ @@ -541,7 +542,7 @@ def sparse_reindex(self, new_index): new_data = SingleBlockManager(block, block.ref_items) return self._constructor(new_data, index=self.index, sparse_index=new_index, - fill_value=self.fill_value) + fill_value=self.fill_value).__finalize__(self) def take(self, indices, axis=0, convert=True): """ @@ -553,7 +554,7 @@ def take(self, indices, axis=0, convert=True): """ new_values = SparseArray.take(self.values, indices) new_index = self.index.take(indices) - return self._constructor(new_values, index=new_index) + return self._constructor(new_values, index=new_index).__finalize__(self) def cumsum(self, axis=0, dtype=None, out=None): """ @@ -565,8 +566,8 @@ def cumsum(self, axis=0, dtype=None, out=None): """ new_array = SparseArray.cumsum(self.values) if isinstance(new_array, SparseArray): - return self._constructor(new_array, index=self.index, sparse_index=new_array.sp_index, name=self.name) - return Series(new_array, index=self.index, name=self.name) + return self._constructor(new_array, index=self.index, sparse_index=new_array.sp_index).__finalize__(self) + return Series(new_array, index=self.index).__finalize__(self) def dropna(self): """ @@ -602,7 +603,7 @@ def shift(self, periods, freq=None, **kwds): return self._constructor(self.sp_values, sparse_index=self.sp_index, index=self.index.shift(periods, offset), - fill_value=self.fill_value) + fill_value=self.fill_value).__finalize__(self) int_index = self.sp_index.to_int_index() new_indices = int_index.indices + periods @@ -617,7 +618,7 @@ def shift(self, periods, freq=None, **kwds): 
return self._constructor(self.sp_values[start:end].copy(), index=self.index, sparse_index=new_sp_index, - fill_value=self.fill_value) + fill_value=self.fill_value).__finalize__(self) def combine_first(self, other): """ diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index ce0cb909cf1c5..b3e216526d0f6 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -224,6 +224,99 @@ def f(dtype): f('float64') f('M8[ns]') + def check_metadata(self, x, y=None): + for m in x._metadata: + v = getattr(x,m,None) + if y is None: + self.assert_(v is None) + else: + self.assert_(v == getattr(y,m,None)) + + def test_metadata_propagation(self): + # check that the metadata matches up on the resulting ops + + o = self._construct(shape=3) + o.name = 'foo' + o2 = self._construct(shape=3) + o2.name = 'bar' + + # TODO + # Once panel can do non-trivial combine operations + # (currently there is an a raise in the Panel arith_ops to prevent + # this, though it actually does work) + # can remove all of these try: except: blocks on the actual operations + + + # ---------- + # preserving + # ---------- + + # simple ops with scalars + for op in [ '__add__','__sub__','__truediv__','__mul__' ]: + result = getattr(o,op)(1) + self.check_metadata(o,result) + + # ops with like + for op in [ '__add__','__sub__','__truediv__','__mul__' ]: + try: + result = getattr(o,op)(o) + self.check_metadata(o,result) + except (ValueError, AttributeError): + pass + + # simple boolean + for op in [ '__eq__','__le__', '__ge__' ]: + v1 = getattr(o,op)(o) + self.check_metadata(o,v1) + + try: + self.check_metadata(o, v1 & v1) + except (ValueError): + pass + + try: + self.check_metadata(o, v1 | v1) + except (ValueError): + pass + + # combine_first + try: + result = o.combine_first(o2) + self.check_metadata(o,result) + except (AttributeError): + pass + + + # --------------------------- + # non-preserving (by default) + # --------------------------- + + # add non-like + try: + 
result = o + o2 + self.check_metadata(result) + except (ValueError, AttributeError): + pass + + # simple boolean + for op in [ '__eq__','__le__', '__ge__' ]: + + # this is a name matching op + v1 = getattr(o,op)(o) + + v2 = getattr(o,op)(o2) + self.check_metadata(v2) + + try: + self.check_metadata(v1 & v2) + except (ValueError): + pass + + try: + self.check_metadata(v1 | v2) + except (ValueError): + pass + class TestSeries(unittest.TestCase, Generic): _typ = Series _comparator = lambda self, x, y: assert_series_equal(x,y) @@ -292,6 +385,17 @@ def test_nonzero_single_element(self): self.assertRaises(ValueError, lambda : bool(s)) self.assertRaises(ValueError, lambda : s.bool()) + def test_metadata_propagation_indiv(self): + # check that the metadata matches up on the resulting ops + + o = Series(range(3),range(3)) + o.name = 'foo' + o2 = Series(range(3),range(3)) + o2.name = 'bar' + + result = o.T + self.check_metadata(o,result) + def test_interpolate(self): ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index ee496673921f7..3715de6dffeb9 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -83,6 +83,7 @@ def test_append_preserve_name(self): self.assertEquals(result.name, self.ts.name) def test_binop_maybe_preserve_name(self): + # names match, preserve result = self.ts * self.ts self.assertEquals(result.name, self.ts.name)
- rename _propogate_attributes to _metadata (in NDFrame) - change methods which directly pass metadata to use **finalize** These provide a simple mechanism for meta data propagation, which can be overriden in subclasses if desired. Currently these only do non-ambiguous propagation. makes #60, #2485 easier
https://api.github.com/repos/pandas-dev/pandas/pulls/5205
2013-10-13T15:06:14Z
2013-10-15T00:58:25Z
2013-10-15T00:58:25Z
2014-06-15T14:41:17Z
improved daytimes conversion performance in period.c
diff --git a/pandas/src/helper.h b/pandas/src/helper.h new file mode 100644 index 0000000000000..e97e45f4e87b3 --- /dev/null +++ b/pandas/src/helper.h @@ -0,0 +1,16 @@ +#ifndef C_HELPER_H +#define C_HELPER_H + +#ifndef PANDAS_INLINE + #if defined(__GNUC__) + #define PANDAS_INLINE __inline__ + #elif defined(_MSC_VER) + #define PANDAS_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define PANDAS_INLINE inline + #else + #define PANDAS_INLINE + #endif +#endif + +#endif diff --git a/pandas/src/numpy_helper.h b/pandas/src/numpy_helper.h index d5485e74b4927..69b849de47fe7 100644 --- a/pandas/src/numpy_helper.h +++ b/pandas/src/numpy_helper.h @@ -1,18 +1,7 @@ #include "Python.h" #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" - -#ifndef PANDAS_INLINE - #if defined(__GNUC__) - #define PANDAS_INLINE __inline__ - #elif defined(_MSC_VER) - #define PANDAS_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define PANDAS_INLINE inline - #else - #define PANDAS_INLINE - #endif -#endif +#include "helper.h" #define PANDAS_FLOAT 0 #define PANDAS_INT 1 diff --git a/pandas/src/period.c b/pandas/src/period.c index 2e544afce9da2..ee3a50f98b8c9 100644 --- a/pandas/src/period.c +++ b/pandas/src/period.c @@ -13,7 +13,6 @@ * Code derived from scikits.timeseries * ------------------------------------------------------------------*/ - static int mod_compat(int x, int m) { int result = x % m; if (result < 0) return result + m; @@ -285,19 +284,19 @@ static int daytime_conversion_factors[][2] = { static npy_int64** daytime_conversion_factor_matrix = NULL; -static int max_value(int a, int b) { +PANDAS_INLINE static int max_value(int a, int b) { return a > b ? a : b; } -static int min_value(int a, int b) { +PANDAS_INLINE static int min_value(int a, int b) { return a < b ? 
a : b; } -static int get_freq_group(int freq) { +PANDAS_INLINE static int get_freq_group(int freq) { return (freq/1000)*1000; } -static int get_freq_group_index(int freq) { +PANDAS_INLINE static int get_freq_group_index(int freq) { return freq/1000; } @@ -374,56 +373,39 @@ static void populate_conversion_factors_matrix() { } } -static void initialize_daytime_conversion_factor_maxtrix() { - int matrix_size = calc_conversion_factors_matrix_size(); - alloc_conversion_factors_matrix(matrix_size); - populate_conversion_factors_matrix(); -} - -npy_int64 get_daytime_conversion_factor(int index1, int index2) -{ +void initialize_daytime_conversion_factor_matrix() { if (daytime_conversion_factor_matrix == NULL) { - initialize_daytime_conversion_factor_maxtrix(); + int matrix_size = calc_conversion_factors_matrix_size(); + alloc_conversion_factors_matrix(matrix_size); + populate_conversion_factors_matrix(); } - return daytime_conversion_factor_matrix[min_value(index1, index2)][max_value(index1, index2)]; } -npy_int64 convert_daytime(npy_int64 ordinal, int from, int to, int atEnd) +PANDAS_INLINE npy_int64 get_daytime_conversion_factor(int from_index, int to_index) { - int from_index, to_index, offset; - npy_int64 conversion_factor; - - if (from == to) { - return ordinal; - } - - from_index = get_freq_group_index(from); - to_index = get_freq_group_index(to); - - conversion_factor = get_daytime_conversion_factor(from_index, to_index); - - offset = atEnd ? 
1 : 0; + return daytime_conversion_factor_matrix[min_value(from_index, to_index)][max_value(from_index, to_index)]; +} - if (from <= to) { - return (ordinal + offset) * conversion_factor - offset; +PANDAS_INLINE npy_int64 upsample_daytime(npy_int64 ordinal, asfreq_info *af_info, int atEnd) +{ + if (atEnd) { + return (ordinal + 1) * af_info->intraday_conversion_factor - 1; } else { - return ordinal / conversion_factor; + return ordinal * af_info->intraday_conversion_factor; } - } -static npy_int64 transform_via_day(npy_int64 ordinal, char relation, asfreq_info *af_info, freq_conv_func first_func, freq_conv_func second_func) { - int tempStore = af_info->targetFreq; +PANDAS_INLINE npy_int64 downsample_daytime(npy_int64 ordinal, asfreq_info *af_info, int atEnd) +{ + return ordinal / (af_info->intraday_conversion_factor); +} + +PANDAS_INLINE static npy_int64 transform_via_day(npy_int64 ordinal, char relation, asfreq_info *af_info, freq_conv_func first_func, freq_conv_func second_func) { + //printf("transform_via_day(%ld, %ld, %d)\n", ordinal, af_info->intraday_conversion_factor, af_info->intraday_conversion_upsample); npy_int64 result; - af_info->targetFreq = FR_DAY; result = (*first_func)(ordinal, relation, af_info); - af_info->targetFreq = tempStore; - - tempStore = af_info->sourceFreq; - af_info->sourceFreq = FR_DAY; result = (*second_func)(result, relation, af_info); - af_info->sourceFreq = tempStore; return result; } @@ -460,7 +442,7 @@ static npy_int64 absdate_from_ymd(int y, int m, int d) { static npy_int64 asfreq_DTtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0); + ordinal = downsample_daytime(ordinal, af_info, 0); if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; if (dinfo.month > af_info->to_a_year_end) { @@ -491,7 +473,7 @@ static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, int 
static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) { int year, quarter; - ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0); + ordinal = downsample_daytime(ordinal, af_info, 0); if (DtoQ_yq(ordinal, af_info, &year, &quarter) == INT_ERR_CODE) { return INT_ERR_CODE; @@ -503,7 +485,7 @@ static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, char relation, asfreq_info *af_ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0); + ordinal = downsample_daytime(ordinal, af_info, 0); if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; @@ -511,14 +493,14 @@ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, char relation, asfreq_info *af_ } static npy_int64 asfreq_DTtoW(npy_int64 ordinal, char relation, asfreq_info *af_info) { - ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0); + ordinal = downsample_daytime(ordinal, af_info, 0); return (ordinal + ORD_OFFSET - (1 + af_info->to_week_end))/7 + 1 - WEEK_OFFSET; } static npy_int64 asfreq_DTtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - ordinal = convert_daytime(ordinal, af_info->sourceFreq, FR_DAY, 0); + ordinal = downsample_daytime(ordinal, af_info, 0); if (dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; @@ -531,14 +513,13 @@ static npy_int64 asfreq_DTtoB(npy_int64 ordinal, char relation, asfreq_info *af_ } // all intra day calculations are now done within one function -static npy_int64 asfreq_WithinDT(npy_int64 ordinal, char relation, asfreq_info *af_info) { - //if (relation == 'E') { - // ordinal += 1; - //} - - return convert_daytime(ordinal, af_info->sourceFreq, af_info->targetFreq, relation == 'E'); +static npy_int64 asfreq_DownsampleWithinDay(npy_int64 ordinal, char relation, asfreq_info *af_info) { 
+ return downsample_daytime(ordinal, af_info, relation == 'E'); } +static npy_int64 asfreq_UpsampleWithinDay(npy_int64 ordinal, char relation, asfreq_info *af_info) { + return upsample_daytime(ordinal, af_info, relation == 'E'); +} //************ FROM BUSINESS *************** static npy_int64 asfreq_BtoDT(npy_int64 ordinal, char relation, asfreq_info *af_info) @@ -547,7 +528,7 @@ static npy_int64 asfreq_BtoDT(npy_int64 ordinal, char relation, asfreq_info *af_ ordinal = (((ordinal - 1) / 5) * 7 + mod_compat(ordinal - 1, 5) + 1 - ORD_OFFSET); - return convert_daytime(ordinal, FR_DAY, af_info->targetFreq, relation != 'S'); + return upsample_daytime(ordinal, af_info, relation != 'S'); } static npy_int64 asfreq_BtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) { @@ -580,7 +561,7 @@ static npy_int64 asfreq_WtoDT(npy_int64 ordinal, char relation, asfreq_info *af_ ordinal -= 1; } - return convert_daytime(ordinal, FR_DAY, af_info->targetFreq, relation != 'S'); + return upsample_daytime(ordinal, af_info, relation != 'S'); } static npy_int64 asfreq_WtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) { @@ -602,12 +583,9 @@ static npy_int64 asfreq_WtoW(npy_int64 ordinal, char relation, asfreq_info *af_i static npy_int64 asfreq_WtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - int tempStore = af_info->targetFreq; - af_info->targetFreq = FR_DAY; if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_WtoDT(ordinal, relation, af_info) + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; - af_info->targetFreq = tempStore; if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); @@ -639,7 +617,7 @@ static npy_int64 asfreq_MtoDT(npy_int64 ordinal, char relation, asfreq_info* af_ ordinal -= 1; } - return convert_daytime(ordinal, FR_DAY, af_info->targetFreq, relation != 'S'); + return upsample_daytime(ordinal, af_info, relation != 'S'); } static npy_int64 asfreq_MtoA(npy_int64 ordinal, char relation, 
asfreq_info *af_info) { @@ -657,12 +635,9 @@ static npy_int64 asfreq_MtoW(npy_int64 ordinal, char relation, asfreq_info *af_i static npy_int64 asfreq_MtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - int tempStore = af_info->targetFreq; - af_info->targetFreq = FR_DAY; if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_MtoDT(ordinal, relation, af_info) + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; - af_info->targetFreq = tempStore; if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } @@ -698,7 +673,7 @@ static npy_int64 asfreq_QtoDT(npy_int64 ordinal, char relation, asfreq_info *af_ absdate -= 1; } - return convert_daytime(absdate - ORD_OFFSET, FR_DAY, af_info->targetFreq, relation != 'S'); + return upsample_daytime(absdate - ORD_OFFSET, af_info, relation != 'S'); } static npy_int64 asfreq_QtoQ(npy_int64 ordinal, char relation, asfreq_info *af_info) { @@ -720,12 +695,9 @@ static npy_int64 asfreq_QtoW(npy_int64 ordinal, char relation, asfreq_info *af_i static npy_int64 asfreq_QtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - int tempStore = af_info->targetFreq; - af_info->targetFreq = FR_DAY; if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_QtoDT(ordinal, relation, af_info) + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; - af_info->targetFreq = tempStore; if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } @@ -761,7 +733,7 @@ static npy_int64 asfreq_AtoDT(npy_int64 year, char relation, asfreq_info *af_inf absdate -= 1; } - return convert_daytime(absdate - ORD_OFFSET, FR_DAY, af_info->targetFreq, relation != 'S'); + return upsample_daytime(absdate - ORD_OFFSET, af_info, relation != 'S'); } static npy_int64 asfreq_AtoA(npy_int64 ordinal, char relation, asfreq_info *af_info) { 
@@ -783,12 +755,9 @@ static npy_int64 asfreq_AtoW(npy_int64 ordinal, char relation, asfreq_info *af_i static npy_int64 asfreq_AtoB(npy_int64 ordinal, char relation, asfreq_info *af_info) { struct date_info dinfo; - int tempStore = af_info->targetFreq; - af_info->targetFreq = FR_DAY; if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_AtoDT(ordinal, relation, af_info) + ORD_OFFSET, GREGORIAN_CALENDAR)) return INT_ERR_CODE; - af_info->targetFreq = tempStore; if (relation == 'S') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } @@ -813,8 +782,13 @@ void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) { int fromGroup = get_freq_group(fromFreq); int toGroup = get_freq_group(toFreq); - af_info->sourceFreq = fromFreq; - af_info->targetFreq = toFreq; + af_info->intraday_conversion_factor = + get_daytime_conversion_factor( + get_freq_group_index(max_value(fromGroup, FR_DAY)), + get_freq_group_index(max_value(toGroup, FR_DAY)) + ); + + //printf("get_asfreq_info(%d, %d) %ld, %d\n", fromFreq, toFreq, af_info->intraday_conversion_factor, af_info->intraday_conversion_upsample); switch(fromGroup) { @@ -970,7 +944,11 @@ freq_conv_func get_asfreq_func(int fromFreq, int toFreq) case FR_MS: case FR_US: case FR_NS: - return &asfreq_WithinDT; + if (fromGroup > toGroup) { + return &asfreq_DownsampleWithinDay; + } else { + return &asfreq_UpsampleWithinDay; + } default: return &nofunc; } @@ -1073,6 +1051,8 @@ npy_int64 asfreq(npy_int64 period_ordinal, int freq1, int freq2, char relation) get_asfreq_info(freq1, freq2, &finfo); + //printf("\n%x %d %d %ld %ld\n", func, freq1, freq2, finfo.intraday_conversion_factor, -finfo.intraday_conversion_factor); + val = (*func)(period_ordinal, relation, &finfo); if (val == INT_ERR_CODE) { diff --git a/pandas/src/period.h b/pandas/src/period.h index af35838ad0355..e8537680e27e7 100644 --- a/pandas/src/period.h +++ b/pandas/src/period.h @@ -8,6 +8,7 @@ 
#define C_PERIOD_H #include <Python.h> +#include "helper.h" #include "numpy/ndarraytypes.h" #include "headers/stdint.h" #include "limits.h" @@ -106,8 +107,7 @@ typedef struct asfreq_info { int from_q_year_end; // month the year ends on in the "from" frequency int to_q_year_end; // month the year ends on in the "to" frequency - int sourceFreq; - int targetFreq; + npy_int64 intraday_conversion_factor; } asfreq_info; @@ -162,4 +162,5 @@ double getAbsTime(int freq, npy_int64 dailyDate, npy_int64 originalDate); char *c_strftime(struct date_info *dinfo, char *fmt); int get_yq(npy_int64 ordinal, int freq, int *quarter, int *year); +void initialize_daytime_conversion_factor_matrix(); #endif diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 372de1e7c1b21..d95956261bc44 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -2340,6 +2340,7 @@ cdef extern from "period.h": ctypedef int64_t (*freq_conv_func)(int64_t, char, asfreq_info*) + void initialize_daytime_conversion_factor_matrix() int64_t asfreq(int64_t dtordinal, int freq1, int freq2, char relation) except INT32_MIN freq_conv_func get_asfreq_func(int fromFreq, int toFreq) void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) @@ -2368,6 +2369,8 @@ cdef extern from "period.h": char *c_strftime(date_info *dinfo, char *fmt) int get_yq(int64_t ordinal, int freq, int *quarter, int *year) +initialize_daytime_conversion_factor_matrix() + # Period logic #----------------------------------------------------------------------
Especially when using repeated conversion with period_asfreq_arr of tslib the code introduced with #3060 was about two to three times slower than before. This PR improves the performance issues by moving global calculations out of the loop.
https://api.github.com/repos/pandas-dev/pandas/pulls/5204
2013-10-13T08:14:17Z
2013-10-13T23:31:31Z
2013-10-13T23:31:31Z
2014-01-12T07:38:07Z
BUG/ENH: Index constructor with dtype now delegates
diff --git a/doc/source/release.rst b/doc/source/release.rst index 55f786d263a0a..9e1a0f059160a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -593,6 +593,9 @@ Bug Fixes - Compound dtypes in a constructor raise ``NotImplementedError`` (:issue:`5191`) - Bug in comparing duplicate frames (:issue:`4421`) related - Bug in describe on duplicate frames + - Fixed issue where passing dtype explicitly to ``Index()`` would always + result in object Index. Now delegates to appropriate subclass. + (:issue:`5196`, :issue:`5201`) pandas 0.12.0 ------------- diff --git a/pandas/core/index.py b/pandas/core/index.py index 98f190360bc33..3e09598e7e6f6 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -103,42 +103,54 @@ def __new__(cls, data, dtype=None, copy=False, name=None, fastpath=False, subarr.name = name return subarr + if np.isscalar(data): + cls._scalar_data_error(data) + + elif not isinstance(data, np.ndarray): + # other iterable of some kind + data = com._asarray_tuplesafe(data, dtype=dtype) + from pandas.tseries.period import PeriodIndex - if isinstance(data, np.ndarray): - if issubclass(data.dtype.type, np.datetime64): - from pandas.tseries.index import DatetimeIndex - result = DatetimeIndex(data, copy=copy, name=name, **kwargs) - if dtype is not None and _o_dtype == dtype: - return Index(result.to_pydatetime(), dtype=_o_dtype) - else: - return result - elif issubclass(data.dtype.type, np.timedelta64): - return Int64Index(data, copy=copy, name=name) + if issubclass(data.dtype.type, np.datetime64): + from pandas.tseries.index import DatetimeIndex + result = DatetimeIndex(data, copy=copy, name=name, **kwargs) + if dtype is not None and _o_dtype == dtype: + return Index(result.to_pydatetime(), dtype=_o_dtype) + else: + return result + elif issubclass(data.dtype.type, np.timedelta64): + return Int64Index(data, copy=copy, name=name) - if dtype is not None: - try: - data = np.array(data, dtype=dtype, copy=copy) - except TypeError: + if dtype 
is not None: + try: + data = np.array(data, dtype=dtype, copy=copy) + except TypeError: + pass + except ValueError as e: + # bad dtype + if 'data type' in str(e): + raise + # otherwise, something like int('level_1') going on + else: pass - elif isinstance(data, PeriodIndex): - return PeriodIndex(data, copy=copy, name=name, **kwargs) - if issubclass(data.dtype.type, np.integer): - return Int64Index(data, copy=copy, dtype=dtype, name=name) + elif isinstance(data, PeriodIndex): + return PeriodIndex(data, copy=copy, name=name, **kwargs) - subarr = com._asarray_tuplesafe(data, dtype=object) + if issubclass(data.dtype.type, np.integer): + return Int64Index(data, copy=copy, dtype=dtype, name=name) + if issubclass(data.dtype.type, np.float_): + # this will still skip reconstruction in Float64Index's __new__ + # (because it'll match dtype in array) + return Float64Index(data, dtype=dtype, copy=copy, name=name) - # _asarray_tuplesafe does not always copy underlying data, - # so need to make sure that this happens - if copy: - subarr = subarr.copy() + # couldn't assign to anything else. 
+ subarr = com._asarray_tuplesafe(data, dtype=object) - elif np.isscalar(data): - cls._scalar_data_error(data) - - else: - # other iterable of some kind - subarr = com._asarray_tuplesafe(data, dtype=object) + # _asarray_tuplesafe does not always copy underlying data, + # so need to make sure that this happens + if copy: + subarr = subarr.copy() if dtype is None: inferred = lib.infer_dtype(subarr) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index cd26016acba5c..0f308458aab92 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -48,6 +48,56 @@ def setUp(self): for name, ind in self.indices.items(): setattr(self, name, ind) + def test_explicit_dtype_in_constructor(self): + # first, smoke tests to make sure result is the same + # (with NON-ndarray) + pairs = [(np.dtype(unicode), self.unicodeIndex), + (np.dtype(str), self.strIndex), + ('datetime64[ns]', self.dateIndex), + (np.dtype(int), self.intIndex), + (np.dtype(float), self.floatIndex), + (np.dtype(object), self.empty), + (np.dtype(object), self.tuples)] + for dtype, original_index in pairs: + new_index = Index(list(original_index), dtype=str(dtype)) + # key is that type is the same + tm.assert_isinstance(new_index, type(original_index)) + tm.assert_index_equal(new_index, original_index) + + # float coerces to Float64Index (even if it could be Int64) + ind = Index([1, 3, 5], dtype=float) + expected = Float64Index([1., 3., 5.]) + tm.assert_isinstance(ind, Float64Index) + tm.assert_index_equal(ind, expected) + + # These don't need to stay the same if new Index types are added + # object-like without explicit dtype + ind = Index(['s', 5, 3, None]) + assert ind.dtype == np.object_ + assert type(ind) is Index, "Expected Index type, found %s" % type(ind) + tm.assert_almost_equal(ind.values, ['s', 5, 3, None]) + + # int with nan goes to Float64Index + vals = [1, 3, 5, np.nan, 3, -3, -4] + ind1 = Index(vals, dtype=float) + # as should list-like without dtype + ind2 = 
Index(vals) + + for ind in (ind1, ind2): + tm.assert_almost_equal(ind.values, vals) + tm.assert_isinstance(ind1, Float64Index) + + # doesn't rescue with explicit (but wrong) dtype + with tm.assertRaises(ValueError): + Index(vals, dtype=int) + + # bad dtypes with list-likes + with tm.assertRaisesRegexp(TypeError, "data type .banana"): + Index(['apple', 1, 2], dtype='banana') + + with tm.assertRaisesRegexp(TypeError, "data type .ribbit"): + Index(range(3), dtype='ribbit') + def test_wrong_number_names(self): def testit(ind): ind.names = ["apple", "banana", "carrot"] diff --git a/pandas/util/testing.py b/pandas/util/testing.py index dfe81237ee15d..30d179dd14944 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -336,7 +336,7 @@ def assert_isinstance(obj, class_type_or_tuple): """asserts that obj is an instance of class_type_or_tuple""" assert isinstance(obj, class_type_or_tuple), ( "Expected object to be of type %r, found %r instead" % ( - type(obj), class_type_or_tuple)) + class_type_or_tuple, type(obj))) def assert_equal(a, b, msg=""):
...to Index subclass (if possible). Simplifies the Index constructor slightly too, which is a nice side bonus. Will even better after Index refactor. Fixes #5196.
https://api.github.com/repos/pandas-dev/pandas/pulls/5201
2013-10-12T22:52:19Z
2013-10-13T00:23:35Z
null
2014-07-12T17:10:21Z
ENH/API: Accept DataFrame for isin
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index b2cda7d1f4041..4c1ece032310f 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -476,7 +476,7 @@ Enhancements t = Timestamp('20130101 09:01:02') t + pd.datetools.Nano(123) -- A new method, ``isin`` for DataFrames, plays nicely with boolean indexing. See :ref:`the docs<indexing.basics.indexing_isin>` for more. +- A new method, ``isin`` for DataFrames, which plays nicely with boolean indexing. The argument to ``isin``, what we're comparing the DataFrame to, can be a DataFrame, Series, dict, or array of values. See :ref:`the docs<indexing.basics.indexing_isin>` for more. To get the rows where any of the conditions are met: @@ -484,7 +484,8 @@ Enhancements dfi = DataFrame({'A': [1, 2, 3, 4], 'B': ['a', 'b', 'f', 'n']}) dfi - mask = dfi.isin({'A': [1, 2], 'B': ['e', 'f']}) + other = DataFrame({'A': [1, 3, 3, 7], 'B': ['e', 'f', 'f', 'e']}) + mask = dfi.isin(other) mask dfi[mask.any(1)] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ece38e18e3688..e6ad5bf550f7f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4239,35 +4239,71 @@ def to_period(self, freq=None, axis=0, copy=True): return self._constructor(new_data) - - def isin(self, values, iloc=False): + def isin(self, values): """ - Return boolean DataFrame showing whether each element in the DataFrame is - contained in values. + Return boolean DataFrame showing whether each element in the + DataFrame is contained in values. Parameters ---------- - values : iterable or dictionary of columns to values - iloc : boolean, if passing a dict as values, describe columns using integer - locations (default is to use labels) + values : iterable, Series, DataFrame or dictionary + The result will only be true at a location if all the + labels match. If `values` is a Series, that's the index. If + `values` is a dictionary, the keys must be the column names, + which must match. 
If `values` is a DataFrame, + then both the index and column labels must match. Returns ------- DataFrame of booleans + + Examples + -------- + When ``values`` is a list: + + >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) + >>> df.isin([1, 3, 12, 'a']) + A B + 0 True True + 1 False False + 2 True False + + When ``values`` is a dict: + + >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]}) + >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]}) + A B + 0 True False # Note that B didn't match the 1 here. + 1 False True + 2 True True + + When ``values`` is a Series or DataFrame: + + >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) + >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']}) + >>> df.isin(other) + A B + 0 True False + 1 False False # Column A in `other` has a 3, but not at index 1. + 2 True True """ if isinstance(values, dict): from collections import defaultdict from pandas.tools.merge import concat values = defaultdict(list, values) - if iloc: - return concat((self.iloc[:, [i]].isin(values[i]) - for i, col in enumerate(self.columns)), axis=1) - else: - return concat((self.iloc[:, [i]].isin(values[col]) - for i, col in enumerate(self.columns)), axis=1) - - + return concat((self.iloc[:, [i]].isin(values[col]) + for i, col in enumerate(self.columns)), axis=1) + elif isinstance(values, Series): + if not values.index.is_unique: + raise ValueError("ValueError: cannot compute isin with" + " a duplicate axis.") + return self.eq(values.reindex_like(self), axis='index') + elif isinstance(values, DataFrame): + if not (values.columns.is_unique and values.index.is_unique): + raise ValueError("ValueError: cannot compute isin with" + " a duplicate axis.") + return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are" diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 3f5eef8c04f7d..f6db680d30061 100644 --- a/pandas/tests/test_frame.py +++ 
b/pandas/tests/test_frame.py @@ -11431,20 +11431,6 @@ def test_isin_dict(self): result = df.isin(d) assert_frame_equal(result, expected) - # iloc - df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']}) - d = {0: ['a']} - expected = DataFrame(False, df.index, df.columns) - - # without using iloc - result = df.isin(d) - assert_frame_equal(result, expected) - - # using iloc - result = df.isin(d, iloc=True) - expected.iloc[0, 0] = True - assert_frame_equal(result, expected) - def test_isin_with_string_scalar(self): #GH4763 df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'], @@ -11456,6 +11442,84 @@ def test_isin_with_string_scalar(self): with tm.assertRaises(TypeError): df.isin('aaa') + def test_isin_df(self): + df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]}) + df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]}) + expected = DataFrame(False, df1.index, df1.columns) + result = df1.isin(df2) + expected['A'].loc[[1, 3]] = True + expected['B'].loc[[0, 2]] = True + assert_frame_equal(result, expected) + + # partial overlapping columns + df2.columns = ['A', 'C'] + result = df1.isin(df2) + expected['B'] = False + assert_frame_equal(result, expected) + + def test_isin_df_dupe_values(self): + df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]}) + # just cols duped + df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], + columns=['B', 'B']) + with tm.assertRaises(ValueError): + df1.isin(df2) + + # just index duped + df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], + columns=['A', 'B'], index=[0, 0, 1, 1]) + with tm.assertRaises(ValueError): + df1.isin(df2) + + # cols and index: + df2.columns = ['B', 'B'] + with tm.assertRaises(ValueError): + df1.isin(df2) + + def test_isin_dupe_self(self): + other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]}) + df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A','A']) + result = df.isin(other) + expected = DataFrame(False, index=df.index, columns=df.columns) + expected.loc[0] 
= True + expected.iloc[1, 1] = True + assert_frame_equal(result, expected) + + + def test_isin_against_series(self): + df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]}, + index=['a', 'b', 'c', 'd']) + s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd']) + expected = DataFrame(False, index=df.index, columns=df.columns) + expected['A'].loc['a'] = True + expected.loc['d'] = True + result = df.isin(s) + assert_frame_equal(result, expected) + + def test_isin_multiIndex(self): + idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'), + (0, 'b', 'bar'), (0, 'b', 'baz'), + (2, 'a', 'foo'), (2, 'a', 'bar'), + (2, 'c', 'bar'), (2, 'c', 'baz'), + (1, 'b', 'foo'), (1, 'b', 'bar'), + (1, 'c', 'bar'), (1, 'c', 'baz')]) + df1 = DataFrame({'A': np.ones(12), + 'B': np.zeros(12)}, index=idx) + df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + 'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]}) + # against regular index + expected = DataFrame(False, index=df1.index, columns=df1.columns) + result = df1.isin(df2) + assert_frame_equal(result, expected) + + df2.index = idx + expected = df2.values.astype(np.bool) + expected[:, 1] = ~expected[:, 1] + expected = DataFrame(expected, columns=['A', 'B'], index=idx) + + result = df1.isin(df2) + assert_frame_equal(result, expected) + def test_to_csv_date_format(self): from pandas import to_datetime pname = '__tmp_to_csv_date_format__'
API: Series should respect index labels. This would be incompatible, but there hasn't been any official release since `isin` was merged. Will close #4421. Still a WIP for now. Needs more testing. Thoughts on adding an argument to ignore the index? i.e. ``` python In [150]: df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]}, index=['a','b','c','d']) In [151]: s = pd.Series([1, 3, 11, 12], index=['a','b','c','d']) In [152]: df.isin(s) Out[152]: A B a True False b False False c False False d False False In [152]: df.isin(s, ignore_index=True) Out[152]: A B a True False b False False c True False d False False ``` I would say there's no need for an `ignore_index` since you could just do `df.isin(s.values)`.
https://api.github.com/repos/pandas-dev/pandas/pulls/5199
2013-10-12T22:26:09Z
2013-10-17T20:49:58Z
2013-10-17T20:49:58Z
2017-04-05T02:05:43Z
BUG: Bug in to_datetime with a format and coerce=True not raising (GH5195)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 55f786d263a0a..f899849475df8 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -593,6 +593,7 @@ Bug Fixes - Compound dtypes in a constructor raise ``NotImplementedError`` (:issue:`5191`) - Bug in comparing duplicate frames (:issue:`4421`) related - Bug in describe on duplicate frames + - Bug in ``to_datetime`` with a format and ``coerce=True`` not raising (:issue:`5195`) pandas 0.12.0 ------------- diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 473ea21da1585..7f11fa5873fe7 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -879,6 +879,29 @@ def test_to_datetime_on_datetime64_series(self): result = to_datetime(s) self.assertEquals(result[0], s[0]) + def test_to_datetime_with_apply(self): + + # this is only locale tested with US/None locales + import locale + (lang,encoding) = locale.getlocale() + if lang is not None: + raise nose.SkipTest("format codes cannot work with a locale of {0}".format(lang)) + + # GH 5195 + # with a format and coerce a single item to_datetime fails + td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3]) + expected = pd.to_datetime(td, format='%b %y') + result = td.apply(pd.to_datetime, format='%b %y') + assert_series_equal(result, expected) + + td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3]) + self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y')) + self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y')) + expected = pd.to_datetime(td, format='%b %y', coerce=True) + + result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True)) + assert_series_equal(result, expected) + def test_nat_vector_field_access(self): idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000']) diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 793d9409e662e..3d8803237931d 100644 --- 
a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -112,7 +112,7 @@ def _convert_listlike(arg, box): # fallback if result is None: - result = tslib.array_strptime(arg, format) + result = tslib.array_strptime(arg, format, coerce=coerce) else: result = tslib.array_to_datetime(arg, raise_=errors == 'raise', utc=utc, dayfirst=dayfirst, diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index c6c2b418f553d..372de1e7c1b21 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1174,7 +1174,7 @@ def repr_timedelta64(object value): return "%s%02d:%02d:%s" % (sign_pretty, hours, minutes, seconds_pretty) -def array_strptime(ndarray[object] values, object fmt): +def array_strptime(ndarray[object] values, object fmt, coerce=False): cdef: Py_ssize_t i, n = len(values) pandas_datetimestruct dts @@ -1237,9 +1237,15 @@ def array_strptime(ndarray[object] values, object fmt): for i in range(n): found = format_regex.match(values[i]) if not found: + if coerce: + iresult[i] = iNaT + continue raise ValueError("time data %r does not match format %r" % (values[i], fmt)) if len(values[i]) != found.end(): + if coerce: + iresult[i] = iNaT + continue raise ValueError("unconverted data remains: %s" % values[i][found.end():]) year = 1900
closes #5195
https://api.github.com/repos/pandas-dev/pandas/pulls/5197
2013-10-12T22:13:49Z
2013-10-12T23:53:46Z
2013-10-12T23:53:46Z
2014-06-24T06:53:46Z
BUG: Bug in comparing duplicate frames (GH4421) related
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5001c5142f330..55f786d263a0a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -591,6 +591,8 @@ Bug Fixes (:issue:`5123`, :issue:`5125`) - Allow duplicate indices when performing operations that align (:issue:`5185`) - Compound dtypes in a constructor raise ``NotImplementedError`` (:issue:`5191`) + - Bug in comparing duplicate frames (:issue:`4421`) related + - Bug in describe on duplicate frames pandas 0.12.0 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 24a4e4800e750..967da6102ae1a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2848,28 +2848,34 @@ def _combine_const(self, other, func, raise_on_error=True): new_data = self._data.eval(func, other, raise_on_error=raise_on_error) return self._constructor(new_data) + + def _compare_frame_evaluate(self, other, func, str_rep): + + # unique + if self.columns.is_unique: + def _compare(a, b): + return dict([(col, func(a[col], b[col])) for col in a.columns]) + new_data = expressions.evaluate(_compare, str_rep, self, other) + return self._constructor(data=new_data, index=self.index, + columns=self.columns, copy=False) + # non-unique + else: + def _compare(a, b): + return [func(a.iloc[:,i], b.iloc[:,i]) for i, col in enumerate(a.columns)] + new_data = expressions.evaluate(_compare, str_rep, self, other) + return self._constructor(data=new_data, index=self.columns, + columns=self.index, copy=False).T + def _compare_frame(self, other, func, str_rep): if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') - - def _compare(a, b): - return dict([(col, func(a[col], b[col])) for col in a.columns]) - new_data = expressions.evaluate(_compare, str_rep, self, other) - - return self._constructor(data=new_data, index=self.index, - columns=self.columns, copy=False) + return self._compare_frame_evaluate(other, func, str_rep) def 
_flex_compare_frame(self, other, func, str_rep, level): if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level) - - def _compare(a, b): - return dict([(col, func(a[col], b[col])) for col in a.columns]) - new_data = expressions.evaluate(_compare, str_rep, self, other) - - return self._constructor(data=new_data, index=self.index, - columns=self.columns, copy=False) + return self._compare_frame_evaluate(other, func, str_rep) def combine(self, other, func, fill_value=None, overwrite=True): """ @@ -3792,8 +3798,8 @@ def pretty_name(x): destat = [] - for column in numdata.columns: - series = self[column] + for i in range(len(numdata.columns)): + series = numdata.iloc[:,i] destat.append([series.count(), series.mean(), series.std(), series.min(), series.quantile(lb), series.median(), series.quantile(ub), series.max()]) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 6e7683d29a934..fe0f9244c31a3 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3179,6 +3179,14 @@ def check(result, expected=None): expected = DataFrame([[1],[1],[1]],columns=['bar']) check(df,expected) + # describe + df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64') + result = df.describe() + s = df.iloc[:,0].describe() + expected = pd.concat([ s, s, s],keys=df.columns,axis=1) + check(result,expected) + + def test_column_dups_indexing(self): def check(result, expected=None): @@ -3217,6 +3225,18 @@ def check(result, expected=None): result = df1.sub(df2) assert_frame_equal(result,expected) + # equality + df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B']) + df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A']) + + # not-comparing like-labelled + self.assertRaises(ValueError, lambda : df1 == df2) + + df1r = df1.reindex_like(df2) + result = df1r == df2 + expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A']) + 
assert_frame_equal(result,expected) + def test_insert_benchmark(self): # from the vb_suite/frame_methods/frame_insert_columns N = 10
BUG: Bug in describe on duplicate frames related to #4421
https://api.github.com/repos/pandas-dev/pandas/pulls/5194
2013-10-12T21:16:16Z
2013-10-12T21:28:42Z
2013-10-12T21:28:42Z
2014-06-22T17:39:20Z
BUG: raise NotImplementedError on passing of compound dtypes in constructors (GH5191)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5d256ddf6dca3..5001c5142f330 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -590,6 +590,7 @@ Bug Fixes - Fixed segfault on ``isnull(MultiIndex)`` (now raises an error instead) (:issue:`5123`, :issue:`5125`) - Allow duplicate indices when performing operations that align (:issue:`5185`) + - Compound dtypes in a constructor raise ``NotImplementedError`` (:issue:`5191`) pandas 0.12.0 ------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 126ed9242ecdd..24a4e4800e750 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -189,6 +189,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} + if dtype is not None: + dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data @@ -276,9 +278,6 @@ def _init_dict(self, data, index, columns, dtype=None): Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. 
""" - if dtype is not None: - dtype = np.dtype(dtype) - if columns is not None: columns = _ensure_index(columns) @@ -4659,9 +4658,6 @@ def _get_names_from_index(data): def _homogenize(data, index, dtype=None): from pandas.core.series import _sanitize_array - if dtype is not None: - dtype = np.dtype(dtype) - oindex = None homogenized = [] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 556a7652b9270..3fca45b00d565 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -89,6 +89,18 @@ def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False): object.__setattr__(self, '_data', data) object.__setattr__(self, '_item_cache', {}) + def _validate_dtype(self, dtype): + """ validate the passed dtype """ + + if dtype is not None: + dtype = np.dtype(dtype) + + # a compound dtype + if dtype.kind == 'V': + raise NotImplementedError("compound dtypes are not implemented" + "in the {0} constructor".format(self.__class__.__name__)) + return dtype + def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): """ passed a manager and a axes dict """ for a, axe in axes.items(): diff --git a/pandas/core/panel.py b/pandas/core/panel.py index f0d9dbe9c5877..1389445b29943 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -140,6 +140,8 @@ def _init_data(self, data, copy, dtype, **kwargs): """ if data is None: data = {} + if dtype is not None: + dtype = self._validate_dtype(dtype) passed_axes = [kwargs.get(a) for a in self._AXIS_ORDERS] axes = None diff --git a/pandas/core/series.py b/pandas/core/series.py index e475495c63164..fba3e946de0b0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -139,6 +139,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None, if data is None: data = {} + if dtype is not None: + dtype = self._validate_dtype(dtype) if isinstance(data, MultiIndex): raise NotImplementedError diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 
697f9c94aff93..6e7683d29a934 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10709,6 +10709,21 @@ def test_constructor_series_copy(self): self.assert_(not (series['A'] == 5).all()) + def test_constructor_compound_dtypes(self): + # GH 5191 + # compound dtypes should raise not-implementederror + + def f(dtype): + return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)), + columns=["A", "B", "C"], dtype=dtype) + + self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")]) + + # these work (though results may be unexpected) + f('int64') + f('float64') + f('M8[ns]') + def test_assign_columns(self): self.frame['hi'] = 'there' diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index c9ef3ea4e217c..ce0cb909cf1c5 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -86,7 +86,7 @@ def _construct(self, shape, value=None, dtype=None, **kwargs): arr = np.repeat(arr,new_shape).reshape(shape) else: arr = np.random.randn(*shape) - return self._typ(arr,**kwargs) + return self._typ(arr,dtype=dtype,**kwargs) def _compare(self, result, expected): self._comparator(result,expected) @@ -210,6 +210,20 @@ def test_downcast(self): expected = o.astype(np.int64) self._compare(result, expected) + def test_constructor_compound_dtypes(self): + # GH 5191 + # compound dtypes should raise not-implementederror + + def f(dtype): + return self._construct(shape=3, dtype=dtype) + + self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")]) + + # these work (though results may be unexpected) + f('int64') + f('float64') + f('M8[ns]') + class TestSeries(unittest.TestCase, Generic): _typ = Series _comparator = lambda self, x, y: assert_series_equal(x,y)
closes #5191
https://api.github.com/repos/pandas-dev/pandas/pulls/5192
2013-10-12T17:33:55Z
2013-10-12T17:45:29Z
2013-10-12T17:45:28Z
2014-06-27T19:08:14Z
CLN/ENH: Stop instantiating all offsets on load.
diff --git a/doc/source/release.rst b/doc/source/release.rst index b74b1f9252709..9be06d60f0f16 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -394,6 +394,10 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` bs4/lxml (:issue:`4770`). - Removed the ``keep_internal`` keyword parameter in ``pandas/core/groupby.py`` because it wasn't being used (:issue:`5102`). + - Base ``DateOffsets`` are no longer all instantiated on importing pandas, + instead they are generated and cached on the fly. The internal + representation and handling of DateOffsets has also been clarified. + (:issue:`5189`, related :issue:`5004`) .. _release.bug_fixes-0.13.0: diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index d1fd51c073f83..4878ebfccf915 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -12,7 +12,6 @@ import pandas.core.common as com import pandas.lib as lib import pandas.tslib as tslib -from pandas import _np_version_under1p7 class FreqGroup(object): @@ -125,162 +124,15 @@ def _get_freq_str(base, mult=1): MonthEnd, BMonthBegin, BMonthEnd, QuarterBegin, QuarterEnd, BQuarterBegin, BQuarterEnd, YearBegin, YearEnd, - BYearBegin, BYearEnd, + BYearBegin, BYearEnd, _make_offset ) try: cday = CDay() except NotImplementedError: cday = None -_offset_map = { - 'D': Day(), - 'C': cday, - 'B': BDay(), - 'H': Hour(), - 'T': Minute(), - 'S': Second(), - 'L': Milli(), - 'U': Micro(), - None: None, - - # Monthly - Calendar - 'M': MonthEnd(), - 'MS': MonthBegin(), - - # Monthly - Business - 'BM': BMonthEnd(), - 'BMS': BMonthBegin(), - - # Annual - Calendar - 'A-JAN': YearEnd(month=1), - 'A-FEB': YearEnd(month=2), - 'A-MAR': YearEnd(month=3), - 'A-APR': YearEnd(month=4), - 'A-MAY': YearEnd(month=5), - 'A-JUN': YearEnd(month=6), - 'A-JUL': YearEnd(month=7), - 'A-AUG': YearEnd(month=8), - 'A-SEP': YearEnd(month=9), - 'A-OCT': YearEnd(month=10), - 'A-NOV': YearEnd(month=11), - 'A-DEC': YearEnd(month=12), - - # Annual 
- Calendar (start) - 'AS-JAN': YearBegin(month=1), - 'AS-FEB': YearBegin(month=2), - 'AS-MAR': YearBegin(month=3), - 'AS-APR': YearBegin(month=4), - 'AS-MAY': YearBegin(month=5), - 'AS-JUN': YearBegin(month=6), - 'AS-JUL': YearBegin(month=7), - 'AS-AUG': YearBegin(month=8), - 'AS-SEP': YearBegin(month=9), - 'AS-OCT': YearBegin(month=10), - 'AS-NOV': YearBegin(month=11), - 'AS-DEC': YearBegin(month=12), - - # Annual - Business - 'BA-JAN': BYearEnd(month=1), - 'BA-FEB': BYearEnd(month=2), - 'BA-MAR': BYearEnd(month=3), - 'BA-APR': BYearEnd(month=4), - 'BA-MAY': BYearEnd(month=5), - 'BA-JUN': BYearEnd(month=6), - 'BA-JUL': BYearEnd(month=7), - 'BA-AUG': BYearEnd(month=8), - 'BA-SEP': BYearEnd(month=9), - 'BA-OCT': BYearEnd(month=10), - 'BA-NOV': BYearEnd(month=11), - 'BA-DEC': BYearEnd(month=12), - - # Annual - Business (Start) - 'BAS-JAN': BYearBegin(month=1), - 'BAS-FEB': BYearBegin(month=2), - 'BAS-MAR': BYearBegin(month=3), - 'BAS-APR': BYearBegin(month=4), - 'BAS-MAY': BYearBegin(month=5), - 'BAS-JUN': BYearBegin(month=6), - 'BAS-JUL': BYearBegin(month=7), - 'BAS-AUG': BYearBegin(month=8), - 'BAS-SEP': BYearBegin(month=9), - 'BAS-OCT': BYearBegin(month=10), - 'BAS-NOV': BYearBegin(month=11), - 'BAS-DEC': BYearBegin(month=12), - - # Quarterly - Calendar - # 'Q' : QuarterEnd(startingMonth=3), - 'Q-JAN': QuarterEnd(startingMonth=1), - 'Q-FEB': QuarterEnd(startingMonth=2), - 'Q-MAR': QuarterEnd(startingMonth=3), - 'Q-APR': QuarterEnd(startingMonth=4), - 'Q-MAY': QuarterEnd(startingMonth=5), - 'Q-JUN': QuarterEnd(startingMonth=6), - 'Q-JUL': QuarterEnd(startingMonth=7), - 'Q-AUG': QuarterEnd(startingMonth=8), - 'Q-SEP': QuarterEnd(startingMonth=9), - 'Q-OCT': QuarterEnd(startingMonth=10), - 'Q-NOV': QuarterEnd(startingMonth=11), - 'Q-DEC': QuarterEnd(startingMonth=12), - - # Quarterly - Calendar (Start) - 'QS': QuarterBegin(startingMonth=1), - 'QS-JAN': QuarterBegin(startingMonth=1), - 'QS-FEB': QuarterBegin(startingMonth=2), - 'QS-MAR': QuarterBegin(startingMonth=3), 
- 'QS-APR': QuarterBegin(startingMonth=4), - 'QS-MAY': QuarterBegin(startingMonth=5), - 'QS-JUN': QuarterBegin(startingMonth=6), - 'QS-JUL': QuarterBegin(startingMonth=7), - 'QS-AUG': QuarterBegin(startingMonth=8), - 'QS-SEP': QuarterBegin(startingMonth=9), - 'QS-OCT': QuarterBegin(startingMonth=10), - 'QS-NOV': QuarterBegin(startingMonth=11), - 'QS-DEC': QuarterBegin(startingMonth=12), - - # Quarterly - Business - 'BQ-JAN': BQuarterEnd(startingMonth=1), - 'BQ-FEB': BQuarterEnd(startingMonth=2), - 'BQ-MAR': BQuarterEnd(startingMonth=3), - - 'BQ': BQuarterEnd(startingMonth=12), - 'BQ-APR': BQuarterEnd(startingMonth=4), - 'BQ-MAY': BQuarterEnd(startingMonth=5), - 'BQ-JUN': BQuarterEnd(startingMonth=6), - 'BQ-JUL': BQuarterEnd(startingMonth=7), - 'BQ-AUG': BQuarterEnd(startingMonth=8), - 'BQ-SEP': BQuarterEnd(startingMonth=9), - 'BQ-OCT': BQuarterEnd(startingMonth=10), - 'BQ-NOV': BQuarterEnd(startingMonth=11), - 'BQ-DEC': BQuarterEnd(startingMonth=12), - - # Quarterly - Business (Start) - 'BQS-JAN': BQuarterBegin(startingMonth=1), - 'BQS': BQuarterBegin(startingMonth=1), - 'BQS-FEB': BQuarterBegin(startingMonth=2), - 'BQS-MAR': BQuarterBegin(startingMonth=3), - 'BQS-APR': BQuarterBegin(startingMonth=4), - 'BQS-MAY': BQuarterBegin(startingMonth=5), - 'BQS-JUN': BQuarterBegin(startingMonth=6), - 'BQS-JUL': BQuarterBegin(startingMonth=7), - 'BQS-AUG': BQuarterBegin(startingMonth=8), - 'BQS-SEP': BQuarterBegin(startingMonth=9), - 'BQS-OCT': BQuarterBegin(startingMonth=10), - 'BQS-NOV': BQuarterBegin(startingMonth=11), - 'BQS-DEC': BQuarterBegin(startingMonth=12), - - # Weekly - 'W-MON': Week(weekday=0), - 'W-TUE': Week(weekday=1), - 'W-WED': Week(weekday=2), - 'W-THU': Week(weekday=3), - 'W-FRI': Week(weekday=4), - 'W-SAT': Week(weekday=5), - 'W-SUN': Week(weekday=6), - -} - -if not _np_version_under1p7: - _offset_map['N'] = Nano() +#: cache of previously seen offsets +_offset_map = {} _offset_to_period_map = { 'WEEKDAY': 'D', @@ -386,15 +238,6 @@ def 
get_period_alias(offset_str): _legacy_reverse_map = dict((v, k) for k, v in reversed(sorted(compat.iteritems(_rule_aliases)))) -# for helping out with pretty-printing and name-lookups - -_offset_names = {} -for name, offset in compat.iteritems(_offset_map): - if offset is None: - continue - offset.name = name - _offset_names[offset] = name - def inferTimeRule(index): from pandas.tseries.index import DatetimeIndex @@ -513,22 +356,21 @@ def get_offset(name): else: if name in _rule_aliases: name = _rule_aliases[name] - - offset = _offset_map.get(name) - - if offset is not None: - return offset - else: - raise ValueError('Bad rule name requested: %s.' % name) + try: + if name not in _offset_map: + # generate and cache offset + offset = _make_offset(name) + _offset_map[name] = offset + return _offset_map[name] + except (ValueError, TypeError, KeyError): + # bad prefix or suffix + pass + raise ValueError('Bad rule name requested: %s.' % name) getOffset = get_offset -def hasOffsetName(offset): - return offset in _offset_names - - def get_offset_name(offset): """ Return rule name associated with a DateOffset object @@ -537,11 +379,18 @@ def get_offset_name(offset): -------- get_offset_name(BMonthEnd(1)) --> 'EOM' """ - name = _offset_names.get(offset) - - if name is not None: - return name - else: + if offset is None: + raise ValueError("Offset can't be none!") + # Hack because this is what it did before... + if isinstance(offset, BDay): + if offset.n != 1: + raise ValueError('Bad rule given: %s.' % 'BusinessDays') + else: + return offset.rule_code + try: + return offset.freqstr + except AttributeError: + # Bad offset, give useful error. raise ValueError('Bad rule given: %s.' 
% offset) @@ -549,7 +398,7 @@ def get_legacy_offset_name(offset): """ Return the pre pandas 0.8.0 name for the date offset """ - name = _offset_names.get(offset) + name = offset.name return _legacy_reverse_map.get(name, name) get_offset_name = get_offset_name @@ -652,7 +501,7 @@ def _period_alias_dictionary(): L_aliases = ["L", "MS", "MILLISECOND", "MILLISECONDLY"] U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"] N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"] - + for k in M_aliases: alias_dict[k] = 'M' @@ -679,7 +528,7 @@ def _period_alias_dictionary(): for k in N_aliases: alias_dict[k] = 'N' - + A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR", "YEARLY"] diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 309b6fbb9a51a..a9488f74cb65a 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -8,7 +8,6 @@ # import after tools, dateutil check from dateutil.relativedelta import relativedelta import pandas.tslib as tslib -import numpy as np from pandas import _np_version_under1p7 __all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay', @@ -21,6 +20,7 @@ #---------------------------------------------------------------------- # DateOffset + class ApplyTypeError(TypeError): # sentinel class for catching the apply error to return NotImplemented pass @@ -108,7 +108,7 @@ def _should_cache(self): def _params(self): attrs = [(k, v) for k, v in compat.iteritems(vars(self)) if k not in ['kwds', '_offset', 'name', 'normalize', - 'busdaycalendar']] + 'busdaycalendar', '_named']] attrs.extend(list(self.kwds.items())) attrs = sorted(set(attrs)) @@ -116,15 +116,14 @@ def _params(self): return params def __repr__(self): - if hasattr(self, 'name') and len(self.name): - return self.name - + if hasattr(self, '_named'): + return self._named className = getattr(self, '_outputName', type(self).__name__) exclude = set(['n', 'inc']) attrs = [] for attr in sorted(self.__dict__): if ((attr == 'kwds' and 
len(self.kwds) == 0) - or attr.startswith('_')): + or attr.startswith('_')): continue elif attr == 'kwds': kwds_new = {} @@ -152,6 +151,13 @@ def __repr__(self): out += '>' return out + @property + def name(self): + if hasattr(self, '_named'): + return self._named + else: + return self.rule_code + def __eq__(self, other): if other is None: return False @@ -234,9 +240,14 @@ def onOffset(self, dt): b = ((dt + self) - self) return a == b + # way to get around weirdness with rule_code + @property + def _prefix(self): + raise NotImplementedError('Prefix not defined') + @property def rule_code(self): - raise NotImplementedError + return self._prefix @property def freqstr(self): @@ -253,10 +264,20 @@ def freqstr(self): return fstr -class BusinessDay(CacheableOffset, DateOffset): +class SingleConstructorOffset(DateOffset): + @classmethod + def _from_name(cls, suffix=None): + # default _from_name calls cls with no args + if suffix: + raise ValueError("Bad freq suffix %s" % suffix) + return cls() + + +class BusinessDay(CacheableOffset, SingleConstructorOffset): """ DateOffset subclass representing possibly n business days """ + _prefix = 'B' def __init__(self, n=1, **kwds): self.n = int(n) @@ -264,14 +285,12 @@ def __init__(self, n=1, **kwds): self.offset = kwds.get('offset', timedelta(0)) self.normalize = kwds.get('normalize', False) - @property - def rule_code(self): - return 'B' - - def __repr__(self): #TODO: Figure out if this should be merged into DateOffset - if hasattr(self, 'name') and len(self.name): - return self.name - + # TODO: Combine this with DateOffset by defining a whitelisted set of + # attributes on each object rather than the existing behavior of iterating + # over internal ``__dict__`` + def __repr__(self): + if hasattr(self, '_named'): + return self._named className = getattr(self, '_outputName', self.__class__.__name__) attrs = [] @@ -411,6 +430,7 @@ class CustomBusinessDay(BusinessDay): """ _cacheable = False + _prefix = 'C' def __init__(self, n=1, 
**kwds): # Check we have the required numpy version @@ -450,10 +470,6 @@ def __setstate__(self, state): self.__dict__ = state self._set_busdaycalendar() - @property - def rule_code(self): - return 'C' - @staticmethod def _to_dt64(dt, dtype='datetime64'): if isinstance(dt, (datetime, compat.string_types)): @@ -503,11 +519,21 @@ def onOffset(self, dt): return np.is_busday(day64, busdaycal=self.busdaycalendar) -class MonthEnd(CacheableOffset, DateOffset): +class MonthOffset(SingleConstructorOffset): + @property + def name(self): + if self.isAnchored: + return self.rule_code + else: + return "%s-%s" % (self.rule_code, _int_to_month[self.n]) + + +class MonthEnd(CacheableOffset, MonthOffset): """DateOffset of one month end""" def apply(self, other): - other = datetime(other.year, other.month, other.day, tzinfo=other.tzinfo) + other = datetime(other.year, other.month, other.day, + tzinfo=other.tzinfo) n = self.n _, days_in_month = tslib.monthrange(other.year, other.month) @@ -523,12 +549,10 @@ def onOffset(cls, dt): days_in_month = tslib.monthrange(dt.year, dt.month)[1] return dt.day == days_in_month - @property - def rule_code(self): - return 'M' + _prefix = 'M' -class MonthBegin(CacheableOffset, DateOffset): +class MonthBegin(CacheableOffset, MonthOffset): """DateOffset of one month at beginning""" def apply(self, other): @@ -544,12 +568,10 @@ def apply(self, other): def onOffset(cls, dt): return dt.day == 1 - @property - def rule_code(self): - return 'MS' + _prefix = 'MS' -class BusinessMonthEnd(CacheableOffset, DateOffset): +class BusinessMonthEnd(CacheableOffset, MonthOffset): """DateOffset increments between business EOM dates""" def isAnchored(self): @@ -574,12 +596,10 @@ def apply(self, other): other = other - BDay() return other - @property - def rule_code(self): - return 'BM' + _prefix = 'BM' -class BusinessMonthBegin(CacheableOffset, DateOffset): +class BusinessMonthBegin(CacheableOffset, MonthOffset): """DateOffset of one business month at beginning""" def 
apply(self, other): @@ -611,9 +631,7 @@ def onOffset(cls, dt): else: return dt.day == 1 - @property - def rule_code(self): - return 'BMS' + _prefix = 'BMS' class Week(CacheableOffset, DateOffset): @@ -665,15 +683,25 @@ def apply(self, other): def onOffset(self, dt): return dt.weekday() == self.weekday + _prefix = 'W' + @property def rule_code(self): suffix = '' if self.weekday is not None: - suffix = '-%s' % (_weekday_dict[self.weekday]) - return 'W' + suffix + suffix = '-%s' % (_int_to_weekday[self.weekday]) + return self._prefix + suffix + + @classmethod + def _from_name(cls, suffix=None): + if not suffix: + weekday = None + else: + weekday = _weekday_to_int[suffix] + return cls(weekday=weekday) -_weekday_dict = { +_int_to_weekday = { 0: 'MON', 1: 'TUE', 2: 'WED', @@ -683,6 +711,8 @@ def rule_code(self): 6: 'SUN' } +_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items()) + class WeekOfMonth(CacheableOffset, DateOffset): """ @@ -736,7 +766,8 @@ def apply(self, other): else: months = self.n + 1 - return self.getOffsetOfMonth(other + relativedelta(months=months, day=1)) + return self.getOffsetOfMonth(other + relativedelta(months=months, + day=1)) def getOffsetOfMonth(self, dt): w = Week(weekday=self.weekday) @@ -754,27 +785,69 @@ def onOffset(self, dt): @property def rule_code(self): - suffix = '-%d%s' % (self.week + 1, _weekday_dict.get(self.weekday, '')) - return 'WOM' + suffix - + return '%s-%d%s' % (self._prefix, self.week + 1, + _int_to_weekday.get(self.weekday, '')) -class BQuarterEnd(CacheableOffset, DateOffset): - """DateOffset increments between business Quarter dates - startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ... - startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ... - startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ... 
- """ - _outputName = 'BusinessQuarterEnd' + _prefix = 'WOM' + @classmethod + def _from_name(cls, suffix=None): + if not suffix: + raise ValueError("Prefix %r requires a suffix." % (cls._prefix)) + # TODO: handle n here... + # only one digit weeks (1 --> week 0, 2 --> week 1, etc.) + week = int(suffix[0]) - 1 + weekday = _weekday_to_int[suffix[1:]] + return cls(week=week, weekday=weekday) + + +class QuarterOffset(DateOffset): + """Quarter representation - doesn't call super""" + + #: default month for __init__ + _default_startingMonth = None + #: default month in _from_name + _from_name_startingMonth = None + + # TODO: Consider combining QuarterOffset and YearOffset __init__ at some + # point def __init__(self, n=1, **kwds): self.n = n - self.startingMonth = kwds.get('startingMonth', 3) + self.startingMonth = kwds.get('startingMonth', + self._default_startingMonth) self.kwds = kwds def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) + @classmethod + def _from_name(cls, suffix=None): + kwargs = {} + if suffix: + kwargs['startingMonth'] = _month_to_int[suffix] + else: + if cls._from_name_startingMonth is not None: + kwargs['startingMonth'] = cls._from_name_startingMonth + return cls(**kwargs) + + @property + def rule_code(self): + return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth]) + + +class BQuarterEnd(CacheableOffset, QuarterOffset): + """DateOffset increments between business Quarter dates + startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ... + startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ... + startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ... 
+ """ + _outputName = 'BusinessQuarterEnd' + _default_startingMonth = 3 + # 'BQ' + _from_name_startingMonth = 12 + _prefix = 'BQ' + def apply(self, other): n = self.n @@ -802,13 +875,8 @@ def onOffset(self, dt): modMonth = (dt.month - self.startingMonth) % 3 return BMonthEnd().onOffset(dt) and modMonth == 0 - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.startingMonth] - return 'BQ' + suffix - -_month_dict = { +_int_to_month = { 1: 'JAN', 2: 'FEB', 3: 'MAR', @@ -823,18 +891,16 @@ def rule_code(self): 12: 'DEC' } +_month_to_int = dict((v, k) for k, v in _int_to_month.items()) -class BQuarterBegin(CacheableOffset, DateOffset): - _outputName = "BusinessQuarterBegin" - - def __init__(self, n=1, **kwds): - self.n = n - self.startingMonth = kwds.get('startingMonth', 3) - self.kwds = kwds - - def isAnchored(self): - return (self.n == 1 and self.startingMonth is not None) +# TODO: This is basically the same as BQuarterEnd +class BQuarterBegin(CacheableOffset, QuarterOffset): + _outputName = "BusinessQuarterBegin" + # I suspect this is wrong for *all* of them. + _default_startingMonth = 3 + _from_name_startingMonth = 1 + _prefix = 'BQS' def apply(self, other): n = self.n @@ -864,19 +930,16 @@ def apply(self, other): other.microsecond) return result - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.startingMonth] - return 'BQS' + suffix - -class QuarterEnd(CacheableOffset, DateOffset): +class QuarterEnd(CacheableOffset, QuarterOffset): """DateOffset increments between business Quarter dates startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ... startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ... startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ... 
""" _outputName = 'QuarterEnd' + _default_startingMonth = 3 + _prefix = 'Q' def __init__(self, n=1, **kwds): self.n = n @@ -907,20 +970,12 @@ def onOffset(self, dt): modMonth = (dt.month - self.startingMonth) % 3 return MonthEnd().onOffset(dt) and modMonth == 0 - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.startingMonth] - return 'Q' + suffix - -class QuarterBegin(CacheableOffset, DateOffset): +class QuarterBegin(CacheableOffset, QuarterOffset): _outputName = 'QuarterBegin' - - def __init__(self, n=1, **kwds): - self.n = n - self.startingMonth = kwds.get('startingMonth', 3) - - self.kwds = kwds + _default_startingMonth = 3 + _from_name_startingMonth = 1 + _prefix = 'QS' def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) @@ -943,24 +998,36 @@ def apply(self, other): other = other + relativedelta(months=3 * n - monthsSince, day=1) return other - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.startingMonth] - return 'QS' + suffix - -class BYearEnd(CacheableOffset, DateOffset): - """DateOffset increments between business EOM dates""" - _outputName = 'BusinessYearEnd' +class YearOffset(DateOffset): + """DateOffset that just needs a month""" def __init__(self, n=1, **kwds): - self.month = kwds.get('month', 12) + self.month = kwds.get('month', self._default_month) if self.month < 1 or self.month > 12: raise ValueError('Month must go from 1 to 12') DateOffset.__init__(self, n=n, **kwds) + @classmethod + def _from_name(cls, suffix=None): + kwargs = {} + if suffix: + kwargs['month'] = _month_to_int[suffix] + return cls(**kwargs) + + @property + def rule_code(self): + return '%s-%s' % (self._prefix, _int_to_month[self.month]) + + +class BYearEnd(CacheableOffset, YearOffset): + """DateOffset increments between business EOM dates""" + _outputName = 'BusinessYearEnd' + _default_month = 12 + _prefix = 'BA' + def apply(self, other): n = self.n @@ -990,23 +1057,12 @@ def apply(self, other): return result - 
@property - def rule_code(self): - suffix = '-%s' % _month_dict[self.month] - return 'BA' + suffix - -class BYearBegin(CacheableOffset, DateOffset): +class BYearBegin(CacheableOffset, YearOffset): """DateOffset increments between business year begin dates""" _outputName = 'BusinessYearBegin' - - def __init__(self, n=1, **kwds): - self.month = kwds.get('month', 1) - - if self.month < 1 or self.month > 12: - raise ValueError('Month must go from 1 to 12') - - DateOffset.__init__(self, n=n, **kwds) + _default_month = 1 + _prefix = 'BAS' def apply(self, other): n = self.n @@ -1032,22 +1088,11 @@ def apply(self, other): first = _get_firstbday(wkday) return datetime(other.year, self.month, first) - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.month] - return 'BAS' + suffix - -class YearEnd(CacheableOffset, DateOffset): +class YearEnd(CacheableOffset, YearOffset): """DateOffset increments between calendar year ends""" - - def __init__(self, n=1, **kwds): - self.month = kwds.get('month', 12) - - if self.month < 1 or self.month > 12: - raise ValueError('Month must go from 1 to 12') - - DateOffset.__init__(self, n=n, **kwds) + _default_month = 12 + _prefix = 'A' def apply(self, other): def _increment(date): @@ -1074,8 +1119,8 @@ def _decrement(date): date.microsecond) def _rollf(date): - if (date.month != self.month or - date.day < tslib.monthrange(date.year, date.month)[1]): + if date.month != self.month or\ + date.day < tslib.monthrange(date.year, date.month)[1]: date = _increment(date) return date @@ -1099,22 +1144,11 @@ def onOffset(self, dt): wkday, days_in_month = tslib.monthrange(dt.year, self.month) return self.month == dt.month and dt.day == days_in_month - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.month] - return 'A' + suffix - -class YearBegin(CacheableOffset, DateOffset): +class YearBegin(CacheableOffset, YearOffset): """DateOffset increments between calendar year begin dates""" - - def __init__(self, n=1, 
**kwds): - self.month = kwds.get('month', 1) - - if self.month < 1 or self.month > 12: - raise ValueError('Month must go from 1 to 12') - - DateOffset.__init__(self, n=n, **kwds) + _default_month = 1 + _prefix = 'AS' def apply(self, other): def _increment(date): @@ -1127,7 +1161,7 @@ def _increment(date): def _decrement(date): year = date.year if date.month < self.month or (date.month == self.month and - date.day == 1): + date.day == 1): year -= 1 return datetime(year, self.month, 1, date.hour, date.minute, date.second, date.microsecond) @@ -1156,11 +1190,6 @@ def _rollf(date): def onOffset(self, dt): return dt.month == self.month and dt.day == 1 - @property - def rule_code(self): - suffix = '-%s' % _month_dict[self.month] - return 'AS' + suffix - #---------------------------------------------------------------------- # Ticks @@ -1175,7 +1204,7 @@ def f(self, other): return f -class Tick(DateOffset): +class Tick(SingleConstructorOffset): _inc = timedelta(microseconds=1000) __gt__ = _tick_comp(operator.gt) @@ -1242,11 +1271,7 @@ def apply(self, other): else: raise ApplyTypeError('Unhandled type: %s' % type(other).__name__) - _rule_base = 'undefined' - - @property - def rule_code(self): - return self._rule_base + _prefix = 'undefined' def isAnchored(self): return False @@ -1287,36 +1312,36 @@ def _delta_to_nanoseconds(delta): class Day(CacheableOffset, Tick): _inc = timedelta(1) - _rule_base = 'D' + _prefix = 'D' class Hour(Tick): _inc = timedelta(0, 3600) - _rule_base = 'H' + _prefix = 'H' class Minute(Tick): _inc = timedelta(0, 60) - _rule_base = 'T' + _prefix = 'T' class Second(Tick): _inc = timedelta(0, 1) - _rule_base = 'S' + _prefix = 'S' class Milli(Tick): - _rule_base = 'L' + _prefix = 'L' class Micro(Tick): _inc = timedelta(microseconds=1) - _rule_base = 'U' + _prefix = 'U' class Nano(Tick): _inc = np.timedelta64(1, 'ns') if not _np_version_under1p7 else 1 - _rule_base = 'N' + _prefix = 'N' BDay = BusinessDay @@ -1402,3 +1427,46 @@ def 
generate_range(start=None, end=None, periods=None, if next_date <= cur: raise ValueError('Offset %s did not increment date' % offset) cur = next_date + +prefix_mapping = dict((offset._prefix, offset) for offset in [ + YearBegin, # 'AS' + YearEnd, # 'A' + BYearBegin, # 'BAS' + BYearEnd, # 'BA' + BusinessDay, # 'B' + BusinessMonthBegin, # 'BMS' + BusinessMonthEnd, # 'BM' + BQuarterEnd, # 'BQ' + BQuarterBegin, # 'BQS' + CustomBusinessDay, # 'C' + MonthEnd, # 'M' + MonthBegin, # 'MS' + Week, # 'W' + Second, # 'S' + Minute, # 'T' + Micro, # 'U' + QuarterEnd, # 'Q' + QuarterBegin, # 'QS' + Milli, # 'L' + Hour, # 'H' + Day, # 'D' + WeekOfMonth, # 'WOM' +]) + +if not _np_version_under1p7: + # Only 1.7+ supports nanosecond resolution + prefix_mapping['N'] = Nano + + +def _make_offset(key): + """Gets offset based on key. KeyError if prefix is bad, ValueError if + suffix is bad. All handled by `get_offset` in tseries/frequencies. Not + public.""" + if key is None: + return None + split = key.replace('@', '-').split('-') + klass = prefix_mapping[split[0]] + # handles case where there's no suffix (and will TypeError if too many '-') + obj = klass._from_name(*split[1:]) + obj._named = key + return obj diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 0f7a356e84664..8592a2c2d8d9c 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -13,7 +13,7 @@ DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second, Day, Micro, Milli, Nano, WeekOfMonth, format, ole2datetime, QuarterEnd, to_datetime, normalize_date, - get_offset, get_offset_name, hasOffsetName, get_standard_freq) + get_offset, get_offset_name, get_standard_freq) from pandas.tseries.frequencies import _offset_map from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache @@ -99,6 +99,7 @@ class TestDateOffset(unittest.TestCase): def setUp(self): self.d = Timestamp(datetime(2008, 1, 2)) + _offset_map.clear() def 
test_repr(self): repr(DateOffset()) @@ -1747,11 +1748,6 @@ def test_compare_ticks(): assert(kls(3) != kls(4)) -def test_hasOffsetName(): - assert hasOffsetName(BDay()) - assert not hasOffsetName(BDay(2)) - - def test_get_offset_name(): assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2)) @@ -1766,17 +1762,17 @@ def test_get_offset_name(): def test_get_offset(): assertRaisesRegexp(ValueError, "rule.*GIBBERISH", get_offset, 'gibberish') + assertRaisesRegexp(ValueError, "rule.*QS-JAN-B", get_offset, 'QS-JAN-B') + pairs = [('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()), + ('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)), + ('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)), + ('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)), + ('w@Sat', Week(weekday=5))] - assert get_offset('B') == BDay() - assert get_offset('b') == BDay() - assert get_offset('bm') == BMonthEnd() - assert get_offset('Bm') == BMonthEnd() - assert get_offset('W-MON') == Week(weekday=0) - assert get_offset('W-TUE') == Week(weekday=1) - assert get_offset('W-WED') == Week(weekday=2) - assert get_offset('W-THU') == Week(weekday=3) - assert get_offset('W-FRI') == Week(weekday=4) - assert get_offset('w@Sat') == Week(weekday=5) + for name, expected in pairs: + offset = get_offset(name) + assert offset == expected, ("Expected %r to yield %r (actual: %r)" % + (name, expected, offset)) def test_parse_time_string(): @@ -1813,7 +1809,7 @@ def test_quarterly_dont_normalize(): class TestOffsetAliases(unittest.TestCase): def setUp(self): - pass + _offset_map.clear() def test_alias_equality(self): for k, v in compat.iteritems(_offset_map): @@ -1824,15 +1820,17 @@ def test_alias_equality(self): def test_rule_code(self): lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U'] for k in lst: - assert k == _offset_map[k].rule_code - assert k == (_offset_map[k] * 3).rule_code + self.assertEqual(k, get_offset(k).rule_code) + # should be cached - this is kind of an internals 
test... + assert k in _offset_map + self.assertEqual(k, (get_offset(k) * 3).rule_code) suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] base = 'W' for v in suffix_lst: alias = '-'.join([base, v]) - assert alias == _offset_map[alias].rule_code - assert alias == (_offset_map[alias] * 5).rule_code + self.assertEqual(alias, get_offset(alias).rule_code) + self.assertEqual(alias, (get_offset(alias) * 5).rule_code) suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] @@ -1840,8 +1838,8 @@ def test_rule_code(self): for base in base_lst: for v in suffix_lst: alias = '-'.join([base, v]) - assert alias == _offset_map[alias].rule_code - assert alias == (_offset_map[alias] * 5).rule_code + self.assertEqual(alias, get_offset(alias).rule_code) + self.assertEqual(alias, (get_offset(alias) * 5).rule_code) def test_apply_ticks(): @@ -1900,6 +1898,7 @@ def test_all_cacheableoffsets(self): def setUp(self): _daterange_cache.clear() + _offset_map.clear() def run_X_index_creation(self, cls): inst1 = cls() @@ -1927,6 +1926,26 @@ def test_week_of_month_index_creation(self): self.assertTrue(inst2 in _daterange_cache) +class TestReprNames(unittest.TestCase): + def test_str_for_named_is_name(self): + # look at all the amazing combinations! 
+ month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS'] + names = [prefix + '-' + month for prefix in month_prefixes + for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', + 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']] + days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] + names += ['W-' + day for day in days] + names += ['WOM-' + week + day for week in ('1', '2', '3', '4') + for day in days] + #singletons + names += ['S', 'T', 'U', 'BM', 'BMS', 'BQ', 'QS'] # No 'Q' + _offset_map.clear() + for name in names: + offset = get_offset(name) + self.assertEqual(repr(offset), name) + self.assertEqual(str(offset), name) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
Implements @wesm's suggestion and clears the way for new offsets. Very simple to extend - map a string to your class in mapping, define a `from_name` classmethod, then class gets passed everything after the `-` (if anything) to its method `from_name()`. Caches to dict afterwards (right now doesn't use cacheable, but could take advantage of that). cc @cancan101 For reference, here are all the existing offset: output aliases (using types because otherwise prints exactly the same). [(None, NoneType), ('A-APR', pandas.tseries.offsets.YearEnd), ('A-AUG', pandas.tseries.offsets.YearEnd), ('A-DEC', pandas.tseries.offsets.YearEnd), ('A-FEB', pandas.tseries.offsets.YearEnd), ('A-JAN', pandas.tseries.offsets.YearEnd), ('A-JUL', pandas.tseries.offsets.YearEnd), ('A-JUN', pandas.tseries.offsets.YearEnd), ('A-MAR', pandas.tseries.offsets.YearEnd), ('A-MAY', pandas.tseries.offsets.YearEnd), ('A-NOV', pandas.tseries.offsets.YearEnd), ('A-OCT', pandas.tseries.offsets.YearEnd), ('A-SEP', pandas.tseries.offsets.YearEnd), ('AS-APR', pandas.tseries.offsets.YearBegin), ('AS-AUG', pandas.tseries.offsets.YearBegin), ('AS-DEC', pandas.tseries.offsets.YearBegin), ('AS-FEB', pandas.tseries.offsets.YearBegin), ('AS-JAN', pandas.tseries.offsets.YearBegin), ('AS-JUL', pandas.tseries.offsets.YearBegin), ('AS-JUN', pandas.tseries.offsets.YearBegin), ('AS-MAR', pandas.tseries.offsets.YearBegin), ('AS-MAY', pandas.tseries.offsets.YearBegin), ('AS-NOV', pandas.tseries.offsets.YearBegin), ('AS-OCT', pandas.tseries.offsets.YearBegin), ('AS-SEP', pandas.tseries.offsets.YearBegin), ('B', pandas.tseries.offsets.BusinessDay), ('BA-APR', pandas.tseries.offsets.BYearEnd), ('BA-AUG', pandas.tseries.offsets.BYearEnd), ('BA-DEC', pandas.tseries.offsets.BYearEnd), ('BA-FEB', pandas.tseries.offsets.BYearEnd), ('BA-JAN', pandas.tseries.offsets.BYearEnd), ('BA-JUL', pandas.tseries.offsets.BYearEnd), ('BA-JUN', pandas.tseries.offsets.BYearEnd), ('BA-MAR', pandas.tseries.offsets.BYearEnd), ('BA-MAY', 
pandas.tseries.offsets.BYearEnd), ('BA-NOV', pandas.tseries.offsets.BYearEnd), ('BA-OCT', pandas.tseries.offsets.BYearEnd), ('BA-SEP', pandas.tseries.offsets.BYearEnd), ('BAS-APR', pandas.tseries.offsets.BYearBegin), ('BAS-AUG', pandas.tseries.offsets.BYearBegin), ('BAS-DEC', pandas.tseries.offsets.BYearBegin), ('BAS-FEB', pandas.tseries.offsets.BYearBegin), ('BAS-JAN', pandas.tseries.offsets.BYearBegin), ('BAS-JUL', pandas.tseries.offsets.BYearBegin), ('BAS-JUN', pandas.tseries.offsets.BYearBegin), ('BAS-MAR', pandas.tseries.offsets.BYearBegin), ('BAS-MAY', pandas.tseries.offsets.BYearBegin), ('BAS-NOV', pandas.tseries.offsets.BYearBegin), ('BAS-OCT', pandas.tseries.offsets.BYearBegin), ('BAS-SEP', pandas.tseries.offsets.BYearBegin), ('BM', pandas.tseries.offsets.BusinessMonthEnd), ('BMS', pandas.tseries.offsets.BusinessMonthBegin), ('BQ', pandas.tseries.offsets.BQuarterEnd), ('BQ-APR', pandas.tseries.offsets.BQuarterEnd), ('BQ-AUG', pandas.tseries.offsets.BQuarterEnd), ('BQ-DEC', pandas.tseries.offsets.BQuarterEnd), ('BQ-FEB', pandas.tseries.offsets.BQuarterEnd), ('BQ-JAN', pandas.tseries.offsets.BQuarterEnd), ('BQ-JUL', pandas.tseries.offsets.BQuarterEnd), ('BQ-JUN', pandas.tseries.offsets.BQuarterEnd), ('BQ-MAR', pandas.tseries.offsets.BQuarterEnd), ('BQ-MAY', pandas.tseries.offsets.BQuarterEnd), ('BQ-NOV', pandas.tseries.offsets.BQuarterEnd), ('BQ-OCT', pandas.tseries.offsets.BQuarterEnd), ('BQ-SEP', pandas.tseries.offsets.BQuarterEnd), ('BQS', pandas.tseries.offsets.BQuarterBegin), ('BQS-APR', pandas.tseries.offsets.BQuarterBegin), ('BQS-AUG', pandas.tseries.offsets.BQuarterBegin), ('BQS-DEC', pandas.tseries.offsets.BQuarterBegin), ('BQS-FEB', pandas.tseries.offsets.BQuarterBegin), ('BQS-JAN', pandas.tseries.offsets.BQuarterBegin), ('BQS-JUL', pandas.tseries.offsets.BQuarterBegin), ('BQS-JUN', pandas.tseries.offsets.BQuarterBegin), ('BQS-MAR', pandas.tseries.offsets.BQuarterBegin), ('BQS-MAY', pandas.tseries.offsets.BQuarterBegin), ('BQS-NOV', 
pandas.tseries.offsets.BQuarterBegin), ('BQS-OCT', pandas.tseries.offsets.BQuarterBegin), ('BQS-SEP', pandas.tseries.offsets.BQuarterBegin), ('Q-APR', pandas.tseries.offsets.QuarterEnd), ('Q-AUG', pandas.tseries.offsets.QuarterEnd), ('Q-DEC', pandas.tseries.offsets.QuarterEnd), ('Q-FEB', pandas.tseries.offsets.QuarterEnd), ('Q-JAN', pandas.tseries.offsets.QuarterEnd), ('Q-JUL', pandas.tseries.offsets.QuarterEnd), ('Q-JUN', pandas.tseries.offsets.QuarterEnd), ('Q-MAR', pandas.tseries.offsets.QuarterEnd), ('Q-MAY', pandas.tseries.offsets.QuarterEnd), ('Q-NOV', pandas.tseries.offsets.QuarterEnd), ('Q-OCT', pandas.tseries.offsets.QuarterEnd), ('Q-SEP', pandas.tseries.offsets.QuarterEnd), ('QS', pandas.tseries.offsets.QuarterBegin), ('QS-APR', pandas.tseries.offsets.QuarterBegin), ('QS-AUG', pandas.tseries.offsets.QuarterBegin), ('QS-DEC', pandas.tseries.offsets.QuarterBegin), ('QS-FEB', pandas.tseries.offsets.QuarterBegin), ('QS-JAN', pandas.tseries.offsets.QuarterBegin), ('QS-JUL', pandas.tseries.offsets.QuarterBegin), ('QS-JUN', pandas.tseries.offsets.QuarterBegin), ('QS-MAR', pandas.tseries.offsets.QuarterBegin), ('QS-MAY', pandas.tseries.offsets.QuarterBegin), ('QS-NOV', pandas.tseries.offsets.QuarterBegin), ('QS-OCT', pandas.tseries.offsets.QuarterBegin), ('QS-SEP', pandas.tseries.offsets.QuarterBegin), ('S', pandas.tseries.offsets.Second), ('T', pandas.tseries.offsets.Minute), ('U', pandas.tseries.offsets.Micro), ('W-FRI', pandas.tseries.offsets.Week), ('W-MON', pandas.tseries.offsets.Week), ('W-SAT', pandas.tseries.offsets.Week), ('W-SUN', pandas.tseries.offsets.Week), ('W-THU', pandas.tseries.offsets.Week), ('W-TUE', pandas.tseries.offsets.Week), ('W-WED', pandas.tseries.offsets.Week), ('WOM-1FRI', pandas.tseries.offsets.WeekOfMonth), ('WOM-1MON', pandas.tseries.offsets.WeekOfMonth), ('WOM-1THU', pandas.tseries.offsets.WeekOfMonth), ('WOM-1TUE', pandas.tseries.offsets.WeekOfMonth), ('WOM-1WED', pandas.tseries.offsets.WeekOfMonth), ('WOM-2FRI', 
pandas.tseries.offsets.WeekOfMonth), ('WOM-2MON', pandas.tseries.offsets.WeekOfMonth), ('WOM-2THU', pandas.tseries.offsets.WeekOfMonth), ('WOM-2TUE', pandas.tseries.offsets.WeekOfMonth), ('WOM-2WED', pandas.tseries.offsets.WeekOfMonth), ('WOM-3FRI', pandas.tseries.offsets.WeekOfMonth), ('WOM-3MON', pandas.tseries.offsets.WeekOfMonth), ('WOM-3THU', pandas.tseries.offsets.WeekOfMonth), ('WOM-3TUE', pandas.tseries.offsets.WeekOfMonth), ('WOM-3WED', pandas.tseries.offsets.WeekOfMonth), ('WOM-4FRI', pandas.tseries.offsets.WeekOfMonth), ('WOM-4MON', pandas.tseries.offsets.WeekOfMonth), ('WOM-4THU', pandas.tseries.offsets.WeekOfMonth), ('WOM-4TUE', pandas.tseries.offsets.WeekOfMonth), ('WOM-4WED', pandas.tseries.offsets.WeekOfMonth)]
https://api.github.com/repos/pandas-dev/pandas/pulls/5189
2013-10-12T04:28:45Z
2013-10-15T00:58:56Z
2013-10-15T00:58:56Z
2014-06-12T14:25:30Z
CLN/BUG/ENH: Raise AttributeError with pd.options
diff --git a/pandas/core/config.py b/pandas/core/config.py index 9f864e720dbfb..9de596142e7e0 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -65,6 +65,11 @@ _reserved_keys = ['all'] # keys which have a special meaning +class OptionError(AttributeError, KeyError): + """Exception for pandas.options, backwards compatible with KeyError + checks""" + + ########################################## # User API @@ -73,9 +78,9 @@ def _get_single_key(pat, silent): if len(keys) == 0: if not silent: _warn_if_deprecated(pat) - raise KeyError('No such keys(s): %r' % pat) + raise OptionError('No such keys(s): %r' % pat) if len(keys) > 1: - raise KeyError('Pattern matched multiple keys') + raise OptionError('Pattern matched multiple keys') key = keys[0] if not silent: @@ -147,7 +152,7 @@ def _describe_option(pat='', _print_desc=True): keys = _select_options(pat) if len(keys) == 0: - raise KeyError('No such keys(s)') + raise OptionError('No such keys(s)') s = u('') for k in keys: # filter by pat @@ -164,7 +169,7 @@ def _reset_option(pat): keys = _select_options(pat) if len(keys) == 0: - raise KeyError('No such keys(s)') + raise OptionError('No such keys(s)') if len(keys) > 1 and len(pat) < 4 and pat != 'all': raise ValueError('You must specify at least 4 characters when ' @@ -195,7 +200,7 @@ def __setattr__(self, key, val): if key in self.d and not isinstance(self.d[key], dict): _set_option(prefix, val) else: - raise KeyError("You can only set the value of existing options") + raise OptionError("You can only set the value of existing options") def __getattr__(self, key): prefix = object.__getattribute__(self, "prefix") @@ -211,6 +216,7 @@ def __getattr__(self, key): def __dir__(self): return list(self.d.keys()) + # For user convenience, we'd like to have the available options described # in the docstring. For dev convenience we'd like to generate the docstrings # dynamically instead of maintaining them by hand. 
To this, we use the @@ -255,7 +261,7 @@ def __doc__(self): Raises ------ -KeyError if no such option exists +OptionError if no such option exists {opts_desc} """ @@ -281,7 +287,7 @@ def __doc__(self): Raises ------ -KeyError if no such option exists +OptionError if no such option exists {opts_desc} """ @@ -398,9 +404,9 @@ def register_option(key, defval, doc='', validator=None, cb=None): key = key.lower() if key in _registered_options: - raise KeyError("Option '%s' has already been registered" % key) + raise OptionError("Option '%s' has already been registered" % key) if key in _reserved_keys: - raise KeyError("Option '%s' is a reserved key" % key) + raise OptionError("Option '%s' is a reserved key" % key) # the default value should be legal if validator: @@ -418,14 +424,14 @@ def register_option(key, defval, doc='', validator=None, cb=None): cursor = _global_config for i, p in enumerate(path[:-1]): if not isinstance(cursor, dict): - raise KeyError("Path prefix to option '%s' is already an option" + raise OptionError("Path prefix to option '%s' is already an option" % '.'.join(path[:i])) if p not in cursor: cursor[p] = {} cursor = cursor[p] if not isinstance(cursor, dict): - raise KeyError("Path prefix to option '%s' is already an option" + raise OptionError("Path prefix to option '%s' is already an option" % '.'.join(path[:-1])) cursor[path[-1]] = defval # initialize @@ -470,14 +476,14 @@ def deprecate_option(key, msg=None, rkey=None, removal_ver=None): Raises ------ - KeyError - if key has already been deprecated. + OptionError - if key has already been deprecated. """ key = key.lower() if key in _deprecated_options: - raise KeyError("Option '%s' has already been defined as deprecated." + raise OptionError("Option '%s' has already been defined as deprecated." % key) _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
Fixes #5182, closes #5186. It's called OptionError now and it subclasses both AttributeError (for hasattr) and KeyError (for backwards compatibility). Hurray!
https://api.github.com/repos/pandas-dev/pandas/pulls/5188
2013-10-11T23:13:23Z
2013-10-12T01:29:49Z
2013-10-12T01:29:49Z
2014-07-16T08:34:07Z
BUG: Allow duplicate indices when performing operations that align (GH5185)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 3892d5b77a08e..a160da315b05a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -586,6 +586,7 @@ Bug Fixes context manager. - Fixed segfault on ``isnull(MultiIndex)`` (now raises an error instead) (:issue:`5123`, :issue:`5125`) + - Allow duplicate indices when performing operations that align (:issue:`5185`) pandas 0.12.0 ------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b10af7f909405..556a7652b9270 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2455,10 +2455,12 @@ def _align_frame(self, other, join='outer', axis=None, level=None, left = self._reindex_with_indexers({0: [join_index, ilidx], 1: [join_columns, clidx]}, - copy=copy, fill_value=fill_value) + copy=copy, fill_value=fill_value, + allow_dups=True) right = other._reindex_with_indexers({0: [join_index, iridx], 1: [join_columns, cridx]}, - copy=copy, fill_value=fill_value) + copy=copy, fill_value=fill_value, + allow_dups=True) if method is not None: left = left.fillna(axis=fill_axis, method=method, limit=limit) diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 6f83ee90dd9da..ce29b6974de86 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -572,7 +572,8 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None, return SparseDataFrame(sdict, index=self.index, columns=columns, default_fill_value=self._default_fill_value) - def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit=None, copy=False): + def _reindex_with_indexers(self, reindexers, method=None, fill_value=None, limit=None, + copy=False, allow_dups=False): if method is not None or limit is not None: raise NotImplementedError("cannot reindex with a method or limit with sparse") diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 64a45d344f2a9..dd260ebacbc16 100644 --- a/pandas/tests/test_frame.py +++ 
b/pandas/tests/test_frame.py @@ -3209,6 +3209,14 @@ def check(result, expected=None): df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64') self.assertRaises(ValueError, lambda : df[df.A > 6]) + # dup aligining operations should work + # GH 5185 + df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) + df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) + expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3]) + result = df1.sub(df2) + assert_frame_equal(result,expected) + def test_insert_benchmark(self): # from the vb_suite/frame_methods/frame_insert_columns N = 10
closes #5185 ``` In [1]: df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) In [2]: df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) In [3]: df1-df2 Out[3]: 0 1 0 1 2 2 0 2 2 3 2 In [4]: df1.sub(df2) Out[4]: 0 1 0 1 2 2 0 2 2 3 2 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5187
2013-10-11T23:10:20Z
2013-10-12T13:54:10Z
2013-10-12T13:54:10Z
2014-06-14T18:00:07Z
fix replacement key for deprecated option `display.height`
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 797e979963ae2..13f7a3dbe7d4a 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -252,7 +252,7 @@ def mpl_style_cb(key): cf.deprecate_option('display.height', msg=pc_height_deprecation_warning, - rkey='display.height') + rkey='display.max_rows') tc_sim_interactive_doc = """ : boolean
replacement key for deprecated option `display.height` was set to `display.height` itself. I believe it should be `display.max_rows` instead. This avoid a weird message in `describe_option()` : ``` display.height: [default: 60] [currently: 60] : int Deprecated. (Deprecated, use `display.height` instead.) ``` (I spotted it while reading the online doc http://pandas.pydata.org/pandas-docs/dev/basics.html?highlight=display.height#working-with-package-options )
https://api.github.com/repos/pandas-dev/pandas/pulls/5181
2013-10-11T14:19:03Z
2013-10-14T01:53:38Z
2013-10-14T01:53:38Z
2014-07-16T08:34:04Z
TST: addtl pytables tests
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0411934b9ef87..0fff14013efe4 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1801,7 +1801,7 @@ def convert(self, values, nan_rep, encoding): elif dtype == u('date'): try: self.data = np.array( - [date.fromordinal(v) for v in data], dtype=object) + [date.fromordinal(v) for v in self.data], dtype=object) except (ValueError): self.data = np.array( [date.fromtimestamp(v) for v in self.data], dtype=object) @@ -3882,7 +3882,7 @@ def _unconvert_index(data, kind, encoding=None): [date.fromordinal(v) for v in data], dtype=object) except (ValueError): index = np.array( - [date.fromtimestamp(v) for v in self.data], dtype=object) + [date.fromtimestamp(v) for v in data], dtype=object) elif kind in (u('integer'), u('float')): index = np.array(data) elif kind in (u('string')): diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 835198400cd5a..4f03f29d77ea3 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1809,6 +1809,16 @@ def test_store_timezone(self): import time import os + # original method + with ensure_clean(self.path) as store: + + today = datetime.date(2013,9,10) + df = DataFrame([1,2,3], index = [today, today, today]) + store['obj1'] = df + result = store['obj1'] + assert_frame_equal(result, df) + + # with tz setting orig_tz = os.environ.get('TZ') def setTZ(tz):
related #5045
https://api.github.com/repos/pandas-dev/pandas/pulls/5180
2013-10-11T12:14:26Z
2013-10-11T12:26:22Z
2013-10-11T12:26:22Z
2014-07-16T08:34:03Z
GBQ: Updated Documentation, and added method to generic.py
diff --git a/doc/source/api.rst b/doc/source/api.rst index 46d77d0dcceb7..5706fa7864ed5 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -89,8 +89,19 @@ SQL read_frame write_frame +Google BigQuery +~~~~~~~~~~~~~~~ +.. currentmodule:: pandas.io.gbq + +.. autosummary:: + :toctree: generated/ + + read_gbq + to_gbq + .. currentmodule:: pandas + STATA ~~~~~ diff --git a/doc/source/io.rst b/doc/source/io.rst index 3e9359743b7a4..e75de91582b49 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2932,56 +2932,76 @@ if the source datatypes are compatible with BigQuery ones. For specifics on the service itself, see `here <https://developers.google.com/bigquery/>`__ As an example, suppose you want to load all data from an existing table -``test_dataset.test_table`` into BigQuery and pull it into a ``DataFrame``. +: `test_dataset.test_table` +into BigQuery and pull it into a DataFrame. -:: +.. code-block:: python from pandas.io import gbq - data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table') + + # Insert your BigQuery Project ID Here + # Can be found in the web console, or + # using the command line tool `bq ls` + projectid = "xxxxxxxx" + + data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table', project_id = projectid) -The user will then be authenticated by the ``bq`` command line client - +The user will then be authenticated by the `bq` command line client - this usually involves the default browser opening to a login page, though the process can be done entirely from command line if necessary. 
-Datasets and additional parameters can be either configured with ``bq``, -passed in as options to :func:`~pandas.read_gbq`, or set using Google's -``gflags`` (this is not officially supported by this module, though care was -taken to ensure that they should be followed regardless of how you call the +Datasets and additional parameters can be either configured with `bq`, +passed in as options to `read_gbq`, or set using Google's gflags (this +is not officially supported by this module, though care was taken +to ensure that they should be followed regardless of how you call the method). Additionally, you can define which column to use as an index as well as a preferred column order as follows: -:: +.. code-block:: python data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table', index_col='index_column_name', - col_order='[col1, col2, col3,...]') + col_order='[col1, col2, col3,...]', project_id = projectid) -Finally, if you would like to create a BigQuery table, `my_dataset.my_table`, -from the rows of DataFrame, `df`: +Finally, if you would like to create a BigQuery table, `my_dataset.my_table`, from the rows of DataFrame, `df`: -:: +.. code-block:: python - df = pandas.DataFrame({'string_col_name': ['hello'], - 'integer_col_name': [1], - 'boolean_col_name': [True]}) + df = pandas.DataFrame({'string_col_name' : ['hello'], + 'integer_col_name' : [1], + 'boolean_col_name' : [True]}) schema = ['STRING', 'INTEGER', 'BOOLEAN'] - data_frame = gbq.to_gbq(df, 'my_dataset.my_table', if_exists='fail', - schema=schema) + data_frame = gbq.to_gbq(df, 'my_dataset.my_table', + if_exists='fail', schema = schema, project_id = projectid) To add more rows to this, simply: -:: +.. 
code-block:: python - df2 = pandas.DataFrame({'string_col_name': ['hello2'], - 'integer_col_name': [2], - 'boolean_col_name': [False]}) - data_frame = gbq.to_gbq(df2, 'my_dataset.my_table', if_exists='append') + df2 = pandas.DataFrame({'string_col_name' : ['hello2'], + 'integer_col_name' : [2], + 'boolean_col_name' : [False]}) + data_frame = gbq.to_gbq(df2, 'my_dataset.my_table', if_exists='append', project_id = projectid) .. note:: - There is a hard cap on BigQuery result sets, at 128MB compressed. Also, the - BigQuery SQL query language has some oddities, see `here - <https://developers.google.com/bigquery/query-reference>`__ + A default project id can be set using the command line: + `bq init`. + + There is a hard cap on BigQuery result sets, at 128MB compressed. Also, the BigQuery SQL query language has some oddities, + see `here <https://developers.google.com/bigquery/query-reference>`__ + + You can access the management console to determine project id's by: + <https://code.google.com/apis/console/b/0/?noredirect> + +.. warning:: + + To use this module, you will need a BigQuery account. See + <https://cloud.google.com/products/big-query> for details. + + As of 10/10/13, there is a bug in Google's API preventing result sets + from being larger than 100,000 rows. A patch is scheduled for the week of + 10/14/13. .. _io.stata: diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 6bf32b2343084..14e120fdff672 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -8,7 +8,7 @@ enhancements along with a large number of bug fixes. Highlights include support for a new index type ``Float64Index``, support for new methods of interpolation, updated ``timedelta`` operations, and a new string manipulation method ``extract``. Several experimental features are added, including new ``eval/query`` methods for expression evaluation, support for ``msgpack`` serialization, -and an io interface to google's ``BigQuery``. 
+and an io interface to Google's ``BigQuery``. .. warning:: @@ -648,6 +648,69 @@ Experimental os.remove('foo.msg') +- ``pandas.io.gbq`` provides a simple way to extract from, and load data into, + Google's BigQuery Data Sets by way of pandas DataFrames. BigQuery is a high + performance SQL-like database service, useful for performing ad-hoc queries + against extremely large datasets. :ref:`See the docs<io.gbq>` + + .. code-block:: python + + from pandas.io import gbq + + # A query to select the average monthly temperatures in the + # in the year 2000 across the USA. The dataset, + # publicata:samples.gsod, is available on all BigQuery accounts, + # and is based on NOAA gsod data. + + query = """SELECT station_number as STATION, + month as MONTH, AVG(mean_temp) as MEAN_TEMP + FROM publicdata:samples.gsod + WHERE YEAR = 2000 + GROUP BY STATION, MONTH + ORDER BY STATION, MONTH ASC""" + + # Fetch the result set for this query + + # Your Google BigQuery Project ID + # To find this, see your dashboard: + # https://code.google.com/apis/console/b/0/?noredirect + projectid = xxxxxxxxx; + + df = gbq.read_gbq(query, project_id = projectid) + + # Use pandas to process and reshape the dataset + + df2 = df.pivot(index='STATION', columns='MONTH', values='MEAN_TEMP') + df3 = pandas.concat([df2.min(), df2.mean(), df2.max()], + axis=1,keys=["Min Tem", "Mean Temp", "Max Temp"]) + + The resulting dataframe is: + + ``` + Min Tem Mean Temp Max Temp + MONTH + 1 -53.336667 39.827892 89.770968 + 2 -49.837500 43.685219 93.437932 + 3 -77.926087 48.708355 96.099998 + 4 -82.892858 55.070087 97.317240 + 5 -92.378261 61.428117 102.042856 + 6 -77.703334 65.858888 102.900000 + 7 -87.821428 68.169663 106.510714 + 8 -89.431999 68.614215 105.500000 + 9 -86.611112 63.436935 107.142856 + 10 -78.209677 56.880838 92.103333 + 11 -50.125000 48.861228 94.996428 + 12 -50.332258 42.286879 94.396774 + ``` + .. warning:: + + To use this module, you will need a BigQuery account. 
See + <https://cloud.google.com/products/big-query> for details. + + As of 10/10/13, there is a bug in Google's API preventing result sets + from being larger than 100,000 rows. A patch is scheduled for the week of + 10/14/13. + .. _whatsnew_0130.refactoring: Internal Refactoring diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 967da6102ae1a..7013ad4f9b02b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -671,6 +671,41 @@ def to_dict(self, outtype='dict'): else: # pragma: no cover raise ValueError("outtype %s not understood" % outtype) + def to_gbq(self, destination_table, schema=None, col_order=None, if_exists='fail', **kwargs): + """ + Write a DataFrame to a Google BigQuery table. If the table exists, + the DataFrame will be appended. If not, a new table will be created, + in which case the schema will have to be specified. By default, + rows will be written in the order they appear in the DataFrame, though + the user may specify an alternative order. + + Parameters + --------------- + destination_table: string + name of table to be written, in the form 'dataset.tablename' + schema : sequence (optional) + list of column types in order for data to be inserted, e.g. ['INTEGER', 'TIMESTAMP', 'BOOLEAN'] + col_order: sequence (optional) + order which columns are to be inserted, e.g. ['primary_key', 'birthday', 'username'] + if_exists: {'fail', 'replace', 'append'} (optional) + fail: If table exists, do nothing. + replace: If table exists, drop it, recreate it, and insert data. + append: If table exists, insert data. Create if does not exist. 
+ kwargs are passed to the Client constructor + + Raises: + ------ + SchemaMissing: + Raised if the 'if_exists' parameter is set to 'replace', but no schema is specified + TableExists: + Raised if the specified 'destination_table' exists but the 'if_exists' parameter is set to 'fail' (the default) + InvalidSchema: + Raised if the 'schema' parameter does not match the provided DataFrame + """ + + from pandas.io import gbq + return gbq.to_gbq(self, destination_table, schema=None, col_order=None, if_exists='fail', **kwargs) + @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None):
Added some documentation for GBQ module
https://api.github.com/repos/pandas-dev/pandas/pulls/5179
2013-10-10T22:20:22Z
2013-10-15T00:57:42Z
2013-10-15T00:57:42Z
2014-06-23T01:43:05Z
PERF: calling get_indexer twice when a specialized indexer (e.g. pad) could be called instead
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 611da0e536759..afc5a70393c70 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -490,7 +490,7 @@ Enhancements mask dfi[mask.any(1)] - :ref:`See the docs<indexing.basics.indexing_isin>` for more. + :ref:`See the docs<indexing.basics.indexing_isin>` for more. - ``Series`` now supports a ``to_frame`` method to convert it to a single-column DataFrame (:issue:`5164`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3409fdf00b370..335f772bbc04c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1241,8 +1241,19 @@ def _reindex_axes(self, axes, level, limit, method, fill_value, copy, takeable=F labels = _ensure_index(labels) axis = self._get_axis_number(a) - new_index, indexer = self._get_axis(a).reindex( - labels, level=level, limit=limit, takeable=takeable) + ax = self._get_axis(a) + try: + new_index, indexer = ax.reindex(labels, level=level, + limit=limit, method=method, takeable=takeable) + except (ValueError): + + # catch trying to reindex a non-monotonic index with a specialized indexer + # e.g. pad, so fallback to the regular indexer + # this will show up on reindexing a not-naturally ordering series, e.g. 
+ # Series([1,2,3,4],index=['a','b','c','d']).reindex(['c','b','g'],method='pad') + new_index, indexer = ax.reindex(labels, level=level, + limit=limit, method=None, takeable=takeable) + obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, method=method, fill_value=fill_value, limit=limit, copy=copy) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index df62edf2f7833..697f99fcca81f 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -224,6 +224,7 @@ def reindex_items_from(self, new_ref_items, indexer=None, method=None, fill_valu if indexer is None: new_ref_items, indexer = self.items.reindex(new_ref_items, limit=limit) + needs_fill = method is not None and limit is None if fill_value is None: fill_value = self.fill_value @@ -245,14 +246,13 @@ def reindex_items_from(self, new_ref_items, indexer=None, method=None, fill_valu new_items = self.items.take(masked_idx) # fill if needed - fill_method = method is not None or limit is not None - if fill_method: + if needs_fill: new_values = com.interpolate_2d(new_values, method=method, limit=limit, fill_value=fill_value) block = make_block(new_values, new_items, new_ref_items, ndim=self.ndim, fastpath=True) # down cast if needed - if not self.is_float and (fill_method or notnull(fill_value)): + if not self.is_float and (needs_fill or notnull(fill_value)): block = block.downcast() return block
better on reindexing ``` Invoked with : --ncalls: 3 --repeats: 3 ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_reindex_upcast | 12.3383 | 12.7693 | 0.9662 | frame_reindex_both_axes | 36.2797 | 36.2826 | 0.9999 | frame_reindex_both_axes_ix | 34.7884 | 34.7476 | 1.0012 | frame_reindex_axis1 | 555.0433 | 553.5540 | 1.0027 | frame_reindex_axis0 | 87.8770 | 82.8171 | 1.0611 | reindex_fillna_pad | 0.6303 | 0.5744 | 1.0974 | reindex_frame_level_align | 0.6430 | 0.5836 | 1.1017 | reindex_fillna_backfill | 0.6286 | 0.5697 | 1.1035 | reindex_frame_level_reindex | 0.5973 | 0.5407 | 1.1048 | reindex_fillna_pad_float32 | 0.5320 | 0.4800 | 1.1083 | reindex_fillna_backfill_float32 | 0.5307 | 0.4773 | 1.1119 | dataframe_reindex | 0.4213 | 0.3650 | 1.1541 | frame_reindex_columns | 0.3730 | 0.3120 | 1.1954 | reindex_multiindex | 1.4537 | 1.1006 | 1.3208 | reindex_daterange_pad | 1.2333 | 0.6417 | 1.9221 | reindex_daterange_backfill | 1.2383 | 0.6430 | 1.9258 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [661adc5] : PERF: calling get_indexer twice when a specialized indexer (e.g. pad) could be called instead Base [8c0a34f] : RLS: set released to True, edit release dates ```
https://api.github.com/repos/pandas-dev/pandas/pulls/5178
2013-10-10T21:51:56Z
2013-10-10T22:17:34Z
2013-10-10T22:17:34Z
2014-07-16T08:33:58Z
API: make allclose comparison on dtype downcasting (GH5174)
diff --git a/pandas/core/common.py b/pandas/core/common.py index 64599327f72ec..c87bea2abc2c2 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1053,7 +1053,7 @@ def _possibly_downcast_to_dtype(result, dtype): # do a test on the first element, if it fails then we are done r = result.ravel() arr = np.array([ r[0] ]) - if (arr != arr.astype(dtype)).item(): + if not np.allclose(arr,arr.astype(dtype)): return result # a comparable, e.g. a Decimal may slip in here @@ -1062,8 +1062,14 @@ def _possibly_downcast_to_dtype(result, dtype): if issubclass(result.dtype.type, (np.object_,np.number)) and notnull(result).all(): new_result = result.astype(dtype) - if (new_result == result).all(): - return new_result + try: + if np.allclose(new_result,result): + return new_result + except: + + # comparison of an object dtype with a number type could hit here + if (new_result == result).all(): + return new_result except: pass diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 0d3a1cfe9dfe1..df62edf2f7833 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -376,10 +376,10 @@ def downcast(self, dtypes=None): dtype = dtypes.get(item, self._downcast_dtype) if dtype is None: - nv = _block_shape(values[i]) + nv = _block_shape(values[i],ndim=self.ndim) else: nv = _possibly_downcast_to_dtype(values[i], dtype) - nv = _block_shape(nv) + nv = _block_shape(nv,ndim=self.ndim) blocks.append(make_block(nv, Index([item]), self.ref_items, ndim=self.ndim, fastpath=True)) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index e9902bf4d1195..c9ef3ea4e217c 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -59,7 +59,7 @@ def _axes(self): """ return the axes for my object typ """ return self._typ._AXIS_ORDERS - def _construct(self, shape, value=None, **kwargs): + def _construct(self, shape, value=None, dtype=None, **kwargs): """ construct an object for the given shape if value is specified use that 
if its a scalar if value is an array, repeat it as needed """ @@ -74,7 +74,7 @@ def _construct(self, shape, value=None, **kwargs): # remove the info axis kwargs.pop(self._typ._info_axis_name,None) else: - arr = np.empty(shape) + arr = np.empty(shape,dtype=dtype) arr.fill(value) else: fshape = np.prod(shape) @@ -184,6 +184,32 @@ def test_numpy_1_7_compat_numeric_methods(self): if f is not None: f(o) + def test_downcast(self): + # test close downcasting + + o = self._construct(shape=4, value=9, dtype=np.int64) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + self._compare(result, o) + + o = self._construct(shape=4, value=9.) + expected = o.astype(np.int64) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + self._compare(result, expected) + + o = self._construct(shape=4, value=9.5) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + self._compare(result, o) + + # are close + o = self._construct(shape=4, value=9.000000000005) + result = o.copy() + result._data = o._data.downcast(dtypes='infer') + expected = o.astype(np.int64) + self._compare(result, expected) + class TestSeries(unittest.TestCase, Generic): _typ = Series _comparator = lambda self, x, y: assert_series_equal(x,y) @@ -335,7 +361,7 @@ def test_interp_quad(self): _skip_if_no_scipy() sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) result = sq.interpolate(method='quadratic') - expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4]) + expected = Series([1, 4, 9, 16], index=[1, 2, 3, 4]) assert_series_equal(result, expected) def test_interp_scipy_basic(self): @@ -589,7 +615,7 @@ def test_spline(self): _skip_if_no_scipy() s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) result = s.interpolate(method='spline', order=1) - expected = Series([1., 2, 3, 4, 5, 6, 7]) # dtype? + expected = Series([1, 2, 3, 4, 5, 6, 7]) assert_series_equal(result, expected)
TST: update interpolate tests to account for correct dtype inference closes #5174
https://api.github.com/repos/pandas-dev/pandas/pulls/5177
2013-10-10T18:45:36Z
2013-10-10T19:54:28Z
2013-10-10T19:54:28Z
2014-07-16T08:33:56Z
RVT: revert 4912 change of 4030 nose import bug
diff --git a/pandas/__init__.py b/pandas/__init__.py index 803cda264b250..b33150cc64079 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -43,8 +43,6 @@ from pandas.io.api import * from pandas.computation.api import * -from pandas.util.testing import debug - from pandas.tools.describe import value_range from pandas.tools.merge import merge, concat, ordered_merge from pandas.tools.pivot import pivot_table, crosstab diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 4787c82282a1f..dfe81237ee15d 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -20,8 +20,6 @@ from numpy.random import randn, rand import numpy as np -import nose - import pandas as pd from pandas.core.common import isnull, _is_sequence import pandas.core.index as index @@ -88,12 +86,14 @@ def close(fignum=None): def mplskip(cls): """Skip a TestCase instance if matplotlib isn't installed""" + @classmethod def setUpClass(cls): try: import matplotlib as mpl mpl.use("Agg", warn=False) except ImportError: + import nose raise nose.SkipTest("matplotlib not installed") cls.setUpClass = setUpClass
https://api.github.com/repos/pandas-dev/pandas/pulls/5170
2013-10-10T04:37:53Z
2013-10-10T05:00:17Z
2013-10-10T05:00:17Z
2014-07-16T08:33:53Z
DOC/CLN: A few fixes and cleanup for doc warnings/errors
diff --git a/doc/source/io.rst b/doc/source/io.rst index 6ed71a1d40690..3e9359743b7a4 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2932,55 +2932,56 @@ if the source datatypes are compatible with BigQuery ones. For specifics on the service itself, see `here <https://developers.google.com/bigquery/>`__ As an example, suppose you want to load all data from an existing table -: `test_dataset.test_table` -into BigQuery and pull it into a DataFrame. +``test_dataset.test_table`` into BigQuery and pull it into a ``DataFrame``. -.. code-block:: python +:: from pandas.io import gbq data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table') -The user will then be authenticated by the `bq` command line client - +The user will then be authenticated by the ``bq`` command line client - this usually involves the default browser opening to a login page, though the process can be done entirely from command line if necessary. -Datasets and additional parameters can be either configured with `bq`, -passed in as options to `read_gbq`, or set using Google's gflags (this -is not officially supported by this module, though care was taken -to ensure that they should be followed regardless of how you call the +Datasets and additional parameters can be either configured with ``bq``, +passed in as options to :func:`~pandas.read_gbq`, or set using Google's +``gflags`` (this is not officially supported by this module, though care was +taken to ensure that they should be followed regardless of how you call the method). Additionally, you can define which column to use as an index as well as a preferred column order as follows: -.. 
code-block:: python +:: data_frame = gbq.read_gbq('SELECT * FROM test_dataset.test_table', index_col='index_column_name', col_order='[col1, col2, col3,...]') -Finally, if you would like to create a BigQuery table, `my_dataset.my_table`, from the rows of DataFrame, `df`: +Finally, if you would like to create a BigQuery table, `my_dataset.my_table`, +from the rows of DataFrame, `df`: -.. code-block:: python +:: - df = pandas.DataFrame({'string_col_name' : ['hello'], - 'integer_col_name' : [1], - 'boolean_col_name' : [True]}) + df = pandas.DataFrame({'string_col_name': ['hello'], + 'integer_col_name': [1], + 'boolean_col_name': [True]}) schema = ['STRING', 'INTEGER', 'BOOLEAN'] - data_frame = gbq.to_gbq(df, 'my_dataset.my_table', - if_exists='fail', schema = schema) + data_frame = gbq.to_gbq(df, 'my_dataset.my_table', if_exists='fail', + schema=schema) To add more rows to this, simply: -.. code-block:: python +:: - df2 = pandas.DataFrame({'string_col_name' : ['hello2'], - 'integer_col_name' : [2], - 'boolean_col_name' : [False]}) + df2 = pandas.DataFrame({'string_col_name': ['hello2'], + 'integer_col_name': [2], + 'boolean_col_name': [False]}) data_frame = gbq.to_gbq(df2, 'my_dataset.my_table', if_exists='append') .. note:: - There is a hard cap on BigQuery result sets, at 128MB compressed. Also, the BigQuery SQL query language has some oddities, - see `here <https://developers.google.com/bigquery/query-reference>`__ + There is a hard cap on BigQuery result sets, at 128MB compressed. Also, the + BigQuery SQL query language has some oddities, see `here + <https://developers.google.com/bigquery/query-reference>`__ .. _io.stata: diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index cee809da6719f..f953aeaa2a8a9 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -397,8 +397,11 @@ at the new values. .. _documentation: http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation .. 
_guide: http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html -Like other pandas fill methods, ``interpolate`` accepts a ``limit`` keyword argument. -Use this to limit the number of consecutive interpolations, keeping ``NaN`` s for interpolations that are too far from the last valid observation: + +Like other pandas fill methods, ``interpolate`` accepts a ``limit`` keyword +argument. Use this to limit the number of consecutive interpolations, keeping +``NaN`` values for interpolations that are too far from the last valid +observation: .. ipython:: python diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 335f772bbc04c..b10af7f909405 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1982,29 +1982,35 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, Parameters ---------- - method : {'linear', 'time', 'values', 'index' 'nearest', - 'zero', 'slinear', 'quadratic', 'cubic', - 'barycentric', 'krogh', 'polynomial', 'spline' - 'piecewise_polynomial', 'pchip'} - 'linear': ignore the index and treat the values as equally spaced. default - 'time': interpolation works on daily and higher resolution + method : {'linear', 'time', 'values', 'index' 'nearest', 'zero', + 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', + 'polynomial', 'spline' 'piecewise_polynomial', 'pchip'} + + * 'linear': ignore the index and treat the values as equally + spaced. default + * 'time': interpolation works on daily and higher resolution data to interpolate given length of interval - 'index': use the actual numerical values of the index - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', - 'polynomial' is passed to `scipy.interpolate.interp1d` with the order given - both 'polynomial' and 'spline' requre that you also specify and order (int) - e.g. 
df.interpolate(method='polynomial', order=4) - 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all wrappers - around the scipy interpolation methods of similar names. See the - scipy documentation for more on their behavior: - http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation - http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html + * 'index': use the actual numerical values of the index + * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', + 'barycentric', 'polynomial' is passed to + `scipy.interpolate.interp1d` with the order given both + 'polynomial' and 'spline' requre that you also specify and order + (int) e.g. df.interpolate(method='polynomial', order=4) + * 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all + wrappers around the scipy interpolation methods of similar + names. See the scipy documentation for more on their behavior: + http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation + http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html + axis : {0, 1}, default 0 - 0: fill column-by-column - 1: fill row-by-row - limit : int, default None. Maximum number of consecutive NaNs to fill. + * 0: fill column-by-column + * 1: fill row-by-row + limit : int, default None. + Maximum number of consecutive NaNs to fill. inplace : bool, default False + Update the NDFrame in place if possible. downcast : optional, 'infer' or None, defaults to 'infer' + Downcast dtypes if possible. Returns ------- diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1185e9514f7fc..f0d9dbe9c5877 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -391,8 +391,8 @@ def to_excel(self, path, na_rep='', engine=None, **kwargs): ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. 
- Keyword Arguments - ----------------- + Other Parameters + ---------------- float_format : string, default None Format string for floating point numbers cols : sequence, optional @@ -409,6 +409,8 @@ def to_excel(self, path, na_rep='', engine=None, **kwargs): startow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame + Notes + ----- Keyword arguments (and na_rep) are passed to the ``to_excel`` method for each DataFrame written. """ diff --git a/pandas/io/html.py b/pandas/io/html.py index 96bedbf390af6..f3cfa3a16807a 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -782,7 +782,10 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, latest information on table attributes for the modern web. parse_dates : bool, optional - See :func:`~pandas.read_csv` for details. + See :func:`~pandas.io.parsers.read_csv` for more details. In 0.13, this + parameter can sometimes interact strangely with ``infer_types``. If you + get a large number of ``NaT`` values in your results, consider passing + ``infer_types=False`` and manually converting types afterwards. tupleize_cols : bool, optional If ``False`` try to parse multiple header rows into a @@ -824,12 +827,12 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, See Also -------- - pandas.read_csv + pandas.io.parsers.read_csv """ if infer_types is not None: warnings.warn("infer_types will have no effect in 0.14", FutureWarning) else: - infer_types = True # TODO: remove in 0.14 + infer_types = True # TODO: remove effect of this in 0.14 # Type check here. We don't want to parse only to fail because of an # invalid value of an integer skiprows.
closes #5126.
https://api.github.com/repos/pandas-dev/pandas/pulls/5169
2013-10-10T02:21:30Z
2013-10-11T20:56:30Z
2013-10-11T20:56:30Z
2014-07-16T08:33:51Z