title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
make nanops work when ndim==1 and axis==0 ( issue #7354 )
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index c764d92376c33..a93ccc1b9111e 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -215,6 +215,8 @@ Bug Fixes - Bug in ``quantile`` ignoring the axis keyword argument (:issue`7306`) - Bug where ``nanops._maybe_null_out`` doesn't work with complex numbers (:issue:`7353`) +- Bug where several ``nanops`` functions fail when ``axis==0`` for + 1-dimensional ``nan`` arrays (:issue:`7354`) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index c0cb2e6ee6ceb..3be194f9673f3 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -260,7 +260,7 @@ def nanmean(values, axis=None, skipna=True): the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max)) count = _get_counts(mask, axis) - if axis is not None: + if axis is not None and getattr(the_sum, 'ndim', False): the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): @@ -517,7 +517,7 @@ def nanprod(values, axis=None, skipna=True): def _maybe_arg_null_out(result, axis, mask, skipna): # helper function for nanargmin/nanargmax - if axis is None: + if axis is None or not getattr(result, 'ndim', False): if skipna: if mask.all(): result = -1 @@ -544,7 +544,7 @@ def _get_counts(mask, axis): def _maybe_null_out(result, axis, mask): - if axis is not None: + if axis is not None and getattr(result, 'ndim', False): null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 if null_mask.any(): if np.iscomplexobj(result): diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index d0df6c2dc3e8b..47ca288177946 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -145,7 +145,7 @@ def check_fun_data(self, testfunc, targfunc, 'kwargs: %s' % kwargs) raise - if testarval.ndim <= 2: + if testarval.ndim <= 1: return try: @@ -245,7 +245,7 @@ def _nanmean_wrap(self, value, *args, **kwargs): dtype = value.dtype res = nanops.nanmean(value, *args, **kwargs) if dtype.kind == 'O': - res = np.round(res, 
decimals=15) + res = np.round(res, decimals=13) return res def _mean_wrap(self, value, *args, **kwargs): @@ -254,7 +254,7 @@ def _mean_wrap(self, value, *args, **kwargs): value = value.astype('c16') res = np.mean(value, *args, **kwargs) if dtype.kind == 'O': - res = np.round(res, decimals=15) + res = np.round(res, decimals=13) return res def test_nanmean(self):
This fixes issue #7354, where some `nanops` functions fail for 1-dimensional arrays for the argument `axis=0`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7437
2014-06-12T11:09:11Z
2014-06-12T12:59:15Z
null
2014-06-12T13:12:55Z
API: Improved inference of datetime/timedelta with mixed null objects. (GH7431)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 92e19ba43ccb7..04d859f9636dc 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -46,10 +46,8 @@ API changes day = offsets.Day(normalize=True) day.apply(Timestamp('2014-01-01 09:00')) - - - - +- Improved inference of datetime/timedelta with mixed null objects. Regression from 0.13.1 in interpretation of an object Index + with all null elements (:issue:`7431`) - Openpyxl now raises a ValueError on construction of the openpyxl writer instead of warning on pandas import (:issue:`7284`). diff --git a/pandas/core/common.py b/pandas/core/common.py index e9ae26d0c7c81..113beefeac974 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1782,24 +1782,81 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False): value.dtype == np.object_)): pass + # try to infer if we have a datetimelike here + # otherwise pass thru else: - # we might have a array (or single object) that is datetime like, - # and no dtype is passed don't change the value unless we find a - # datetime set - v = value - if not is_list_like(v): - v = [v] - if len(v): - inferred_type = lib.infer_dtype(v) - if inferred_type in ['datetime', 'datetime64']: - try: - value = tslib.array_to_datetime(np.array(v)) - except: - pass - elif inferred_type in ['timedelta', 'timedelta64']: - from pandas.tseries.timedeltas import \ - _possibly_cast_to_timedelta - value = _possibly_cast_to_timedelta(value, coerce='compat') + value = _possibly_infer_to_datetimelike(value) + + return value + +def _possibly_infer_to_datetimelike(value): + # we might have a array (or single object) that is datetime like, + # and no dtype is passed don't change the value unless we find a + # datetime/timedelta set + + # this is pretty strict in that a datetime/timedelta is REQUIRED + # in addition to possible nulls/string likes + + # ONLY strings are NOT datetimelike + + v = value + if not is_list_like(v): + v = [v] + if not isinstance(v, 
np.ndarray): + v = np.array(v) + shape = v.shape + if not v.ndim == 1: + v = v.ravel() + + if len(v): + + def _try_datetime(v): + # safe coerce to datetime64 + try: + return tslib.array_to_datetime(v, raise_=True).reshape(shape) + except: + return v + + def _try_timedelta(v): + # safe coerce to timedelta64 + + # will try first with a string & object conversion + from pandas.tseries.timedeltas import to_timedelta + try: + return to_timedelta(v).values.reshape(shape) + except: + + # this is for compat with numpy < 1.7 + # but string-likes will fail here + + from pandas.tseries.timedeltas import \ + _possibly_cast_to_timedelta + try: + return _possibly_cast_to_timedelta(v, coerce='compat').reshape(shape) + except: + return v + + # do a quick inference for perf + sample = v[:min(3,len(v))] + inferred_type = lib.infer_dtype(sample) + + if inferred_type in ['datetime', 'datetime64']: + value = _try_datetime(v) + elif inferred_type in ['timedelta', 'timedelta64']: + value = _try_timedelta(v) + + # its possible to have nulls intermixed within the datetime or timedelta + # these will in general have an inferred_type of 'mixed', so have to try + # both datetime and timedelta + + # try timedelta first to avoid spurious datetime conversions + # e.g. 
'00:00:01' is a timedelta but technically is also a datetime + elif inferred_type in ['mixed']: + + if lib.is_possible_datetimelike_array(_ensure_object(v)): + value = _try_timedelta(v) + if lib.infer_dtype(value) in ['mixed']: + value = _try_datetime(v) return value diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 52db14d43fe05..105c0c3985cc1 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -12,7 +12,8 @@ _NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like, ABCSparseSeries, _infer_dtype_from_scalar, _is_null_datelike_scalar, - is_timedelta64_dtype, is_datetime64_dtype,) + is_timedelta64_dtype, is_datetime64_dtype, + _possibly_infer_to_datetimelike) from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_convert_indices, _length_of_indexer) import pandas.core.common as com @@ -1807,26 +1808,21 @@ def make_block(values, placement, klass=None, ndim=None, elif issubclass(vtype, np.complexfloating): klass = ComplexBlock - # try to infer a DatetimeBlock, or set to an ObjectBlock else: + # we want to infer here if its a datetimelike if its object type + # this is pretty strict in that it requires a datetime/timedelta + # value IN addition to possible nulls/strings + # an array of ONLY strings will not be inferred if np.prod(values.shape): - flat = values.ravel() - - # try with just the first element; we just need to see if - # this is a datetime or not - inferred_type = lib.infer_dtype(flat[0:1]) - if inferred_type in ['datetime', 'datetime64']: - - # we have an object array that has been inferred as - # datetime, so convert it - try: - values = tslib.array_to_datetime( - flat).reshape(values.shape) - if issubclass(values.dtype.type, np.datetime64): - klass = DatetimeBlock - except: # it already object, so leave it - pass + result = _possibly_infer_to_datetimelike(values) + vtype = result.dtype.type + if issubclass(vtype, np.datetime64): + klass = DatetimeBlock + values = result + elif 
(issubclass(vtype, np.timedelta64)): + klass = TimeDeltaBlock + values = result if klass is None: klass = ObjectBlock @@ -2525,7 +2521,7 @@ def _consolidate_inplace(self): self._known_consolidated = True self._rebuild_blknos_and_blklocs() - def get(self, item): + def get(self, item, fastpath=True): """ Return values for selected item (ndarray or BlockManager). """ @@ -2543,7 +2539,7 @@ def get(self, item): else: raise ValueError("cannot label index with a null key") - return self.iget(loc) + return self.iget(loc, fastpath=fastpath) else: if isnull(item): @@ -2553,8 +2549,25 @@ def get(self, item): return self.reindex_indexer(new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True) - def iget(self, i): - return self.blocks[self._blknos[i]].iget(self._blklocs[i]) + def iget(self, i, fastpath=True): + """ + Return the data as a SingleBlockManager if fastpath=True and possible + + Otherwise return as a ndarray + + """ + + block = self.blocks[self._blknos[i]] + values = block.iget(self._blklocs[i]) + if not fastpath or block.is_sparse or values.ndim != 1: + return values + + # fastpath shortcut for select a single-dim from a 2-dim BM + return SingleBlockManager([ block.make_block_same_class(values, + placement=slice(0, len(values)), + fastpath=True) ], + self.axes[1]) + def get_scalar(self, tup): """ diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index abf7905f4d904..a6bd94153c3bd 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -4,8 +4,8 @@ import os import numpy as np - -from pandas import Series, DataFrame, DatetimeIndex, Timestamp +import nose +from pandas import Series, DataFrame, DatetimeIndex, Timestamp, _np_version_under1p7 import pandas as pd read_json = pd.read_json @@ -600,11 +600,29 @@ def test_url(self): for c in ['created_at', 'closed_at', 'updated_at']: self.assertEqual(result[c].dtype, 'datetime64[ns]') - def test_default_handler(self): 
+ def test_timedelta(self): + if _np_version_under1p7: + raise nose.SkipTest("numpy < 1.7") + from datetime import timedelta + converter = lambda x: pd.to_timedelta(x,unit='ms') + + s = Series([timedelta(23), timedelta(seconds=5)]) + self.assertEqual(s.dtype,'timedelta64[ns]') + assert_series_equal(s, pd.read_json(s.to_json(),typ='series').apply(converter)) + frame = DataFrame([timedelta(23), timedelta(seconds=5)]) + self.assertEqual(frame[0].dtype,'timedelta64[ns]') + assert_frame_equal( + frame, pd.read_json(frame.to_json()).apply(converter)) + + def test_default_handler(self): + from datetime import timedelta + + frame = DataFrame([timedelta(23), timedelta(seconds=5), 42]) self.assertRaises(OverflowError, frame.to_json) - expected = DataFrame([str(timedelta(23)), str(timedelta(seconds=5))]) + + expected = DataFrame([str(timedelta(23)), str(timedelta(seconds=5)), 42]) assert_frame_equal( expected, pd.read_json(frame.to_json(default_handler=str))) diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 19c1fc7522961..bd23135be3d1e 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -172,6 +172,27 @@ def infer_dtype_list(list values): pass +def is_possible_datetimelike_array(object arr): + # determine if we have a possible datetimelike (or null-like) array + cdef: + Py_ssize_t i, n = len(arr) + bint seen_timedelta = 0, seen_datetime = 0 + object v + + for i in range(n): + v = arr[i] + if util.is_string_object(v): + continue + elif util._checknull(v): + continue + elif is_datetime(v): + seen_datetime=1 + elif is_timedelta(v): + seen_timedelta=1 + else: + return False + return seen_datetime or seen_timedelta + cdef inline bint is_null_datetimelike(v): # determine if we have a null for a timedelta/datetime (or integer versions)x if util._checknull(v): @@ -331,61 +352,84 @@ def is_unicode_array(ndarray values): def is_datetime_array(ndarray[object] values): - cdef int i, n = len(values) + cdef int i, null_count = 0, n = len(values) 
cdef object v if n == 0: return False + + # return False for all nulls for i in range(n): v = values[i] - if not (is_datetime(v) or is_null_datetimelike(v)): + if is_null_datetimelike(v): + # we are a regular null + if util._checknull(v): + null_count += 1 + elif not is_datetime(v): return False - return True - + return null_count != n def is_datetime64_array(ndarray values): - cdef int i, n = len(values) + cdef int i, null_count = 0, n = len(values) cdef object v if n == 0: return False + + # return False for all nulls for i in range(n): v = values[i] - if not (util.is_datetime64_object(v) or is_null_datetimelike(v)): + if is_null_datetimelike(v): + # we are a regular null + if util._checknull(v): + null_count += 1 + elif not util.is_datetime64_object(v): return False - return True + return null_count != n def is_timedelta_array(ndarray values): - cdef int i, n = len(values) + cdef int i, null_count = 0, n = len(values) cdef object v if n == 0: return False for i in range(n): v = values[i] - if not (PyDelta_Check(v) or is_null_datetimelike(v)): + if is_null_datetimelike(v): + # we are a regular null + if util._checknull(v): + null_count += 1 + elif not PyDelta_Check(v): return False - return True + return null_count != n def is_timedelta64_array(ndarray values): - cdef int i, n = len(values) + cdef int i, null_count = 0, n = len(values) cdef object v if n == 0: return False for i in range(n): v = values[i] - if not (util.is_timedelta64_object(v) or is_null_datetimelike(v)): + if is_null_datetimelike(v): + # we are a regular null + if util._checknull(v): + null_count += 1 + elif not util.is_timedelta64_object(v): return False - return True + return null_count != n def is_timedelta_or_timedelta64_array(ndarray values): """ infer with timedeltas and/or nat/none """ - cdef int i, n = len(values) + cdef int i, null_count = 0, n = len(values) cdef object v if n == 0: return False for i in range(n): v = values[i] - if not (is_timedelta(v) or is_null_datetimelike(v)): + 
if is_null_datetimelike(v): + # we are a regular null + if util._checknull(v): + null_count += 1 + elif not is_timedelta(v): return False - return True + return null_count != n def is_date_array(ndarray[object] values): cdef int i, n = len(values) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 5962e2447fbc9..e8308c09cef90 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -356,7 +356,9 @@ def test_get_scalar(self): for item in self.mgr.items: for i, index in enumerate(self.mgr.axes[1]): res = self.mgr.get_scalar((item, index)) - exp = self.mgr.get(item)[i] + exp = self.mgr.get(item, fastpath=False)[i] + assert_almost_equal(res, exp) + exp = self.mgr.get(item).values[i] assert_almost_equal(res, exp) def test_get(self): @@ -366,19 +368,22 @@ def test_get(self): placement=np.arange(3)) mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)]) - assert_almost_equal(mgr.get('a'), values[0]) - assert_almost_equal(mgr.get('b'), values[1]) - assert_almost_equal(mgr.get('c'), values[2]) + assert_almost_equal(mgr.get('a', fastpath=False), values[0]) + assert_almost_equal(mgr.get('b', fastpath=False), values[1]) + assert_almost_equal(mgr.get('c', fastpath=False), values[2]) + assert_almost_equal(mgr.get('a').values, values[0]) + assert_almost_equal(mgr.get('b').values, values[1]) + assert_almost_equal(mgr.get('c').values, values[2]) def test_set(self): mgr = create_mgr('a,b,c: int', item_shape=(3,)) mgr.set('d', np.array(['foo'] * 3)) mgr.set('b', np.array(['bar'] * 3)) - assert_almost_equal(mgr.get('a'), [0] * 3) - assert_almost_equal(mgr.get('b'), ['bar'] * 3) - assert_almost_equal(mgr.get('c'), [2] * 3) - assert_almost_equal(mgr.get('d'), ['foo'] * 3) + assert_almost_equal(mgr.get('a').values, [0] * 3) + assert_almost_equal(mgr.get('b').values, ['bar'] * 3) + assert_almost_equal(mgr.get('c').values, [2] * 3) + assert_almost_equal(mgr.get('d').values, ['foo'] * 3) def test_insert(self): 
self.mgr.insert(0, 'inserted', np.arange(N)) @@ -580,10 +585,14 @@ def test_reindex_items(self): reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0) self.assertEqual(reindexed.nblocks, 2) assert_almost_equal(reindexed.items, ['g', 'c', 'a', 'd']) - assert_almost_equal(mgr.get('g'), reindexed.get('g')) - assert_almost_equal(mgr.get('c'), reindexed.get('c')) - assert_almost_equal(mgr.get('a'), reindexed.get('a')) - assert_almost_equal(mgr.get('d'), reindexed.get('d')) + assert_almost_equal(mgr.get('g',fastpath=False), reindexed.get('g',fastpath=False)) + assert_almost_equal(mgr.get('c',fastpath=False), reindexed.get('c',fastpath=False)) + assert_almost_equal(mgr.get('a',fastpath=False), reindexed.get('a',fastpath=False)) + assert_almost_equal(mgr.get('d',fastpath=False), reindexed.get('d',fastpath=False)) + assert_almost_equal(mgr.get('g').values, reindexed.get('g').values) + assert_almost_equal(mgr.get('c').values, reindexed.get('c').values) + assert_almost_equal(mgr.get('a').values, reindexed.get('a').values) + assert_almost_equal(mgr.get('d').values, reindexed.get('d').values) def test_multiindex_xs(self): mgr = create_mgr('a,b,c: f8; d,e,f: i8') @@ -608,16 +617,19 @@ def test_get_numeric_data(self): numeric = mgr.get_numeric_data() assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool']) - assert_almost_equal(mgr.get('float'), numeric.get('float')) + assert_almost_equal(mgr.get('float',fastpath=False), numeric.get('float',fastpath=False)) + assert_almost_equal(mgr.get('float').values, numeric.get('float').values) # Check sharing numeric.set('float', np.array([100., 200., 300.])) - assert_almost_equal(mgr.get('float'), np.array([100., 200., 300.])) + assert_almost_equal(mgr.get('float',fastpath=False), np.array([100., 200., 300.])) + assert_almost_equal(mgr.get('float').values, np.array([100., 200., 300.])) numeric2 = mgr.get_numeric_data(copy=True) assert_almost_equal(numeric.items, ['int', 'float', 'complex', 'bool']) numeric2.set('float', 
np.array([1000., 2000., 3000.])) - assert_almost_equal(mgr.get('float'), np.array([100., 200., 300.])) + assert_almost_equal(mgr.get('float',fastpath=False), np.array([100., 200., 300.])) + assert_almost_equal(mgr.get('float').values, np.array([100., 200., 300.])) def test_get_bool_data(self): mgr = create_mgr('int: int; float: float; complex: complex;' @@ -627,15 +639,18 @@ def test_get_bool_data(self): bools = mgr.get_bool_data() assert_almost_equal(bools.items, ['bool']) - assert_almost_equal(mgr.get('bool'), bools.get('bool')) + assert_almost_equal(mgr.get('bool',fastpath=False), bools.get('bool',fastpath=False)) + assert_almost_equal(mgr.get('bool').values, bools.get('bool').values) bools.set('bool', np.array([True, False, True])) - assert_almost_equal(mgr.get('bool'), [True, False, True]) + assert_almost_equal(mgr.get('bool',fastpath=False), [True, False, True]) + assert_almost_equal(mgr.get('bool').values, [True, False, True]) # Check sharing bools2 = mgr.get_bool_data(copy=True) bools2.set('bool', np.array([False, True, False])) - assert_almost_equal(mgr.get('bool'), [True, False, True]) + assert_almost_equal(mgr.get('bool',fastpath=False), [True, False, True]) + assert_almost_equal(mgr.get('bool').values, [True, False, True]) def test_unicode_repr_doesnt_raise(self): str_repr = repr(create_mgr(u('b,\u05d0: object'))) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 85e451541d39c..2e3a9d922bb47 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -581,6 +581,12 @@ def test_constructor_pass_none(self): s = Series(None, index=lrange(5), dtype=object) self.assertEqual(s.dtype, np.object_) + # GH 7431 + # inference on the index + s = Series(index=np.array([None])) + expected = Series(index=Index([None])) + assert_series_equal(s,expected) + def test_constructor_cast(self): self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float) @@ -669,6 +675,16 @@ def test_constructor_dtype_datetime64(self): 
self.assert_numpy_array_equal(series1.values,dates2) self.assertEqual(series1.dtype,object) + # these will correctly infer a datetime + s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001']) + self.assertEqual(s.dtype,'datetime64[ns]') + s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001']) + self.assertEqual(s.dtype,'datetime64[ns]') + s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001']) + self.assertEqual(s.dtype,'datetime64[ns]') + s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001']) + self.assertEqual(s.dtype,'datetime64[ns]') + def test_constructor_dict(self): d = {'a': 0., 'b': 1., 'c': 2.} result = Series(d, index=['b', 'c', 'd', 'a']) @@ -2462,6 +2478,18 @@ def f(): td = Series([timedelta(days=i) for i in range(3)] + ['foo']) self.assertEqual(td.dtype, 'object') + # these will correctly infer a timedelta + # but only on numpy > 1.7 as the cython path will only be used + if not _np_version_under1p7: + s = Series([None, pd.NaT, '1 Day']) + self.assertEqual(s.dtype,'timedelta64[ns]') + s = Series([np.nan, pd.NaT, '1 Day']) + self.assertEqual(s.dtype,'timedelta64[ns]') + s = Series([pd.NaT, None, '1 Day']) + self.assertEqual(s.dtype,'timedelta64[ns]') + s = Series([pd.NaT, np.nan, '1 Day']) + self.assertEqual(s.dtype,'timedelta64[ns]') + def test_operators_timedelta64(self): # invalid ops @@ -2939,12 +2967,12 @@ def test_datetime64_fillna(self): # GH 6587 # make sure that we are treating as integer when filling + # this also tests inference of a datetime-like with NaT's s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001']) expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]') result = s.fillna(method='backfill') assert_series_equal(result, expected) - def test_fillna_int(self): s = Series(np.random.randint(-100, 100, 50)) s.fillna(method='ffill', inplace=True) diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index 64bf577f12c9f..66d5dcc72d776 
100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -658,6 +658,13 @@ def test_to_object_array_tuples(self): except ImportError: pass + def test_object(self): + + # GH 7431 + # cannot infer more than this as only a single element + arr = np.array([None],dtype='O') + result = lib.infer_dtype(arr) + self.assertEqual(result, 'mixed') class TestMoments(tm.TestCase): pass diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index c8f62a731d32b..62b43cc0b189a 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -58,7 +58,7 @@ def _skip_if_has_locale(): lang, _ = locale.getlocale() if lang is not None: raise nose.SkipTest("Specific locale is set {0}".format(lang)) - + def _skip_if_windows_python_3(): if sys.version_info > (3,) and sys.platform == 'win32': raise nose.SkipTest("not used on python 3/win32") diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index b812c0637b0ad..1dc8b5cfea132 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -9,7 +9,7 @@ import pandas.tslib as tslib from pandas import compat, _np_version_under1p7 from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype, is_timedelta64_dtype, - _values_from_object, is_list_like, isnull) + _values_from_object, is_list_like, isnull, _ensure_object) repr_timedelta = tslib.repr_timedelta64 repr_timedelta64 = tslib.repr_timedelta64 @@ -46,7 +46,7 @@ def _convert_listlike(arg, box, unit): value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]') else: try: - value = tslib.array_to_timedelta64(_ensure_object(arg),unit=unit) + value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit) except: value = np.array([ _coerce_scalar_to_timedelta_type(r, unit=unit) for r in arg ]) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 62e3b120c9d64..c36d34b2199d8 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx 
@@ -1326,7 +1326,7 @@ def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, return oresult -def array_to_timedelta64(ndarray[object] values, coerce=False): +def array_to_timedelta64(ndarray[object] values, unit='ns', coerce=False): """ convert an ndarray to an array of ints that are timedeltas force conversion if coerce = True, else will raise if cannot convert """ @@ -1339,7 +1339,7 @@ def array_to_timedelta64(ndarray[object] values, coerce=False): iresult = result.view('i8') for i in range(n): - result[i] = convert_to_timedelta64(values[i], 'ns', coerce) + result[i] = convert_to_timedelta64(values[i], unit, coerce) return iresult def convert_to_timedelta(object ts, object unit='ns', coerce=False): @@ -1363,16 +1363,16 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce): # handle the numpy < 1.7 case """ if _checknull_with_nat(ts): - ts = np.timedelta64(iNaT) + return np.timedelta64(iNaT) elif util.is_datetime64_object(ts): # only accept a NaT here if ts.astype('int64') == iNaT: - ts = np.timedelta64(iNaT) + return np.timedelta64(iNaT) elif isinstance(ts, np.timedelta64): ts = ts.astype("m8[{0}]".format(unit.lower())) elif is_integer_object(ts): if ts == iNaT: - ts = np.timedelta64(iNaT) + return np.timedelta64(iNaT) else: if util.is_array(ts): ts = ts.astype('int64').item() @@ -1381,6 +1381,11 @@ cdef inline convert_to_timedelta64(object ts, object unit, object coerce): ts = timedelta(microseconds=ts/1000.0) else: ts = np.timedelta64(ts) + elif util.is_string_object(ts): + if ts in _nat_strings or coerce: + return np.timedelta64(iNaT) + else: + raise ValueError("Invalid type for timedelta scalar: %s" % type(ts)) if _np_version_under1p7: if not isinstance(ts, timedelta):
- regression from 0.13.1 in interpretation of an object Index - additional tests to validate datetimelike inferences closes #7431
https://api.github.com/repos/pandas-dev/pandas/pulls/7435
2014-06-11T18:51:00Z
2014-06-13T15:22:05Z
2014-06-13T15:22:05Z
2014-06-13T15:26:37Z
ENH: select_dypes impl
diff --git a/doc/source/api.rst b/doc/source/api.rst index bc257ffa0ad6c..c3cccca3251e4 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -524,6 +524,7 @@ Attributes and underlying data DataFrame.ftypes DataFrame.get_dtype_counts DataFrame.get_ftype_counts + DataFrame.select_dtypes DataFrame.values DataFrame.axes DataFrame.ndim diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 1979b180b71b9..ec8456089f452 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1552,3 +1552,84 @@ While float dtypes are unchanged. casted = dfa[df2>0] casted casted.dtypes + +Selecting columns based on ``dtype`` +------------------------------------ + +.. _basics.selectdtypes: + +.. versionadded:: 0.14.1 + +The :meth:`~pandas.DataFrame.select_dtypes` method implements subsetting of columns +based on their ``dtype``. + +First, let's create a :class:`~pandas.DataFrame` with a slew of different +dtypes: + +.. ipython:: python + + df = DataFrame({'string': list('abc'), + 'int64': list(range(1, 4)), + 'uint8': np.arange(3, 6).astype('u1'), + 'float64': np.arange(4.0, 7.0), + 'bool1': [True, False, True], + 'bool2': [False, True, False], + 'dates': pd.date_range('now', periods=3).values}) + df['tdeltas'] = df.dates.diff() + df['uint64'] = np.arange(3, 6).astype('u8') + df['other_dates'] = pd.date_range('20130101', periods=3).values + df + + +``select_dtypes`` has two parameters ``include`` and ``exclude`` that allow you to +say "give me the columns WITH these dtypes" (``include``) and/or "give the +columns WITHOUT these dtypes" (``exclude``). + +For example, to select ``bool`` columns + +.. ipython:: python + + df.select_dtypes(include=[bool]) + +You can also pass the name of a dtype in the `numpy dtype hierarchy +<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__: + +.. ipython:: python + + df.select_dtypes(include=['bool']) + +:meth:`~pandas.DataFrame.select_dtypes` also works with generic dtypes as well. 
+ +For example, to select all numeric and boolean columns while excluding unsigned +integers + +.. ipython:: python + + df.select_dtypes(include=['number', 'bool'], exclude=['unsignedinteger']) + +To select string columns you must use the ``object`` dtype: + +.. ipython:: python + + df.select_dtypes(include=['object']) + +To see all the child dtypes of a generic ``dtype`` like ``numpy.number`` you +can define a function that returns a tree of child dtypes: + +.. ipython:: python + + def subdtypes(dtype): + subs = dtype.__subclasses__() + if not subs: + return dtype + return [dtype, [subdtypes(dt) for dt in subs]] + +All numpy dtypes are subclasses of ``numpy.generic``: + +.. ipython:: python + + subdtypes(np.generic) + +.. note:: + + The ``include`` and ``exclude`` parameters must be non-string sequences. diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0226e5041639a..79f6a918a0e93 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -91,6 +91,8 @@ Enhancements +- Add :meth:`~pandas.DataFrame.select_dtypes` method to allow selection of + columns based on dtype (:issue:`7316`). See :ref:`the docs <basics.selectdtypes>`. 
diff --git a/pandas/core/common.py b/pandas/core/common.py index 8791dcc124a6e..bb7f43511e905 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1603,6 +1603,66 @@ def _get_fill_func(method): #---------------------------------------------------------------------- # Lots of little utilities +def _validate_date_like_dtype(dtype): + try: + typ = np.datetime_data(dtype)[0] + except ValueError as e: + raise TypeError('%s' % e) + if typ != 'generic' and typ != 'ns': + raise ValueError('%r is too specific of a frequency, try passing %r' + % (dtype.name, dtype.type.__name__)) + + +def _invalidate_string_dtypes(dtype_set): + """Change string like dtypes to object for ``DataFrame.select_dtypes()``.""" + non_string_dtypes = dtype_set - _string_dtypes + if non_string_dtypes != dtype_set: + raise TypeError("string dtypes are not allowed, use 'object' instead") + + +def _get_dtype_from_object(dtype): + """Get a numpy dtype.type-style object. + + Notes + ----- + If nothing can be found, returns ``object``. 
+ """ + # type object from a dtype + if isinstance(dtype, type) and issubclass(dtype, np.generic): + return dtype + elif isinstance(dtype, np.dtype): # dtype object + try: + _validate_date_like_dtype(dtype) + except TypeError: + # should still pass if we don't have a datelike + pass + return dtype.type + elif isinstance(dtype, compat.string_types): + if dtype == 'datetime' or dtype == 'timedelta': + dtype += '64' + try: + return _get_dtype_from_object(getattr(np, dtype)) + except AttributeError: + # handles cases like _get_dtype(int) + # i.e., python objects that are valid dtypes (unlike user-defined + # types, in general) + pass + return _get_dtype_from_object(np.dtype(dtype)) + + +_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type, + compat.text_type))) + + +def _get_info_slice(obj, indexer): + """Slice the info axis of `obj` with `indexer`.""" + if not hasattr(obj, '_info_axis_number'): + raise TypeError('object of type %r has no info axis' % + type(obj).__name__) + slices = [slice(None)] * obj.ndim + slices[obj._info_axis_number] = indexer + return tuple(slices) + def _maybe_box(indexer, values, obj, key): @@ -1613,6 +1673,7 @@ def _maybe_box(indexer, values, obj, key): # return the value return values + def _maybe_box_datetimelike(value): # turn a datetime like into a Timestamp/timedelta as needed @@ -1797,6 +1858,7 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False): return value + def _possibly_infer_to_datetimelike(value): # we might have a array (or single object) that is datetime like, # and no dtype is passed don't change the value unless we find a diff --git a/pandas/core/frame.py b/pandas/core/frame.py index da9fb44f80b09..413f3daa52a52 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -12,6 +12,7 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0212,W0231,W0703,W0622 +import functools import collections import itertools import sys @@ -25,19 +26,18 @@ from pandas.core.common import (isnull, notnull, 
PandasError, _try_sort, _default_index, _maybe_upcast, _is_sequence, _infer_dtype_from_scalar, _values_from_object, - is_list_like) + is_list_like, _get_dtype) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_droplevels, _convert_to_index_sliceable, - _check_bool_indexer, _maybe_convert_indices) + _check_bool_indexer) from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series import pandas.computation.expressions as expressions from pandas.computation.eval import eval as _eval -from pandas.computation.scope import _ensure_scope from numpy import percentile as _quantile from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) @@ -1867,6 +1867,118 @@ def eval(self, expr, **kwargs): kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers return _eval(expr, **kwargs) + def select_dtypes(self, include=None, exclude=None): + """Return a subset of a DataFrame including/excluding columns based on + their ``dtype``. + + Parameters + ---------- + include, exclude : list-like + A list of dtypes or strings to be included/excluded. You must pass + in a non-empty sequence for at least one of these. + + Raises + ------ + ValueError + * If both of ``include`` and ``exclude`` are empty + * If ``include`` and ``exclude`` have overlapping elements + * If any kind of string dtype is passed in. + TypeError + * If either of ``include`` or ``exclude`` is not a sequence + + Returns + ------- + subset : DataFrame + The subset of the frame including the dtypes in ``include`` and + excluding the dtypes in ``exclude``. 
+ + Notes + ----- + * To select all *numeric* types use the numpy dtype ``numpy.number`` + * To select strings you must use the ``object`` dtype, but note that + this will return *all* object dtype columns + * See the `numpy dtype hierarchy + <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ + + Examples + -------- + >>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'), + ... 'b': [True, False] * 3, + ... 'c': [1.0, 2.0] * 3}) + >>> df + a b c + 0 0.3962 True 1 + 1 0.1459 False 2 + 2 0.2623 True 1 + 3 0.0764 False 2 + 4 -0.9703 True 1 + 5 -1.2094 False 2 + >>> df.select_dtypes(include=['float64']) + c + 0 1 + 1 2 + 2 1 + 3 2 + 4 1 + 5 2 + >>> df.select_dtypes(exclude=['floating']) + b + 0 True + 1 False + 2 True + 3 False + 4 True + 5 False + """ + include, exclude = include or (), exclude or () + if not (com.is_list_like(include) and com.is_list_like(exclude)): + raise TypeError('include and exclude must both be non-string' + ' sequences') + selection = tuple(map(frozenset, (include, exclude))) + + if not any(selection): + raise ValueError('at least one of include or exclude must be ' + 'nonempty') + + # convert the myriad valid dtypes object to a single representation + include, exclude = map(lambda x: + frozenset(map(com._get_dtype_from_object, x)), + selection) + for dtypes in (include, exclude): + com._invalidate_string_dtypes(dtypes) + + # can't both include AND exclude! + if not include.isdisjoint(exclude): + raise ValueError('include and exclude overlap on %s' + % (include & exclude)) + + # empty include/exclude -> defaults to True + # three cases (we've already raised if both are empty) + # case 1: empty include, nonempty exclude + # we have True, True, ... 
True for include, same for exclude + # in the loop below we get the excluded + # and when we call '&' below we get only the excluded + # case 2: nonempty include, empty exclude + # same as case 1, but with include + # case 3: both nonempty + # the "union" of the logic of case 1 and case 2: + # we get the included and excluded, and return their logical and + include_these = Series(not bool(include), index=self.columns) + exclude_these = Series(not bool(exclude), index=self.columns) + + def is_dtype_instance_mapper(column, dtype): + return column, functools.partial(issubclass, dtype.type) + + for column, f in itertools.starmap(is_dtype_instance_mapper, + self.dtypes.iteritems()): + if include: # checks for the case of empty include or exclude + include_these[column] = any(map(f, include)) + if exclude: + exclude_these[column] = not any(map(f, exclude)) + + dtype_indexer = include_these & exclude_these + return self.loc[com._get_info_slice(self, dtype_indexer)] + def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index d7f8d235d4229..dab61af2f6de7 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -12996,6 +12996,123 @@ def test_set_index_names(self): # Check equality tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2) + def test_select_dtypes_include(self): + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0), + 'e': [True, False, True]}) + ri = df.select_dtypes(include=[np.number]) + ei = df[['b', 'c', 'd']] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_exclude(self): + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0), + 'e': [True, False, True]}) + re = df.select_dtypes(exclude=[np.number]) + ee = df[['a', 'e']] + tm.assert_frame_equal(re, 
ee) + + def test_select_dtypes_exclude_include(self): + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0), + 'e': [True, False, True], + 'f': pd.date_range('now', periods=3).values}) + exclude = np.datetime64, + include = np.bool_, 'integer' + r = df.select_dtypes(include=include, exclude=exclude) + e = df[['b', 'c', 'e']] + tm.assert_frame_equal(r, e) + + exclude = 'datetime', + include = 'bool', 'int' + r = df.select_dtypes(include=include, exclude=exclude) + e = df[['b', 'e']] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0), + 'e': [True, False, True], + 'f': pd.date_range('now', periods=3).values}) + df['g'] = df.f.diff() + assert not hasattr(np, 'u8') + r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta']) + e = df[['a', 'b']] + tm.assert_frame_equal(r, e) + + r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]']) + e = df[['a', 'b', 'g']] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_empty(self): + df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))}) + with tm.assertRaisesRegexp(ValueError, 'at least one of include or ' + 'exclude must be nonempty'): + df.select_dtypes() + + def test_select_dtypes_raises_on_string(self): + df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))}) + with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'): + df.select_dtypes(include='object') + with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'): + df.select_dtypes(exclude='object') + with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'): + df.select_dtypes(include=int, exclude='object') + + def test_select_dtypes_bad_datetime64(self): + df = DataFrame({'a': list('abc'), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 
7.0), + 'e': [True, False, True], + 'f': pd.date_range('now', periods=3).values}) + with tm.assertRaisesRegexp(ValueError, '.+ is too specific'): + df.select_dtypes(include=['datetime64[D]']) + + with tm.assertRaisesRegexp(ValueError, '.+ is too specific'): + df.select_dtypes(exclude=['datetime64[as]']) + + def test_select_dtypes_str_raises(self): + df = DataFrame({'a': list('abc'), + 'g': list(u('abc')), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0), + 'e': [True, False, True], + 'f': pd.date_range('now', periods=3).values}) + string_dtypes = set((str, 'str', np.string_, 'S1', + 'unicode', np.unicode_, 'U1')) + try: + string_dtypes.add(unicode) + except NameError: + pass + for dt in string_dtypes: + with tm.assertRaisesRegexp(TypeError, + 'string dtypes are not allowed'): + df.select_dtypes(include=[dt]) + with tm.assertRaisesRegexp(TypeError, + 'string dtypes are not allowed'): + df.select_dtypes(exclude=[dt]) + + def test_select_dtypes_bad_arg_raises(self): + df = DataFrame({'a': list('abc'), + 'g': list(u('abc')), + 'b': list(range(1, 4)), + 'c': np.arange(3, 6).astype('u1'), + 'd': np.arange(4.0, 7.0), + 'e': [True, False, True], + 'f': pd.date_range('now', periods=3).values}) + with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'): + df.select_dtypes(['blargy, blarg, blarg']) + def skip_if_no_ne(engine='numexpr'): if engine == 'numexpr': @@ -13931,6 +14048,7 @@ def test_query_string_scalar_variable(self): for parser, engine in product(['pandas'], ENGINES): yield self.check_query_string_scalar_variable, parser, engine + class TestDataFrameEvalNumExprPandas(tm.TestCase): @classmethod
closes #7316 examples: ``` In [8]: paste df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0), 'e': [True, False, True], 'f': [False, True, False], 'g': pd.date_range('now', periods=3).values}) df['h'] = df.g.diff() df['i'] = np.arange(3, 6).astype('u8') df['j'] = pd.date_range('20130101', periods=3).values df ## -- End pasted text -- Out[8]: a b c d e f g h i j 0 a 1 3 4 True False 2014-06-22 23:40:53 NaT 3 2013-01-01 1 b 2 4 5 False True 2014-06-23 23:40:53 1 days 4 2013-01-02 2 c 3 5 6 True False 2014-06-24 23:40:53 1 days 5 2013-01-03 In [9]: paste df.select_type(include=[bool]) ## -- End pasted text -- Out[9]: e f 0 True False 1 False True 2 True False In [10]: paste df.select_type(include=['number', 'bool'], exclude=['unsignedinteger']) ## -- End pasted text -- Out[10]: b d e f h 0 1 4 True False NaT 1 2 5 False True 1 days 2 3 6 True False 1 days In [11]: np.timedelta64.mro() # this is an integer type Out[11]: [numpy.timedelta64, numpy.signedinteger, numpy.integer, numpy.number, numpy.generic, object] In [13]: paste df.select_type(include=['object']) ## -- End pasted text -- Out[13]: a 0 a 1 b 2 c ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7434
2014-06-11T18:22:54Z
2014-07-07T16:22:20Z
2014-07-07T16:22:20Z
2014-07-09T20:53:14Z
BUG: Bug in multi-index slicing with datetimelike ranges (strings and Timestamps) (GH7429)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 6e3b23898d08f..266180f1867d1 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -127,15 +127,6 @@ Enhancements Performance ~~~~~~~~~~~ - - - - - - - - - - Improvements in dtype inference for numeric operations involving yielding performance gains for dtypes: ``int64``, ``timedelta64``, ``datetime64`` (:issue:`7223`) @@ -166,13 +157,13 @@ Bug Fixes -- BUG in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`) -- BUG in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`) - +- Bug in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`) +- Bug in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`) +- Bug in multi-index slicing with datetimelike ranges (strings and Timestamps), (:issue:`7429`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 8bf7a3db78b31..23837a4bc63b9 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -12,7 +12,6 @@ import pandas.index as _index from pandas.lib import Timestamp, is_datetime_array from pandas.core.base import FrozenList, FrozenNDArray, IndexOpsMixin - from pandas.util.decorators import cache_readonly, deprecate from pandas.core.common import isnull, array_equivalent import pandas.core.common as com @@ -3532,7 +3531,16 @@ def _get_level_indexer(self, key, level=0): stop = level_index.get_loc(key.stop or len(level_index)-1) step = key.step - if level > 0 or self.lexsort_depth == 0 or step is not None: + if isinstance(start,slice) or isinstance(stop,slice): + # we have a slice for start and/or stop + # a partial date slicer on a DatetimeIndex generates a slice + # note that the stop ALREADY includes the stopped point (if + # it was a string sliced) + m = np.zeros(len(labels),dtype=bool) + m[np.in1d(labels,np.arange(start.start,stop.stop,step))] = True + return m + + elif level > 0 or self.lexsort_depth == 0 or step is not None: # need to have 
like semantics here to right # searching as when we are using a slice # so include the stop+1 (so we include stop) @@ -3571,6 +3579,8 @@ def get_locs(self, tup): for passing to iloc """ + from pandas.core.indexing import _is_null_slice + # must be lexsorted to at least as many levels if not self.is_lexsorted_for_tuple(tup): raise KeyError('MultiIndex Slicing requires the index to be fully lexsorted' @@ -3598,10 +3608,12 @@ def _convert_indexer(r): ranges.append(reduce( np.logical_or,[ _convert_indexer(self._get_level_indexer(x, level=i) ) for x in k ])) - elif k == slice(None): - # include all from this level + elif _is_null_slice(k): + # empty slice pass + elif isinstance(k,slice): + # a slice, include BOTH of the labels ranges.append(self._get_level_indexer(k,level=i)) else: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 45262575dcb37..c4550a18492cb 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1635,16 +1635,12 @@ def _maybe_convert_ix(*args): def _is_nested_tuple(tup, labels): # check for a compatiable nested tuple and multiindexes among the axes - if not isinstance(tup, tuple): return False # are we nested tuple of: tuple,list,slice for i, k in enumerate(tup): - #if i > len(axes): - # raise IndexingError("invalid indxing tuple passed, has too many indexers for this object") - #ax = axes[i] if isinstance(k, (tuple, list, slice)): return isinstance(labels, MultiIndex) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 1945236f4efe8..c074c4333a774 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1565,6 +1565,35 @@ def test_multiindex_slicers_non_unique(self): self.assertFalse(result.index.is_unique) assert_frame_equal(result, expected) + def test_multiindex_slicers_datetimelike(self): + + # GH 7429 + # buggy/inconsistent behavior when slicing with datetime-like + import datetime + dates = [datetime.datetime(2012,1,1,12,12,12) + datetime.timedelta(days=i) 
for i in range(6)] + freq = [1,2] + index = MultiIndex.from_product([dates,freq], names=['date','frequency']) + + df = DataFrame(np.arange(6*2*4,dtype='int64').reshape(-1,4),index=index,columns=list('ABCD')) + + # multi-axis slicing + idx = pd.IndexSlice + expected = df.iloc[[0,2,4],[0,1]] + result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),Timestamp('2012-01-03 12:12:12')),slice(1,1)), slice('A','B')] + assert_frame_equal(result,expected) + + result = df.loc[(idx[Timestamp('2012-01-01 12:12:12'):Timestamp('2012-01-03 12:12:12')],idx[1:1]), slice('A','B')] + assert_frame_equal(result,expected) + + result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),Timestamp('2012-01-03 12:12:12')),1), slice('A','B')] + assert_frame_equal(result,expected) + + # with strings + result = df.loc[(slice('2012-01-01 12:12:12','2012-01-03 12:12:12'),slice(1,1)), slice('A','B')] + assert_frame_equal(result,expected) + + result = df.loc[(idx['2012-01-01 12:12:12':'2012-01-03 12:12:12'],1), idx['A','B']] + assert_frame_equal(result,expected) def test_per_axis_per_level_doc_examples(self):
closes #7429
https://api.github.com/repos/pandas-dev/pandas/pulls/7430
2014-06-11T14:02:04Z
2014-06-11T14:43:24Z
2014-06-11T14:43:24Z
2014-06-12T07:02:34Z
make nanops._maybe_null_out work with complex numbers ( issue #7353 )
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 86e73ccbde01c..c764d92376c33 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -213,7 +213,8 @@ Bug Fixes - Bug where bool objects were converted to ``nan`` in ``convert_objects`` (:issue:`7416`). - Bug in ``quantile`` ignoring the axis keyword argument (:issue`7306`) - +- Bug where ``nanops._maybe_null_out`` doesn't work with complex numbers + (:issue:`7353`) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b40334c1857ac..c0cb2e6ee6ceb 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -547,7 +547,10 @@ def _maybe_null_out(result, axis, mask): if axis is not None: null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 if null_mask.any(): - result = result.astype('f8') + if np.iscomplexobj(result): + result = result.astype('c16') + else: + result = result.astype('f8') result[null_mask] = np.nan else: null_mask = mask.size - mask.sum() diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index e704c95608494..d0df6c2dc3e8b 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -239,7 +239,6 @@ def test_nanall(self): def test_nansum(self): self.check_funs(nanops.nansum, np.sum, - allow_complex=False, allow_str=False, allow_date=False) def _nanmean_wrap(self, value, *args, **kwargs): @@ -291,13 +290,11 @@ def _minmax_wrap(self, value, axis=None, func=None): def test_nanmin(self): func = partial(self._minmax_wrap, func=np.min) self.check_funs(nanops.nanmin, func, - allow_complex=False, allow_str=False, allow_obj=False) def test_nanmax(self): func = partial(self._minmax_wrap, func=np.max) self.check_funs(nanops.nanmax, func, - allow_complex=False, allow_str=False, allow_obj=False) def _argminmax_wrap(self, value, axis=None, func=None): @@ -355,7 +352,6 @@ def test_nankurt(self): def test_nanprod(self): self.check_funs(nanops.nanprod, np.prod, - allow_complex=False, allow_str=False, allow_date=False) def 
check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
This fixes #7353 where `nanops._maybe_null_out`, and thus other functions that call on it, don't work with complex numbers.
https://api.github.com/repos/pandas-dev/pandas/pulls/7428
2014-06-11T13:17:42Z
2014-06-12T10:46:48Z
2014-06-12T10:46:48Z
2014-06-12T11:03:13Z
add unit tests for nanops
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 3055017a3148d..58338a47d9465 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -12,7 +12,6 @@ import pandas.core.common as com import pandas.util.testing as tm import pandas.core.config as cf -from pandas.core import nanops _multiprocess_can_split_ = True @@ -394,54 +393,6 @@ def test_ensure_int32(): assert(result.dtype == np.int32) -class TestEnsureNumeric(tm.TestCase): - def test_numeric_values(self): - # Test integer - self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int') - # Test float - self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float') - # Test complex - self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j, - 'Failed for complex') - - def test_ndarray(self): - # Test numeric ndarray - values = np.array([1, 2, 3]) - self.assertTrue(np.allclose(nanops._ensure_numeric(values), values), - 'Failed for numeric ndarray') - - # Test object ndarray - o_values = values.astype(object) - self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values), - 'Failed for object ndarray') - - # Test convertible string ndarray - s_values = np.array(['1', '2', '3'], dtype=object) - self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values), - 'Failed for convertible string ndarray') - - # Test non-convertible string ndarray - s_values = np.array(['foo', 'bar', 'baz'], dtype=object) - self.assertRaises(ValueError, - lambda: nanops._ensure_numeric(s_values)) - - def test_convertable_values(self): - self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0), - 'Failed for convertible integer string') - self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1), - 'Failed for convertible float string') - self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j), - 'Failed for convertible complex string') - - def test_non_convertable_values(self): - self.assertRaises(TypeError, - lambda: 
nanops._ensure_numeric('foo')) - self.assertRaises(TypeError, - lambda: nanops._ensure_numeric({})) - self.assertRaises(TypeError, - lambda: nanops._ensure_numeric([])) - - def test_ensure_platform_int(): # verify that when we create certain types of indices diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py new file mode 100644 index 0000000000000..a4497cb63b329 --- /dev/null +++ b/pandas/tests/test_nanops.py @@ -0,0 +1,726 @@ +from __future__ import division, print_function + +from functools import partial + +import numpy as np + +from pandas.core.common import isnull +import pandas.core.nanops as nanops +import pandas.util.testing as tm + +nanops._USE_BOTTLENECK = False + + +class TestnanopsDataFrame(tm.TestCase): + def setUp(self): + self.arr_shape = (11, 7, 5) + + self.arr_float = np.random.randn(*self.arr_shape) + self.arr_float1 = np.random.randn(*self.arr_shape) + self.arr_complex = self.arr_float + self.arr_float1*1j + self.arr_int = np.random.randint(-10, 10, self.arr_shape) + self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0 + self.arr_str = np.abs(self.arr_float).astype('S') + self.arr_utf = np.abs(self.arr_float).astype('U') + self.arr_date = np.random.randint(0, 20000, + self.arr_shape).astype('M8[ns]') + self.arr_tdelta = np.random.randint(0, 20000, + self.arr_shape).astype('m8[ns]') + + self.arr_nan = np.tile(np.nan, self.arr_shape) + self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) + self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) + self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) + self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) + + self.arr_inf = self.arr_float*np.inf + self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) + self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf]) + self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1]) + self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf]) + + self.arr_nan_inf = 
np.vstack([self.arr_nan, self.arr_inf]) + self.arr_float_nan_inf = np.vstack([self.arr_float, + self.arr_nan, + self.arr_inf]) + self.arr_nan_float1_inf = np.vstack([self.arr_float, + self.arr_inf, + self.arr_nan]) + self.arr_nan_nan_inf = np.vstack([self.arr_nan, + self.arr_nan, + self.arr_inf]) + self.arr_obj = np.vstack([self.arr_float.astype('O'), + self.arr_int.astype('O'), + self.arr_bool.astype('O'), + self.arr_complex.astype('O'), + self.arr_str.astype('O'), + self.arr_utf.astype('O'), + self.arr_date.astype('O'), + self.arr_tdelta.astype('O')]) + + self.arr_nan_nanj = self.arr_nan + self.arr_nan*1j + self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) + + self.arr_nan_infj = self.arr_inf*1j + self.arr_complex_nan_infj = np.vstack([self.arr_complex, + self.arr_nan_infj]) + + self.arr_float_2d = self.arr_float[:, :, 0] + self.arr_float1_2d = self.arr_float1[:, :, 0] + self.arr_complex_2d = self.arr_complex[:, :, 0] + self.arr_int_2d = self.arr_int[:, :, 0] + self.arr_bool_2d = self.arr_bool[:, :, 0] + self.arr_str_2d = self.arr_str[:, :, 0] + self.arr_utf_2d = self.arr_utf[:, :, 0] + self.arr_date_2d = self.arr_date[:, :, 0] + self.arr_tdelta_2d = self.arr_tdelta[:, :, 0] + + self.arr_nan_2d = self.arr_nan[:, :, 0] + self.arr_float_nan_2d = self.arr_float_nan[:, :, 0] + self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0] + self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0] + self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0] + self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0] + self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0] + + self.arr_inf_2d = self.arr_inf[:, :, 0] + self.arr_float_inf_2d = self.arr_float_inf[:, :, 0] + self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0] + self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0] + self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0] + + self.arr_float_1d = self.arr_float[:, 0, 0] + self.arr_float1_1d = self.arr_float1[:, 0, 0] + self.arr_complex_1d = self.arr_complex[:, 0, 
0] + self.arr_int_1d = self.arr_int[:, 0, 0] + self.arr_bool_1d = self.arr_bool[:, 0, 0] + self.arr_str_1d = self.arr_str[:, 0, 0] + self.arr_utf_1d = self.arr_utf[:, 0, 0] + self.arr_date_1d = self.arr_date[:, 0, 0] + self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0] + + self.arr_nan_1d = self.arr_nan[:, 0, 0] + self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0] + self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0] + self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0] + self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0] + self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0] + self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0] + + self.arr_inf_1d = self.arr_inf.ravel() + self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0] + self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0] + self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0] + self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0] + + def check_results(self, targ, res, axis): + res = getattr(res, 'asm8', res) + res = getattr(res, 'values', res) + if axis != 0 and hasattr(targ, 'shape') and targ.ndim: + res = np.split(res, [targ.shape[0]], axis=0)[0] + tm.assert_almost_equal(targ, res) + + def check_fun_data(self, testfunc, targfunc, + testarval, targarval, targarnanval, **kwargs): + for axis in list(range(targarval.ndim)): + for skipna in [False, True]: + targartempval = targarval if skipna else targarnanval + try: + targ = targfunc(targartempval, axis=axis, **kwargs) + res = testfunc(testarval, axis=axis, skipna=skipna, + **kwargs) + self.check_results(targ, res, axis) + if skipna: + res = testfunc(testarval, axis=axis) + self.check_results(targ, res, axis) + if axis is None: + res = testfunc(testarval, skipna=skipna) + self.check_results(targ, res, axis) + if skipna and axis is None: + res = testfunc(testarval) + self.check_results(targ, res, axis) + except BaseException as exc: + exc.args += ('axis: %s of %s' % (axis, testarval.ndim-1), + 'skipna: %s' % skipna, + 'kwargs: %s' % kwargs) + raise + + if 
testarval.ndim <= 2: + return + + try: + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) + targarnanval2 = np.take(targarnanval, 0, axis=-1) + except ValueError: + return + self.check_fun_data(testfunc, targfunc, + testarval2, targarval2, targarnanval2, + **kwargs) + + def check_fun(self, testfunc, targfunc, + testar, targar=None, targarnan=None, + **kwargs): + if targar is None: + targar = testar + if targarnan is None: + targarnan = testar + testarval = getattr(self, testar) + targarval = getattr(self, targar) + targarnanval = getattr(self, targarnan) + try: + self.check_fun_data(testfunc, targfunc, + testarval, targarval, targarnanval, **kwargs) + except BaseException as exc: + exc.args += ('testar: %s' % testar, + 'targar: %s' % targar, + 'targarnan: %s' % targarnan) + raise + + def check_funs(self, testfunc, targfunc, + allow_complex=True, allow_all_nan=True, allow_str=True, + allow_date=True, allow_obj=True, + **kwargs): + self.check_fun(testfunc, targfunc, 'arr_float', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float', + **kwargs) + self.check_fun(testfunc, targfunc, 'arr_int', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs) + objs = [self.arr_float.astype('O'), + self.arr_int.astype('O'), + self.arr_bool.astype('O')] + + if allow_all_nan: + self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs) + + if allow_complex: + self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs) + self.check_fun(testfunc, targfunc, + 'arr_complex_nan', 'arr_complex', **kwargs) + if allow_all_nan: + self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs) + objs += [self.arr_complex.astype('O')] + + if allow_str: + self.check_fun(testfunc, targfunc, 'arr_str', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs) + objs += [self.arr_str.astype('O'), + self.arr_utf.astype('O')] + + if allow_date: + self.check_fun(testfunc, targfunc, 'arr_date', **kwargs) + 
self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs) + objs += [self.arr_date.astype('O'), + self.arr_tdelta.astype('O')] + + if allow_obj: + self.arr_obj = np.vstack(objs) + self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs) + + def check_funs_ddof(self, testfunc, targfunc, + allow_complex=True, allow_all_nan=True, allow_str=True, + allow_date=True, allow_obj=True,): + for ddof in range(3): + try: + self.check_funs(self, testfunc, targfunc, + allow_complex, allow_all_nan, allow_str, + allow_date, allow_obj, + ddof=ddof) + except BaseException as exc: + exc.args += ('ddof %s' % ddof,) + + def test_nanany(self): + self.check_funs(nanops.nanany, np.any, + allow_all_nan=False, allow_str=False, allow_date=False) + + def test_nanall(self): + self.check_funs(nanops.nanall, np.all, + allow_all_nan=False, allow_str=False, allow_date=False) + + def test_nansum(self): + self.check_funs(nanops.nansum, np.sum, + allow_complex=False, + allow_str=False, allow_date=False) + + def _nanmean_wrap(self, value, *args, **kwargs): + dtype = value.dtype + res = nanops.nanmean(value, *args, **kwargs) + if dtype.kind == 'O': + res = np.round(res, decimals=15) + return res + + def _mean_wrap(self, value, *args, **kwargs): + dtype = value.dtype + if dtype.kind == 'O': + value = value.astype('c16') + res = np.mean(value, *args, **kwargs) + if dtype.kind == 'O': + res = np.round(res, decimals=15) + return res + + def test_nanmean(self): + self.check_funs(self._nanmean_wrap, self._mean_wrap, + allow_complex=False, allow_obj=False, + allow_str=False, allow_date=False) + + def _median_wrap(self, value, *args, **kwargs): + if value.dtype.kind == 'O': + value = value.astype('c16') + res = np.median(value, *args, **kwargs) + return res + + def test_nanmedian(self): + self.check_funs(nanops.nanmedian, self._median_wrap, + allow_complex=False, allow_str=False, allow_date=False) + + def test_nanvar(self): + self.check_funs_ddof(nanops.nanvar, np.var, + allow_complex=False, allow_date=False) 
+ + def test_nansem(self): + tm.skip_if_no_package('scipy') + from scipy.stats import sem + self.check_funs_ddof(nanops.nansem, np.var, + allow_complex=False, allow_date=False) + + def _minmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + if res.dtype.kind == 'm': + res = np.atleast_1d(res) + return res + + def test_nanmin(self): + func = partial(self._minmax_wrap, func=np.min) + self.check_funs(nanops.nanmin, func, + allow_complex=False, + allow_str=False, allow_obj=False) + + def test_nanmax(self): + func = partial(self._minmax_wrap, func=np.max) + self.check_funs(nanops.nanmax, func, + allow_complex=False, + allow_str=False, allow_obj=False) + + def _argminmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + nans = np.min(value, axis) + nullnan = isnull(nans) + if res.ndim: + res[nullnan] = -1 + elif (hasattr(nullnan, 'all') and nullnan.all() or + not hasattr(nullnan, 'all') and nullnan): + res = -1 + return res + + def test_nanargmax(self): + func = partial(self._argminmax_wrap, func=np.argmax) + self.check_funs(nanops.nanargmax, func, + allow_str=False, allow_obj=False) + + def test_nanargmin(self): + func = partial(self._argminmax_wrap, func=np.argmin) + if tm.sys.version_info[0:2] == (2, 6): + self.check_funs(nanops.nanargmin, func, + allow_date=False, + allow_str=False, allow_obj=False) + else: + self.check_funs(nanops.nanargmin, func, + allow_str=False, allow_obj=False) + + def _skew_kurt_wrap(self, values, axis=None, func=None): + if not isinstance(values.dtype.type, np.floating): + values = values.astype('f8') + result = func(values, axis=axis, bias=False) + # fix for handling cases where all elements in an axis are the same + if isinstance(result, np.ndarray): + result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 + return result + elif np.max(values) == np.min(values): + return 0. 
+ return result + + def test_nanskew(self): + tm.skip_if_no_package('scipy') + from scipy.stats import skew + func = partial(self._skew_kurt_wrap, func=skew) + self.check_funs(nanops.nanskew, func, + allow_complex=False, allow_str=False, allow_date=False) + + def test_nankurt(self): + tm.skip_if_no_package('scipy') + from scipy.stats import kurtosis + func1 = partial(kurtosis, fisher=True) + func = partial(self._skew_kurt_wrap, func=func1) + self.check_funs(nanops.nankurt, func, + allow_complex=False, allow_str=False, allow_date=False) + + def test_nanprod(self): + self.check_funs(nanops.nanprod, np.prod, + allow_complex=False, + allow_str=False, allow_date=False) + + def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, + **kwargs) + res01 = checkfun(self.arr_float_2d, self.arr_float1_2d, + min_periods=len(self.arr_float_2d)-1, + **kwargs) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, + **kwargs) + res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, + min_periods=len(self.arr_float_2d)-1, + **kwargs) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, + **kwargs) + res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, + **kwargs) + res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, + **kwargs) + res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, + **kwargs) + res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, + min_periods=len(self.arr_float_2d)-1, + **kwargs) + res25 = checkfun(self.arr_float_2d, self.arr_float1_2d, + min_periods=len(self.arr_float_2d)+1, + **kwargs) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) 
+ tm.assert_almost_equal(targ2, res25) + + def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, + **kwargs) + res01 = checkfun(self.arr_float_1d, self.arr_float1_1d, + min_periods=len(self.arr_float_1d)-1, + **kwargs) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_1d, + self.arr_float1_nan_1d, + **kwargs) + res11 = checkfun(self.arr_float_nan_1d, + self.arr_float1_nan_1d, + min_periods=len(self.arr_float_1d)-1, + **kwargs) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, + **kwargs) + res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, + **kwargs) + res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, + **kwargs) + res23 = checkfun(self.arr_float_nan_1d, + self.arr_nan_float1_1d, + **kwargs) + res24 = checkfun(self.arr_float_nan_1d, + self.arr_nan_float1_1d, + min_periods=len(self.arr_float_1d)-1, + **kwargs) + res25 = checkfun(self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d)+1, + **kwargs) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def test_nancorr(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, + self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1) + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, + self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='pearson') + + def test_nancorr_pearson(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = 
np.corrcoef(self.arr_float_2d.flat, + self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, + method='pearson') + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, + self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='pearson') + + def test_nancorr_kendall(self): + tm.skip_if_no_package('scipy') + from scipy.stats import kendalltau + targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, + method='kendall') + targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='kendall') + + def test_nancorr_spearman(self): + tm.skip_if_no_package('scipy') + from scipy.stats import spearmanr + targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, + method='spearman') + targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='spearman') + + def test_nancov(self): + targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) + targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) + + def check_nancomp(self, checkfun, targ0): + arr_float = self.arr_float + arr_float1 = self.arr_float1 + arr_nan = 
self.arr_nan + arr_nan_nan = self.arr_nan_nan + arr_float_nan = self.arr_float_nan + arr_float1_nan = self.arr_float1_nan + arr_nan_float1 = self.arr_nan_float1 + + while targ0.ndim: + try: + res0 = checkfun(arr_float, arr_float1) + tm.assert_almost_equal(targ0, res0) + + if targ0.ndim > 1: + targ1 = np.vstack([targ0, arr_nan]) + else: + targ1 = np.hstack([targ0, arr_nan]) + res1 = checkfun(arr_float_nan, arr_float1_nan) + tm.assert_almost_equal(targ1, res1) + + targ2 = arr_nan_nan + res2 = checkfun(arr_float_nan, arr_nan_float1) + tm.assert_almost_equal(targ2, res2) + except Exception as exc: + exc.args += ('ndim: %s' % arr_float.ndim,) + raise + + try: + arr_float = np.take(arr_float, 0, axis=-1) + arr_float1 = np.take(arr_float1, 0, axis=-1) + arr_nan = np.take(arr_nan, 0, axis=-1) + arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) + arr_float_nan = np.take(arr_float_nan, 0, axis=-1) + arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) + arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) + targ0 = np.take(targ0, 0, axis=-1) + except ValueError: + break + + def test_nangt(self): + targ0 = self.arr_float > self.arr_float1 + self.check_nancomp(nanops.nangt, targ0) + + def test_nange(self): + targ0 = self.arr_float >= self.arr_float1 + self.check_nancomp(nanops.nange, targ0) + + def test_nanlt(self): + targ0 = self.arr_float < self.arr_float1 + self.check_nancomp(nanops.nanlt, targ0) + + def test_nanle(self): + targ0 = self.arr_float <= self.arr_float1 + self.check_nancomp(nanops.nanle, targ0) + + def test_naneq(self): + targ0 = self.arr_float == self.arr_float1 + self.check_nancomp(nanops.naneq, targ0) + + def test_nanne(self): + targ0 = self.arr_float != self.arr_float1 + self.check_nancomp(nanops.nanne, targ0) + + def check_bool(self, func, value, correct, *args, **kwargs): + while getattr(value, 'ndim', True): + try: + res0 = func(value, *args, **kwargs) + if correct: + self.assertTrue(res0) + else: + self.assertFalse(res0) + except BaseException as exc: + 
exc.args += ('dim: %s' % getattr(value, 'ndim', value),) + raise + if not hasattr(value, 'ndim'): + break + try: + value = np.take(value, 0, axis=-1) + except ValueError: + break + + def test__has_infs(self): + pairs = [('arr_complex_1d', False), + ('arr_int_1d', False), + ('arr_bool_1d', False), + ('arr_str_1d', False), + ('arr_utf_1d', False), + ('arr_complex_1d', False), + ('arr_complex_nan_1d', False), + + ('arr_nan_nanj_1d', False)] + pairs_float = [('arr_float_1d', False), + ('arr_nan_1d', False), + ('arr_float_nan_1d', False), + ('arr_nan_nan_1d', False), + + ('arr_float_inf_1d', True), + ('arr_inf_1d', True), + ('arr_nan_inf_1d', True), + ('arr_float_nan_inf_1d', True), + ('arr_nan_nan_inf_1d', True)] + + for arr, correct in pairs: + val = getattr(self, arr) + try: + self.check_bool(nanops._has_infs, val, correct) + except BaseException as exc: + exc.args += (arr,) + raise + + for arr, correct in pairs_float: + val = getattr(self, arr) + try: + self.check_bool(nanops._has_infs, val, correct) + self.check_bool(nanops._has_infs, val.astype('f4'), correct) + except BaseException as exc: + exc.args += (arr,) + raise + + def test__isfinite(self): + pairs = [('arr_complex', False), + ('arr_int', False), + ('arr_bool', False), + ('arr_str', False), + ('arr_utf', False), + ('arr_complex', False), + ('arr_complex_nan', True), + + ('arr_nan_nanj', True), + ('arr_nan_infj', True), + ('arr_complex_nan_infj', True)] + pairs_float = [('arr_float', False), + ('arr_nan', True), + ('arr_float_nan', True), + ('arr_nan_nan', True), + + ('arr_float_inf', True), + ('arr_inf', True), + ('arr_nan_inf', True), + ('arr_float_nan_inf', True), + ('arr_nan_nan_inf', True)] + + func1 = lambda x: np.any(nanops._isfinite(x).ravel()) + func2 = lambda x: np.any(nanops._isfinite(x).values.ravel()) + for arr, correct in pairs: + val = getattr(self, arr) + try: + self.check_bool(func1, val, correct) + except BaseException as exc: + exc.args += (arr,) + raise + + for arr, correct in 
pairs_float: + val = getattr(self, arr) + try: + self.check_bool(func1, val, correct) + self.check_bool(func1, val.astype('f4'), correct) + self.check_bool(func1, val.astype('f2'), correct) + except BaseException as exc: + exc.args += (arr,) + raise + + def test__bn_ok_dtype(self): + self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test')) + self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test')) + self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test')) + self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test')) + + +class TestEnsureNumeric(tm.TestCase): + def test_numeric_values(self): + # Test integer + self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int') + # Test float + self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float') + # Test complex + self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j, + 'Failed for complex') + + def test_ndarray(self): + # Test numeric ndarray + values = np.array([1, 2, 3]) + self.assertTrue(np.allclose(nanops._ensure_numeric(values), values), + 'Failed for numeric ndarray') + + # Test object ndarray + o_values = values.astype(object) + self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values), + 'Failed for object ndarray') + + # Test convertible string ndarray + s_values = np.array(['1', '2', '3'], dtype=object) + self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values), + 'Failed for convertible string ndarray') + + # Test non-convertible string ndarray + s_values = np.array(['foo', 'bar', 'baz'], dtype=object) + self.assertRaises(ValueError, + lambda: nanops._ensure_numeric(s_values)) + + def 
test_convertable_values(self): + self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0), + 'Failed for convertible integer string') + self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1), + 'Failed for convertible float string') + self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j), + 'Failed for convertible complex string') + + def test_non_convertable_values(self): + self.assertRaises(TypeError, + lambda: nanops._ensure_numeric('foo')) + self.assertRaises(TypeError, + lambda: nanops._ensure_numeric({})) + self.assertRaises(TypeError, + lambda: nanops._ensure_numeric([])) + + +if __name__ == '__main__': + import nose + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', + '-s'], exit=False)
This adds unit tests for `pandas.core.nanops`. It also moves existing nanops tests from `test_common` to `test_nanops`.
https://api.github.com/repos/pandas-dev/pandas/pulls/7427
2014-06-11T10:55:32Z
2014-06-11T12:30:46Z
2014-06-11T12:30:46Z
2014-06-20T14:42:51Z
FIX value_counts should skip NaT
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 2b76da1434ba3..e5222f9e33c1c 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -75,7 +75,7 @@ Enhancements - +- Add ``dropna`` argument to ``value_counts`` and ``nunique`` (:issue:`5569`). @@ -159,7 +159,7 @@ Bug Fixes - +- Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 954f18ccb69b8..1aec8561807c9 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -168,7 +168,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): def value_counts(values, sort=True, ascending=False, normalize=False, - bins=None): + bins=None, dropna=True): """ Compute a histogram of the counts of non-null values @@ -184,6 +184,8 @@ def value_counts(values, sort=True, ascending=False, normalize=False, bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data + dropna : boolean, default False + Don't include counts of NaN Returns ------- @@ -202,25 +204,31 @@ def value_counts(values, sort=True, ascending=False, normalize=False, raise TypeError("bins argument only works with numeric data.") values = cat.labels - if com.is_integer_dtype(values.dtype): + dtype = values.dtype + if com.is_integer_dtype(dtype): values = com._ensure_int64(values) keys, counts = htable.value_count_int64(values) elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)): - dtype = values.dtype values = values.view(np.int64) keys, counts = htable.value_count_int64(values) + if dropna: + from pandas.tslib import iNaT + msk = keys != iNaT + keys, counts = keys[msk], counts[msk] # convert the keys back to the dtype we came in - keys = Series(keys, dtype=dtype) + keys = keys.astype(dtype) else: - mask = com.isnull(values) values = com._ensure_object(values) + mask = com.isnull(values) keys, counts = 
htable.value_count_object(values, mask) + if not dropna: + keys = np.insert(keys, 0, np.NaN) + counts = np.insert(counts, 0, mask.sum()) result = Series(counts, index=com._values_from_object(keys)) - if bins is not None: # TODO: This next line should be more efficient result = result.reindex(np.arange(len(cat.levels)), fill_value=0) diff --git a/pandas/core/base.py b/pandas/core/base.py index 6bbcc33c2271b..b43883885e962 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -245,7 +245,7 @@ def min(self): return pandas.core.nanops.nanmin(self.values) def value_counts(self, normalize=False, sort=True, ascending=False, - bins=None): + bins=None, dropna=True): """ Returns object containing counts of unique values. The resulting object will be in descending order so that the first element is the most @@ -263,6 +263,8 @@ def value_counts(self, normalize=False, sort=True, ascending=False, bins : integer, optional Rather than count values, group them into half-open bins, a convenience for pd.cut, only works with numeric data + dropna : boolean, default False + Don't include counts of NaN Returns ------- @@ -270,7 +272,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, """ from pandas.core.algorithms import value_counts return value_counts(self.values, sort=sort, ascending=ascending, - normalize=normalize, bins=bins) + normalize=normalize, bins=bins, dropna=dropna) def unique(self): """ @@ -284,7 +286,7 @@ def unique(self): from pandas.core.nanops import unique1d return unique1d(self.values) - def nunique(self): + def nunique(self, dropna=True): """ Return count of unique elements in the object. Excludes NA values. 
@@ -292,7 +294,7 @@ def nunique(self): ------- nunique : int """ - return len(self.value_counts()) + return len(self.value_counts(dropna=dropna)) def factorize(self, sort=False, na_sentinel=-1): """ diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 6828c1d0528ea..ec2c64242f146 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -237,6 +237,19 @@ def test_value_counts_dtypes(self): self.assertRaises(TypeError, lambda s: algos.value_counts(s, bins=1), ['1', 1]) + def test_value_counts_nat(self): + td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]') + dt = pd.to_datetime(['NaT', '2014-01-01']) + + for s in [td, dt]: + vc = algos.value_counts(s) + vc_with_na = algos.value_counts(s, dropna=False) + self.assertEqual(len(vc), 1) + self.assertEqual(len(vc_with_na), 2) + + exp_dt = pd.Series({pd.Timestamp('2014-01-01 00:00:00'): 1}) + tm.assert_series_equal(algos.value_counts(dt), exp_dt) + # TODO same for (timedelta) def test_quantile(): s = Series(np.random.randn(100)) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 4aaab3b2c52a5..6c8dd3478835f 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -292,12 +292,13 @@ def test_value_counts_unique_nunique(self): o = klass(np.repeat(values, range(1, len(o) + 1))) if isinstance(o, DatetimeIndex): - # DatetimeIndex: nan is casted to Nat and included - expected_s = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1]) + expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1]) + expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1]) else: - # nan is excluded - expected_s = Series(range(10, 2, -1), index=values[9:1:-1], dtype='int64') + expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64') + expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64') + tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na) 
tm.assert_series_equal(o.value_counts(), expected_s) # numpy_array_equal cannot compare arrays includes nan @@ -309,10 +310,8 @@ def test_value_counts_unique_nunique(self): else: self.assertTrue(pd.isnull(result[0])) - if isinstance(o, DatetimeIndex): - self.assertEqual(o.nunique(), 9) - else: - self.assertEqual(o.nunique(), 8) + self.assertEqual(o.nunique(), 8) + self.assertEqual(o.nunique(dropna=False), 9) def test_value_counts_inferred(self): klasses = [Index, Series] @@ -406,6 +405,9 @@ def test_value_counts_inferred(self): result = s.value_counts() self.assertEqual(result.index.dtype, 'datetime64[ns]') + tm.assert_series_equal(result, expected_s) + + result = s.value_counts(dropna=False) expected_s[pd.NaT] = 1 tm.assert_series_equal(result, expected_s) @@ -415,7 +417,8 @@ def test_value_counts_inferred(self): self.assert_numpy_array_equal(unique[:3], expected) self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT) - self.assertEqual(s.nunique(), 4) + self.assertEqual(s.nunique(), 3) + self.assertEqual(s.nunique(dropna=False), 4) # timedelta64[ns] td = df.dt - df.dt + timedelta(1) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 04210b4f0c88f..ddd6c26748d3e 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -106,16 +106,19 @@ def test_index_unique(self): self.assertEqual(result.name, 'foo') self.assertTrue(result.equals(expected)) - # NaT + # NaT, note this is excluded arr = [ 1370745748 + t for t in range(20) ] + [iNaT] idx = DatetimeIndex(arr * 3) self.assertTrue(idx.unique().equals(DatetimeIndex(arr))) - self.assertEqual(idx.nunique(), 21) + self.assertEqual(idx.nunique(), 20) + self.assertEqual(idx.nunique(dropna=False), 21) arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT] idx = DatetimeIndex(arr * 3) self.assertTrue(idx.unique().equals(DatetimeIndex(arr))) - 
self.assertEqual(idx.nunique(), 21) + self.assertEqual(idx.nunique(), 20) + self.assertEqual(idx.nunique(dropna=False), 21) + def test_index_dupes_contains(self): d = datetime(2011, 12, 5, 20, 30)
fixes #7423 fixes #5569.
https://api.github.com/repos/pandas-dev/pandas/pulls/7424
2014-06-11T00:00:33Z
2014-06-17T12:01:37Z
2014-06-17T12:01:37Z
2014-06-17T15:21:34Z
PERF: Series.transform speedups (GH6496)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 2b76da1434ba3..04231f08787f4 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -137,7 +137,7 @@ Performance - Improvements in dtype inference for numeric operations involving yielding performance gains for dtypes: ``int64``, ``timedelta64``, ``datetime64`` (:issue:`7223`) - +- Improvements in Series.transform for signifcant performance gains (:issue`6496`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index e6af3c20bea00..c50df6f9bb08f 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -14,7 +14,7 @@ from pandas.core.categorical import Categorical from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series from pandas.core.panel import Panel @@ -425,7 +425,7 @@ def convert(key, s): return Timestamp(key).asm8 return key - sample = list(self.indices)[0] + sample = next(iter(self.indices)) if isinstance(sample, tuple): if not isinstance(name, tuple): raise ValueError("must supply a tuple to get_group with multiple grouping keys") @@ -2193,33 +2193,37 @@ def transform(self, func, *args, **kwargs): ------- transformed : Series """ - result = self._selected_obj.copy() - if hasattr(result, 'values'): - result = result.values - dtype = result.dtype + dtype = self._selected_obj.dtype if isinstance(func, compat.string_types): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) - for name, group in self: + result = self._selected_obj.values.copy() + for i, (name, group) in enumerate(self): object.__setattr__(group, 'name', name) res = wrapper(group) + if hasattr(res, 'values'): res = res.values - # need to do a safe put here, as the dtype may be 
different - # this needs to be an ndarray - result = Series(result) - result.iloc[self._get_index(name)] = res - result = result.values + # may need to astype + try: + common_type = np.common_type(np.array(res), result) + if common_type != result.dtype: + result = result.astype(common_type) + except: + pass + + indexer = self._get_index(name) + result[indexer] = res - # downcast if we can (and need) result = _possibly_downcast_to_dtype(result, dtype) - return self._selected_obj.__class__(result, index=self._selected_obj.index, - name=self._selected_obj.name) + return self._selected_obj.__class__(result, + index=self._selected_obj.index, + name=self._selected_obj.name) def filter(self, func, dropna=True, *args, **kwargs): """ diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 1da51ce824120..14380c83de79e 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -126,8 +126,10 @@ def checkit(dtype): assert_series_equal(agged, grouped.mean()) assert_series_equal(grouped.agg(np.sum), grouped.sum()) + expected = grouped.apply(lambda x: x * x.sum()) transformed = grouped.transform(lambda x: x * x.sum()) self.assertEqual(transformed[7], 12) + assert_series_equal(transformed, expected) value_grouped = data.groupby(data) assert_series_equal(value_grouped.aggregate(np.mean), agged) diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 6f2132ff9b154..f61c60d939907 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -376,3 +376,21 @@ def f(g): """ groupby_transform = Benchmark("data.groupby(level='security_id').transform(f_fillna)", setup) + +setup = common_setup + """ +np.random.seed(0) + +N = 120000 +N_TRANSITIONS = 1400 + +# generate groups +transition_points = np.random.permutation(np.arange(N))[:N_TRANSITIONS] +transition_points.sort() +transitions = np.zeros((N,), dtype=np.bool) +transitions[transition_points] = True +g = transitions.cumsum() + +df = DataFrame({ 'signal' : np.random.rand(N)}) +""" + 
+groupby_transform2 = Benchmark("df['signal'].groupby(g).transform(np.mean)", setup)
closes #6496 turns out indexing into an array rather than building it up as a list using `concat` is faster (but have to be careful of type changes). ``` # this PR In [11]: %timeit df['signal'].groupby(g).transform(np.mean) 10 loops, best of 3: 158 ms per loop # master In [11]: %timeit df['signal'].groupby(g).transform(np.mean) 1 loops, best of 3: 601 ms per loop ``` ``` In [1]: np.random.seed(0) In [2]: N = 120000 In [3]: N_TRANSITIONS = 1400 In [5]: transition_points = np.random.permutation(np.arange(N))[:N_TRANSITIONS] In [6]: transition_points.sort() In [7]: transitions = np.zeros((N,), dtype=np.bool) In [8]: transitions[transition_points] = True In [9]: g = transitions.cumsum() In [10]: df = DataFrame({ 'signal' : np.random.rand(N)}) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7421
2014-06-10T20:44:05Z
2014-06-11T14:06:07Z
2014-06-11T14:06:07Z
2014-06-12T17:45:45Z
DOC: update df.as_matrix and df.values. Closes gh7413
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4500a9181f5d9..f486d48b58651 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1929,25 +1929,39 @@ def _get_bool_data(self): def as_matrix(self, columns=None): """ - Convert the frame to its Numpy-array matrix representation. Columns - are presented in sorted order unless a specific list of columns is - provided. - - NOTE: the dtype will be a lower-common-denominator dtype (implicit - upcasting) that is to say if the dtypes (even of numeric types) - are mixed, the one that accommodates all will be chosen use this - with care if you are not dealing with the blocks - - e.g. if the dtypes are float16,float32 -> float32 - float16,float32,float64 -> float64 - int32,uint8 -> int32 - + Convert the frame to its Numpy-array representation. + + Parameters + ---------- + columns: list, optional, default:None + If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, - the result will be of dtype=object + the result will be of dtype=object. See Notes. + + + Notes + ----- + Return is NOT a Numpy-matrix, rather, a Numpy-array. + + The dtype will be a lower-common-denominator dtype (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. Use this + with care if you are not dealing with the blocks. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. If dtypes are int32 and uint8, dtype will be upcase to + int32. + + This method is provided for backwards compatibility. Generally, + it is recommended to use '.values'. 
+ + See Also + -------- + pandas.DataFrame.values """ self._consolidate_inplace() if self._AXIS_REVERSED: @@ -1956,7 +1970,19 @@ def as_matrix(self, columns=None): @property def values(self): - "Numpy representation of NDFrame" + """Numpy representation of NDFrame + + Notes + ----- + The dtype will be a lower-common-denominator dtype (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. Use this + with care if you are not dealing with the blocks. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. If dtypes are int32 and uint8, dtype will be upcase to + int32. + """ return self.as_matrix() @property
https://api.github.com/repos/pandas-dev/pandas/pulls/7417
2014-06-10T15:41:11Z
2014-06-14T08:36:00Z
2014-06-14T08:36:00Z
2015-01-17T05:25:19Z
BUG/DTYPES: preserve bools in convert_objects
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 2b797dc295354..2b76da1434ba3 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -216,3 +216,5 @@ Bug Fixes (:issue:`7408`) - Bug where ``NaT`` wasn't repr'd correctly in a ``MultiIndex`` (:issue:`7406`, :issue:`7409`). +- Bug where bool objects were converted to ``nan`` in ``convert_objects`` + (:issue:`7416`). diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 3aa71ad02ba6a..19c1fc7522961 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -427,86 +427,91 @@ cdef extern from "parse_helper.h": cdef double fINT64_MAX = <double> INT64_MAX cdef double fINT64_MIN = <double> INT64_MIN -def maybe_convert_numeric(ndarray[object] values, set na_values, - convert_empty=True, coerce_numeric=False): + +def maybe_convert_numeric(object[:] values, set na_values, + bint convert_empty=True, bint coerce_numeric=False): ''' Type inference function-- convert strings to numeric (potentially) and convert to proper dtype array ''' cdef: int status - Py_ssize_t i, n - ndarray[float64_t] floats - ndarray[complex128_t] complexes - ndarray[int64_t] ints - bint seen_float = 0 - bint seen_complex = 0 + Py_ssize_t i, n = values.size + ndarray[float64_t] floats = np.empty(n, dtype='f8') + ndarray[complex128_t] complexes = np.empty(n, dtype='c16') + ndarray[int64_t] ints = np.empty(n, dtype='i8') + ndarray[uint8_t] bools = np.empty(n, dtype='u1') + bint seen_float = False + bint seen_complex = False + bint seen_int = False + bint seen_bool = False object val float64_t fval - n = len(values) - - floats = np.empty(n, dtype='f8') - complexes = np.empty(n, dtype='c16') - ints = np.empty(n, dtype='i8') - - for i from 0 <= i < n: + for i in range(n): val = values[i] if val in na_values: floats[i] = complexes[i] = nan - seen_float = 1 + seen_float = True elif util.is_float_object(val): floats[i] = complexes[i] = val - seen_float = 1 + seen_float = True elif 
util.is_integer_object(val): floats[i] = ints[i] = val - seen_int = 1 + seen_int = True + elif util.is_bool_object(val): + floats[i] = ints[i] = bools[i] = val + seen_bool = True elif val is None: floats[i] = complexes[i] = nan - seen_float = 1 - elif hasattr(val,'__len__') and len(val) == 0: + seen_float = True + elif hasattr(val, '__len__') and len(val) == 0: if convert_empty or coerce_numeric: floats[i] = complexes[i] = nan - seen_float = 1 + seen_float = True else: raise ValueError('Empty string encountered') elif util.is_complex_object(val): complexes[i] = val - seen_complex = 1 + seen_complex = True else: try: status = floatify(val, &fval) floats[i] = fval if not seen_float: if '.' in val or fval == INF or fval == NEGINF: - seen_float = 1 + seen_float = True elif 'inf' in val: # special case to handle +/-inf - seen_float = 1 + seen_float = True elif fval < fINT64_MAX and fval > fINT64_MIN: try: ints[i] = int(val) except ValueError: ints[i] = <int64_t> fval else: - seen_float = 1 + seen_float = True except: if not coerce_numeric: raise floats[i] = nan - seen_float = 1 - + seen_float = True if seen_complex: return complexes elif seen_float: return floats - else: + elif seen_int: return ints + elif seen_bool: + return bools.view(np.bool_) + return ints + def maybe_convert_objects(ndarray[object] objects, bint try_float=0, - bint safe=0, bint convert_datetime=0, bint convert_timedelta=0): + bint safe=0, bint convert_datetime=0, + bint convert_timedelta=0): ''' Type inference function-- convert object array to proper dtype ''' diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 3881ed5277b85..85e451541d39c 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5025,6 +5025,18 @@ def test_convert_objects(self): result = s.convert_objects(convert_dates='coerce') assert_series_equal(result, s) + def test_convert_objects_preserve_bool(self): + s = Series([1, True, 3, 5], dtype=object) + r = 
s.convert_objects(convert_numeric=True) + e = Series([1, 1, 3, 5], dtype='i8') + tm.assert_series_equal(r, e) + + def test_convert_objects_preserve_all_bool(self): + s = Series([False, True, False, False], dtype=object) + r = s.convert_objects(convert_numeric=True) + e = Series([False, True, False, False], dtype=bool) + tm.assert_series_equal(r, e) + def test_apply_args(self): s = Series(['foo,bar'])
closes #7126
https://api.github.com/repos/pandas-dev/pandas/pulls/7416
2014-06-10T14:54:38Z
2014-06-10T21:52:01Z
2014-06-10T21:52:01Z
2014-06-13T05:13:26Z
TST7337: Fix more tests in windows
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index edaae26acb29e..dd30527b1f82d 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -2067,6 +2067,10 @@ def test_append_with_timezones_dateutil(self): except ImportError: raise nose.SkipTest + # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows filename issues. + from pandas.tslib import maybe_get_tz + gettz = lambda x: maybe_get_tz('dateutil/' + x) + def compare(a, b): tm.assert_frame_equal(a, b) @@ -2082,7 +2086,7 @@ def compare(a, b): with ensure_clean_store(self.path) as store: _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=dateutil.tz.gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ])) + df = DataFrame(dict(A=[ Timestamp('20130102 2:00:00', tz=gettz('US/Eastern')) + timedelta(hours=1) * i for i in range(5) ])) store.append('df_tz', df, data_columns=['A']) result = store['df_tz'] compare(result, df) @@ -2093,14 +2097,14 @@ def compare(a, b): _maybe_remove(store, 'df_tz') # ensure we include dates in DST and STD time here. 
- df = DataFrame(dict(A=Timestamp('20130102', tz=dateutil.tz.gettz('US/Eastern')), B=Timestamp('20130603', tz=dateutil.tz.gettz('US/Eastern'))), index=range(5)) + df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130603', tz=gettz('US/Eastern'))), index=range(5)) store.append('df_tz', df) result = store['df_tz'] compare(result, df) assert_frame_equal(result, df) _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A=Timestamp('20130102', tz=dateutil.tz.gettz('US/Eastern')), B=Timestamp('20130102', tz=dateutil.tz.gettz('EET'))), index=range(5)) + df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('EET'))), index=range(5)) self.assertRaises(TypeError, store.append, 'df_tz', df) # this is ok @@ -2111,14 +2115,14 @@ def compare(a, b): assert_frame_equal(result, df) # can't append with diff timezone - df = DataFrame(dict(A=Timestamp('20130102', tz=dateutil.tz.gettz('US/Eastern')), B=Timestamp('20130102', tz=dateutil.tz.gettz('CET'))), index=range(5)) + df = DataFrame(dict(A=Timestamp('20130102', tz=gettz('US/Eastern')), B=Timestamp('20130102', tz=gettz('CET'))), index=range(5)) self.assertRaises(ValueError, store.append, 'df_tz', df) # as index with ensure_clean_store(self.path) as store: # GH 4098 example - df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=dateutil.tz.gettz('US/Eastern'))))) + df = DataFrame(dict(A=Series(lrange(3), index=date_range('2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern'))))) _maybe_remove(store, 'df') store.put('df', df) diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 51c533df863e6..e6da490e1f722 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -789,6 +789,9 @@ def localize(self, tz, x): return x.replace(tzinfo=tz) def test_utc_with_system_utc(self): + if sys.platform == 'win32': + raise nose.SkipTest('Skipped 
on win32 due to dateutil bug.') + from pandas.tslib import maybe_get_tz # from system utc to real utc
Hopefully final attempt at fixing the windows test issues from dateutil timezone work. Related discussion on previous pull request [here](https://github.com/pydata/pandas/pull/7362).
https://api.github.com/repos/pandas-dev/pandas/pulls/7414
2014-06-10T10:58:11Z
2014-06-10T12:45:03Z
2014-06-10T12:45:03Z
2014-06-12T20:12:48Z
BUG: fix repring of nat multiindex and fix neg indexing in datetimeindex
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 1c564fbf76f59..ca7d6a11d38f1 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -214,3 +214,7 @@ Bug Fixes - Bug in ``.ix`` getitem should always return a Series (:issue:`7150`) - Bug in multi-index slicing with incomplete indexers (:issue:`7399`) - Bug in multi-index slicing with a step in a sliced level (:issue:`7400`) +- Bug where negative indexers in ``DatetimeIndex`` were not correctly sliced + (:issue:`7408`) +- Bug where ``NaT`` wasn't repr'd correctly in a ``MultiIndex`` (:issue:`7406`, + :issue:`7409`). diff --git a/pandas/core/index.py b/pandas/core/index.py index 69edf8d9c3f42..8bf7a3db78b31 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2610,12 +2610,14 @@ def get_level_values(self, level): return values def format(self, space=2, sparsify=None, adjoin=True, names=False, - na_rep='NaN', formatter=None): + na_rep=None, formatter=None): if len(self) == 0: return [] stringified_levels = [] for lev, lab in zip(self.levels, self.labels): + na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type) + if len(lev) > 0: formatted = lev.take(lab).format(formatter=formatter) @@ -2624,12 +2626,12 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False, mask = lab == -1 if mask.any(): formatted = np.array(formatted, dtype=object) - formatted[mask] = na_rep + formatted[mask] = na formatted = formatted.tolist() else: # weird all NA case - formatted = [com.pprint_thing(na_rep if isnull(x) else x, + formatted = [com.pprint_thing(na if isnull(x) else x, escape_chars=('\t', '\r', '\n')) for x in com.take_1d(lev.values, lab)] stringified_levels.append(formatted) @@ -4041,3 +4043,12 @@ def _all_indexes_same(indexes): if not first.equals(index): return False return True + + +def _get_na_rep(dtype): + return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN') + + +def _get_na_value(dtype): + return {np.datetime64: tslib.NaT, np.timedelta64: 
tslib.NaT}.get(dtype, + np.nan) diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index a3a2e6849bce4..e1712be7b5a5f 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -17,8 +17,7 @@ import pandas.core.common as com import pandas.algos as algos -from pandas.core.index import Index, MultiIndex -from pandas.tseries.period import PeriodIndex +from pandas.core.index import MultiIndex, _get_na_value class _Unstacker(object): @@ -83,7 +82,7 @@ def __init__(self, values, index, level=-1, value_columns=None): def _make_index(lev, lab): values = _make_index_array_level(lev.values, lab) - i = lev._simple_new(values, lev.name, + i = lev._simple_new(values, lev.name, freq=getattr(lev, 'freq', None), tz=getattr(lev, 'tz', None)) return i @@ -262,7 +261,7 @@ def _make_index_array_level(lev, lab): l = np.arange(len(lab)) mask_labels = np.empty(len(mask[mask]), dtype=object) - mask_labels.fill(np.nan) + mask_labels.fill(_get_na_value(lev.dtype.type)) mask_indexer = com._ensure_int64(l[mask]) labels = lev @@ -638,7 +637,7 @@ def melt(frame, id_vars=None, value_vars=None, This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other - columns, considered measured variables (`value_vars`), are "unpivoted" to + columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. 
@@ -680,7 +679,7 @@ def melt(frame, id_vars=None, value_vars=None, 0 a B 1 1 b B 3 2 c B 5 - + >>> pd.melt(df, id_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 @@ -702,7 +701,7 @@ def melt(frame, id_vars=None, value_vars=None, If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] - >>> df + >>> df A B C D E F 0 a 1 2 @@ -901,7 +900,7 @@ def get_var_names(df, regex): return df.filter(regex=regex).columns.tolist() def melt_stub(df, stub, i, j): - varnames = get_var_names(df, "^"+stub) + varnames = get_var_names(df, "^" + stub) newdf = melt(df, id_vars=i, value_vars=varnames, value_name=stub, var_name=j) newdf_j = newdf[j].str.replace(stub, "") @@ -971,6 +970,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False): Examples -------- + >>> import pandas as pd >>> s = pd.Series(list('abca')) >>> get_dummies(s) diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 53c4e0a44e8e9..3324040391340 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -576,7 +576,7 @@ def maybe_indices_to_slice(ndarray[int64_t] indices): cdef: Py_ssize_t i, n = len(indices) - if n == 0: + if not n or indices[0] < 0: return indices for i in range(1, n): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7354c57498561..eaf3086c611d8 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11665,6 +11665,13 @@ def test_unstack_non_unique_index_names(self): with tm.assertRaises(ValueError): df.T.stack('c1') + def test_repr_with_mi_nat(self): + df = DataFrame({'X': [1, 2]}, + index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']]) + res = repr(df) + exp = ' X\nNaT a 1\n2013-01-01 b 2' + nose.tools.assert_equal(res, exp) + def test_reset_index(self): stacked = self.frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 29aed792bfe11..0752ec52c9a1e 100644 --- a/pandas/tests/test_index.py +++ 
b/pandas/tests/test_index.py @@ -873,6 +873,11 @@ def test_outer_join_sort(self): expected = right_idx.astype(object).union(left_idx.astype(object)) tm.assert_index_equal(joined, expected) + def test_nan_first_take_datetime(self): + idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')]) + res = idx.take([-1, 0, 1]) + exp = Index([idx[-1], idx[0], idx[1]]) + tm.assert_index_equal(res, exp) class TestFloat64Index(tm.TestCase): _multiprocess_can_split_ = True
closes #7406, #7408, #7409
https://api.github.com/repos/pandas-dev/pandas/pulls/7410
2014-06-09T15:11:54Z
2014-06-09T22:34:38Z
2014-06-09T22:34:38Z
2014-06-13T05:13:24Z
BUG: mi indexing bugs (GH7399,GH7400)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 586e47ff4f303..1c564fbf76f59 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -164,11 +164,6 @@ Bug Fixes -- Bug in ``.ix`` getitem should always return a Series (:issue:`7150`) - - - - @@ -216,3 +211,6 @@ Bug Fixes (:issue:`7366`). - Bug where ``NDFrame.replace()`` didn't correctly replace objects with ``Period`` values (:issue:`7379`). +- Bug in ``.ix`` getitem should always return a Series (:issue:`7150`) +- Bug in multi-index slicing with incomplete indexers (:issue:`7399`) +- Bug in multi-index slicing with a step in a sliced level (:issue:`7400`) diff --git a/pandas/core/index.py b/pandas/core/index.py index fbadd92c1329c..69edf8d9c3f42 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -3526,11 +3526,11 @@ def _get_level_indexer(self, key, level=0): # handle a slice, returnig a slice if we can # otherwise a boolean indexer - start = level_index.get_loc(key.start) - stop = level_index.get_loc(key.stop) + start = level_index.get_loc(key.start or 0) + stop = level_index.get_loc(key.stop or len(level_index)-1) step = key.step - if level > 0 or self.lexsort_depth == 0: + if level > 0 or self.lexsort_depth == 0 or step is not None: # need to have like semantics here to right # searching as when we are using a slice # so include the stop+1 (so we include stop) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 7610ccc6cdf73..1945236f4efe8 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1325,6 +1325,29 @@ def test_loc_multiindex(self): result = df.loc[[1,2]] assert_frame_equal(result, expected) + # GH 7399 + # incomplete indexers + s = pd.Series(np.arange(15,dtype='int64'),MultiIndex.from_product([range(5), ['a', 'b', 'c']])) + expected = s.loc[:, 'a':'c'] + + result = s.loc[0:4, 'a':'c'] + assert_series_equal(result, expected) + assert_series_equal(result, expected) + + result = s.loc[:4, 'a':'c'] + 
assert_series_equal(result, expected) + assert_series_equal(result, expected) + + result = s.loc[0:, 'a':'c'] + assert_series_equal(result, expected) + assert_series_equal(result, expected) + + # GH 7400 + # multiindexer gettitem with list of indexers skips wrong element + s = pd.Series(np.arange(15,dtype='int64'),MultiIndex.from_product([range(5), ['a', 'b', 'c']])) + expected = s.iloc[[6,7,8,12,13,14]] + result = s.loc[2:4:2, 'a':'c'] + assert_series_equal(result, expected) def test_series_getitem_multiindex(self):
closes #7399 closes #7400
https://api.github.com/repos/pandas-dev/pandas/pulls/7404
2014-06-09T13:20:02Z
2014-06-09T15:47:18Z
2014-06-09T15:47:17Z
2014-06-15T23:53:41Z
BUG: ix should return a Series for duplicate indices (GH7150)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 1cb6aadf3f40f..586e47ff4f303 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -164,7 +164,7 @@ Bug Fixes - +- Bug in ``.ix`` getitem should always return a Series (:issue:`7150`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 68e5810751d08..45262575dcb37 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -35,7 +35,6 @@ def __getitem__(self, arg): class IndexingError(Exception): pass - class _NDFrameIndexer(object): _valid_types = None _exception = KeyError @@ -61,7 +60,9 @@ def __iter__(self): def __getitem__(self, key): if type(key) is tuple: try: - return self.obj.get_value(*key) + values = self.obj.get_value(*key) + if np.isscalar(values): + return values except Exception: pass @@ -1101,8 +1102,6 @@ class _IXIndexer(_NDFrameIndexer): """ A primarily location based indexer, with integer fallback """ def _has_valid_type(self, key, axis): - ax = self.obj._get_axis(axis) - if isinstance(key, slice): return True diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 96c67f2ff795c..7610ccc6cdf73 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3570,8 +3570,14 @@ def test_float_index_to_mixed(self): 'a': [10] * 10}), df) + def test_duplicate_ix_returns_series(self): + df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2], + columns=list('abc')) + r = df.ix[0.2, 'a'] + e = df.loc[0.2, 'a'] + tm.assert_series_equal(r, e) + if __name__ == '__main__': - import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
closes #7150
https://api.github.com/repos/pandas-dev/pandas/pulls/7402
2014-06-09T12:54:30Z
2014-06-09T13:16:42Z
2014-06-09T13:16:42Z
2014-06-15T23:54:11Z
DOC: existence docs and benchmarks.
diff --git a/bench/bench_existence.py b/bench/bench_existence.py new file mode 100644 index 0000000000000..a8487cdcd76ec --- /dev/null +++ b/bench/bench_existence.py @@ -0,0 +1,285 @@ +from __future__ import division + +import os +import sys +from itertools import cycle + +from timeit import Timer +import pandas as pd +import numpy as np +import matplotlib.pyplot as plt +from bokeh.mpl import to_bokeh +from numpy.random import randint + +from mpltools import style +style.use('ggplot') + +class ExistenceBenchmarks(object): + + + def time_py_dict(look_for, look_in): + df_look_for = pd.DataFrame(look_for, columns=['data']) + dict_look_in = dict(zip(look_in, look_in)) + + def time_this(): + result = df_look_for[[x in dict_look_in for x in df_look_for.data]] + return result.drop_duplicates().sort('data') + + return time_this + + + def time_isin_list(look_for, look_in): + df_look_for = pd.DataFrame(look_for, columns=['data']) + list_look_in = list(look_in) + + def time_this(): + result = df_look_for[df_look_for.data.isin(list_look_in)] + return result.drop_duplicates().sort('data') + + return time_this + + + def time_isin_dict(look_for, look_in): + df_look_for = pd.DataFrame(look_for, columns=['data']) + dict_look_in = dict(zip(look_in, look_in)) + + def time_this(): + result = df_look_for[df_look_for.data.isin(dict_look_in)] + return result.drop_duplicates().sort('data') + + return time_this + + + def time_isin_series(look_for, look_in): + series_look_in = pd.Series(look_in) + df_look_for = pd.DataFrame(look_for, columns=['data']) + + def time_this(): + result = df_look_for[df_look_for.data.isin(series_look_in)] + return result.drop_duplicates().sort('data') + + return time_this + + + def time_join(look_for, look_in): + series_look_in = pd.Series(look_in, index=look_in) + series_look_in.name = 'series_data' + df_look_for = pd.DataFrame(look_for, columns=['data'], index=look_for) + + def time_this(): + result = df_look_for.join(series_look_in, how='inner') + return 
result.drop_duplicates() + + return time_this + + + def time_join_no_dups(look_for, look_in): + series_look_in = pd.Series(look_in, index=look_in) + series_look_in.name = 'series_data' + df_look_for = pd.DataFrame(look_for, columns=['data'], index=look_for) + + def time_this(): + df_look_for.drop_duplicates(inplace=True) + series_look_in.drop_duplicates(inplace=True) + result = df_look_for.join(series_look_in, how='inner') + return result.sort('data') + + return time_this + + + def time_query_in(look_for, look_in): + series_look_in = pd.Series(look_in) + series_look_in.name = 'data' + df_look_for = pd.DataFrame(look_for, columns=['data']) + + def time_this(): + # series_look_in is not visible to .query unless defined in local function scope. + s_look_in = series_look_in + result = df_look_for.query('data in @s_look_in') + return result.drop_duplicates().sort('data') + + return time_this + + +def run_bench(to_time, repeat, look_sets, x_axis, linestyle='-'): + func_results = [] + markers = cycle(['o', 's', '+', '^', 'v', 'x', 'D', '*']) + + for time_func_name in to_time: + marker=markers.next() + colors = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k']) + for set_name, look_set in look_sets: + color=colors.next() + plot_results = [] + for look_for, look_in in look_set: + func = ExistenceBenchmarks.__dict__[time_func_name](look_for, look_in) + result = func() + t = Timer(func) + elapsed = t.timeit(number=repeat) / repeat + name = time_func_name.replace('time_', '') + ' ' + set_name + ' (%.1f%%)' % ((len(result) / len(look_for)) * 100) + func_results.append((name, look_for, look_in, elapsed)) + plot_results.append(elapsed) + plt.plot(x_axis, plot_results, marker=marker, color=color, label=name, linestyle=linestyle) + + +def test_timed(to_time): + look_for = randint(0, 10000, 5000) + look_in = randint(5000, 15000, 5000) + + first_result = ExistenceBenchmarks.__dict__[to_time[0]](look_for, look_in)() + + for time_func_name in to_time[1:]: + func = 
ExistenceBenchmarks.__dict__[time_func_name](look_for, look_in) + result = func() + if np.array_equal(first_result['data'].values, result['data'].values): + pass + else: + raise AssertionError("%s and %s have unmatched output." % (to_time[0], time_func_name)) + + +if __name__ == '__main__': + + pandas_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + static_path = os.path.join(pandas_dir, 'doc', 'source', '_static') + join_path = lambda p: os.path.join(static_path, p) + + to_time = [key for key in ExistenceBenchmarks.__dict__ if key.startswith('time_')] + + + if len(sys.argv) != 2: + print 'usage: <--test, --run>' + print '\t--test : Ensure that all timed functions are returning identical output.' + print '\t--run : Generate plots for all timed functions.' + sys.exit() + + if sys.argv[1] == '--test': + test_timed(to_time) + + elif sys.argv[1] == '--run': + test_timed(to_time) + + def save_plot(filename, subtitle): + fname = join_path(filename) + plt.axes().set_xscale('log') + x1,x2,y1,y2 = plt.axis() + # plt.axis((x1, x2, 0, y_limit)) + plt.legend(loc=2, prop={'size':8}) + plt.title('Existence Comparisons%s' % subtitle) + plt.xlabel('% Overlap of X Elements') + plt.ylabel('Time(s)') + plt.savefig(fname) + plt.clf() + + def unordered(exp_range, repeat): + rng = [2**x for x in exp_range] + + # 25% overlap + look_set_25 = \ + [(randint(0, 100*i, 50*i), randint(75*i, 175*i, 50*i)) for i in rng] + + look_set_50 = \ + [(randint(0, 100*i, 50*i), randint(50*i, 150*i, 50*i)) for i in rng] + + look_set_75 = \ + [(randint(0, 100*i, 50*i), randint(25*i, 125*i, 50*i)) for i in rng] + + look_set_100 = \ + [(randint(0, 100*i, 50*i), randint(0*i, 100*i, 50*i)) for i in rng] + + look_sets = [] + look_sets.append(('25% overlap', look_set_25)) + look_sets.append(('50% overlap', look_set_50)) + look_sets.append(('75% overlap', look_set_75)) + look_sets.append(('100% overlap', look_set_100)) + + x_axis = [100*i for i in rng] + run_bench(to_time, 10, look_sets, x_axis, 
linestyle='-') + + + def from_ordered(exp_range, repeat): + rng = [2**x for x in exp_range] + + # 25% overlap + look_set_25 = \ + [(sorted(randint(0, 100*i, 50*i)), randint(75*i, 175*i, 50*i)) for i in rng] + + look_set_50 = \ + [(sorted(randint(0, 100*i, 50*i)), randint(50*i, 150*i, 50*i)) for i in rng] + + look_set_75 = \ + [(sorted(randint(0, 100*i, 50*i)), randint(25*i, 125*i, 50*i)) for i in rng] + + look_set_100 = \ + [(sorted(randint(0, 100*i, 50*i)), randint(0*i, 100*i, 50*i)) for i in rng] + + look_sets = [] + look_sets.append(('25% overlap, for-ordered', look_set_25)) + look_sets.append(('50% overlap, for-ordered', look_set_50)) + look_sets.append(('75% overlap, for-ordered', look_set_75)) + look_sets.append(('100% overlap, for-ordered', look_set_100)) + + x_axis = [100*i for i in rng] + run_bench(to_time, 10, look_sets, x_axis, linestyle='-.') + + + def both_ordered(exp_range, repeat): + rng = [2**x for x in exp_range] + + # 25% overlap + look_set_25 = \ + [(sorted(randint(0, 100*i, 50*i)), sorted(randint(75*i, 175*i, 50*i))) for i in rng] + + look_set_50 = \ + [(sorted(randint(0, 100*i, 50*i)), sorted(randint(50*i, 150*i, 50*i))) for i in rng] + + look_set_75 = \ + [(sorted(randint(0, 100*i, 50*i)), sorted(randint(25*i, 125*i, 50*i))) for i in rng] + + look_set_100 = \ + [(sorted(randint(0, 100*i, 50*i)), sorted(randint(0*i, 100*i, 50*i))) for i in rng] + + look_sets = [] + look_sets.append(('25% overlap, both-ordered', look_set_25)) + look_sets.append(('50% overlap, both-ordered', look_set_50)) + look_sets.append(('75% overlap, both-ordered', look_set_75)) + look_sets.append(('100% overlap, both-ordered', look_set_100)) + + x_axis = [100*i for i in rng] + run_bench(to_time, repeat, look_sets, x_axis, linestyle=':') + + + plt.figure(figsize=(32, 24)) + unordered(range(1, 10), 10) + from_ordered(range(1, 10), 10) + both_ordered(range(1, 10), 10) + save_plot('existence-perf-small.png', ': Small') + + plt.figure(figsize=(32, 24)) + unordered(range(10, 15), 
3) + from_ordered(range(10, 15), 3) + both_ordered(range(10, 15), 3) + save_plot('existence-perf-large.png', ': Large') + + plt.figure(figsize=(16, 12)) + unordered(range(1, 10), 10) + save_plot('existence-perf-unordered-small.png', ': Unordered Small') + + plt.figure(figsize=(16, 12)) + from_ordered(range(1, 10), 10) + save_plot('existence-perf-from-ordered-small.png', ': From-Ordered Small') + + plt.figure(figsize=(16, 12)) + both_ordered(range(1, 10), 10) + save_plot('existence-perf-both-ordered-small.png', ': Both-Ordered Small') + + plt.figure(figsize=(16, 12)) + unordered(range(10, 15), 3) + save_plot('existence-perf-unordered-large.png', ': Unordered Large') + + plt.figure(figsize=(16, 12)) + from_ordered(range(10, 15), 3) + save_plot('existence-perf-from-ordered-large.png', ': From-Ordered Large') + + plt.figure(figsize=(16, 12)) + both_ordered(range(10, 15), 3) + save_plot('existence-perf-both-ordered-large.png', ': Both-Ordered Large') \ No newline at end of file diff --git a/doc/source/_static/existence-perf-large.png b/doc/source/_static/existence-perf-large.png new file mode 100644 index 0000000000000..5c0766a2afb3c Binary files /dev/null and b/doc/source/_static/existence-perf-large.png differ diff --git a/doc/source/_static/existence-perf-small.png b/doc/source/_static/existence-perf-small.png new file mode 100644 index 0000000000000..6150cc47037a5 Binary files /dev/null and b/doc/source/_static/existence-perf-small.png differ diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 00c76632ce17b..554543c0f51df 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -668,3 +668,171 @@ In general, :meth:`DataFrame.query`/:func:`pandas.eval` will evaluate the subexpressions that *can* be evaluated by ``numexpr`` and those that must be evaluated in Python space transparently to the user. This is done by inferring the result type of an expression from its arguments and operators. 
+ +Existence (IsIn, Inner Join, Dict/Hash, Query) +---------------------------------------------------- + +Existence is the process of testing if an item exists in another list of items, and +in the case of a DataFrame, we're testing each value of a column for existence in +another collection of items. + +There are a number of different ways to test for existence using pandas and the +following methods are a few of those. The comments correspond to the legend +in the plots further down. + + +:meth:`DataFrame.isin` + +.. code-block:: python + + # isin_list + df[df.index.isin(lst)] + # isin_dict + df[df.index.isin(dct)] + # isin_series + df[df.index.isin(series)] + + + +:meth:`DataFrame.query` + +.. code-block:: python + + # The '@' symbol is used with `query` to reference local variables. Names + # without '@' will reference the DataFrame's columns or index. + + # query_in list + df.query('index in @lst') + # query_in Series + df.query('index in @series') + + # A list can be used with `query('.. == ..')` to test for existence + # but other data structures such as the `pandas.Series` have + # a different behaviour. + + df.query('index == @lst') + + +:meth:`DataFrame.apply` + +.. code-block:: python + + df[df.index.apply(lambda x: x in lst)] + + +:meth:`DataFrame.join` + +.. code-block:: python + + # join + df.join(lst, how='inner') + + # this can actually be fast for small DataFrames + df[[x in dct for x in df.index]] + + # isin_series, query_in Series, pydict, + # join and isin_list are included in the plots below. + + +As seen below, generally using a ``Series`` is better than using pure python data +structures for anything larger than very small datasets of around 1000 records. +The fastest two being ``join(series)``: + +.. code-block:: python + + lst = range(1000000) + series = Series(lst, name='data') + + df = DataFrame(lst, columns=['ID']) + + df.join(series, how='inner') + # 100 loops, best of 3: 19.2 ms per loop + +list vs Series: + +.. 
code-block:: python + + df[df.index.isin(lst)] + # 1 loops, best of 3: 1.06 s per loop + + df[df.index.isin(series)] + # 1 loops, best of 3: 477 ms per loop + +df.index vs df.column doesn't make a difference here: + +.. code-block:: python + + df[df.ID.isin(series)] + # 1 loops, best of 3: 474 ms per loop + + df[df.index.isin(series)] + # 1 loops, best of 3: 475 ms per loop + +The ``query`` 'in' syntax has the same performance as ``isin``. + +.. code-block:: python + + df.query('index in @lst') + # 1 loops, best of 3: 1.04 s per loop + + df.query('index in @series') + # 1 loops, best of 3: 451 ms per loop + + df.query('index == @lst') + # 1 loops, best of 3: 1.03 s per loop + + +For ``join``, the data must be the index in the ``DataFrame`` and the index in the ``Series`` +for the best performance. The ``Series`` must also have a ``name``. ``join`` defaults to a +left join so we need to specify 'inner' for existence. + +.. code-block:: python + + df.join(series, how='inner') + # 100 loops, best of 3: 19.7 ms per loop + +Smaller datasets: + +.. code-block:: python + + df = DataFrame([1,2,3,4], columns=['ID']) + lst = range(10000) + dct = dict(zip(lst, lst)) + series = Series(lst, name='data') + + df.join(series, how='inner') + # 1000 loops, best of 3: 866 us per loop + + df[df.ID.isin(dct)] + # 1000 loops, best of 3: 809 us per loop + + df[df.ID.isin(lst)] + # 1000 loops, best of 3: 853 us per loop + + df[df.ID.isin(series)] + # 100 loops, best of 3: 2.22 ms per loop + +It's actually faster to use ``apply`` or a list comprehension for these small cases. + +.. code-block:: python + + df[[x in dct for x in df.ID]] + # 1000 loops, best of 3: 266 us per loop + + df[df.ID.apply(lambda x: x in dct)] + # 1000 loops, best of 3: 364 us per loop + + +Here is a visualization of some of the benchmarks above. You can see that except for with +very small datasets, ``isin(Series)`` and ``join(Series)`` quickly become faster than the +pure python data structures. + +.. 
image:: _static/existence-perf-small.png + +However, ``isin(Series)`` still presents fairly poor exponential performance where ``join`` is quite +fast for large datasets. There is some overhead involved in ensuring your data is the index +in both your left and right datasets but that time should be clearly outweighed by the gains of +the join itself. For extremely large datasets, you may start bumping into memory limits since ``join`` +does not perform any disk chunking, etc. + +.. image:: _static/existence-perf-large.png \ No newline at end of file
![enhancing_performance](https://cloud.githubusercontent.com/assets/1731217/3224419/636f5dfc-f034-11e3-90e8-1469a162b674.png) I've included some documentation on existence-type associations as requested by Jeff in [this question](http://stackoverflow.com/questions/23945493/a-faster-alternative-to-pandas-isin-function/) over at Stack Overflow. The content includes a couple of plots generated by the script bench/bench_existence.py but I was not able to find any automation that executed those bench/bench_*.py scripts, so I may be missing a hook somewhere. That script also takes a couple of minutes to run.
https://api.github.com/repos/pandas-dev/pandas/pulls/7398
2014-06-09T03:25:04Z
2015-07-12T15:00:43Z
null
2015-07-12T15:00:43Z
to_stata uint16
diff --git a/pandas/io/stata.py b/pandas/io/stata.py index b67a1be8d43d6..d1827a793ba71 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -230,13 +230,13 @@ def _cast_to_stata_types(data): ws = '' for col in data: dtype = data[col].dtype - if dtype == np.int8: + if dtype in (np.int8, np.uint8): if data[col].max() > 100 or data[col].min() < -127: data[col] = data[col].astype(np.int16) - elif dtype == np.int16: + elif dtype in (np.int16, np.uint16): if data[col].max() > 32740 or data[col].min() < -32767: data[col] = data[col].astype(np.int32) - elif dtype == np.int64: + elif dtype in (np.int32, np.uint32, np.int64, np.uint64): if data[col].max() <= 2147483620 and data[col].min() >= -2147483647: data[col] = data[col].astype(np.int32) else: @@ -990,11 +990,11 @@ def _dtype_to_stata_type(dtype): return chr(255) elif dtype == np.float32: return chr(254) - elif dtype == np.int32: + elif dtype in (np.int32, np.uint32): return chr(253) - elif dtype == np.int16: + elif dtype in (np.int16, np.uint16): return chr(252) - elif dtype == np.int8: + elif dtype in (np.int8, np.uint8): return chr(251) else: # pragma : no cover raise ValueError("Data type %s not currently understood. " @@ -1023,9 +1023,9 @@ def _dtype_to_default_stata_fmt(dtype): return "%10.0g" elif dtype == np.float32: return "%9.0g" - elif dtype == np.int32: + elif dtype in (np.int32, np.uint32): return "%12.0g" - elif dtype == np.int8 or dtype == np.int16: + elif dtype in (np.int8, np.uint8, np.int16, np.uint16): return "%8.0g" else: # pragma : no cover raise ValueError("Data type %s not currently understood. "
Simple changes to io/stata.py to write unsigned integers to Stata files. Sorry about the poor commit messages--I'm still new to git and wasn't able to correct it cleanly. (See issue #7365)
https://api.github.com/repos/pandas-dev/pandas/pulls/7397
2014-06-09T01:54:45Z
2014-07-17T03:05:18Z
null
2014-07-17T03:05:28Z
BUG: grouped hist and scatter use old figsize default
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index e8c7a6f9ab462..653048285330e 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -175,6 +175,7 @@ Bug Fixes - Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`) +- Bug in grouped ``hist`` and ``scatter`` plots use old ``figsize`` default (:issue:`7394`) - Bug in ``Panel.apply`` with a multi-index as an axis (:issue:`7469`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index c49607eef1b42..1ebdf51d849e7 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -609,16 +609,16 @@ def test_hist_layout_with_by(self): df = self.hist_df axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) - self._check_axes_shape(axes, axes_num=2, layout=(2, 1), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1)) - self._check_axes_shape(axes, axes_num=4, layout=(4, 1), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2)) - self._check_axes_shape(axes, axes_num=3, layout=(2, 2), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) - axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 2)) - self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(10, 5)) + axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 2), figsize=(12, 7)) + self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) @slow def test_hist_no_overlap(self): @@ -2255,11 +2255,11 @@ def test_grouped_hist(self): df = DataFrame(randn(500, 2), columns=['A', 'B']) df['C'] = np.random.randint(0, 4, 500) axes = plotting.grouped_hist(df.A, by=df.C) - self._check_axes_shape(axes, axes_num=4, layout=(2, 2), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) 
tm.close() axes = df.hist(by=df.C) - self._check_axes_shape(axes, axes_num=4, layout=(2, 2), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) tm.close() # make sure kwargs to hist are handled @@ -2281,6 +2281,9 @@ def test_grouped_hist(self): with tm.assertRaises(AttributeError): plotting.grouped_hist(df.A, by=df.C, foo='bar') + with tm.assert_produces_warning(FutureWarning): + df.hist(by='C', figsize='default') + @slow def test_grouped_box_return_type(self): df = self.hist_df @@ -2366,29 +2369,28 @@ def test_grouped_hist_layout(self): layout=(1, 3)) axes = _check_plot_works(df.hist, column='height', by=df.gender, layout=(2, 1)) - self._check_axes_shape(axes, axes_num=2, layout=(2, 1), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) axes = _check_plot_works(df.hist, column='height', by=df.category, layout=(4, 1)) - self._check_axes_shape(axes, axes_num=4, layout=(4, 1), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) axes = _check_plot_works(df.hist, column='height', by=df.category, layout=(4, 2), figsize=(12, 8)) - self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8)) # GH 6769 axes = _check_plot_works(df.hist, column='height', by='classroom', layout=(2, 2)) - self._check_axes_shape(axes, axes_num=3, layout=(2, 2), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) # without column axes = _check_plot_works(df.hist, by='classroom') - self._check_axes_shape(axes, axes_num=3, layout=(2, 2), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) axes = _check_plot_works(df.hist, by='gender', layout=(3, 5)) - self._check_axes_shape(axes, axes_num=2, layout=(3, 5), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=2, layout=(3, 5)) axes = _check_plot_works(df.hist, column=['height', 'weight', 'category']) - self._check_axes_shape(axes, axes_num=3, layout=(2, 2), figsize=(10, 5)) + self._check_axes_shape(axes, axes_num=3, 
layout=(2, 2)) @slow def test_axis_share_x(self): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 37a982acc0bbd..1dc94852454a7 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2734,9 +2734,11 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, rot=0, ax=None, **kwargs): from pandas import DataFrame - # allow to specify mpl default with 'default' - if figsize is None or figsize == 'default': - figsize = (10, 5) # our default + if figsize == 'default': + # allowed to specify mpl default with 'default' + warnings.warn("figsize='default' is deprecated. Specify figure" + "size by tuple instead", FutureWarning) + figsize = None grouped = data.groupby(by) if column is not None: @@ -2744,10 +2746,6 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, naxes = len(grouped) nrows, ncols = _get_layout(naxes, layout=layout) - if figsize is None: - # our favorite default beating matplotlib's idea of the - # default size - figsize = (10, 5) fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax)
Grouped `hist` and `scatter` plots use `figsize` default which is different from pandas current and mpl.
https://api.github.com/repos/pandas-dev/pandas/pulls/7394
2014-06-08T04:26:27Z
2014-07-01T15:29:22Z
2014-07-01T15:29:22Z
2014-07-02T16:46:36Z
BUG/TST: test for groupby max nan subselection bug
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 4aae5dfea3982..1f1853186ac8a 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4369,6 +4369,17 @@ def test_ops_general(self): exc.args += ('operation: %s' % op,) raise + def test_max_nan_bug(self): + raw = """,Date,app,File +2013-04-23,2013-04-23 00:00:00,,log080001.log +2013-05-06,2013-05-06 00:00:00,,log.log +2013-05-07,2013-05-07 00:00:00,OE,xlsx""" + df = pd.read_csv(StringIO(raw), parse_dates=[0]) + gb = df.groupby('Date') + r = gb[['File']].max() + e = gb['File'].max().to_frame() + tm.assert_frame_equal(r, e) + self.assertFalse(r['File'].isnull().any()) def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
closes #6346
https://api.github.com/repos/pandas-dev/pandas/pulls/7393
2014-06-08T01:29:44Z
2014-06-08T03:02:20Z
2014-06-08T03:02:20Z
2014-07-09T18:20:25Z
TST: Changed the error raised by no tables in data.Options
diff --git a/pandas/io/data.py b/pandas/io/data.py index e875e8aa3c6db..525a7ce64f0c2 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -664,7 +664,9 @@ def _get_option_data(self, month, year, expiry, table_loc, name): "element".format(url)) tables = root.xpath('.//table') ntables = len(tables) - if table_loc - 1 > ntables: + if ntables == 0: + raise RemoteDataError("No tables found at {0!r}".format(url)) + elif table_loc - 1 > ntables: raise IndexError("Table location {0} invalid, {1} tables" " found".format(table_loc, ntables))
Tests were failing if the scraper got the webpage but there weren't any tables in it. Added condition to raise RemoteDataError if no tables were found. Still IndexError if it can't find the correct table. fixes #7335
https://api.github.com/repos/pandas-dev/pandas/pulls/7392
2014-06-07T22:09:31Z
2014-06-08T01:35:01Z
2014-06-08T01:35:01Z
2014-06-17T16:37:06Z
BUG: inconsistent subplot ax handling
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 850e7e13db2ff..6188f182f1a82 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -176,6 +176,8 @@ Bug Fixes - Bug in ``to_timedelta`` that accepted invalid units and misinterpreted 'm/h' (:issue:`7611`, :issue: `6423`) - Bug in grouped ``hist`` and ``scatter`` plots use old ``figsize`` default (:issue:`7394`) +- Bug in plotting subplots with ``DataFrame.plot``, ``hist`` clears passed ``ax`` even if the number of subplots is one (:issue:`7391`). +- Bug in plotting subplots with ``DataFrame.boxplot`` with ``by`` kw raises ``ValueError`` if the number of subplots exceeds 1 (:issue:`7391`). - Bug in ``Panel.apply`` with a multi-index as an axis (:issue:`7469`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index d19d071833ea7..729aa83647590 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -859,6 +859,13 @@ def test_plot(self): axes = _check_plot_works(df.plot, kind='bar', subplots=True) self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + fig, ax = self.plt.subplots() + axes = df.plot(kind='bar', subplots=True, ax=ax) + self.assertEqual(len(axes), 1) + self.assertIs(ax.get_axes(), axes[0]) + def test_nonnumeric_exclude(self): df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}) ax = df.plot() @@ -1419,17 +1426,23 @@ def test_boxplot(self): df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2']) df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B']) + df['Y'] = Series(['A'] * 10) _check_plot_works(df.boxplot, by='X') - # When ax is supplied, existing axes should be used: + # When ax is supplied and required number of axes is 1, + # passed ax should be used: fig, ax = self.plt.subplots() axes = df.boxplot('Col1', by='X', ax=ax) self.assertIs(ax.get_axes(), axes) - # Multiple columns with an ax argument is not 
supported fig, ax = self.plt.subplots() - with tm.assertRaisesRegexp(ValueError, 'existing axis'): - df.boxplot(column=['Col1', 'Col2'], by='X', ax=ax) + axes = df.groupby('Y').boxplot(ax=ax, return_type='axes') + self.assertIs(ax.get_axes(), axes['A']) + + # Multiple columns with an ax argument should use same figure + fig, ax = self.plt.subplots() + axes = df.boxplot(column=['Col1', 'Col2'], by='X', ax=ax, return_type='axes') + self.assertIs(axes['Col1'].get_figure(), fig) # When by is None, check that all relevant lines are present in the dict fig, ax = self.plt.subplots() @@ -2180,32 +2193,32 @@ class TestDataFrameGroupByPlots(TestPlotBase): @slow def test_boxplot(self): grouped = self.hist_df.groupby(by='gender') - box = _check_plot_works(grouped.boxplot, return_type='dict') - self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2)) + axes = _check_plot_works(grouped.boxplot, return_type='axes') + self._check_axes_shape(axes.values(), axes_num=2, layout=(1, 2)) - box = _check_plot_works(grouped.boxplot, subplots=False, - return_type='dict') - self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2)) + axes = _check_plot_works(grouped.boxplot, subplots=False, + return_type='axes') + self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) tuples = lzip(string.ascii_letters[:10], range(10)) df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples)) grouped = df.groupby(level=1) - box = _check_plot_works(grouped.boxplot, return_type='dict') - self._check_axes_shape(self.plt.gcf().axes, axes_num=10, layout=(4, 3)) + axes = _check_plot_works(grouped.boxplot, return_type='axes') + self._check_axes_shape(axes.values(), axes_num=10, layout=(4, 3)) - box = _check_plot_works(grouped.boxplot, subplots=False, - return_type='dict') - self._check_axes_shape(self.plt.gcf().axes, axes_num=10, layout=(4, 3)) + axes = _check_plot_works(grouped.boxplot, subplots=False, + return_type='axes') + self._check_axes_shape(axes, axes_num=1, 
layout=(1, 1)) grouped = df.unstack(level=1).groupby(level=0, axis=1) - box = _check_plot_works(grouped.boxplot, return_type='dict') - self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) + axes = _check_plot_works(grouped.boxplot, return_type='axes') + self._check_axes_shape(axes.values(), axes_num=3, layout=(2, 2)) - box = _check_plot_works(grouped.boxplot, subplots=False, - return_type='dict') - self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) + axes = _check_plot_works(grouped.boxplot, subplots=False, + return_type='axes') + self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) def test_series_plot_color_kwargs(self): # GH1890 diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 2b02523c143b4..779aa328e820f 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2665,7 +2665,8 @@ def plot_group(group, ax): def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, - rot=0, grid=True, figsize=None, layout=None, **kwds): + rot=0, grid=True, ax=None, figsize=None, + layout=None, **kwds): """ Make box plots from DataFrameGroupBy data. 
@@ -2712,7 +2713,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, naxes = len(grouped) nrows, ncols = _get_layout(naxes, layout=layout) fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, squeeze=False, - sharex=False, sharey=True) + ax=ax, sharex=False, sharey=True, figsize=figsize) axes = _flatten(axes) ret = compat.OrderedDict() @@ -2733,7 +2734,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, else: df = frames[0] ret = df.boxplot(column=column, fontsize=fontsize, rot=rot, - grid=grid, figsize=figsize, layout=layout, **kwds) + grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds) return ret @@ -2779,17 +2780,10 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, by = [by] columns = data._get_numeric_data().columns - by naxes = len(columns) - - if ax is None: - nrows, ncols = _get_layout(naxes, layout=layout) - fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, - sharex=True, sharey=True, - figsize=figsize, ax=ax) - else: - if naxes > 1: - raise ValueError("Using an existing axis is not supported when plotting multiple columns.") - fig = ax.get_figure() - axes = ax.get_axes() + nrows, ncols = _get_layout(naxes, layout=layout) + fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, + sharex=True, sharey=True, + figsize=figsize, ax=ax) ravel_axes = _flatten(axes) @@ -2974,12 +2968,6 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= if subplot_kw is None: subplot_kw = {} - if ax is None: - fig = plt.figure(**fig_kw) - else: - fig = ax.get_figure() - fig.clear() - # Create empty object array to hold all axes. 
It's easiest to make it 1-d # so we can just append subplots upon creation, and then nplots = nrows * ncols @@ -2989,6 +2977,21 @@ def _subplots(nrows=1, ncols=1, naxes=None, sharex=False, sharey=False, squeeze= elif nplots < naxes: raise ValueError("naxes {0} is larger than layour size defined by nrows * ncols".format(naxes)) + if ax is None: + fig = plt.figure(**fig_kw) + else: + fig = ax.get_figure() + # if ax is passed and a number of subplots is 1, return ax as it is + if naxes == 1: + if squeeze: + return fig, ax + else: + return fig, _flatten(ax) + else: + warnings.warn("To output multiple subplots, the figure containing the passed axes " + "is being cleared", UserWarning) + fig.clear() + axarr = np.empty(nplots, dtype=object) def on_right(i):
There is an inconsistency `DataFrame.plot`, `hist` and `boxplot` when subplot enabled (`subplots=True` or use `by` kw). ### Current behaviour - `plot` and `hist`: When `ax` kw is passed, plot will be drawn on the figure which the passed ax belongs. The figure will be once cleared even if the required number of subplot is 1. Thus, any artists contained in the passed `ax` will be flushed. - `box`: When `ax` kw is passed and a required number of subplots is 1, use the passed `ax` without clearing the existing plot. If more than 1 axes is required for the subplots, raises `ValueError`. ### Fix - When `ax` kw is passed and a required number of subplots is 1, use the passed `ax` without clearing the existing plot. - If more than 1 axes is required for the subplots, subplots will be output on the figure which the passed ax belongs. The figure will be once cleared if the number of subplots is more than 1.
https://api.github.com/repos/pandas-dev/pandas/pulls/7391
2014-06-07T21:45:50Z
2014-07-06T14:09:51Z
2014-07-06T14:09:51Z
2014-07-09T12:39:17Z
BUG: ix should return a Series for duplicate indices
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 68e5810751d08..c47fcb3a98347 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -36,6 +36,18 @@ class IndexingError(Exception): pass +def _reconstruct_from_dup_ix(key, obj, values): + if np.isscalar(values): + return values + assert len(key) == obj.ndim + + kwargs = dict() + for axis, name in obj._AXIS_NAMES.items(): + index = getattr(obj, name) + kwargs[name] = index[index.get_loc(key[axis])] + return obj._constructor(values, **kwargs).squeeze() + + class _NDFrameIndexer(object): _valid_types = None _exception = KeyError @@ -61,7 +73,8 @@ def __iter__(self): def __getitem__(self, key): if type(key) is tuple: try: - return self.obj.get_value(*key) + values = self.obj.get_value(*key) + return _reconstruct_from_dup_ix(key, self.obj, values) except Exception: pass @@ -1101,8 +1114,6 @@ class _IXIndexer(_NDFrameIndexer): """ A primarily location based indexer, with integer fallback """ def _has_valid_type(self, key, axis): - ax = self.obj._get_axis(axis) - if isinstance(key, slice): return True diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 96c67f2ff795c..7610ccc6cdf73 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3570,8 +3570,14 @@ def test_float_index_to_mixed(self): 'a': [10] * 10}), df) + def test_duplicate_ix_returns_series(self): + df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2], + columns=list('abc')) + r = df.ix[0.2, 'a'] + e = df.loc[0.2, 'a'] + tm.assert_series_equal(r, e) + if __name__ == '__main__': - import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
closes #7150 it seems like there's a better way to do this ....
https://api.github.com/repos/pandas-dev/pandas/pulls/7390
2014-06-07T21:29:45Z
2014-06-09T12:54:56Z
null
2014-07-09T18:20:25Z
BLD/TST/DEV: update tox to use python 3.4
diff --git a/tox.ini b/tox.ini index 51480832284b9..b11a71f531524 100644 --- a/tox.ini +++ b/tox.ini @@ -4,15 +4,21 @@ # and then run "tox" from this directory. [tox] -envlist = py26, py27, py32, py33 +envlist = py26, py27, py32, py33, py34 [testenv] deps = cython - numpy >= 1.6.1 nose - pytz >= 2011k + pytz>=2011k + python-dateutil + beautifulsoup4 + lxml + openpyxl<2.0.0 + xlsxwriter + xlrd six + sqlalchemy # cd to anything but the default {toxinidir} which # contains the pandas subdirectory and confuses @@ -21,7 +27,7 @@ changedir = {envdir} commands = # TODO: --exe because of GH #761 - {envbindir}/nosetests --exe pandas -A "not network" + {envbindir}/nosetests --exe pandas {posargs:-A "not network and not disabled"} # cleanup the temp. build dir created by the tox build # /bin/rm -rf {toxinidir}/build @@ -37,9 +43,30 @@ commands = pip uninstall pandas -qy [testenv:py26] +deps = + numpy==1.6.1 + boto + bigquery + {[testenv]deps} [testenv:py27] +deps = + numpy==1.8.1 + boto + bigquery + {[testenv]deps} [testenv:py32] +deps = + numpy==1.7.1 + {[testenv]deps} [testenv:py33] +deps = + numpy==1.8.0 + {[testenv]deps} + +[testenv:py34] +deps = + numpy==1.8.0 + {[testenv]deps} diff --git a/tox.sh b/tox.sh deleted file mode 100755 index b68ffc7fdb91c..0000000000000 --- a/tox.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - - -if [ x"$1" == x"fast" ]; then - scripts/use_build_cache.py -fi; - -tox diff --git a/tox_prll.ini b/tox_prll.ini deleted file mode 100644 index 7ae399837b4e0..0000000000000 --- a/tox_prll.ini +++ /dev/null @@ -1,46 +0,0 @@ -# Tox (http://tox.testrun.org/) is a tool for running tests -# in multiple virtualenvs. This configuration file will run the -# test suite on all supported python versions. To use it, "pip install tox" -# and then run "tox" from this directory. 
- -[tox] -envlist = py26, py27, py32, py33 -sdistsrc = {env:DISTFILE} - -[testenv] -deps = - cython - numpy >= 1.6.1 - nose - pytz - six - -# cd to anything but the default {toxinidir} which -# contains the pandas subdirectory and confuses -# nose away from the fresh install in site-packages -changedir = {envdir} - -commands = - # TODO: --exe because of GH #761 - {envbindir}/nosetests --exe pandas -A "not network" - # cleanup the temp. build dir created by the tox build -# /bin/rm -rf {toxinidir}/build - - # quietly rollback the install. - # Note this line will only be reached if the - # previous lines succeed (in particular, the tests), - # but an uninstall is really only required when - # files are removed from the source tree, in which case, - # stale versions of files will will remain in the venv - # until the next time uninstall is run. - # - # tox should provide a preinstall-commands hook. - pip uninstall pandas -qy - -[testenv:py26] - -[testenv:py27] - -[testenv:py32] - -[testenv:py33] diff --git a/tox_prll.sh b/tox_prll.sh deleted file mode 100755 index a426d68297ac5..0000000000000 --- a/tox_prll.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# -# tox has an undocumented (as of 1.4.2) config option called "sdistsrc" -# which can make a run use a pre-prepared sdist file. -# we prepare the sdist once , then launch the tox runs in parallel using it. -# -# currently (tox 1.4.2) We have to skip sdist generation when running in parallel -# or we get a race. -# - - -ENVS=$(cat tox.ini | grep envlist | tr "," " " | cut -d " " -f 3-) -TOX_INI_PAR="tox_prll.ini" - -if [ x"$1" == x"fast" ]; then - scripts/use_build_cache.py -fi; - -echo "[Creating distfile]" -tox --sdistonly -export DISTFILE="$(find .tox/dist -type f )" - -echo -e "[Starting tests]\n" -for e in $ENVS; do - echo "[launching tox for $e]" - tox -c "$TOX_INI_PAR" -e "$e" & -done -wait
- now installs any optional dep not relying on numpy for better local test coverage - you can now pass arguments that will go to nosetests (e.g., for running network tests) - removed tox_prll.ini because you can `pip install detox` and it will automatically build run and test across the configuration in tox.ini
https://api.github.com/repos/pandas-dev/pandas/pulls/7389
2014-06-07T20:37:53Z
2014-06-07T22:29:30Z
2014-06-07T22:29:30Z
2014-06-14T06:24:14Z
TST/BUG: use BytesIO for Python 3.4 TestEncoding
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py index a20a8945eeb11..326b7bc004564 100644 --- a/pandas/io/tests/test_html.py +++ b/pandas/io/tests/test_html.py @@ -20,7 +20,7 @@ from pandas import (DataFrame, MultiIndex, read_csv, Timestamp, Index, date_range, Series) -from pandas.compat import map, zip, StringIO, string_types +from pandas.compat import map, zip, StringIO, string_types, BytesIO from pandas.io.common import URLError, urlopen, file_path_to_url from pandas.io.html import read_html from pandas.parser import CParserError @@ -601,7 +601,7 @@ def read_filename(self, f, encoding): def read_file_like(self, f, encoding): with open(f, 'rb') as fobj: - return read_html(StringIO(fobj.read()), encoding=encoding, + return read_html(BytesIO(fobj.read()), encoding=encoding, index_col=0) def read_string(self, f, encoding):
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7388
2014-06-07T19:00:57Z
2014-06-07T19:37:22Z
2014-06-07T19:37:22Z
2014-07-12T06:03:54Z
BUG: Revisit DatetimeIndex.insert doesnt preserve name and tz
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 2b76da1434ba3..6e3b23898d08f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -166,6 +166,8 @@ Bug Fixes +- BUG in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`) +- BUG in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`) @@ -218,3 +220,4 @@ Bug Fixes :issue:`7409`). - Bug where bool objects were converted to ``nan`` in ``convert_objects`` (:issue:`7416`). + diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 42cc80cc5dc63..7f0e00105bba5 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -784,7 +784,7 @@ def tolist(self): def _get_object_index(self): boxfunc = lambda x: Timestamp(x, offset=self.offset, tz=self.tz) boxed_values = lib.map_infer(self.asi8, boxfunc) - return Index(boxed_values, dtype=object) + return Index(boxed_values, dtype=object, name=self.name) def to_pydatetime(self): """ @@ -1594,13 +1594,29 @@ def insert(self, loc, item): ------- new_index : Index """ + + freq = None if isinstance(item, datetime): + zone = tslib.get_timezone(self.tz) + izone = tslib.get_timezone(getattr(item, 'tzinfo', None)) + if zone != izone: + raise ValueError('Passed item and index have different timezone') + # check freq can be preserved on edge cases + if self.freq is not None: + if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]: + freq = self.freq + elif (loc == len(self)) and item - self.freq == self[-1]: + freq = self.freq item = _to_m8(item, tz=self.tz) try: - new_index = np.concatenate((self[:loc].asi8, - [item.view(np.int64)], - self[loc:].asi8)) - return DatetimeIndex(new_index, freq='infer') + new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)], + self[loc:].asi8)) + if self.tz is not None: + f = lambda x: tslib.tz_convert_single(x, 'UTC', self.tz) + new_dates = np.vectorize(f)(new_dates) + # new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz) + return 
DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) + except (AttributeError, TypeError): # fall back to object index diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 04210b4f0c88f..c8f62a731d32b 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2293,24 +2293,101 @@ def test_order(self): self.assertTrue(ordered[::-1].is_monotonic) self.assert_numpy_array_equal(dexer, [0, 2, 1]) + def test_asobject(self): + idx = date_range(start='2013-01-01', periods=4, freq='M', name='idx') + expected = Index([Timestamp('2013-01-31'), Timestamp('2013-02-28'), + Timestamp('2013-03-31'), Timestamp('2013-04-30')], + dtype=object, name='idx') + + result = idx.asobject + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + def test_insert(self): - idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) + idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'], name='idx') result = idx.insert(2, datetime(2000, 1, 5)) exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05', - '2000-01-02']) + '2000-01-02'], name='idx') self.assertTrue(result.equals(exp)) # insertion of non-datetime should coerce to object index result = idx.insert(1, 'inserted') expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1), - datetime(2000, 1, 2)]) + datetime(2000, 1, 2)], name='idx') self.assertNotIsInstance(result, DatetimeIndex) tm.assert_index_equal(result, expected) + self.assertEqual(result.name, expected.name) + + idx = date_range('1/1/2000', periods=3, freq='M', name='idx') + + # preserve freq + expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29', + '2000-03-31'], name='idx', freq='M') + expected_3 = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', + '2000-04-30'], name='idx', freq='M') + + # reset freq to None + expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31', '2000-02-29', 
+ '2000-03-31'], name='idx', freq=None) + expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', + '2000-01-02'], name='idx', freq=None) + + cases = [(0, datetime(1999, 12, 31), expected_0), + (-3, datetime(1999, 12, 31), expected_0), + (3, datetime(2000, 4, 30), expected_3), + (1, datetime(2000, 1, 31), expected_1_nofreq), + (3, datetime(2000, 1, 2), expected_3_nofreq)] + + for n, d, expected in cases: + result = idx.insert(n, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + + # reset freq to None + result = idx.insert(3, datetime(2000, 1, 2)) + expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', + '2000-01-02'], name='idx', freq=None) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertTrue(result.freq is None) + + # GH 7299 + _skip_if_no_pytz() + import pytz + + idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo', name='idx') + with tm.assertRaises(ValueError): + result = idx.insert(3, pd.Timestamp('2000-01-04')) + with tm.assertRaises(ValueError): + result = idx.insert(3, datetime(2000, 1, 4)) + with tm.assertRaises(ValueError): + result = idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern')) + with tm.assertRaises(ValueError): + result = idx.insert(3, datetime(2000, 1, 4, tzinfo=pytz.timezone('US/Eastern'))) + + # preserve freq + expected = date_range('1/1/2000', periods=4, freq='D', tz='Asia/Tokyo', name='idx') + for d in [pd.Timestamp('2000-01-04', tz='Asia/Tokyo'), + datetime(2000, 1, 4, tzinfo=pytz.timezone('Asia/Tokyo'))]: + + result = idx.insert(3, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, expected.freq) + + expected = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03', + '2000-01-02'], name='idx', + tz='Asia/Tokyo', freq=None) + # reset freq to None + for d in 
[pd.Timestamp('2000-01-02', tz='Asia/Tokyo'), + datetime(2000, 1, 2, tzinfo=pytz.timezone('Asia/Tokyo'))]: + result = idx.insert(3, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertTrue(result.freq is None) - idx = date_range('1/1/2000', periods=3, freq='M') - result = idx.insert(3, datetime(2000, 4, 30)) - self.assertEqual(result.freqstr, 'M') def test_delete(self): idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')
Closes #7378. The previous error was unrelated to `insert` logic. It seems to be caused by DST related to `tslib` or `pytz`, and became obvious by the previous test case.
https://api.github.com/repos/pandas-dev/pandas/pulls/7386
2014-06-07T15:44:49Z
2014-06-11T12:48:06Z
2014-06-11T12:48:06Z
2014-06-15T03:13:46Z
ENH: automatic rpy2 instance conversion
diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py index 55adad3610816..a0ccc72f7ab1c 100644 --- a/pandas/rpy/common.py +++ b/pandas/rpy/common.py @@ -10,38 +10,69 @@ import pandas as pd import pandas.core.common as com -import pandas.util.testing as _test from rpy2.robjects.packages import importr -from rpy2.robjects import r import rpy2.robjects as robj import itertools as IT __all__ = ['convert_robj', 'load_data', 'convert_to_r_dataframe', - 'convert_to_r_matrix'] + 'convert_to_r_matrix', 'r'] + + +def _assign(attr, obj): + if isinstance(obj, (pd.DataFrame, pd.Series)): + obj = convert_to_r_dataframe(obj) + return robj.r.assign(attr, obj) + + +# Unable to subclass robjects.R because +# it has special creation process using rinterface +class _RPandas(object): + + def __getattribute__(self, attr): + if attr == 'assign': + return _assign + return getattr(robj.r, attr) + + def __getitem__(self, item): + result = robj.r[item] + try: + result = convert_robj(result) + except TypeError: + pass + return result + + def __str__(self): + return str(robj.r) + + def __call__(self, string): + return robj.r(string) + + +r = _RPandas() def load_data(name, package=None, convert=True): if package: importr(package) - r.data(name) + robj.r.data(name) - robj = r[name] + r_obj = robj.r[name] if convert: - return convert_robj(robj) + return convert_robj(r_obj) else: - return robj + return r_obj def _rclass(obj): """ Return R class name for input object """ - return r['class'](obj)[0] + return robj.r['class'](obj)[0] def _is_null(obj): @@ -54,12 +85,12 @@ def _convert_list(obj): """ try: values = [convert_robj(x) for x in obj] - keys = r['names'](obj) + keys = robj.r['names'](obj) return dict(zip(keys, values)) except TypeError: # For state.division and state.region - factors = list(r['factor'](obj)) - level = list(r['levels'](obj)) + factors = list(robj.r['factor'](obj)) + level = list(robj.r['levels'](obj)) result = [level[index-1] for index in factors] return result @@ -77,9 
+108,9 @@ def _list(item): # For iris3, HairEyeColor, UCBAdmissions, Titanic dim = list(obj.dim) values = np.array(list(obj)) - names = r['dimnames'](obj) + names = robj.r['dimnames'](obj) try: - columns = list(r['names'](names))[::-1] + columns = list(robj.r['names'](names))[::-1] except TypeError: columns = ['X{:d}'.format(i) for i in range(len(names))][::-1] columns.append('value') @@ -98,18 +129,18 @@ def _convert_vector(obj): # Check if the vector has extra information attached to it that can be used # as an index try: - attributes = set(r['attributes'](obj).names) + attributes = set(robj.r['attributes'](obj).names) except AttributeError: return list(obj) if 'names' in attributes: - return pd.Series(list(obj), index=r['names'](obj)) + return pd.Series(list(obj), index=robj.r['names'](obj)) elif 'tsp' in attributes: - return pd.Series(list(obj), index=r['time'](obj)) + return pd.Series(list(obj), index=robj.r['time'](obj)) elif 'labels' in attributes: - return pd.Series(list(obj), index=r['labels'](obj)) + return pd.Series(list(obj), index=robj.r['labels'](obj)) if _rclass(obj) == 'dist': # For 'eurodist'. WARNING: This results in a DataFrame, not a Series or list. 
- matrix = r['as.matrix'](obj) + matrix = robj.r['as.matrix'](obj) return convert_robj(matrix) else: return list(obj) @@ -167,7 +198,7 @@ def _convert_Matrix(mat): rows = mat.rownames columns = None if _is_null(columns) else list(columns) - index = r['time'](mat) if _is_null(rows) else list(rows) + index = robj.r['time'](mat) if _is_null(rows) else list(rows) return pd.DataFrame(np.array(mat), index=_check_int(index), columns=columns) @@ -310,6 +341,10 @@ def convert_to_r_dataframe(df, strings_as_factors=False): columns = rlc.OrdDict() + if isinstance(df, pd.Series): + name = df.name or 'X0' + df = pd.DataFrame(df, columns=[name]) + # FIXME: This doesn't handle MultiIndex for column in df: @@ -365,5 +400,6 @@ def convert_to_r_matrix(df, strings_as_factors=False): return r_matrix + if __name__ == '__main__': pass diff --git a/pandas/rpy/tests/test_common.py b/pandas/rpy/tests/test_common.py index a2e6d08d07b58..b0f20fed477af 100644 --- a/pandas/rpy/tests/test_common.py +++ b/pandas/rpy/tests/test_common.py @@ -92,6 +92,25 @@ def test_convert_r_dataframe(self): else: assert original == converted + def test_convert_r_dataframe_series(self): + + s_noname = tm.makeFloatSeries() + s_name = tm.makeFloatSeries() + s_name.name = 'Test' + + for series, key in [(s_noname, 'X0'), (s_name, s_name.name)]: + + r_dataframe = com.convert_to_r_dataframe(series) + + assert np.array_equal( + com.convert_robj(r_dataframe.rownames), series.index) + assert np.array_equal( + com.convert_robj(r_dataframe.colnames), np.array([key])) + + result = com.convert_robj(r_dataframe.rx2(key)) + result = np.array(result) + assert np.array_equal(result, series.values) + def test_convert_r_matrix(self): is_na = robj.baseenv.get("is.na") @@ -207,6 +226,28 @@ def test_factor(self): result = com.load_data(name) assert np.equal(result, factors) + def test_assign_revert(self): + df = tm.makeDataFrame() + com.r.assign('df', df) + # test R function call + com.r('head(df)') + result = com.r['df'] + 
tm.assert_frame_equal(df, result) + + df = tm.makeTimeDataFrame() + com.r.assign('df', df) + result = com.r['df'] + result.index = pd.DatetimeIndex(result.index) + tm.assert_frame_equal(df, result) + + s = tm.makeFloatSeries() + s.name = 'Test' + com.r.assign('s', s) + result = com.r['s'] + expected = pd.DataFrame(s, columns=['Test']) + tm.assert_frame_equal(expected, result) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], # '--with-coverage', '--cover-package=pandas.core'],
Derived from #7309. Create a wrapper for `robjects.r` in `pandas.rpy.common` to perform automatic pandas `DataFrame` and `Series` conversion. `Series` will be converted to R data.frame to preserve rownames (index). If looks OK, I'll modify the doc (#7309) based on following API. ``` import pandas as pd import pandas.rpy.common as com iris = com.load_data('iris') com.r.assign('iris', iris) returned = com.r['iris'] type(returned) # <class 'pandas.core.frame.DataFrame'> df = pd.DataFrame(np.random.randn(20, 5), index=pd.date_range(start='2011/01/01', freq='D', periods=20)) com.r.assign('df', df) returned = com.r['df'] type(returned) # <class 'pandas.core.frame.DataFrame'> s = pd.Series(np.random.randn(20), name='test') com.r.assign('s', s) returned = com.r['s'] type(returned) # <class 'pandas.core.frame.DataFrame'> ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7385
2014-06-07T14:08:11Z
2015-03-08T14:39:07Z
null
2023-05-11T01:12:29Z
BUG: correct Period comparisons
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3e06a705487df..89ef5aead027b 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -35,6 +35,10 @@ API changes ``float`` (:issue:`7242`) - `StringMethods`` now work on empty Series (:issue:`7242`) +- ``Period`` objects no longer raise a ``TypeError`` when compared using ``==`` + with another object that *isn't* a ``Period``. See :issue:`7376`. Instead + when comparing a ``Period`` with another object using ``==`` if the other + object isn't a ``Period`` ``False`` is returned. .. _whatsnew_0141.prior_deprecations: @@ -123,3 +127,5 @@ Bug Fixes - Bug where a string column name assignment to a ``DataFrame`` with a ``Float64Index`` raised a ``TypeError`` during a call to ``np.isnan`` (:issue:`7366`). +- Bug where ``NDFrame.replace()`` didn't correctly replace objects with + ``Period`` values (:issue:`7379`). diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index c4475715386b9..7354c57498561 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -8392,6 +8392,52 @@ def test_replace_swapping_bug(self): expect = pd.DataFrame({'a': ['Y', 'N', 'Y']}) tm.assert_frame_equal(res, expect) + def test_replace_period(self): + d = {'fname': + {'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'), + 'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'), + 'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'), + 'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'), + 'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'), + 'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'), + 'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}} + + df = pd.DataFrame(['out_augmented_AUG_2012.json', + 'out_augmented_SEP_2013.json', + 'out_augmented_SUBSIDY_WEEK.json', + 'out_augmented_MAY_2012.json', + 'out_augmented_MAY_2011.json', + 
'out_augmented_AUG_2011.json', + 'out_augmented_JAN_2011.json'], columns=['fname']) + tm.assert_equal(set(df.fname.values), set(d['fname'].keys())) + expected = DataFrame({'fname': [d['fname'][k] + for k in df.fname.values]}) + result = df.replace(d) + tm.assert_frame_equal(result, expected) + + def test_replace_datetime(self): + d = {'fname': + {'out_augmented_AUG_2011.json': pd.Timestamp('2011/08'), + 'out_augmented_JAN_2011.json': pd.Timestamp('2011/01'), + 'out_augmented_MAY_2012.json': pd.Timestamp('2012/05'), + 'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011/04'), + 'out_augmented_AUG_2012.json': pd.Timestamp('2012/08'), + 'out_augmented_MAY_2011.json': pd.Timestamp('2011/05'), + 'out_augmented_SEP_2013.json': pd.Timestamp('2013/09')}} + + df = pd.DataFrame(['out_augmented_AUG_2012.json', + 'out_augmented_SEP_2013.json', + 'out_augmented_SUBSIDY_WEEK.json', + 'out_augmented_MAY_2012.json', + 'out_augmented_MAY_2011.json', + 'out_augmented_AUG_2011.json', + 'out_augmented_JAN_2011.json'], columns=['fname']) + tm.assert_equal(set(df.fname.values), set(d['fname'].keys())) + expected = DataFrame({'fname': [d['fname'][k] + for k in df.fname.values]}) + result = df.replace(d) + tm.assert_frame_equal(result, expected) + def test_combine_multiple_frames_dtypes(self): # GH 2759 @@ -11245,7 +11291,6 @@ def test_rank(self): exp = df.astype(float).rank(1) assert_frame_equal(result, exp) - def test_rank2(self): from datetime import datetime df = DataFrame([[1, 3, 2], [1, 2, 3]]) @@ -11303,7 +11348,6 @@ def test_rank2(self): exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. 
]}) assert_frame_equal(df.rank(), exp) - def test_rank_na_option(self): _skip_if_no_scipy() from scipy.stats import rankdata diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 5516c4634ea58..4dc9ff88b328a 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -138,12 +138,10 @@ def __eq__(self, other): raise ValueError("Cannot compare non-conforming periods") return (self.ordinal == other.ordinal and _gfc(self.freq) == _gfc(other.freq)) - else: - raise TypeError(other) - return False + return NotImplemented def __ne__(self, other): - return not self.__eq__(other) + return not self == other def __hash__(self): return hash((self.ordinal, self.freq)) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 169939c2f288a..81387c3736481 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2455,10 +2455,8 @@ def test_equal(self): def test_equal_Raises_Value(self): self.assertRaises(ValueError, self.january1.__eq__, self.day) - def test_equal_Raises_Type(self): - self.assertRaises(TypeError, self.january1.__eq__, 1) - def test_notEqual(self): + self.assertNotEqual(self.january1, 1) self.assertNotEqual(self.january1, self.february) def test_greater(self):
closes #7376
https://api.github.com/repos/pandas-dev/pandas/pulls/7379
2014-06-06T19:40:15Z
2014-06-06T23:28:58Z
2014-06-06T23:28:58Z
2014-06-25T19:42:16Z
ENH/BUG: Offset.apply dont preserve time
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 0303b41e42e55..795bbca673f77 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -539,6 +539,25 @@ The ``rollforward`` and ``rollback`` methods do exactly what you would expect: It's definitely worth exploring the ``pandas.tseries.offsets`` module and the various docstrings for the classes. +These operations (``apply``, ``rollforward`` and ``rollback``) preserves time (hour, minute, etc) information by default. To reset time, use ``normalize=True`` keyword when create offset instance. If ``normalize=True``, result is normalized after the function is applied. + + + .. ipython:: python + + day = Day() + day.apply(Timestamp('2014-01-01 09:00')) + + day = Day(normalize=True) + day.apply(Timestamp('2014-01-01 09:00')) + + hour = Hour() + hour.apply(Timestamp('2014-01-01 22:00')) + + hour = Hour(normalize=True) + hour.apply(Timestamp('2014-01-01 22:00')) + hour.apply(Timestamp('2014-01-01 23:00')) + + Parametric offsets ~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 2b76da1434ba3..01407856dfe18 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -33,8 +33,18 @@ API changes +- All ``offsets`` suppports ``normalize`` keyword to specify whether ``offsets.apply``, ``rollforward`` and ``rollback`` resets time (hour, minute, etc) or not (default ``False``, preserves time) (:issue:`7156`) + .. 
ipython:: python + + import pandas.tseries.offsets as offsets + + day = offsets.Day() + day.apply(Timestamp('2014-01-01 09:00')) + + day = offsets.Day(normalize=True) + day.apply(Timestamp('2014-01-01 09:00')) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 03ee51470763d..9cbef50f2d82f 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -38,15 +38,31 @@ def as_datetime(obj): obj = f() return obj -def apply_nat(func): +def apply_wraps(func): @functools.wraps(func) def wrapper(self, other): if other is tslib.NaT: return tslib.NaT - else: - return func(self, other) + if type(other) == date: + other = datetime(other.year, other.month, other.day) + + result = func(self, other) + + if self.normalize: + result = tslib.normalize_date(result) + + if isinstance(other, Timestamp) and not isinstance(result, Timestamp): + result = as_timestamp(result) + return result return wrapper + +def _is_normalized(dt): + if (dt.hour != 0 or dt.minute != 0 or dt.second != 0 + or dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0): + return False + return True + #---------------------------------------------------------------------- # DateOffset @@ -106,15 +122,16 @@ def __add__(date): _cacheable = False _normalize_cache = True - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) + self.normalize = normalize self.kwds = kwds if len(kwds) > 0: self._offset = relativedelta(**kwds) else: self._offset = timedelta(1) - @apply_nat + @apply_wraps def apply(self, other): other = as_datetime(other) if len(self.kwds) > 0: @@ -151,7 +168,7 @@ def __repr__(self): if hasattr(self, '_named'): return self._named className = getattr(self, '_outputName', type(self).__name__) - exclude = set(['n', 'inc']) + exclude = set(['n', 'inc', 'normalize']) attrs = [] for attr in sorted(self.__dict__): if ((attr == 'kwds' and len(self.kwds) == 0) @@ -242,25 +259,23 @@ def __rmul__(self, someInt): def __neg__(self): 
return self.__class__(-self.n, **self.kwds) + @apply_wraps def rollback(self, dt): """Roll provided date backward to next offset only if not on offset""" - if type(dt) == date: - dt = datetime(dt.year, dt.month, dt.day) - if not self.onOffset(dt): dt = dt - self.__class__(1, **self.kwds) return dt + @apply_wraps def rollforward(self, dt): """Roll provided date forward to next offset only if not on offset""" - if type(dt) == date: - dt = datetime(dt.year, dt.month, dt.day) - if not self.onOffset(dt): dt = dt + self.__class__(1, **self.kwds) return dt def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False # XXX, see #1395 if type(self) == DateOffset or isinstance(self, Tick): return True @@ -341,11 +356,11 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset): """ _prefix = 'B' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) + self.normalize = normalize self.kwds = kwds self.offset = kwds.get('offset', timedelta(0)) - self.normalize = kwds.get('normalize', False) @property def freqstr(self): @@ -398,7 +413,7 @@ def get_str(td): def isAnchored(self): return (self.n == 1) - @apply_nat + @apply_wraps def apply(self, other): if isinstance(other, datetime): n = self.n @@ -424,9 +439,6 @@ def apply(self, other): if result.weekday() < 5: n -= k - if self.normalize: - result = datetime(result.year, result.month, result.day) - if self.offset: result = result + self.offset @@ -439,8 +451,9 @@ def apply(self, other): raise ApplyTypeError('Only know how to combine business day with ' 'datetime or timedelta.') - @classmethod - def onOffset(cls, dt): + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False return dt.weekday() < 5 @@ -472,11 +485,11 @@ class CustomBusinessDay(BusinessDay): _cacheable = False _prefix = 'C' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) + self.normalize = normalize 
self.kwds = kwds self.offset = kwds.get('offset', timedelta(0)) - self.normalize = kwds.get('normalize', False) self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') if 'calendar' in kwds: @@ -519,7 +532,7 @@ def __setstate__(self, state): self.__dict__ = state self._set_busdaycalendar() - @apply_nat + @apply_wraps def apply(self, other): if self.n <= 0: roll = 'forward' @@ -535,10 +548,7 @@ def apply(self, other): busdaycal=self.busdaycalendar) dt_date = np_incr_dt.astype(datetime) - if not self.normalize: - result = datetime.combine(dt_date,date_in.time()) - else: - result = dt_date + result = datetime.combine(dt_date, date_in.time()) if self.offset: result = result + self.offset @@ -552,11 +562,7 @@ def apply(self, other): np_incr_dt = np.busday_offset(np_day, self.n, roll=roll, busdaycal=self.busdaycalendar) - - if not self.normalize: - result = np_incr_dt + np_time - else: - result = np_incr_dt + result = np_incr_dt + np_time if self.offset: result = result + self.offset @@ -582,6 +588,8 @@ def _to_dt64(dt, dtype='datetime64'): return dt def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False day64 = self._to_dt64(dt,'datetime64[D]') return np.is_busday(day64, busdaycal=self.busdaycalendar) @@ -598,11 +606,8 @@ def name(self): class MonthEnd(MonthOffset): """DateOffset of one month end""" - @apply_nat + @apply_wraps def apply(self, other): - other = datetime(other.year, other.month, other.day, - tzinfo=other.tzinfo) - n = self.n _, days_in_month = tslib.monthrange(other.year, other.month) if other.day != days_in_month: @@ -612,8 +617,9 @@ def apply(self, other): other = as_datetime(other) + relativedelta(months=n, day=31) return as_timestamp(other) - @classmethod - def onOffset(cls, dt): + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False days_in_month = tslib.monthrange(dt.year, dt.month)[1] return dt.day == days_in_month @@ -623,7 +629,7 @@ def onOffset(cls, dt): class 
MonthBegin(MonthOffset): """DateOffset of one month at beginning""" - @apply_nat + @apply_wraps def apply(self, other): n = self.n @@ -633,8 +639,9 @@ def apply(self, other): other = as_datetime(other) + relativedelta(months=n, day=1) return as_timestamp(other) - @classmethod - def onOffset(cls, dt): + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False return dt.day == 1 _prefix = 'MS' @@ -646,9 +653,8 @@ class BusinessMonthEnd(MonthOffset): def isAnchored(self): return (self.n == 1) - @apply_nat + @apply_wraps def apply(self, other): - other = datetime(other.year, other.month, other.day) n = self.n @@ -672,7 +678,7 @@ def apply(self, other): class BusinessMonthBegin(MonthOffset): """DateOffset of one business month at beginning""" - @apply_nat + @apply_wraps def apply(self, other): n = self.n @@ -689,11 +695,13 @@ def apply(self, other): other = as_datetime(other) + relativedelta(months=n) wkday, _ = tslib.monthrange(other.year, other.month) first = _get_firstbday(wkday) - result = datetime(other.year, other.month, first) + result = datetime(other.year, other.month, first, other.hour, other.minute, + other.second, other.microsecond) return as_timestamp(result) - @classmethod - def onOffset(cls, dt): + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False first_weekday, _ = tslib.monthrange(dt.year, dt.month) if first_weekday == 5: return dt.day == 3 @@ -730,16 +738,16 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset): _cacheable = False _prefix = 'CBM' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) + self.normalize = normalize self.kwds = kwds self.offset = kwds.get('offset', timedelta(0)) - self.normalize = kwds.get('normalize', False) self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') - self.cbday = CustomBusinessDay(n=self.n,**kwds) - self.m_offset = MonthEnd() + self.cbday = CustomBusinessDay(n=self.n, 
normalize=normalize, **kwds) + self.m_offset = MonthEnd(normalize=normalize) - @apply_nat + @apply_wraps def apply(self,other): n = self.n dt_in = other @@ -785,16 +793,16 @@ class CustomBusinessMonthBegin(BusinessMixin, MonthOffset): _cacheable = False _prefix = 'CBMS' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = int(n) + self.normalize = normalize self.kwds = kwds self.offset = kwds.get('offset', timedelta(0)) - self.normalize = kwds.get('normalize', False) self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri') - self.cbday = CustomBusinessDay(n=self.n,**kwds) - self.m_offset = MonthBegin() + self.cbday = CustomBusinessDay(n=self.n, normalize=normalize, **kwds) + self.m_offset = MonthBegin(normalize=normalize) - @apply_nat + @apply_wraps def apply(self,other): n = self.n dt_in = other @@ -826,8 +834,9 @@ class Week(DateOffset): Always generate specific day of week. 0 for Monday """ - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = n + self.normalize = normalize self.weekday = kwds.get('weekday', None) if self.weekday is not None: @@ -841,8 +850,9 @@ def __init__(self, n=1, **kwds): def isAnchored(self): return (self.n == 1 and self.weekday is not None) - @apply_nat + @apply_wraps def apply(self, other): + base = other if self.weekday is None: return as_timestamp(as_datetime(other) + self.n * self._inc) @@ -863,9 +873,14 @@ def apply(self, other): other = as_datetime(other) for i in range(-k): other = other - self._inc - return as_timestamp(other) + + other = datetime(other.year, other.month, other.day, + base.hour, base.minute, base.second, base.microsecond) + return other def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False return dt.weekday() == self.weekday _prefix = 'W' @@ -926,8 +941,9 @@ class WeekOfMonth(DateOffset): 6: Sundays """ - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): 
self.n = n + self.normalize = normalize self.weekday = kwds['weekday'] self.week = kwds['week'] @@ -943,8 +959,9 @@ def __init__(self, n=1, **kwds): self.kwds = kwds - @apply_nat + @apply_wraps def apply(self, other): + base = other offsetOfMonth = self.getOffsetOfMonth(other) if offsetOfMonth > other: @@ -960,8 +977,10 @@ def apply(self, other): else: months = self.n + 1 - return self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, - day=1)) + other = self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, day=1)) + other = datetime(other.year, other.month, other.day, + base.hour, base.minute, base.second, base.microsecond) + return other def getOffsetOfMonth(self, dt): w = Week(weekday=self.weekday) @@ -975,7 +994,10 @@ def getOffsetOfMonth(self, dt): return d def onOffset(self, dt): - return dt == self.getOffsetOfMonth(dt) + if self.normalize and not _is_normalized(dt): + return False + d = datetime(dt.year, dt.month, dt.day) + return d == self.getOffsetOfMonth(dt) @property def rule_code(self): @@ -1010,8 +1032,9 @@ class LastWeekOfMonth(DateOffset): 5: Saturdays 6: Sundays """ - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = n + self.normalize = normalize self.weekday = kwds['weekday'] if self.n == 0: @@ -1023,7 +1046,7 @@ def __init__(self, n=1, **kwds): self.kwds = kwds - @apply_nat + @apply_wraps def apply(self, other): offsetOfMonth = self.getOffsetOfMonth(other) @@ -1044,7 +1067,7 @@ def apply(self, other): def getOffsetOfMonth(self, dt): m = MonthEnd() - d = datetime(dt.year, dt.month, 1) + d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute, dt.second, dt.microsecond) eom = m.rollforward(d) @@ -1053,6 +1076,8 @@ def getOffsetOfMonth(self, dt): return w.rollback(eom) def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False return dt == self.getOffsetOfMonth(dt) @property @@ -1080,8 +1105,9 @@ class QuarterOffset(DateOffset): # TODO: Consider 
combining QuarterOffset and YearOffset __init__ at some # point - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = n + self.normalize = normalize self.startingMonth = kwds.get('startingMonth', self._default_startingMonth) @@ -1117,7 +1143,7 @@ class BQuarterEnd(QuarterOffset): _from_name_startingMonth = 12 _prefix = 'BQ' - @apply_nat + @apply_wraps def apply(self, other): n = self.n @@ -1142,6 +1168,8 @@ def apply(self, other): return as_timestamp(other) def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False modMonth = (dt.month - self.startingMonth) % 3 return BMonthEnd().onOffset(dt) and modMonth == 0 @@ -1172,7 +1200,7 @@ class BQuarterBegin(QuarterOffset): _from_name_startingMonth = 1 _prefix = 'BQS' - @apply_nat + @apply_wraps def apply(self, other): n = self.n other = as_datetime(other) @@ -1213,8 +1241,9 @@ class QuarterEnd(QuarterOffset): _default_startingMonth = 3 _prefix = 'Q' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = n + self.normalize = normalize self.startingMonth = kwds.get('startingMonth', 3) self.kwds = kwds @@ -1222,7 +1251,7 @@ def __init__(self, n=1, **kwds): def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) - @apply_nat + @apply_wraps def apply(self, other): n = self.n other = as_datetime(other) @@ -1241,6 +1270,8 @@ def apply(self, other): return as_timestamp(other) def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False modMonth = (dt.month - self.startingMonth) % 3 return MonthEnd().onOffset(dt) and modMonth == 0 @@ -1254,7 +1285,7 @@ class QuarterBegin(QuarterOffset): def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) - @apply_nat + @apply_wraps def apply(self, other): n = self.n other = as_datetime(other) @@ -1278,13 +1309,13 @@ def apply(self, other): class YearOffset(DateOffset): """DateOffset that just needs a month""" - def 
__init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.month = kwds.get('month', self._default_month) if self.month < 1 or self.month > 12: raise ValueError('Month must go from 1 to 12') - DateOffset.__init__(self, n=n, **kwds) + DateOffset.__init__(self, n=n, normalize=normalize, **kwds) @classmethod def _from_name(cls, suffix=None): @@ -1304,7 +1335,7 @@ class BYearEnd(YearOffset): _default_month = 12 _prefix = 'BA' - @apply_nat + @apply_wraps def apply(self, other): n = self.n other = as_datetime(other) @@ -1342,7 +1373,7 @@ class BYearBegin(YearOffset): _default_month = 1 _prefix = 'BAS' - @apply_nat + @apply_wraps def apply(self, other): n = self.n other = as_datetime(other) @@ -1366,7 +1397,8 @@ def apply(self, other): other = other + relativedelta(years=years) wkday, days_in_month = tslib.monthrange(other.year, self.month) first = _get_firstbday(wkday) - return as_timestamp(datetime(other.year, self.month, first)) + return as_timestamp(datetime(other.year, self.month, first, other.hour, + other.minute, other.second, other.microsecond)) class YearEnd(YearOffset): @@ -1374,7 +1406,7 @@ class YearEnd(YearOffset): _default_month = 12 _prefix = 'A' - @apply_nat + @apply_wraps def apply(self, other): def _increment(date): if date.month == self.month: @@ -1422,6 +1454,8 @@ def _rollf(date): return as_timestamp(result) def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False wkday, days_in_month = tslib.monthrange(dt.year, self.month) return self.month == dt.month and dt.day == days_in_month @@ -1431,7 +1465,7 @@ class YearBegin(YearOffset): _default_month = 1 _prefix = 'AS' - @apply_nat + @apply_wraps def apply(self, other): def _increment(date): year = date.year @@ -1470,6 +1504,8 @@ def _rollf(date): return as_timestamp(result) def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False return dt.month == self.month and dt.day == 1 @@ -1515,8 +1551,9 @@ class 
FY5253(DateOffset): _suffix_prefix_last = 'L' _suffix_prefix_nearest = 'N' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = n + self.normalize = normalize self.startingMonth = kwds['startingMonth'] self.weekday = kwds["weekday"] @@ -1543,6 +1580,9 @@ def isAnchored(self): and self.weekday is not None def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False + dt = datetime(dt.year, dt.month, dt.day) year_end = self.get_year_end(dt) if self.variation == "nearest": @@ -1552,7 +1592,7 @@ def onOffset(self, dt): else: return year_end == dt - @apply_nat + @apply_wraps def apply(self, other): n = self.n prev_year = self.get_year_end( @@ -1581,7 +1621,11 @@ def apply(self, other): else: assert False - return self.get_year_end(datetime(year + n, self.startingMonth, 1)) + result = self.get_year_end(datetime(year + n, self.startingMonth, 1)) + + result = datetime(result.year, result.month, result.day, + other.hour, other.minute, other.second, other.microsecond) + return result else: n = -n if other == prev_year: @@ -1602,7 +1646,11 @@ def apply(self, other): else: assert False - return self.get_year_end(datetime(year - n, self.startingMonth, 1)) + result = self.get_year_end(datetime(year - n, self.startingMonth, 1)) + + result = datetime(result.year, result.month, result.day, + other.hour, other.minute, other.second, other.microsecond) + return result def get_year_end(self, dt): if self.variation == "nearest": @@ -1721,8 +1769,9 @@ class FY5253Quarter(DateOffset): _prefix = 'REQ' - def __init__(self, n=1, **kwds): + def __init__(self, n=1, normalize=False, **kwds): self.n = n + self.normalize = normalize self.qtr_with_extra_week = kwds["qtr_with_extra_week"] @@ -1739,9 +1788,11 @@ def __init__(self, n=1, **kwds): def isAnchored(self): return self.n == 1 and self._offset.isAnchored() - @apply_nat + @apply_wraps def apply(self, other): + base = other other = as_datetime(other) + n = self.n if n > 0: @@ 
-1776,7 +1827,8 @@ def apply(self, other): other = end n -= 1 break - + other = datetime(other.year, other.month, other.day, + base.hour, base.minute, base.second, base.microsecond) return other def get_weeks(self, dt): @@ -1802,6 +1854,8 @@ def year_has_extra_week(self, dt): return week_in_year == 53 def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False if self._offset.onOffset(dt): return True @@ -1837,7 +1891,7 @@ class Easter(DateOffset): def __init__(self, n=1, **kwds): super(Easter, self).__init__(n, **kwds) - @apply_nat + @apply_wraps def apply(self, other): currentEaster = easter(other.year) @@ -1854,17 +1908,14 @@ def apply(self, other): new = easter(other.year + self.n + 1) else: new = easter(other.year + self.n) - - # FIXME: There has to be a better way to do this, but I don't know what it is - if isinstance(other, Timestamp): - return as_timestamp(new) - elif isinstance(other, datetime): - return datetime(new.year, new.month, new.day) - else: - return new - - @classmethod - def onOffset(cls, dt): + + new = datetime(new.year, new.month, new.day, other.hour, + other.minute, other.second, other.microsecond) + return new + + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False return date(dt.year, dt.month, dt.day) == easter(dt.year) #---------------------------------------------------------------------- # Ticks @@ -1935,11 +1986,8 @@ def delta(self): def nanos(self): return _delta_to_nanoseconds(self.delta) - @apply_nat + @apply_wraps def apply(self, other): - if type(other) == date: - other = datetime(other.year, other.month, other.day) - if isinstance(other, (datetime, timedelta)): return other + self.delta elif isinstance(other, type(self)): diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index d3e306f3f956b..4fc7d281bc473 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -18,8 +18,8 @@ get_offset, 
get_offset_name, get_standard_freq) from pandas.tseries.frequencies import _offset_map -from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache -from pandas.tseries.tools import parse_time_string +from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache, date_range +from pandas.tseries.tools import parse_time_string, _maybe_get_tz import pandas.tseries.offsets as offsets from pandas.tslib import monthrange, OutOfBoundsDatetime, NaT @@ -100,21 +100,34 @@ def test_to_m8(): class Base(tm.TestCase): _offset = None - offset_types = [getattr(offsets, o) for o in offsets.__all__] - skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.CustomBusinessMonthBegin, offsets.CustomBusinessMonthEnd, offsets.Nano] + _offset_types = [getattr(offsets, o) for o in offsets.__all__] + skip_np_u1p7 = [offsets.CustomBusinessDay, offsets.CDay, offsets.CustomBusinessMonthBegin, + offsets.CustomBusinessMonthEnd, offsets.Nano] - def _get_offset(self, klass, value=1): + @property + def offset_types(self): + if _np_version_under1p7: + return [o for o in self._offset_types if o not in self.skip_np_u1p7] + else: + return self._offset_types + + def _get_offset(self, klass, value=1, normalize=False): # create instance from offset class if klass is FY5253 or klass is FY5253Quarter: klass = klass(n=value, startingMonth=1, weekday=1, - qtr_with_extra_week=1, variation='last') - elif klass is WeekOfMonth or klass is LastWeekOfMonth: - klass = LastWeekOfMonth(n=value, weekday=5) + qtr_with_extra_week=1, variation='last', + normalize=normalize) + elif klass is LastWeekOfMonth: + klass = klass(n=value, weekday=5, normalize=normalize) + elif klass is WeekOfMonth: + klass = klass(n=value, week=1, weekday=5, normalize=normalize) + elif klass is Week: + klass = klass(n=value, weekday=5, normalize=normalize) else: try: - klass = klass(value) + klass = klass(value, normalize=normalize) except: - klass = klass() + klass = klass(normalize=normalize) return klass def 
test_apply_out_of_range(self): @@ -136,13 +149,45 @@ def test_apply_out_of_range(self): raise nose.SkipTest("cannot create out_of_range offset: {0} {1}".format(str(self).split('.')[-1],e)) -class TestOps(Base): +class TestCommon(Base): + + def setUp(self): + + # exected value created by Base._get_offset + # are applied to 2011/01/01 09:00 (Saturday) + # used for .apply and .rollforward + self.expecteds = {'Day': Timestamp('2011-01-02 09:00:00'), + 'BusinessDay': Timestamp('2011-01-03 09:00:00'), + 'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'), + 'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'), + 'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'), + 'MonthBegin': Timestamp('2011-02-01 09:00:00'), + 'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'), + 'MonthEnd': Timestamp('2011-01-31 09:00:00'), + 'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'), + 'YearBegin': Timestamp('2012-01-01 09:00:00'), + 'BYearBegin': Timestamp('2011-01-03 09:00:00'), + 'YearEnd': Timestamp('2011-12-31 09:00:00'), + 'BYearEnd': Timestamp('2011-12-30 09:00:00'), + 'QuarterBegin': Timestamp('2011-03-01 09:00:00'), + 'BQuarterBegin': Timestamp('2011-03-01 09:00:00'), + 'QuarterEnd': Timestamp('2011-03-31 09:00:00'), + 'BQuarterEnd': Timestamp('2011-03-31 09:00:00'), + 'WeekOfMonth': Timestamp('2011-01-08 09:00:00'), + 'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'), + 'FY5253Quarter': Timestamp('2011-01-25 09:00:00'), + 'FY5253': Timestamp('2011-01-25 09:00:00'), + 'Week': Timestamp('2011-01-08 09:00:00'), + 'Easter': Timestamp('2011-04-24 09:00:00'), + 'Hour': Timestamp('2011-01-01 10:00:00'), + 'Minute': Timestamp('2011-01-01 09:01:00'), + 'Second': Timestamp('2011-01-01 09:00:01'), + 'Milli': Timestamp('2011-01-01 09:00:00.001000'), + 'Micro': Timestamp('2011-01-01 09:00:00.000001'), + 'Nano': Timestamp(np.datetime64('2011-01-01T09:00:00.000000001Z'))} def test_return_type(self): for offset in self.offset_types: - if _np_version_under1p7 and offset 
in self.skip_np_u1p7: - continue - offset = self._get_offset(offset) # make sure that we are returning a Timestamp @@ -156,6 +201,148 @@ def test_return_type(self): self.assertTrue(NaT - offset is NaT) self.assertTrue((-offset).apply(NaT) is NaT) + def _check_offsetfunc_works(self, offset, funcname, dt, expected, + normalize=False): + offset_s = self._get_offset(offset, normalize=normalize) + func = getattr(offset_s, funcname) + + result = func(dt) + self.assert_(isinstance(result, datetime)) + self.assertEqual(result, expected) + + result = func(Timestamp(dt)) + self.assert_(isinstance(result, Timestamp)) + self.assertEqual(result, expected) + + def _check_nanofunc_works(self, offset, funcname, dt, expected): + offset = self._get_offset(offset) + func = getattr(offset, funcname) + + t1 = Timestamp(dt) + self.assertEqual(func(t1), expected) + + def test_apply(self): + dt = datetime(2011, 1, 1, 9, 0) + + for offset in self.offset_types: + expected = self.expecteds[offset.__name__] + + if offset == Nano: + self._check_nanofunc_works(offset, 'apply', dt, expected) + else: + self._check_offsetfunc_works(offset, 'apply', dt, expected) + + expected = Timestamp(expected.date()) + self._check_offsetfunc_works(offset, 'apply', dt, expected, + normalize=True) + + def test_rollforward(self): + expecteds = self.expecteds.copy() + + # result will not be changed if the target is on the offset + no_changes = ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute', + 'Second', 'Milli', 'Micro', 'Nano'] + for n in no_changes: + expecteds[n] = Timestamp('2011/01/01 09:00') + + # but be changed when normalize=True + norm_expected = expecteds.copy() + for k in norm_expected: + norm_expected[k] = Timestamp(norm_expected[k].date()) + + normalized = {'Day': Timestamp('2011-01-02 00:00:00'), + 'MonthBegin': Timestamp('2011-02-01 00:00:00'), + 'YearBegin': Timestamp('2012-01-01 00:00:00'), + 'Week': Timestamp('2011-01-08 00:00:00'), + 'Hour': Timestamp('2011-01-01 00:00:00'), + 
'Minute': Timestamp('2011-01-01 00:00:00'), + 'Second': Timestamp('2011-01-01 00:00:00'), + 'Milli': Timestamp('2011-01-01 00:00:00'), + 'Micro': Timestamp('2011-01-01 00:00:00')} + norm_expected.update(normalized) + + dt = datetime(2011, 1, 1, 9, 0) + for offset in self.offset_types: + expected = expecteds[offset.__name__] + + if offset == Nano: + self._check_nanofunc_works(offset, 'rollforward', dt, expected) + else: + self._check_offsetfunc_works(offset, 'rollforward', dt, expected) + expected = norm_expected[offset.__name__] + self._check_offsetfunc_works(offset, 'rollforward', dt, expected, + normalize=True) + + def test_rollback(self): + expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'), + 'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'), + 'CustomBusinessMonthEnd': Timestamp('2010-12-31 09:00:00'), + 'CustomBusinessMonthBegin': Timestamp('2010-12-01 09:00:00'), + 'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'), + 'MonthEnd': Timestamp('2010-12-31 09:00:00'), + 'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'), + 'BYearBegin': Timestamp('2010-01-01 09:00:00'), + 'YearEnd': Timestamp('2010-12-31 09:00:00'), + 'BYearEnd': Timestamp('2010-12-31 09:00:00'), + 'QuarterBegin': Timestamp('2010-12-01 09:00:00'), + 'BQuarterBegin': Timestamp('2010-12-01 09:00:00'), + 'QuarterEnd': Timestamp('2010-12-31 09:00:00'), + 'BQuarterEnd': Timestamp('2010-12-31 09:00:00'), + 'WeekOfMonth': Timestamp('2010-12-11 09:00:00'), + 'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'), + 'FY5253Quarter': Timestamp('2010-10-26 09:00:00'), + 'FY5253': Timestamp('2010-01-26 09:00:00'), + 'Easter': Timestamp('2010-04-04 09:00:00')} + + # result will not be changed if the target is on the offset + for n in ['Day', 'MonthBegin', 'YearBegin', 'Week', 'Hour', 'Minute', + 'Second', 'Milli', 'Micro', 'Nano']: + expecteds[n] = Timestamp('2011/01/01 09:00') + + # but be changed when normalize=True + norm_expected = expecteds.copy() + for k in norm_expected: + 
norm_expected[k] = Timestamp(norm_expected[k].date()) + + normalized = {'Day': Timestamp('2010-12-31 00:00:00'), + 'MonthBegin': Timestamp('2010-12-01 00:00:00'), + 'YearBegin': Timestamp('2010-01-01 00:00:00'), + 'Week': Timestamp('2010-12-25 00:00:00'), + 'Hour': Timestamp('2011-01-01 00:00:00'), + 'Minute': Timestamp('2011-01-01 00:00:00'), + 'Second': Timestamp('2011-01-01 00:00:00'), + 'Milli': Timestamp('2011-01-01 00:00:00'), + 'Micro': Timestamp('2011-01-01 00:00:00')} + norm_expected.update(normalized) + + dt = datetime(2011, 1, 1, 9, 0) + for offset in self.offset_types: + expected = expecteds[offset.__name__] + + if offset == Nano: + self._check_nanofunc_works(offset, 'rollback', dt, expected) + else: + self._check_offsetfunc_works(offset, 'rollback', dt, expected) + + expected = norm_expected[offset.__name__] + self._check_offsetfunc_works(offset, 'rollback', + dt, expected, normalize=True) + + def test_onOffset(self): + + for offset in self.offset_types: + + dt = self.expecteds[offset.__name__] + offset_s = self._get_offset(offset) + self.assert_(offset_s.onOffset(dt)) + + # when normalize=True, onOffset checks time is 00:00:00 + offset_n = self._get_offset(offset, normalize=True) + self.assert_(not offset_n.onOffset(dt)) + + date = datetime(dt.year, dt.month, dt.day) + self.assert_(offset_n.onOffset(date)) + class TestDateOffset(Base): _multiprocess_can_split_ = True @@ -1208,7 +1395,7 @@ def test_offset(self): def test_normalize(self): dt = datetime(2007, 1, 1, 3) - result = dt + BMonthEnd() + result = dt + BMonthEnd(normalize=True) expected = dt.replace(hour=0) + BMonthEnd() self.assertEqual(result, expected) @@ -1323,7 +1510,7 @@ def test_offset(self): def test_normalize(self): dt = datetime(2007, 1, 1, 3) - result = dt + MonthEnd() + result = dt + MonthEnd(normalize=True) expected = dt.replace(hour=0) + MonthEnd() self.assertEqual(result, expected)
Closes #7156.
https://api.github.com/repos/pandas-dev/pandas/pulls/7375
2014-06-06T16:08:58Z
2014-06-11T14:08:31Z
2014-06-11T14:08:31Z
2014-07-09T15:34:02Z
BUG: resample raises ValueError when NaT is included
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index cfdef3adb1f34..1f445173d569c 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -225,6 +225,7 @@ Bug Fixes +- BUG in ``resample`` raises ``ValueError`` when target contains ``NaT`` (:issue:`7227`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index c50df6f9bb08f..a90f00fd11e36 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -28,6 +28,7 @@ from pandas import _np_version_under1p7 import pandas.lib as lib from pandas.lib import Timestamp +import pandas.tslib as tslib import pandas.algos as _algos import pandas.hashtable as _hash @@ -1581,7 +1582,11 @@ def groups(self): # this is mainly for compat # GH 3881 - return dict(zip(self.binlabels,self.bins)) + result = {} + for key, value in zip(self.binlabels, self.bins): + if key is not tslib.NaT: + result[key] = value + return result @property def nkeys(self): @@ -1605,7 +1610,8 @@ def get_iterator(self, data, axis=0): start = 0 for edge, label in zip(self.bins, self.binlabels): - yield label, slicer(start,edge) + if label is not tslib.NaT: + yield label, slicer(start,edge) start = edge if start < length: @@ -1636,7 +1642,7 @@ def indices(self): i = 0 for label, bin in zip(self.binlabels, self.bins): - if i < bin: + if label is not tslib.NaT and i < bin: indices[label] = list(range(i, bin)) i = bin return indices @@ -1647,7 +1653,8 @@ def ngroups(self): @cache_readonly def result_index(self): - return self.binlabels + mask = self.binlabels.asi8 == tslib.iNaT + return self.binlabels[~mask] @property def levels(self): diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 3324040391340..89e681e6f1c90 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -968,6 +968,10 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, int64_t l_bin, r_bin bint right_closed = closed == 'right' + mask = values == iNaT + nat_count = values[mask].size + values = values[~mask] + lenidx = len(values) lenbin = 
len(binner) @@ -981,7 +985,7 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, if values[lenidx-1] > binner[lenbin-1]: raise ValueError("Values falls after last bin") - bins = np.empty(lenbin - 1, dtype=np.int64) + bins = np.empty(lenbin - 1, dtype=np.int64) j = 0 # index into values bc = 0 # bin count @@ -999,6 +1003,11 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, bins[bc] = j bc += 1 + if nat_count > 0: + # shift bins by the number of NaT + bins = bins + nat_count + bins = np.insert(bins, 0, nat_count) + return bins diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 812dd5aba71e0..059a6bfd06719 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -13,6 +13,7 @@ from pandas.lib import Timestamp import pandas.lib as lib +import pandas.tslib as tslib _DEFAULT_METHOD = 'mean' @@ -186,6 +187,10 @@ def _get_time_bins(self, ax): elif not trimmed: labels = labels[:-1] + if (ax_values == tslib.iNaT).any(): + binner = binner.insert(0, tslib.NaT) + labels = labels.insert(0, tslib.NaT) + # if we end up with more labels than bins # adjust the labels # GH4076 @@ -352,14 +357,14 @@ def _get_range_edges(axis, offset, closed='left', base=0): if isinstance(offset, compat.string_types): offset = to_offset(offset) + first, last = axis.min(), axis.max() if isinstance(offset, Tick): day_nanos = _delta_to_nanoseconds(timedelta(1)) # #1165 if (day_nanos % offset.nanos) == 0: - return _adjust_dates_anchored(axis[0], axis[-1], offset, + return _adjust_dates_anchored(first, last, offset, closed=closed, base=base) - first, last = axis.min(), axis.max() if not isinstance(offset, Tick): # and first.time() != last.time(): # hack! 
first = tools.normalize_date(first) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index cdf62af1fd90b..db496a708adbe 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -744,28 +744,32 @@ def test_resample_consistency(self): def test_resample_timegrouper(self): # GH 7227 - dates = [datetime(2014, 10, 1), datetime(2014, 9, 3), + dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3), datetime(2014, 11, 5), datetime(2014, 9, 5), datetime(2014, 10, 8), datetime(2014, 7, 15)] - df = DataFrame(dict(A=dates, B=np.arange(len(dates)))) - result = df.set_index('A').resample('M', how='count') - exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', '2014-09-30', - '2014-10-31', '2014-11-30'], freq='M', name='A') - expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx) - assert_frame_equal(result, expected) + dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:] + dates3 = [pd.NaT] + dates1 + [pd.NaT] - result = df.groupby(pd.Grouper(freq='M', key='A')).count() - assert_frame_equal(result, expected) + for dates in [dates1, dates2, dates3]: + df = DataFrame(dict(A=dates, B=np.arange(len(dates)))) + result = df.set_index('A').resample('M', how='count') + exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', '2014-09-30', + '2014-10-31', '2014-11-30'], freq='M', name='A') + expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx) + assert_frame_equal(result, expected) - df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates)))) - result = df.set_index('A').resample('M', how='count') - expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]}, - index=exp_idx, columns=['B', 'C']) - assert_frame_equal(result, expected) + result = df.groupby(pd.Grouper(freq='M', key='A')).count() + assert_frame_equal(result, expected) - result = df.groupby(pd.Grouper(freq='M', key='A')).count() - assert_frame_equal(result, expected) + df = 
DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates)))) + result = df.set_index('A').resample('M', how='count') + expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]}, + index=exp_idx, columns=['B', 'C']) + assert_frame_equal(result, expected) + + result = df.groupby(pd.Grouper(freq='M', key='A')).count() + assert_frame_equal(result, expected) def _simple_ts(start, end, freq='D'): @@ -1302,6 +1306,84 @@ def test_fails_on_no_datetime_index(self): "but got an instance of %r" % name): df.groupby(TimeGrouper('D')) + def test_aggregate_normal(self): + # check TimeGrouper's aggregation is identical as normal groupby + + n = 20 + data = np.random.randn(n, 4) + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, 3, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3), + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + for func in ['min', 'max', 'prod', 'var', 'std', 'mean']: + expected = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + assert_frame_equal(expected, dt_result) + + for func in ['count', 'sum']: + expected = getattr(normal_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_frame_equal(expected, dt_result) + + """ + for func in ['first', 'last']: + expected = getattr(normal_grouped, func)() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_frame_equal(expected, dt_result) + + for func in ['nth']: + expected = getattr(normal_grouped, func)(3) + expected.index = date_range(start='2013-01-01', freq='D', 
periods=5, name='key') + dt_result = getattr(dt_grouped, func)(3) + assert_frame_equal(expected, dt_result) + """ + # if TimeGrouper is used included, 'size' 'first','last' and 'nth' doesn't work yet + + def test_aggregate_with_nat(self): + # check TimeGrouper's aggregation is identical as normal groupby + + n = 20 + data = np.random.randn(n, 4) + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + for func in ['min', 'max', 'prod']: + normal_result = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() + pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], + index=[3], columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + assert_frame_equal(expected, dt_result) + + for func in ['count', 'sum']: + normal_result = getattr(normal_grouped, func)() + pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') + dt_result = getattr(dt_grouped, func)() + assert_frame_equal(expected, dt_result) + + # if NaT is included, 'var', 'std', 'mean', 'size', 'first','last' and 'nth' doesn't work yet + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
Closes #7227. `resample(how=count)` should work now. There are some aggregations which doesn't work with `TimeGrouper` yet, I'll open separate issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/7373
2014-06-06T15:12:56Z
2014-06-14T14:00:51Z
2014-06-14T14:00:51Z
2014-06-14T15:34:24Z
BUG: infer_freq results in None for hourly freq with timezone
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 70eab4dde8c1f..09ff6578160f8 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -668,8 +668,12 @@ class _FrequencyInferer(object): def __init__(self, index, warn=True): self.index = index self.values = np.asarray(index).view('i8') + if index.tz is not None: - self.values = tslib.date_normalize(self.values, index.tz) + f = lambda x: tslib.tz_convert_single(x, 'UTC', index.tz) + self.values = np.vectorize(f)(self.values) + # This cant work, because of DST + # self.values = tslib.tz_convert(self.values, 'UTC', index.tz) self.warn = warn diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index e2e30fcb69440..9089ca85ac3bb 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -14,8 +14,7 @@ import pandas.tseries.frequencies as fmod import pandas.tseries.offsets as offsets from pandas.tseries.period import PeriodIndex - -import pandas.lib as lib +import pandas.compat as compat from pandas import _np_version_under1p7 import pandas.util.testing as tm @@ -258,19 +257,20 @@ def test_infer_freq(self): def test_infer_freq_tz(self): + freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'], + 'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'], + 'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'], + 'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'], + 'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'], + 'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00'] + } + # GH 7310 - for tz in [None, 'Asia/Tokyo', 'US/Pacific', 'Europe/Paris']: - dates = ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'] - idx = DatetimeIndex(dates) - self.assertEqual(idx.inferred_freq, 'M') - - dates = ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'] - idx = DatetimeIndex(dates) - 
self.assertEqual(idx.inferred_freq, 'D') - - dates = ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00'] - idx = DatetimeIndex(dates) - self.assertEqual(idx.inferred_freq, 'H') + for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris', + 'US/Pacific', 'US/Eastern']: + for expected, dates in compat.iteritems(freqs): + idx = DatetimeIndex(dates, tz=tz) + self.assertEqual(idx.inferred_freq, expected) def test_not_monotonic(self): rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
Found fix #7318 was incorrect. It works for lower frequencies than dates, but not for higher freq than hours. I couldn't detect as test cases are also incorrect... This should work for all the freqs with timezones. NOTE: I couldn't use normal `tslib.tz_convert` which returns incorrect results maybe caused by DST. Applying `tslib.tz_convert_single` can work though.
https://api.github.com/repos/pandas-dev/pandas/pulls/7371
2014-06-06T11:53:48Z
2014-06-06T13:00:33Z
2014-06-06T13:00:33Z
2014-07-05T04:49:40Z
ENH: change BlockManager pickle format to work with dup items
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 1aaf77625cf7f..96b611bc9afec 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -135,6 +135,9 @@ Enhancements - All offsets ``apply``, ``rollforward`` and ``rollback`` can now handle ``np.datetime64``, previously results in ``ApplyTypeError`` (:issue:`7452`) - ``Period`` and ``PeriodIndex`` can contain ``NaT`` in its values (:issue:`7485`) +- Support pickling ``Series``, ``DataFrame`` and ``Panel`` objects with + non-unique labels along *item* axis (``index``, ``columns`` and ``items`` + respectively) (:issue:`7370`). .. _whatsnew_0141.performance: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index accaf4ea5cd29..4f7f36dd4a14d 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1603,16 +1603,19 @@ class SparseBlock(Block): def __init__(self, values, placement, ndim=None, fastpath=False,): + # Placement must be converted to BlockPlacement via property setter + # before ndim logic, because placement may be a slice which doesn't + # have a length. 
+ self.mgr_locs = placement + # kludgetastic if ndim is None: - if len(placement) != 1: + if len(self.mgr_locs) != 1: ndim = 1 else: ndim = 2 self.ndim = ndim - self.mgr_locs = placement - if not isinstance(values, SparseArray): raise TypeError("values must be SparseArray") @@ -2050,26 +2053,44 @@ def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = [ax for ax in self.axes] - return axes_array, block_values, block_items - def __setstate__(self, state): - # discard anything after 3rd, support beta pickling format for a little - # while longer - ax_arrays, bvalues, bitems = state[:3] + extra_state = { + '0.14.1': { + 'axes': axes_array, + 'blocks': [dict(values=b.values, + mgr_locs=b.mgr_locs.indexer) + for b in self.blocks] + } + } - self.axes = [_ensure_index(ax) for ax in ax_arrays] - - blocks = [] - for values, items in zip(bvalues, bitems): + # First three elements of the state are to maintain forward + # compatibility with 0.13.1. 
+ return axes_array, block_values, block_items, extra_state + def __setstate__(self, state): + def unpickle_block(values, mgr_locs): # numpy < 1.7 pickle compat if values.dtype == 'M8[us]': values = values.astype('M8[ns]') - - blk = make_block(values, - placement=self.axes[0].get_indexer(items)) - blocks.append(blk) - self.blocks = tuple(blocks) + return make_block(values, placement=mgr_locs) + + if (isinstance(state, tuple) and len(state) >= 4 + and '0.14.1' in state[3]): + state = state[3]['0.14.1'] + self.axes = [_ensure_index(ax) for ax in state['axes']] + self.blocks = tuple( + unpickle_block(b['values'], b['mgr_locs']) + for b in state['blocks']) + else: + # discard anything after 3rd, support beta pickling format for a + # little while longer + ax_arrays, bvalues, bitems = state[:3] + + self.axes = [_ensure_index(ax) for ax in ax_arrays] + self.blocks = tuple( + unpickle_block(values, + self.axes[0].get_indexer(items)) + for values, items in zip(bvalues, bitems)) self._post_setstate() diff --git a/pandas/io/tests/generate_legacy_pickles.py b/pandas/io/tests/generate_legacy_pickles.py index 08f63b0179db2..48d0fd57d831b 100644 --- a/pandas/io/tests/generate_legacy_pickles.py +++ b/pandas/io/tests/generate_legacy_pickles.py @@ -80,15 +80,21 @@ def create_data(): ts = TimeSeries(np.arange(10).astype(np.int64),index=date_range('20130101',periods=10)), mi = Series(np.arange(5).astype(np.float64),index=MultiIndex.from_tuples(tuple(zip(*[[1,1,2,2,2], [3,4,3,4,5]])), - names=['one','two']))) + names=['one','two'])), + dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A'])) + frame = dict(float = DataFrame(dict(A = series['float'], B = series['float'] + 1)), int = DataFrame(dict(A = series['int'] , B = series['int'] + 1)), mixed = DataFrame(dict([ (k,data[k]) for k in ['A','B','C','D']])), mi = DataFrame(dict(A = np.arange(5).astype(np.float64), B = np.arange(5).astype(np.int64)), 
index=MultiIndex.from_tuples(tuple(zip(*[['bar','bar','baz','baz','baz'], ['one','two','one','two','three']])), - names=['first','second']))) - panel = dict(float = Panel(dict(ItemA = frame['float'], ItemB = frame['float']+1))) + names=['first','second'])), + dup = DataFrame(np.arange(15).reshape(5, 3).astype(np.float64), + columns=['A', 'B', 'A'])) + panel = dict(float = Panel(dict(ItemA = frame['float'], ItemB = frame['float']+1)), + dup = Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64), + items=['A', 'B', 'A'])) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index e8308c09cef90..8a9010084fd99 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -352,6 +352,16 @@ def test_pickle(self): self.assertFalse(mgr2._is_consolidated) self.assertFalse(mgr2._known_consolidated) + def test_non_unique_pickle(self): + import pickle + mgr = create_mgr('a,a,a:f8') + mgr2 = pickle.loads(pickle.dumps(mgr)) + assert_frame_equal(DataFrame(mgr), DataFrame(mgr2)) + + mgr = create_mgr('a: f8; a: i8') + mgr2 = pickle.loads(pickle.dumps(mgr)) + assert_frame_equal(DataFrame(mgr), DataFrame(mgr2)) + def test_get_scalar(self): for item in self.mgr.items: for i, index in enumerate(self.mgr.axes[1]):
Old `BlockManager` pickle format stored items which were ambiguous and not enough for unpickling if non-unique. After recent BlockManager overhaul, #6745, it's now possible to it's no longer necessary to share items/ref-items between Blocks and their respective managers<del>, so Blocks can now be safely pickled/unpickled on their own</del>, but Blocks still cannot be safely pickled/unpickled on their own, because they a) are part of internal API and are subject to change and b) were never intended to be serialized like that and their setstate is written in non-forward-compatible manner. So, I've took the freedom to make the format more "extendable" by storing the state as the fourth element in BlockManager state tuple: ``` (axes_array, block_values, block_items, { "0.14.1": { "axes": axes_array, "blocks": [{"values": blk0.values, "mgr_locs": blk0.mgr_locs.indexer}, ...] } }) ``` I was aiming for the following design goals: - use the fact that pickle doesn't store same objects multiple times, i.e. adding an extra element to the tuple should add minimum overhead - make state as forward compatible as possible: new versions simply add new elements to that dictionary along with the old ones - make state as backward compatible as possible: no extra rules "if this then parse like that else ...", simply look up a supported version in the dictionary and use it - don't store classes that are subject to change, I almost got bitten with this one when tried to serialize blocks as a whole, 0.13.1 version, that had different (and incompatible) **getstate** implementation, croaked. This PR should close #7329. - [x] io pickle tests: verify 0.14.0 pickles are handled properly - [X] ensure forward compatibility with 13.1 - [x] generate 14.1 pickles (for which platforms?) - [x] ensure forward compatibility with 14.0 - [X] <del>ensure forward compatibility with 12.0 ?</del> not necessary - [x] make sure sparse containers with non-unique items work in those versions too
https://api.github.com/repos/pandas-dev/pandas/pulls/7370
2014-06-06T08:13:30Z
2014-07-01T10:11:23Z
2014-07-01T10:11:23Z
2014-07-01T10:15:36Z
add fix for opening zero observation dta files
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3e06a705487df..22898c1b02c67 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -123,3 +123,4 @@ Bug Fixes - Bug where a string column name assignment to a ``DataFrame`` with a ``Float64Index`` raised a ``TypeError`` during a call to ``np.isnan`` (:issue:`7366`). +- Bug in ``StataReader.data`` where reading a 0-observation dta failed (:issue:`7369`) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index b67a1be8d43d6..7bb466794c44d 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -852,7 +852,10 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None): if convert_categoricals: self._read_value_labels() - data = DataFrame(data, columns=self.varlist, index=index) + if len(data)==0: + data = DataFrame(columns=self.varlist, index=index) + else: + data = DataFrame(data, columns=self.varlist, index=index) cols_ = np.where(self.dtyplist)[0] for i in cols_: diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index b4be08c6b1106..8221eccc3ebe3 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -72,6 +72,14 @@ def read_dta(self, file): def read_csv(self, file): return read_csv(file, parse_dates=True) + def test_read_empty_dta(self): + empty_ds = DataFrame(columns=['unit']) + # GH 7369, make sure can read a 0-obs dta file + with tm.ensure_clean() as path: + empty_ds.to_stata(path,write_index=False) + empty_ds2 = read_stata(path) + tm.assert_frame_equal(empty_ds, empty_ds2) + def test_read_dta1(self): reader_114 = StataReader(self.dta1_114) parsed_114 = reader_114.data()
Opening a Stata dta file with no observations (but having variables) resulted in an error. Example file: https://dl.dropboxusercontent.com/u/6705315/no_obs_v115.dta. ``` python >>> import pandas as pd >>> pd.read_stata("no_obs_v115.dta") Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 49, in read_stata return reader.data(convert_dates, convert_categoricals, index) File "C:\Python27\lib\site-packages\pandas\io\stata.py", line 855, in data data = DataFrame(data, columns=self.varlist, index=index) File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 255, in __init__ copy=copy) File "C:\Python27\lib\site-packages\pandas\core\frame.py", line 367, in _init_ndarray return create_block_manager_from_blocks([values.T], [columns, index]) File "C:\Python27\lib\site-packages\pandas\core\internals.py", line 3185, in create_block_manager_from_blocks construction_error(tot_items, blocks[0].shape[1:], axes, e) File "C:\Python27\lib\site-packages\pandas\core\internals.py", line 3166, in construction_error passed,implied)) ValueError: Shape of passed values is (0, 0), indices imply (1, 0) >>> pd.show_versions() INSTALLED VERSIONS ------------------ commit: None python: 2.7.6.final.0 python-bits: 32 OS: Windows OS-release: 7 machine: AMD64 processor: AMD64 Family 16 Model 6 Stepping 3, AuthenticAMD byteorder: little LC_ALL: None LANG: None pandas: 0.14.0 nose: 1.3.3 Cython: 0.20.1 numpy: 1.8.1 scipy: None statsmodels: None IPython: None sphinx: None patsy: None scikits.timeseries: None dateutil: 2.2 pytz: 2014.3 bottleneck: 0.8.0 tables: None numexpr: None matplotlib: None openpyxl: 1.8.6 xlrd: None xlwt: None xlsxwriter: None lxml: None bs4: None html5lib: None bq: None apiclient: None rpy2: None sqlalchemy: None pymysql: None psycopg2: None ``` The PR fixes this issue in stata.py, though maybe the issue should be fixed in DataFrame.
https://api.github.com/repos/pandas-dev/pandas/pulls/7369
2014-06-06T04:38:38Z
2014-06-13T20:32:59Z
2014-06-13T20:32:59Z
2014-06-13T20:33:41Z
BUG/REG: fix float64index -> mixed float assignment
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0a89806c899a4..3e06a705487df 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -120,3 +120,6 @@ Bug Fixes - Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) - Fix delegation of `read_sql` to `read_sql_query` when query does not contain 'select' (:issue:`7324`). +- Bug where a string column name assignment to a ``DataFrame`` with a + ``Float64Index`` raised a ``TypeError`` during a call to ``np.isnan`` + (:issue:`7366`). diff --git a/pandas/core/index.py b/pandas/core/index.py index 9ccc2e694f92f..fbadd92c1329c 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2060,11 +2060,14 @@ def __contains__(self, other): return False def get_loc(self, key): - if np.isnan(key): - try: - return self._nan_idxs.item() - except ValueError: - return self._nan_idxs + try: + if np.isnan(key): + try: + return self._nan_idxs.item() + except ValueError: + return self._nan_idxs + except (TypeError, NotImplementedError): + pass return super(Float64Index, self).get_loc(key) @property diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 062950cad43ed..96c67f2ff795c 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3561,6 +3561,16 @@ def f(): warnings.filterwarnings(action='ignore', category=FutureWarning) + def test_float_index_to_mixed(self): + df = DataFrame({0.0: np.random.rand(10), + 1.0: np.random.rand(10)}) + df['a'] = 10 + tm.assert_frame_equal(DataFrame({0.0: df[0.0], + 1.0: df[1.0], + 'a': [10] * 10}), + df) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
closes #7366
https://api.github.com/repos/pandas-dev/pandas/pulls/7368
2014-06-06T03:46:56Z
2014-06-06T04:32:03Z
2014-06-06T04:32:03Z
2014-06-15T12:57:35Z
BUG/REG: fix float64index -> mixed float assignment
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0a89806c899a4..3e06a705487df 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -120,3 +120,6 @@ Bug Fixes - Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) - Fix delegation of `read_sql` to `read_sql_query` when query does not contain 'select' (:issue:`7324`). +- Bug where a string column name assignment to a ``DataFrame`` with a + ``Float64Index`` raised a ``TypeError`` during a call to ``np.isnan`` + (:issue:`7366`). diff --git a/pandas/core/index.py b/pandas/core/index.py index 9ccc2e694f92f..f5fa47f0d9555 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2060,11 +2060,14 @@ def __contains__(self, other): return False def get_loc(self, key): - if np.isnan(key): - try: - return self._nan_idxs.item() - except ValueError: - return self._nan_idxs + try: + if np.isnan(key): + try: + return self._nan_idxs.item() + except ValueError: + return self._nan_idxs + except TypeError: + pass return super(Float64Index, self).get_loc(key) @property diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 062950cad43ed..96c67f2ff795c 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -3561,6 +3561,16 @@ def f(): warnings.filterwarnings(action='ignore', category=FutureWarning) + def test_float_index_to_mixed(self): + df = DataFrame({0.0: np.random.rand(10), + 1.0: np.random.rand(10)}) + df['a'] = 10 + tm.assert_frame_equal(DataFrame({0.0: df[0.0], + 1.0: df[1.0], + 'a': [10] * 10}), + df) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
closes #7366
https://api.github.com/repos/pandas-dev/pandas/pulls/7367
2014-06-06T03:02:28Z
2014-06-06T03:45:40Z
null
2014-07-12T06:13:01Z
BUG: Fix error when reading postgres table with timezone #7139
diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt index d130879b85475..54ba2ac6586d0 100644 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -52,7 +52,7 @@ Bug Fixes - Bug in ``transform`` causing length mismatch when null entries were present and a fast aggregator was being used (:issue:`9697`) - +- Bug in ``read_sql_table`` error when reading postgres table with timezone (:issue:`7139`) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 117d7b4a9ceaa..7c70b4b1df492 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -83,14 +83,14 @@ def _handle_date_column(col, format=None): return to_datetime(col, **format) else: if format in ['D', 's', 'ms', 'us', 'ns']: - return to_datetime(col, coerce=True, unit=format) + return to_datetime(col, coerce=True, unit=format, utc=True) elif (issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer)): # parse dates as timestamp format = 's' if format is None else format - return to_datetime(col, coerce=True, unit=format) + return to_datetime(col, coerce=True, unit=format, utc=True) else: - return to_datetime(col, coerce=True, format=format) + return to_datetime(col, coerce=True, format=format, utc=True) def _parse_date_columns(data_frame, parse_dates): @@ -318,6 +318,10 @@ def read_sql_table(table_name, con, schema=None, index_col=None, ------- DataFrame + Notes + ----- + Any datetime values with time zone information will be converted to UTC + See also -------- read_sql_query : Read SQL query into a DataFrame. 
@@ -390,6 +394,11 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, ------- DataFrame + Notes + ----- + Any datetime values with time zone information parsed via the `parse_dates` + parameter will be converted to UTC + See also -------- read_sql_table : Read SQL database table into a DataFrame @@ -451,7 +460,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (and for backward compatibility) and will delegate to the specific function depending on the provided input (database - table name or sql query). + table name or sql query). The delegated function might have more specific + notes about their functionality not listed here. See also -------- diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 2db6f1e104770..ac266dd77c984 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -29,7 +29,7 @@ from datetime import datetime, date, time from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat -from pandas import date_range, to_datetime, to_timedelta +from pandas import date_range, to_datetime, to_timedelta, Timestamp import pandas.compat as compat from pandas.compat import StringIO, range, lrange, string_types from pandas.core.datetools import format as date_format @@ -100,6 +100,7 @@ 'postgresql': """CREATE TABLE types_test_data ( "TextCol" TEXT, "DateCol" TIMESTAMP, + "DateColWithTz" TIMESTAMP WITH TIME ZONE, "IntDateCol" INTEGER, "FloatCol" DOUBLE PRECISION, "IntCol" INTEGER, @@ -109,18 +110,36 @@ )""" }, 'insert_test_types': { - 'sqlite': """ + 'sqlite': { + 'query': """ INSERT INTO types_test_data VALUES(?, ?, ?, ?, ?, ?, ?, ?) 
""", - 'mysql': """ + 'fields': ( + 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', + 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + ) + }, + 'mysql': { + 'query': """ INSERT INTO types_test_data VALUES("%s", %s, %s, %s, %s, %s, %s, %s) """, - 'postgresql': """ + 'fields': ( + 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', + 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + ) + }, + 'postgresql': { + 'query': """ INSERT INTO types_test_data - VALUES(%s, %s, %s, %s, %s, %s, %s, %s) - """ + VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) + """, + 'fields': ( + 'TextCol', 'DateCol', 'DateColWithTz', 'IntDateCol', 'FloatCol', + 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + ) + }, }, 'read_parameters': { 'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?", @@ -218,11 +237,36 @@ def _load_raw_sql(self): self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor]) ins = SQL_STRINGS['insert_test_types'][self.flavor] - data = [( - 'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False), - ('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)] + data = [ + { + 'TextCol': 'first', + 'DateCol': '2000-01-03 00:00:00', + 'DateColWithTz': '2000-01-01 00:00:00-08:00', + 'IntDateCol': 535852800, + 'FloatCol': 10.10, + 'IntCol': 1, + 'BoolCol': False, + 'IntColWithNull': 1, + 'BoolColWithNull': False, + }, + { + 'TextCol': 'first', + 'DateCol': '2000-01-04 00:00:00', + 'DateColWithTz': '2000-06-01 00:00:00-07:00', + 'IntDateCol': 1356998400, + 'FloatCol': 10.10, + 'IntCol': 1, + 'BoolCol': False, + 'IntColWithNull': None, + 'BoolColWithNull': None, + }, + ] + for d in data: - self._get_exec().execute(ins, d) + self._get_exec().execute( + ins['query'], + [d[field] for field in ins['fields']] + ) def _count_rows(self, table_name): result = self._get_exec().execute( @@ -1512,6 +1556,19 @@ def test_schema_support(self): res2 = pdsql.read_table('test_schema_other2') tm.assert_frame_equal(res1, res2) + 
def test_datetime_with_time_zone(self): + # Test to see if we read the date column with timezones that + # the timezone information is converted to utc and into a + # np.datetime64 (GH #7139) + df = sql.read_sql_table("types_test_data", self.conn) + self.assertTrue(issubclass(df.DateColWithTz.dtype.type, np.datetime64), + "DateColWithTz loaded with incorrect type") + + # "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00" + self.assertEqual(df.DateColWithTz[0], Timestamp('2000-01-01 08:00:00')) + + # "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00" + self.assertEqual(df.DateColWithTz[1], Timestamp('2000-06-01 07:00:00')) #------------------------------------------------------------------------------ #--- Test Sqlite / MySQL fallback
Closes #7139, merged via https://github.com/pydata/pandas/commit/c3eeb577b2b06ecc65e96c5f5de69f0fba01c3dd Fixes an issue where read_sql_table() will throw an error if it is reading a postgres table with `timestamp with time zone` fields that contain entries with different time zones (such as for DST). This also adds a new keyword, convert_dates_to_utc, which optionally allows the caller to convert `timestamp with time zone` datetimes into UTC, which allows pandas to store them internally as `numpy.datetime64`s, allowing for more efficient storage and operation speed on these columns.
https://api.github.com/repos/pandas-dev/pandas/pulls/7364
2014-06-06T01:39:15Z
2015-04-13T21:40:43Z
null
2015-04-17T13:32:33Z
TST7337: Fix test failures on windows.
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index aed54ab0f5040..0303b41e42e55 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -12,6 +12,8 @@ randint = np.random.randint np.set_printoptions(precision=4, suppress=True) options.display.max_rows=15 + import dateutil + import pytz from dateutil.relativedelta import relativedelta from pandas.tseries.api import * from pandas.tseries.offsets import * @@ -1266,32 +1268,37 @@ common zones, the names are the same as ``pytz``. .. ipython:: python # pytz - rng_utc = date_range('3/6/2012 00:00', periods=10, freq='D', tz='UTC') - rng_utc.tz + rng_pytz = date_range('3/6/2012 00:00', periods=10, freq='D', + tz='Europe/London') + rng_pytz.tz # dateutil - rng_utc_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', - tz='dateutil/UTC') - rng_utc_dateutil.tz + rng_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', + tz='dateutil/Europe/London') + rng_dateutil.tz -You can also construct the timezone explicitly first, which gives you more control over which -time zone is used: + # dateutil - utc special case + rng_utc = date_range('3/6/2012 00:00', periods=10, freq='D', + tz=dateutil.tz.tzutc()) + rng_utc.tz + +Note that the ``UTC`` timezone is a special case in ``dateutil`` and should be constructed explicitly +as an instance of ``dateutil.tz.tzutc``. You can also construct other timezones explicitly first, +which gives you more control over which time zone is used: .. 
ipython:: python # pytz - import pytz - tz_pytz = pytz.timezone('UTC') - rng_utc = date_range('3/6/2012 00:00', periods=10, freq='D', tz=tz_pytz) - rng_utc.tz + tz_pytz = pytz.timezone('Europe/London') + rng_pytz = date_range('3/6/2012 00:00', periods=10, freq='D', + tz=tz_pytz) + rng_pytz.tz == tz_pytz # dateutil - import dateutil - tz_dateutil = dateutil.tz.gettz('UTC') - rng_utc_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', - tz=tz_dateutil) - rng_utc_dateutil.tz - + tz_dateutil = dateutil.tz.gettz('Europe/London') + rng_dateutil = date_range('3/6/2012 00:00', periods=10, freq='D', + tz=tz_dateutil) + rng_dateutil.tz == tz_dateutil Timestamps, like Python's ``datetime.datetime`` object can be either time zone naive or time zone aware. Naive time series and DatetimeIndex objects can be @@ -1313,9 +1320,10 @@ tz-aware data to another time zone: ts_utc.tz_convert('US/Eastern') .. warning:: - Be very wary of conversions between libraries as ``pytz`` and ``dateutil`` - may have different definitions of the time zones. This is more of a problem for - unusual timezones than for 'standard' zones like ``US/Eastern``. + + Be wary of conversions between libraries. For some zones ``pytz`` and ``dateutil`` have different + definitions of the zone. This is more of a problem for unusual timezones than for + 'standard' zones like ``US/Eastern``. Under the hood, all timestamps are stored in UTC. Scalar values from a ``DatetimeIndex`` with a time zone will have their fields (day, hour, minute) @@ -1359,8 +1367,6 @@ TimeSeries, aligning the data on the UTC timestamps: result result.index -.. _timeseries.timedeltas: - In some cases, localize cannot determine the DST and non-DST hours when there are duplicates. This often happens when reading files that simply duplicate the hours. The infer_dst argument in tz_localize will attempt @@ -1376,6 +1382,8 @@ to determine the right offset. 
rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', infer_dst=True) rng_hourly_eastern.values +.. _timeseries.timedeltas: + Time Deltas ----------- diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 586e47ff4f303..d38565008640f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -104,11 +104,9 @@ Enhancements .. ipython:: python - rng_utc_dateutil = date_range('3/6/2012 00:00', - periods=10, - freq='D', - tz='dateutil/UTC') - rng_utc_dateutil.tz + rng = date_range('3/6/2012 00:00', periods=10, freq='D', + tz='dateutil/Europe/London') + rng.tz See :ref:`the docs <timeseries.timezone>`. diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 302b8ca9983e0..456d331156011 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -2946,7 +2946,7 @@ def test_tz_pytz(self): def test_tz_dateutil(self): _skip_if_no_dateutil() import dateutil - utc = dateutil.tz.gettz('UTC') + utc = dateutil.tz.tzutc() dt_date = datetime(2013, 1, 2, tzinfo=utc) self.assertEqual(str(dt_date), str(Timestamp(dt_date))) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 31e5363dd5abe..3881ed5277b85 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4630,7 +4630,8 @@ def test_getitem_setitem_datetime_tz_pytz(self): def test_getitem_setitem_datetime_tz_dateutil(self): _skip_if_no_dateutil(); - from dateutil.tz import gettz as tz + from dateutil.tz import gettz, tzutc + tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil from pandas import date_range N = 50 diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index dd84ee27caf0e..0a732ac7bc7e8 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -2,7 +2,7 @@ from pandas.compat import range import pickle import nose - +import sys import numpy as np from pandas.core.index import Index @@ -36,6 
+36,11 @@ def _skip_if_no_cday(): raise nose.SkipTest("CustomBusinessDay not available.") +def _skip_if_windows_python_3(): + if sys.version_info > (3,) and sys.platform == 'win32': + raise nose.SkipTest("not used on python 3/win32") + + def eq_gen_range(kwargs, expected): rng = generate_range(**kwargs) assert(np.array_equal(list(rng), expected)) @@ -300,7 +305,7 @@ def test_summary_pytz(self): def test_summary_dateutil(self): _skip_if_no_dateutil() import dateutil - bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.gettz('UTC')).summary() + bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() def test_misc(self): end = datetime(2009, 5, 13) @@ -391,8 +396,10 @@ def test_range_tz_pytz(self): def test_range_tz_dateutil(self): # GH 2906 _skip_if_no_dateutil() - from dateutil.tz import gettz as tz - + # Use maybe_get_tz to fix filename in tz under dateutil. + from pandas.tslib import maybe_get_tz + tz = lambda x: maybe_get_tz('dateutil/' + x) + start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern')) end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern')) @@ -428,6 +435,7 @@ def test_month_range_union_tz_pytz(self): early_dr.union(late_dr) def test_month_range_union_tz_dateutil(self): + _skip_if_windows_python_3() _skip_if_no_dateutil() from dateutil.tz import gettz as timezone tz = timezone('US/Eastern') @@ -633,7 +641,7 @@ def test_summary_pytz(self): def test_summary_dateutil(self): _skip_if_no_dateutil() import dateutil - cdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.gettz('UTC')).summary() + cdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() def test_misc(self): end = datetime(2009, 5, 13) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 81387c3736481..38887ede2faca 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -85,7 +85,8 @@ def test_timestamp_tz_arg(self): def test_timestamp_tz_arg_dateutil(self): import dateutil - p = 
Period('1/1/2005', freq='M').to_timestamp(tz=dateutil.tz.gettz('Europe/Brussels')) + from pandas.tslib import maybe_get_tz + p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels')) self.assertEqual(p.tz, dateutil.tz.gettz('Europe/Brussels')) def test_timestamp_tz_arg_dateutil_from_string(self): diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 83cc5dcc7485f..04210b4f0c88f 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -58,6 +58,15 @@ def _skip_if_has_locale(): lang, _ = locale.getlocale() if lang is not None: raise nose.SkipTest("Specific locale is set {0}".format(lang)) + +def _skip_if_windows_python_3(): + if sys.version_info > (3,) and sys.platform == 'win32': + raise nose.SkipTest("not used on python 3/win32") + +def _skip_if_not_windows_python_3(): + if sys.version_info < (3,) or sys.platform != 'win32': + raise nose.SkipTest("only run on python 3/win32") + class TestTimeSeriesDuplicates(tm.TestCase): _multiprocess_can_split_ = True @@ -406,6 +415,16 @@ def test_timestamp_to_datetime(self): self.assertEqual(stamp, dtval) self.assertEqual(stamp.tzinfo, dtval.tzinfo) + def test_timestamp_to_datetime_dateutil(self): + _skip_if_no_pytz() + rng = date_range('20090415', '20090519', + tz='dateutil/US/Eastern') + + stamp = rng[0] + dtval = stamp.to_pydatetime() + self.assertEqual(stamp, dtval) + self.assertEqual(stamp.tzinfo, dtval.tzinfo) + def test_timestamp_to_datetime_explicit_pytz(self): _skip_if_no_pytz() import pytz @@ -418,6 +437,7 @@ def test_timestamp_to_datetime_explicit_pytz(self): self.assertEquals(stamp.tzinfo, dtval.tzinfo) def test_timestamp_to_datetime_explicit_dateutil(self): + _skip_if_windows_python_3() _skip_if_no_dateutil() import dateutil rng = date_range('20090415', '20090519', @@ -467,7 +487,7 @@ def _check_rng(rng): _check_rng(rng_eastern) _check_rng(rng_utc) - def 
test_index_convert_to_datetime_array_explicit_dateutil(self): + def test_index_convert_to_datetime_array_dateutil(self): _skip_if_no_dateutil() import dateutil @@ -480,8 +500,8 @@ def _check_rng(rng): self.assertEquals(x.tzinfo, stamp.tzinfo) rng = date_range('20090415', '20090519') - rng_eastern = date_range('20090415', '20090519', tz=dateutil.tz.gettz('US/Eastern')) - rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.gettz('UTC')) + rng_eastern = date_range('20090415', '20090519', tz='dateutil/US/Eastern') + rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.tzutc()) _check_rng(rng) _check_rng(rng_eastern) @@ -1560,14 +1580,14 @@ def test_to_period_tz_explicit_pytz(self): self.assert_(result == expected) self.assert_(ts.to_period().equals(xp)) - def test_to_period_tz_explicit_dateutil(self): + def test_to_period_tz_dateutil(self): _skip_if_no_dateutil() import dateutil from dateutil.tz import tzlocal xp = date_range('1/1/2000', '4/1/2000').to_period() - ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.gettz('US/Eastern')) + ts = date_range('1/1/2000', '4/1/2000', tz='dateutil/US/Eastern') result = ts.to_period()[0] expected = ts[0].to_period() @@ -1575,7 +1595,7 @@ def test_to_period_tz_explicit_dateutil(self): self.assert_(result == expected) self.assert_(ts.to_period().equals(xp)) - ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.gettz('UTC')) + ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.tzutc()) result = ts.to_period()[0] expected = ts[0].to_period() @@ -1793,17 +1813,17 @@ def test_append_concat_tz_explicit_pytz(self): appended = rng.append(rng2) self.assert_(appended.equals(rng3)) - def test_append_concat_tz_explicit_dateutil(self): + def test_append_concat_tz_dateutil(self): # GH 2938 _skip_if_no_dateutil() from dateutil.tz import gettz as timezone rng = date_range('5/8/2012 1:45', periods=10, freq='5T', - tz=timezone('US/Eastern')) + tz='dateutil/US/Eastern') rng2 = date_range('5/8/2012 2:35', periods=10, 
freq='5T', - tz=timezone('US/Eastern')) + tz='dateutil/US/Eastern') rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', - tz=timezone('US/Eastern')) + tz='dateutil/US/Eastern') ts = Series(np.random.randn(len(rng)), rng) df = DataFrame(np.random.randn(len(rng), 4), index=rng) ts2 = Series(np.random.randn(len(rng2)), rng2) @@ -2021,11 +2041,11 @@ def test_period_resample_with_local_timezone_dateutil(self): _skip_if_no_dateutil() import dateutil - local_timezone = dateutil.tz.gettz('America/Los_Angeles') + local_timezone = 'dateutil/America/Los_Angeles' - start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=dateutil.tz.gettz('UTC')) + start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=dateutil.tz.tzutc()) # 1 day later - end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=dateutil.tz.gettz('UTC')) + end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=dateutil.tz.tzutc()) index = pd.date_range(start, end, freq='H') @@ -2990,13 +3010,13 @@ def compare(x, y): def test_class_ops_dateutil(self): _skip_if_no_dateutil() - from dateutil.tz import gettz as timezone + from dateutil.tz import tzutc def compare(x,y): self.assertEqual(int(np.round(Timestamp(x).value/1e9)), int(np.round(Timestamp(y).value/1e9))) compare(Timestamp.now(),datetime.now()) - compare(Timestamp.now('UTC'), datetime.now(timezone('UTC'))) + compare(Timestamp.now('UTC'), datetime.now(tzutc())) compare(Timestamp.utcnow(),datetime.utcnow()) compare(Timestamp.today(),datetime.today()) @@ -3149,8 +3169,8 @@ def test_cant_compare_tz_naive_w_aware_explicit_pytz(self): def test_cant_compare_tz_naive_w_aware_dateutil(self): _skip_if_no_dateutil() - from dateutil.tz import gettz - utc = gettz('UTC') + from dateutil.tz import tzutc + utc = tzutc() # #1404 a = Timestamp('3/12/2012') b = Timestamp('3/12/2012', tz=utc) diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index 5fb1f9db620ae..51c533df863e6 
100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -770,8 +770,12 @@ def setUp(self): _skip_if_no_dateutil() def tz(self, tz): - ''' Construct a timezone object from a string. Overridden in subclass to parameterize tests. ''' - return dateutil.tz.gettz(tz) + ''' + Construct a dateutil timezone. + Use tslib.maybe_get_tz so that we get the filename on the tz right + on windows. See #7337. + ''' + return tslib.maybe_get_tz('dateutil/' + tz) def tzstr(self, tz): ''' Construct a timezone string from a string. Overridden in subclass to parameterize tests. ''' @@ -784,6 +788,19 @@ def cmptz(self, tz1, tz2): def localize(self, tz, x): return x.replace(tzinfo=tz) + def test_utc_with_system_utc(self): + from pandas.tslib import maybe_get_tz + + # from system utc to real utc + ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) + # check that the time hasn't changed. + self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc())) + + # from system utc to real utc + ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC')) + # check that the time hasn't changed. + self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc())) + class TestTimeZones(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 9f1db62a54bf3..62e3b120c9d64 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1029,6 +1029,10 @@ cdef inline object _get_zone(object tz): return 'UTC' else: if _treat_tz_as_dateutil(tz): + if '.tar.gz' in tz._filename: + raise ValueError('Bad tz filename. Dateutil on python 3 on windows has a bug which causes tzfile._filename to be the same for all ' + 'timezone files. Please construct dateutil timezones implicitly by passing a string like "dateutil/Europe/London" ' + 'when you construct your pandas objects instead of passing a timezone object. 
See https://github.com/pydata/pandas/pull/7362') return 'dateutil/' + tz._filename else: # tz is a pytz timezone or unknown. @@ -1048,7 +1052,11 @@ cpdef inline object maybe_get_tz(object tz): ''' if isinstance(tz, string_types): if tz.startswith('dateutil/'): + zone = tz[9:] tz = _dateutil_gettz(tz[9:]) + # On Python 3 on Windows, the filename is not always set correctly. + if isinstance(tz, _dateutil_tzfile) and '.tar.gz' in tz._filename: + tz._filename = zone else: tz = pytz.timezone(tz) return tz @@ -1965,6 +1973,10 @@ cdef inline object _tz_cache_key(object tz): if isinstance(tz, _pytz_BaseTzInfo): return tz.zone elif isinstance(tz, _dateutil_tzfile): + if '.tar.gz' in tz._filename: + raise ValueError('Bad tz filename. Dateutil on python 3 on windows has a bug which causes tzfile._filename to be the same for all ' + 'timezone files. Please construct dateutil timezones implicitly by passing a string like "dateutil/Europe/London" ' + 'when you construct your pandas objects instead of passing a timezone object. See https://github.com/pydata/pandas/pull/7362') return tz._filename else: return None
closes #7337 Use dateutil.tz.tzutc() to construct UTC timezone instead of dateutil.tz.gettz('UTC') Update docs to reflect this. Tidy up examples in the timezones section of the docs.
https://api.github.com/repos/pandas-dev/pandas/pulls/7362
2014-06-05T21:40:15Z
2014-06-09T20:51:41Z
2014-06-09T20:51:41Z
2014-07-12T05:50:43Z
Add test for core.nanops and fix several bugs in nanops
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b40334c1857ac..acc79e056689b 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -75,7 +75,8 @@ def f(values, axis=None, skipna=True, **kwds): result.fill(0) return result - if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): + if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, + bn_name): result = bn_func(values, axis=axis, **kwds) # prefer to treat inf/-inf as NA, but must compute the func @@ -94,7 +95,8 @@ def f(values, axis=None, skipna=True, **kwds): def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 - if dt != np.object_ and not issubclass(dt.type, (np.datetime64, np.timedelta64)): + if dt != np.object_ and not issubclass(dt.type, (np.datetime64, + np.timedelta64)): # bottleneck does not properly upcast during the sum # so can overflow @@ -105,14 +107,18 @@ def _bn_ok_dtype(dt, name): return True return False + def _has_infs(result): if isinstance(result, np.ndarray): if result.dtype == 'f8': - return lib.has_infs_f8(result) + return lib.has_infs_f8(result.ravel()) elif result.dtype == 'f4': - return lib.has_infs_f4(result) + return lib.has_infs_f4(result.ravel()) + try: + return np.isinf(result).any() + except (TypeError, NotImplementedError) as e: + # if it doesn't support infs, then it can't have infs return False - return np.isinf(result) or np.isneginf(result) def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): @@ -175,8 +181,9 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, # return a platform independent precision dtype dtype_max = dtype - if dtype.kind == 'i' and not issubclass( - dtype.type, (np.bool, np.datetime64, np.timedelta64)): + if dtype.kind == 'i' and not issubclass(dtype.type, (np.bool, + np.datetime64, + np.timedelta64)): dtype_max = np.int64 elif dtype.kind in ['b'] or issubclass(dtype.type, np.bool): dtype_max = np.int64 @@ -190,7 +197,7 @@ def _isfinite(values): if 
issubclass(values.dtype.type, (np.timedelta64, np.datetime64)): return isnull(values) elif isinstance(values.dtype, object): - return ~np.isfinite(values.astype('float64')) + return ~np.isfinite(values.astype('complex128')) return ~np.isfinite(values) @@ -247,7 +254,7 @@ def nanall(values, axis=None, skipna=True): @bottleneck_switch(zero_value=0) def nansum(values, axis=None, skipna=True): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) - the_sum = values.sum(axis,dtype=dtype_max) + the_sum = values.sum(axis, dtype=dtype_max) the_sum = _maybe_null_out(the_sum, axis, mask) return _wrap_results(the_sum, dtype) @@ -260,7 +267,7 @@ def nanmean(values, axis=None, skipna=True): the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max)) count = _get_counts(mask, axis) - if axis is not None: + if axis is not None and getattr(the_sum, 'ndim', False): the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): @@ -286,6 +293,9 @@ def get_median(x): if values.dtype != np.float64: values = values.astype('f8') + if axis is None: + values = values.ravel() + notempty = values.size # an array from a frame @@ -358,7 +368,8 @@ def nansem(values, axis=None, skipna=True, ddof=1): @bottleneck_switch() def nanmin(values, axis=None, skipna=True): - values, mask, dtype, dtype_max = _get_values(values, skipna, fill_value_typ='+inf') + values, mask, dtype, dtype_max = _get_values(values, skipna, + fill_value_typ='+inf') # numpy 1.6.1 workaround in Python 3.x if (values.dtype == np.object_ and compat.PY3): @@ -374,7 +385,7 @@ def nanmin(values, axis=None, skipna=True): if ((axis is not None and values.shape[axis] == 0) or values.size == 0): try: - result = com.ensure_float(values.sum(axis,dtype=dtype_max)) + result = com.ensure_float(values.sum(axis, dtype=dtype_max)) result.fill(np.nan) except: result = np.nan @@ -387,7 +398,8 @@ def nanmin(values, axis=None, skipna=True): @bottleneck_switch() def nanmax(values, axis=None, skipna=True): - values, mask, dtype, 
dtype_max = _get_values(values, skipna, fill_value_typ='-inf') + values, mask, dtype, dtype_max = _get_values(values, skipna, + fill_value_typ='-inf') # numpy 1.6.1 workaround in Python 3.x if (values.dtype == np.object_ and compat.PY3): @@ -420,7 +432,7 @@ def nanargmax(values, axis=None, skipna=True): Returns -1 in the NA case """ values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf', - isfinite=True) + isfinite=True) result = values.argmax(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result @@ -431,7 +443,7 @@ def nanargmin(values, axis=None, skipna=True): Returns -1 in the NA case """ values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf', - isfinite=True) + isfinite=True) result = values.argmin(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result @@ -517,7 +529,7 @@ def nanprod(values, axis=None, skipna=True): def _maybe_arg_null_out(result, axis, mask, skipna): # helper function for nanargmin/nanargmax - if axis is None: + if axis is None or not result.ndim: if skipna: if mask.all(): result = -1 @@ -544,10 +556,13 @@ def _get_counts(mask, axis): def _maybe_null_out(result, axis, mask): - if axis is not None: + if axis is not None and getattr(result, 'ndim', False): null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 if null_mask.any(): - result = result.astype('f8') + if np.iscomplexobj(result): + result = result.astype('c16') + else: + result = result.astype('f8') result[null_mask] = np.nan else: null_mask = mask.size - mask.sum() @@ -633,7 +648,11 @@ def nancov(a, b, min_periods=None): def _ensure_numeric(x): if isinstance(x, np.ndarray): if x.dtype == np.object_: - x = x.astype(np.float64) + try: + x = x.astype(np.complex128) + x = x.real if not np.any(x.imag) else x + except TypeError: + x = x.astype(np.float64) elif not (com.is_float(x) or com.is_integer(x) or com.is_complex(x)): try: x = float(x) diff --git a/pandas/tests/test_common.py 
b/pandas/tests/test_common.py index 3055017a3148d..58338a47d9465 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -12,7 +12,6 @@ import pandas.core.common as com import pandas.util.testing as tm import pandas.core.config as cf -from pandas.core import nanops _multiprocess_can_split_ = True @@ -394,54 +393,6 @@ def test_ensure_int32(): assert(result.dtype == np.int32) -class TestEnsureNumeric(tm.TestCase): - def test_numeric_values(self): - # Test integer - self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int') - # Test float - self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float') - # Test complex - self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j, - 'Failed for complex') - - def test_ndarray(self): - # Test numeric ndarray - values = np.array([1, 2, 3]) - self.assertTrue(np.allclose(nanops._ensure_numeric(values), values), - 'Failed for numeric ndarray') - - # Test object ndarray - o_values = values.astype(object) - self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), values), - 'Failed for object ndarray') - - # Test convertible string ndarray - s_values = np.array(['1', '2', '3'], dtype=object) - self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values), - 'Failed for convertible string ndarray') - - # Test non-convertible string ndarray - s_values = np.array(['foo', 'bar', 'baz'], dtype=object) - self.assertRaises(ValueError, - lambda: nanops._ensure_numeric(s_values)) - - def test_convertable_values(self): - self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0), - 'Failed for convertible integer string') - self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1), - 'Failed for convertible float string') - self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j), - 'Failed for convertible complex string') - - def test_non_convertable_values(self): - self.assertRaises(TypeError, - lambda: nanops._ensure_numeric('foo')) - self.assertRaises(TypeError, - 
lambda: nanops._ensure_numeric({})) - self.assertRaises(TypeError, - lambda: nanops._ensure_numeric([])) - - def test_ensure_platform_int(): # verify that when we create certain types of indices diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py new file mode 100644 index 0000000000000..6c1fcc96b5356 --- /dev/null +++ b/pandas/tests/test_nanops.py @@ -0,0 +1,695 @@ +from __future__ import division, print_function + +from functools import partial + +import numpy as np + +from pandas.core.common import isnull +import pandas.core.nanops as nanops +import pandas.util.testing as tm + +nanops._USE_BOTTLENECK = False + + +class TestnanopsDataFrame(tm.TestCase): + def setUp(self): + self.arr_shape = (11, 7, 5) + + self.arr_float = np.random.randn(*self.arr_shape) + self.arr_float1 = np.random.randn(*self.arr_shape) + self.arr_complex = self.arr_float + self.arr_float1*1j + self.arr_int = np.random.randint(-10, 10, self.arr_shape) + self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0 + self.arr_str = np.abs(self.arr_float).astype('S') + self.arr_utf = np.abs(self.arr_float).astype('U') + self.arr_date = np.random.randint(0, 20000, + self.arr_shape).astype('M8[ns]') + self.arr_tdelta = np.random.randint(0, 20000, + self.arr_shape).astype('m8[ns]') + + self.arr_nan = np.tile(np.nan, self.arr_shape) + self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) + self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) + self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) + self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) + + self.arr_inf = self.arr_float*np.inf + self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) + self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf]) + self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1]) + self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf]) + + self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) + self.arr_float_nan_inf = 
np.vstack([self.arr_float, + self.arr_nan, + self.arr_inf]) + self.arr_nan_float1_inf = np.vstack([self.arr_float, + self.arr_inf, + self.arr_nan]) + self.arr_nan_nan_inf = np.vstack([self.arr_nan, + self.arr_nan, + self.arr_inf]) + self.arr_obj = np.vstack([self.arr_float.astype('O'), + self.arr_int.astype('O'), + self.arr_bool.astype('O'), + self.arr_complex.astype('O'), + self.arr_str.astype('O'), + self.arr_utf.astype('O'), + self.arr_date.astype('O'), + self.arr_tdelta.astype('O')]) + + self.arr_nan_nanj = self.arr_nan + self.arr_nan*1j + self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) + + self.arr_nan_infj = self.arr_inf*1j + self.arr_complex_nan_infj = np.vstack([self.arr_complex, + self.arr_nan_infj]) + + self.arr_float_2d = self.arr_float[:, :, 0] + self.arr_float1_2d = self.arr_float1[:, :, 0] + + self.arr_nan_2d = self.arr_nan[:, :, 0] + self.arr_float_nan_2d = self.arr_float_nan[:, :, 0] + self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0] + self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0] + self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0] + + self.arr_float_1d = self.arr_float[:, 0, 0] + self.arr_float1_1d = self.arr_float1[:, 0, 0] + + self.arr_nan_1d = self.arr_nan[:, 0, 0] + self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0] + self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0] + self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0] + self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0] + + def check_results(self, targ, res, axis): + res = getattr(res, 'asm8', res) + res = getattr(res, 'values', res) + if axis != 0 and hasattr(targ, 'shape') and targ.ndim: + res = np.split(res, [targ.shape[0]], axis=0)[0] + tm.assert_almost_equal(targ, res) + + def check_fun_data(self, testfunc, targfunc, + testarval, targarval, targarnanval, **kwargs): + for axis in list(range(targarval.ndim))+[None]: + for skipna in [False, True]: + targartempval = targarval if skipna else targarnanval + try: + targ = targfunc(targartempval, axis=axis, 
**kwargs) + res = testfunc(testarval, axis=axis, skipna=skipna, + **kwargs) + self.check_results(targ, res, axis) + if skipna: + res = testfunc(testarval, axis=axis) + self.check_results(targ, res, axis) + if axis is None: + res = testfunc(testarval, skipna=skipna) + self.check_results(targ, res, axis) + if skipna and axis is None: + res = testfunc(testarval) + self.check_results(targ, res, axis) + except BaseException as exc: + exc.args += ('axis: %s of %s' % (axis, testarval.ndim-1), + 'skipna: %s' % skipna, + 'kwargs: %s' % kwargs) + raise + + if testarval.ndim <= 1: + return + + try: + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) + targarnanval2 = np.take(targarnanval, 0, axis=-1) + except ValueError: + return + self.check_fun_data(testfunc, targfunc, + testarval2, targarval2, targarnanval2, + **kwargs) + + def check_fun(self, testfunc, targfunc, + testar, targar=None, targarnan=None, + **kwargs): + if targar is None: + targar = testar + if targarnan is None: + targarnan = testar + testarval = getattr(self, testar) + targarval = getattr(self, targar) + targarnanval = getattr(self, targarnan) + try: + self.check_fun_data(testfunc, targfunc, + testarval, targarval, targarnanval, **kwargs) + except BaseException as exc: + exc.args += ('testar: %s' % testar, + 'targar: %s' % targar, + 'targarnan: %s' % targarnan) + raise + + def check_funs(self, testfunc, targfunc, + allow_complex=True, allow_all_nan=True, allow_str=True, + allow_date=True, allow_obj=True, + **kwargs): + self.check_fun(testfunc, targfunc, 'arr_float', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float', + **kwargs) + self.check_fun(testfunc, targfunc, 'arr_int', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs) + objs = [self.arr_float.astype('O'), + self.arr_int.astype('O'), + self.arr_bool.astype('O')] + + if allow_all_nan: + self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs) + + if allow_complex: + 
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs) + self.check_fun(testfunc, targfunc, + 'arr_complex_nan', 'arr_complex', **kwargs) + if allow_all_nan: + self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs) + objs += [self.arr_complex.astype('O')] + + if allow_str: + self.check_fun(testfunc, targfunc, 'arr_str', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs) + objs += [self.arr_str.astype('O'), + self.arr_utf.astype('O')] + + if allow_date: + self.check_fun(testfunc, targfunc, 'arr_date', **kwargs) + self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs) + objs += [self.arr_date.astype('O'), + self.arr_tdelta.astype('O')] + + if allow_obj: + self.arr_obj = np.vstack(objs) + self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs) + + def check_funs_ddof(self, testfunc, targfunc, + allow_complex=True, allow_all_nan=True, allow_str=True, + allow_date=True, allow_obj=True,): + for ddof in range(3): + try: + self.check_funs(self, testfunc, targfunc, + allow_complex, allow_all_nan, allow_str, + allow_date, allow_obj, + ddof=ddof) + except BaseException as exc: + exc.args += ('ddof %s' % ddof,) + + def test_nanany(self): + self.check_funs(nanops.nanany, np.any, + allow_all_nan=False, allow_str=False, allow_date=False) + + def test_nanall(self): + self.check_funs(nanops.nanall, np.all, + allow_all_nan=False, allow_str=False, allow_date=False) + + def test_nansum(self): + self.check_funs(nanops.nansum, np.sum, + allow_str=False, allow_date=False) + + def _nanmean_wrap(self, value, *args, **kwargs): + dtype = value.dtype + res = nanops.nanmean(value, *args, **kwargs) + if dtype.kind == 'O': + res = np.round(res, decimals=15) + return res + + def _mean_wrap(self, value, *args, **kwargs): + dtype = value.dtype + if dtype.kind == 'O': + value = value.astype('c16') + res = np.mean(value, *args, **kwargs) + if dtype.kind == 'O': + res = np.round(res, decimals=15) + return res + + def test_nanmean(self): + 
self.check_funs(self._nanmean_wrap, self._mean_wrap, + allow_str=False, allow_date=False) + + def _median_wrap(self, value, *args, **kwargs): + if value.dtype.kind == 'O': + value = value.astype('c16') + res = np.median(value, *args, **kwargs) + return res + + def test_nanmedian(self): + self.check_funs(nanops.nanmedian, self._median_wrap, + allow_complex=False, allow_str=False, allow_date=False) + + def test_nanvar(self): + self.check_funs_ddof(nanops.nanvar, np.var, + allow_complex=False, allow_date=False) + + def test_nansem(self): + tm.skip_if_no_package('scipy') + from scipy.stats import sem + self.check_funs_ddof(nanops.nansem, np.var, + allow_complex=False, allow_date=False) + + def _minmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + if res.dtype.kind == 'm': + res = np.atleast_1d(res) + return res + + def test_nanmin(self): + func = partial(self._minmax_wrap, func=np.min) + self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False) + + def test_nanmax(self): + func = partial(self._minmax_wrap, func=np.max) + self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False) + + def _argminmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + nans = np.min(value, axis) + nullnan = isnull(nans) + if res.ndim: + res[nullnan] = -1 + elif (hasattr(nullnan, 'all') and nullnan.all() or + not hasattr(nullnan, 'all') and nullnan): + res = -1 + return res + + def test_nanargmax(self): + func = partial(self._argminmax_wrap, func=np.argmax) + self.check_funs(nanops.nanargmax, func, + allow_str=False, allow_obj=False) + + def test_nanargmin(self): + func = partial(self._argminmax_wrap, func=np.argmin) + if tm.sys.version_info[0:2] == (2, 6): + self.check_funs(nanops.nanargmin, func, + allow_date=False, + allow_str=False, allow_obj=False) + else: + self.check_funs(nanops.nanargmin, func, + allow_str=False, allow_obj=False) + + def _skew_kurt_wrap(self, values, axis=None, func=None): + if not 
isinstance(values.dtype.type, np.floating): + values = values.astype('f8') + result = func(values, axis=axis, bias=False) + # fix for handling cases where all elements in an axis are the same + if isinstance(result, np.ndarray): + result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 + return result + elif np.max(values) == np.min(values): + return 0. + return result + + def test_nanskew(self): + tm.skip_if_no_package('scipy') + from scipy.stats import skew + func = partial(self._skew_kurt_wrap, func=skew) + self.check_funs(nanops.nanskew, func, + allow_complex=False, allow_str=False, allow_date=False) + + def test_nankurt(self): + tm.skip_if_no_package('scipy') + from scipy.stats import kurtosis + func1 = partial(kurtosis, fisher=True) + func = partial(self._skew_kurt_wrap, func=func1) + self.check_funs(nanops.nankurt, func, + allow_complex=False, allow_str=False, allow_date=False) + + def test_nanprod(self): + self.check_funs(nanops.nanprod, np.prod, + allow_str=False, allow_date=False) + + def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, + **kwargs) + res01 = checkfun(self.arr_float_2d, self.arr_float1_2d, + min_periods=len(self.arr_float_2d)-1, + **kwargs) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, + **kwargs) + res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, + min_periods=len(self.arr_float_2d)-1, + **kwargs) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, + **kwargs) + res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, + **kwargs) + res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, + **kwargs) + res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, + **kwargs) + res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, + 
min_periods=len(self.arr_float_2d)-1, + **kwargs) + res25 = checkfun(self.arr_float_2d, self.arr_float1_2d, + min_periods=len(self.arr_float_2d)+1, + **kwargs) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, + **kwargs) + res01 = checkfun(self.arr_float_1d, self.arr_float1_1d, + min_periods=len(self.arr_float_1d)-1, + **kwargs) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_1d, + self.arr_float1_nan_1d, + **kwargs) + res11 = checkfun(self.arr_float_nan_1d, + self.arr_float1_nan_1d, + min_periods=len(self.arr_float_1d)-1, + **kwargs) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, + **kwargs) + res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, + **kwargs) + res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, + **kwargs) + res23 = checkfun(self.arr_float_nan_1d, + self.arr_nan_float1_1d, + **kwargs) + res24 = checkfun(self.arr_float_nan_1d, + self.arr_nan_float1_1d, + min_periods=len(self.arr_float_1d)-1, + **kwargs) + res25 = checkfun(self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d)+1, + **kwargs) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def test_nancorr(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, + self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, 
targ1) + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, + self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='pearson') + + def test_nancorr_pearson(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, + self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, + method='pearson') + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, + self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='pearson') + + def test_nancorr_kendall(self): + tm.skip_if_no_package('scipy') + from scipy.stats import kendalltau + targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, + method='kendall') + targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='kendall') + + def test_nancorr_spearman(self): + tm.skip_if_no_package('scipy') + from scipy.stats import spearmanr + targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, + method='spearman') + targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, + method='spearman') + + def test_nancov(self): + targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + 
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) + targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) + + def check_nancomp(self, checkfun, targ0): + arr_float = self.arr_float + arr_float1 = self.arr_float1 + arr_nan = self.arr_nan + arr_nan_nan = self.arr_nan_nan + arr_float_nan = self.arr_float_nan + arr_float1_nan = self.arr_float1_nan + arr_nan_float1 = self.arr_nan_float1 + + while targ0.ndim: + try: + res0 = checkfun(arr_float, arr_float1) + tm.assert_almost_equal(targ0, res0) + + if targ0.ndim > 1: + targ1 = np.vstack([targ0, arr_nan]) + else: + targ1 = np.hstack([targ0, arr_nan]) + res1 = checkfun(arr_float_nan, arr_float1_nan) + tm.assert_almost_equal(targ1, res1) + + targ2 = arr_nan_nan + res2 = checkfun(arr_float_nan, arr_nan_float1) + tm.assert_almost_equal(targ2, res2) + except Exception as exc: + exc.args += ('ndim: %s' % arr_float.ndim,) + raise + + try: + arr_float = np.take(arr_float, 0, axis=-1) + arr_float1 = np.take(arr_float1, 0, axis=-1) + arr_nan = np.take(arr_nan, 0, axis=-1) + arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) + arr_float_nan = np.take(arr_float_nan, 0, axis=-1) + arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) + arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) + targ0 = np.take(targ0, 0, axis=-1) + except ValueError: + break + + def test_nangt(self): + targ0 = self.arr_float > self.arr_float1 + self.check_nancomp(nanops.nangt, targ0) + + def test_nange(self): + targ0 = self.arr_float >= self.arr_float1 + self.check_nancomp(nanops.nange, targ0) + + def test_nanlt(self): + targ0 = self.arr_float < self.arr_float1 + self.check_nancomp(nanops.nanlt, targ0) + + def test_nanle(self): + targ0 = self.arr_float <= self.arr_float1 + self.check_nancomp(nanops.nanle, targ0) + + def test_naneq(self): + targ0 = self.arr_float == self.arr_float1 + self.check_nancomp(nanops.naneq, 
targ0) + + def test_nanne(self): + targ0 = self.arr_float != self.arr_float1 + self.check_nancomp(nanops.nanne, targ0) + + def check_bool(self, func, value, correct, *args, **kwargs): + while getattr(value, 'ndim', True): + try: + res0 = func(value, *args, **kwargs) + if correct: + self.assertTrue(res0) + else: + self.assertFalse(res0) + except BaseException as exc: + exc.args += ('dim: %s' % getattr(value, 'ndim', value),) + raise + if not hasattr(value, 'ndim'): + break + try: + value = np.take(value, 0, axis=-1) + except ValueError: + break + + def test__has_infs(self): + pairs = [('arr_complex', False), + ('arr_int', False), + ('arr_bool', False), + ('arr_str', False), + ('arr_utf', False), + ('arr_complex', False), + ('arr_complex_nan', False), + + ('arr_nan_nanj', False), + ('arr_nan_infj', True), + ('arr_complex_nan_infj', True)] + pairs_float = [('arr_float', False), + ('arr_nan', False), + ('arr_float_nan', False), + ('arr_nan_nan', False), + + ('arr_float_inf', True), + ('arr_inf', True), + ('arr_nan_inf', True), + ('arr_float_nan_inf', True), + ('arr_nan_nan_inf', True)] + + for arr, correct in pairs: + val = getattr(self, arr) + try: + self.check_bool(nanops._has_infs, val, correct) + except BaseException as exc: + exc.args += (arr,) + raise + + for arr, correct in pairs_float: + val = getattr(self, arr) + try: + self.check_bool(nanops._has_infs, val, correct) + self.check_bool(nanops._has_infs, val.astype('f4'), correct) + self.check_bool(nanops._has_infs, val.astype('f2'), correct) + except BaseException as exc: + exc.args += (arr,) + raise + + def test__isfinite(self): + pairs = [('arr_complex', False), + ('arr_int', False), + ('arr_bool', False), + ('arr_str', False), + ('arr_utf', False), + ('arr_complex', False), + ('arr_complex_nan', True), + + ('arr_nan_nanj', True), + ('arr_nan_infj', True), + ('arr_complex_nan_infj', True)] + pairs_float = [('arr_float', False), + ('arr_nan', True), + ('arr_float_nan', True), + ('arr_nan_nan', True), + + 
('arr_float_inf', True), + ('arr_inf', True), + ('arr_nan_inf', True), + ('arr_float_nan_inf', True), + ('arr_nan_nan_inf', True)] + + func1 = lambda x: np.any(nanops._isfinite(x).ravel()) + func2 = lambda x: np.any(nanops._isfinite(x).values.ravel()) + for arr, correct in pairs: + val = getattr(self, arr) + try: + self.check_bool(func1, val, correct) + except BaseException as exc: + exc.args += (arr,) + raise + + for arr, correct in pairs_float: + val = getattr(self, arr) + try: + self.check_bool(func1, val, correct) + self.check_bool(func1, val.astype('f4'), correct) + self.check_bool(func1, val.astype('f2'), correct) + except BaseException as exc: + exc.args += (arr,) + raise + + def test__bn_ok_dtype(self): + self.assertTrue(nanops._bn_ok_dtype(self.arr_float.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_complex.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_int.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_bool.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_str.dtype, 'test')) + self.assertTrue(nanops._bn_ok_dtype(self.arr_utf.dtype, 'test')) + self.assertFalse(nanops._bn_ok_dtype(self.arr_date.dtype, 'test')) + self.assertFalse(nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test')) + self.assertFalse(nanops._bn_ok_dtype(self.arr_obj.dtype, 'test')) + + +class TestEnsureNumeric(tm.TestCase): + def test_numeric_values(self): + # Test integer + self.assertEqual(nanops._ensure_numeric(1), 1, 'Failed for int') + # Test float + self.assertEqual(nanops._ensure_numeric(1.1), 1.1, 'Failed for float') + # Test complex + self.assertEqual(nanops._ensure_numeric(1 + 2j), 1 + 2j, + 'Failed for complex') + + def test_ndarray(self): + # Test numeric ndarray + values = np.array([1, 2, 3]) + self.assertTrue(np.allclose(nanops._ensure_numeric(values), values), + 'Failed for numeric ndarray') + + # Test object ndarray + o_values = values.astype(object) + self.assertTrue(np.allclose(nanops._ensure_numeric(o_values), 
values), + 'Failed for object ndarray') + + # Test convertible string ndarray + s_values = np.array(['1', '2', '3'], dtype=object) + self.assertTrue(np.allclose(nanops._ensure_numeric(s_values), values), + 'Failed for convertible string ndarray') + + # Test non-convertible string ndarray + s_values = np.array(['foo', 'bar', 'baz'], dtype=object) + self.assertRaises(ValueError, + lambda: nanops._ensure_numeric(s_values)) + + def test_convertable_values(self): + self.assertTrue(np.allclose(nanops._ensure_numeric('1'), 1.0), + 'Failed for convertible integer string') + self.assertTrue(np.allclose(nanops._ensure_numeric('1.1'), 1.1), + 'Failed for convertible float string') + self.assertTrue(np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j), + 'Failed for convertible complex string') + + def test_non_convertable_values(self): + self.assertRaises(TypeError, + lambda: nanops._ensure_numeric('foo')) + self.assertRaises(TypeError, + lambda: nanops._ensure_numeric({})) + self.assertRaises(TypeError, + lambda: nanops._ensure_numeric([])) + + def test_fail(self): + self.assertTrue(False) + + +if __name__ == '__main__': + import nose + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', + '-s'], exit=False)
This pull request accomplishes two main things: 1. It adds unit tests for `nanops` 2. It fixes several bugs identified by the unit tests related #7352 related #7353 related #7354 related #7357
https://api.github.com/repos/pandas-dev/pandas/pulls/7358
2014-06-05T17:08:03Z
2014-06-11T14:33:38Z
null
2014-06-22T15:28:29Z
ENH/GBY: add nlargest/nsmallest to Series.groupby
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index c0db87d58ef08..22f1414c4f2b0 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -664,6 +664,18 @@ In this example, we chopped the collection of time series into yearly chunks then independently called :ref:`fillna <missing_data.fillna>` on the groups. +.. versionadded:: 0.14.1 + +The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys: + +.. ipython:: python + + s = Series([9, 8, 7, 5, 19, 1, 4.2, 3.3]) + g = Series(list('abababab')) + gb = s.groupby(g) + gb.nlargest(3) + gb.nsmallest(3) + .. _groupby.apply: Flexible ``apply`` diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 9373b59025399..1cb6aadf3f40f 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -114,6 +114,9 @@ Enhancements - Implemented ``sem`` (standard error of the mean) operation for ``Series``, ``DataFrame``, ``Panel``, and ``Groupby`` (:issue:`6897`) +- Add ``nlargest`` and ``nsmallest`` to the ``Series`` ``groupby`` whitelist, + which means you can now use these methods on a ``SeriesGroupBy`` object + (:issue:`7053`). 
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 2714e9f22cd95..e6af3c20bea00 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -78,7 +78,8 @@ _series_apply_whitelist = \ (_common_apply_whitelist - set(['boxplot'])) | \ - frozenset(['dtype', 'value_counts', 'unique', 'nunique']) + frozenset(['dtype', 'value_counts', 'unique', 'nunique', + 'nlargest', 'nsmallest']) _dataframe_apply_whitelist = \ _common_apply_whitelist | frozenset(['dtypes', 'corrwith']) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 1f1853186ac8a..1da51ce824120 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4047,6 +4047,7 @@ def test_groupby_whitelist(self): 'value_counts', 'diff', 'unique', 'nunique', + 'nlargest', 'nsmallest', ]) for obj, whitelist in zip((df, s), @@ -4381,6 +4382,27 @@ def test_max_nan_bug(self): tm.assert_frame_equal(r, e) self.assertFalse(r['File'].isnull().any()) + def test_nlargest(self): + a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) + b = Series(list('a' * 5 + 'b' * 5)) + gb = a.groupby(b) + r = gb.nlargest(3) + e = Series([7, 5, 3, 10, 9, 6], + index=MultiIndex.from_arrays([list('aaabbb'), + [3, 2, 1, 9, 5, 8]])) + tm.assert_series_equal(r, e) + + def test_nsmallest(self): + a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) + b = Series(list('a' * 5 + 'b' * 5)) + gb = a.groupby(b) + r = gb.nsmallest(3) + e = Series([1, 2, 3, 0, 4, 6], + index=MultiIndex.from_arrays([list('aaabbb'), + [0, 4, 1, 6, 7, 8]])) + tm.assert_series_equal(r, e) + + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
closes #7053
https://api.github.com/repos/pandas-dev/pandas/pulls/7356
2014-06-05T16:49:29Z
2014-06-09T03:00:56Z
2014-06-09T03:00:56Z
2014-06-20T10:13:21Z
CLN: Simplify boxplot and tests
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index e0bb179132b34..c49607eef1b42 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -356,6 +356,54 @@ def _check_has_errorbars(self, axes, xerr=0, yerr=0): self.assertEqual(xerr, xerr_count) self.assertEqual(yerr, yerr_count) + def _check_box_return_type(self, returned, return_type, expected_keys=None): + """ + Check box returned type is correct + + Parameters + ---------- + returned : object to be tested, returned from boxplot + return_type : str + return_type passed to boxplot + expected_keys : list-like, optional + group labels in subplot case. If not passed, + the function checks assuming boxplot uses single ax + """ + from matplotlib.axes import Axes + types = {'dict': dict, 'axes': Axes, 'both': tuple} + if expected_keys is None: + # should be fixed when the returning default is changed + if return_type is None: + return_type = 'dict' + + self.assertTrue(isinstance(returned, types[return_type])) + if return_type == 'both': + self.assertIsInstance(returned.ax, Axes) + self.assertIsInstance(returned.lines, dict) + else: + # should be fixed when the returning default is changed + if return_type is None: + for r in self._flatten_visible(returned): + self.assertIsInstance(r, Axes) + return + + self.assertTrue(isinstance(returned, OrderedDict)) + self.assertEqual(sorted(returned.keys()), sorted(expected_keys)) + for key, value in iteritems(returned): + self.assertTrue(isinstance(value, types[return_type])) + # check returned dict has correct mapping + if return_type == 'axes': + self.assertEqual(value.get_title(), key) + elif return_type == 'both': + self.assertEqual(value.ax.get_title(), key) + self.assertIsInstance(value.ax, Axes) + self.assertIsInstance(value.lines, dict) + elif return_type == 'dict': + line = value['medians'][0] + self.assertEqual(line.get_axes().get_title(), key) + else: + raise AssertionError + @tm.mplskip class 
TestSeriesPlots(TestPlotBase): @@ -1421,65 +1469,20 @@ def test_boxplot_return_type(self): with tm.assert_produces_warning(FutureWarning): result = df.boxplot() - self.assertIsInstance(result, dict) # change to Axes in future + # change to Axes in future + self._check_box_return_type(result, 'dict') with tm.assert_produces_warning(False): result = df.boxplot(return_type='dict') - self.assertIsInstance(result, dict) + self._check_box_return_type(result, 'dict') with tm.assert_produces_warning(False): result = df.boxplot(return_type='axes') - self.assertIsInstance(result, mpl.axes.Axes) + self._check_box_return_type(result, 'axes') with tm.assert_produces_warning(False): result = df.boxplot(return_type='both') - self.assertIsInstance(result, tuple) - - @slow - def test_boxplot_return_type_by(self): - import matplotlib as mpl - - df = DataFrame(np.random.randn(10, 2)) - df['g'] = ['a'] * 5 + ['b'] * 5 - - # old style: return_type=None - result = df.boxplot(by='g') - self.assertIsInstance(result, np.ndarray) - self.assertIsInstance(result[0], mpl.axes.Axes) - - result = df.boxplot(by='g', return_type='dict') - self.assertIsInstance(result, dict) - self.assertIsInstance(result[0], dict) - - result = df.boxplot(by='g', return_type='axes') - self.assertIsInstance(result, dict) - self.assertIsInstance(result[0], mpl.axes.Axes) - - result = df.boxplot(by='g', return_type='both') - self.assertIsInstance(result, dict) - self.assertIsInstance(result[0], tuple) - self.assertIsInstance(result[0][0], mpl.axes.Axes) - self.assertIsInstance(result[0][1], dict) - - # now for groupby - with tm.assert_produces_warning(FutureWarning): - result = df.groupby('g').boxplot() - self.assertIsInstance(result, dict) - self.assertIsInstance(result['a'], dict) - - result = df.groupby('g').boxplot(return_type='dict') - self.assertIsInstance(result, dict) - self.assertIsInstance(result['a'], dict) - - result = df.groupby('g').boxplot(return_type='axes') - self.assertIsInstance(result, dict) - 
self.assertIsInstance(result['a'], mpl.axes.Axes) - - result = df.groupby('g').boxplot(return_type='both') - self.assertIsInstance(result, dict) - self.assertIsInstance(result['a'], tuple) - self.assertIsInstance(result['a'][0], mpl.axes.Axes) - self.assertIsInstance(result['a'][1], dict) + self._check_box_return_type(result, 'both') @slow def test_kde(self): @@ -2278,47 +2281,39 @@ def test_grouped_hist(self): with tm.assertRaises(AttributeError): plotting.grouped_hist(df.A, by=df.C, foo='bar') - def _check_box_dict(self, returned, return_type, - expected_klass, expected_keys): - self.assertTrue(isinstance(returned, OrderedDict)) - self.assertEqual(sorted(returned.keys()), sorted(expected_keys)) - for key, value in iteritems(returned): - self.assertTrue(isinstance(value, expected_klass)) - # check returned dict has correct mapping - if return_type == 'axes': - self.assertEqual(value.get_title(), key) - elif return_type == 'both': - self.assertEqual(value.ax.get_title(), key) - elif return_type == 'dict': - line = value['medians'][0] - self.assertEqual(line.get_axes().get_title(), key) - else: - raise AssertionError - @slow def test_grouped_box_return_type(self): - import matplotlib.axes - df = self.hist_df + # old style: return_type=None + result = df.boxplot(by='gender') + self.assertIsInstance(result, np.ndarray) + self._check_box_return_type(result, None, + expected_keys=['height', 'weight', 'category']) + + # now for groupby + with tm.assert_produces_warning(FutureWarning): + result = df.groupby('gender').boxplot() + self._check_box_return_type(result, 'dict', expected_keys=['Male', 'Female']) + columns2 = 'X B C D A G Y N Q O'.split() df2 = DataFrame(random.randn(50, 10), columns=columns2) categories2 = 'A B C D E F G H I J'.split() df2['category'] = categories2 * 5 - types = {'dict': dict, 'axes': matplotlib.axes.Axes, 'both': tuple} - for t, klass in iteritems(types): + for t in ['dict', 'axes', 'both']: returned = 
df.groupby('classroom').boxplot(return_type=t) - self._check_box_dict(returned, t, klass, ['A', 'B', 'C']) + self._check_box_return_type(returned, t, expected_keys=['A', 'B', 'C']) returned = df.boxplot(by='classroom', return_type=t) - self._check_box_dict(returned, t, klass, ['height', 'weight', 'category']) + self._check_box_return_type(returned, t, + expected_keys=['height', 'weight', 'category']) returned = df2.groupby('category').boxplot(return_type=t) - self._check_box_dict(returned, t, klass, categories2) + self._check_box_return_type(returned, t, expected_keys=categories2) returned = df2.boxplot(by='category', return_type=t) - self._check_box_dict(returned, t, klass, columns2) + self._check_box_return_type(returned, t, expected_keys=columns2) @slow def test_grouped_box_layout(self): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 814c1f60cea50..37a982acc0bbd 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2323,13 +2323,11 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None, if return_type not in valid_types: raise ValueError("return_type") - from pandas import Series, DataFrame if isinstance(data, Series): data = DataFrame({'x': data}) column = 'x' - def _get_colors(): return _get_standard_colors(color=kwds.get('color'), num_colors=1) @@ -2340,8 +2338,9 @@ def maybe_color_bp(bp): setp(bp['whiskers'],color=colors[0],alpha=1) setp(bp['medians'],color=colors[2],alpha=1) - def plot_group(grouped, ax): - keys, values = zip(*grouped) + BP = namedtuple("Boxplot", ['ax', 'lines']) # namedtuple to hold results + + def plot_group(keys, values, ax): keys = [com.pprint_thing(x) for x in keys] values = [remove_na(v) for v in values] bp = ax.boxplot(values, **kwds) @@ -2350,7 +2349,14 @@ def plot_group(grouped, ax): else: ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize) maybe_color_bp(bp) - return bp + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type == 'dict': + return bp 
+ elif return_type == 'both': + return BP(ax=ax, lines=bp) + else: + return ax colors = _get_colors() if column is None: @@ -2361,56 +2367,14 @@ def plot_group(grouped, ax): else: columns = [column] - BP = namedtuple("Boxplot", ['ax', 'lines']) # namedtuple to hold results - if by is not None: - fig, axes, d = _grouped_plot_by_column(plot_group, data, columns=columns, - by=by, grid=grid, figsize=figsize, - ax=ax, layout=layout) - - # Return axes in multiplot case, maybe revisit later # 985 - if return_type is None: - ret = axes - if return_type == 'axes': - ret = compat.OrderedDict() - axes = _flatten(axes)[:len(d)] - for k, ax in zip(d.keys(), axes): - ret[k] = ax - elif return_type == 'dict': - ret = d - elif return_type == 'both': - ret = compat.OrderedDict() - axes = _flatten(axes)[:len(d)] - for (k, line), ax in zip(d.items(), axes): - ret[k] = BP(ax=ax, lines=line) + result = _grouped_plot_by_column(plot_group, data, columns=columns, + by=by, grid=grid, figsize=figsize, + ax=ax, layout=layout, return_type=return_type) else: if layout is not None: raise ValueError("The 'layout' keyword is not supported when " "'by' is None") - if ax is None: - ax = _gca() - fig = ax.get_figure() - data = data._get_numeric_data() - if columns: - cols = columns - else: - cols = data.columns - keys = [com.pprint_thing(x) for x in cols] - - # Return boxplot dict in single plot case - - clean_values = [remove_na(x) for x in data[cols].values.T] - - bp = ax.boxplot(clean_values, **kwds) - maybe_color_bp(bp) - - if kwds.get('vert', 1): - ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize) - else: - ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize) - ax.grid(grid) - - ret = ax if return_type is None: msg = ("\nThe default value for 'return_type' will change to " @@ -2420,13 +2384,18 @@ def plot_group(grouped, ax): "return_type='dict'.") warnings.warn(msg, FutureWarning) return_type = 'dict' - if return_type == 'dict': - ret = bp - elif return_type == 'both': - ret = 
BP(ax=ret, lines=bp) + if ax is None: + ax = _gca() + data = data._get_numeric_data() + if columns is None: + columns = data.columns + else: + data = data[columns] - fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) - return ret + result = plot_group(columns, data.values.T, ax) + ax.grid(grid) + + return result def format_date_labels(ax, rot): @@ -2734,7 +2703,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, if subplots is True: naxes = len(grouped) nrows, ncols = _get_layout(naxes, layout=layout) - _, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, squeeze=False, + fig, axes = _subplots(nrows=nrows, ncols=ncols, naxes=naxes, squeeze=False, sharex=False, sharey=True) axes = _flatten(axes) @@ -2744,6 +2713,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, rot=rot, grid=grid, **kwds) ax.set_title(com.pprint_thing(key)) ret[key] = d + fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) else: from pandas.tools.merge import concat keys, frames = zip(*grouped) @@ -2795,9 +2765,8 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, def _grouped_plot_by_column(plotf, data, columns=None, by=None, numeric_only=True, grid=False, - figsize=None, ax=None, layout=None, **kwargs): - from pandas.core.frame import DataFrame - + figsize=None, ax=None, layout=None, return_type=None, + **kwargs): grouped = data.groupby(by) if columns is None: if not isinstance(by, (list, tuple)): @@ -2818,20 +2787,26 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, ravel_axes = _flatten(axes) - out_dict = compat.OrderedDict() + result = compat.OrderedDict() for i, col in enumerate(columns): ax = ravel_axes[i] gp_col = grouped[col] - re_plotf = plotf(gp_col, ax, **kwargs) + keys, values = zip(*gp_col) + re_plotf = plotf(keys, values, ax, **kwargs) ax.set_title(col) ax.set_xlabel(com.pprint_thing(by)) + result[col] = re_plotf ax.grid(grid) - 
out_dict[col] = re_plotf + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type is None: + result = axes byline = by[0] if len(by) == 1 else by fig.suptitle('Boxplot grouped by %s' % byline) + fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) - return fig, axes, out_dict + return result def table(ax, data, rowLabels=None, colLabels=None,
Simplified `boxplot` func, and removed duplicated tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/7351
2014-06-05T15:33:48Z
2014-06-17T14:36:59Z
2014-06-17T14:36:59Z
2014-06-17T14:37:02Z
BUG: Bug in .loc with a list of indexers on a single-multi index level (that is not nested) GH7349
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index f83cd50bbd8c5..0bc30e14ed5fc 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -21,8 +21,6 @@ users upgrade to this version. - :ref:`Deprecations <whatsnew_0141.deprecations>` -- :ref:`Known Issues <whatsnew_0141.knownissues>` - - :ref:`Bug Fixes <whatsnew_0141.bug_fixes>` .. _whatsnew_0141.api: @@ -36,22 +34,21 @@ API changes containing ``NaN`` values - now also has ``dtype=object`` instead of ``float`` (:issue:`7242`) +- `StringMethods`` now work on empty Series (:issue:`7242`) + .. _whatsnew_0141.prior_deprecations: Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There are prior version deprecations that are taking effect as of 0.14.1. +There are no prior version deprecations that are taking effect as of 0.14.1. .. _whatsnew_0141.deprecations: Deprecations ~~~~~~~~~~~~ -.. _whatsnew_0141.knownissues: - -Known Issues -~~~~~~~~~~~~ +There are no deprecations that are taking effect as of 0.14.1. .. 
_whatsnew_0141.enhancements: @@ -118,4 +115,4 @@ Bug Fixes - Bug in broadcasting with ``.div``, integer dtypes and divide-by-zero (:issue:`7325`) - Bug in ``CustomBusinessDay.apply`` raiases ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) - Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) -- Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) +- Bug in ``.loc`` with a list of indexers on a single-multi index level (that is not nested) (:issue:`7349`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 518879105aa8b..68e5810751d08 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1243,23 +1243,37 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False): return self._get_slice_axis(key, axis=axis) elif com._is_bool_indexer(key): return self._getbool_axis(key, axis=axis) - elif _is_list_like(key) and not (isinstance(key, tuple) and - isinstance(labels, MultiIndex)): + elif _is_list_like(key): - if hasattr(key, 'ndim') and key.ndim > 1: - raise ValueError('Cannot index with multidimensional key') + # GH 7349 + # possibly convert a list-like into a nested tuple + # but don't convert a list-like of tuples + if isinstance(labels, MultiIndex): + if not isinstance(key, tuple) and len(key) > 1 and not isinstance(key[0], tuple): + key = tuple([key]) - if validate_iterable: - self._has_valid_type(key, axis) - return self._getitem_iterable(key, axis=axis) - elif _is_nested_tuple(key, labels): - locs = labels.get_locs(key) - indexer = [ slice(None) ] * self.ndim - indexer[axis] = locs - return self.obj.iloc[tuple(indexer)] - else: - self._has_valid_type(key, axis) - return self._get_label(key, axis=axis) + # an iterable multi-selection + if not (isinstance(key, tuple) and + isinstance(labels, MultiIndex)): + + if hasattr(key, 'ndim') and key.ndim > 1: + raise ValueError('Cannot index with multidimensional key') + + if validate_iterable: + 
self._has_valid_type(key, axis) + + return self._getitem_iterable(key, axis=axis) + + # nested tuple slicing + if _is_nested_tuple(key, labels): + locs = labels.get_locs(key) + indexer = [ slice(None) ] * self.ndim + indexer[axis] = locs + return self.obj.iloc[tuple(indexer)] + + # fall thru to straight lookup + self._has_valid_type(key, axis) + return self._get_label(key, axis=axis) class _iLocIndexer(_LocationIndexer): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index b61b1ab925396..062950cad43ed 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1317,6 +1317,15 @@ def test_loc_multiindex(self): result = df[attributes] assert_frame_equal(result, df) + # GH 7349 + # loc with a multi-index seems to be doing fallback + df = DataFrame(np.arange(12).reshape(-1,1),index=pd.MultiIndex.from_product([[1,2,3,4],[1,2,3]])) + + expected = df.loc[([1,2],),:] + result = df.loc[[1,2]] + assert_frame_equal(result, expected) + + def test_series_getitem_multiindex(self): # GH 6018
closes #7349
https://api.github.com/repos/pandas-dev/pandas/pulls/7350
2014-06-05T12:34:39Z
2014-06-05T13:30:46Z
2014-06-05T13:30:46Z
2014-06-27T12:37:06Z
TST7337: Try to resolve windows test failures.
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index e7385400e5962..4ae3508e06dd5 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1029,7 +1029,7 @@ cdef inline object _get_zone(object tz): return 'UTC' else: if _treat_tz_as_dateutil(tz): - return 'dateutil/' + tz._filename.split('zoneinfo/')[1] + return 'dateutil/' + tz._filename else: # tz is a pytz timezone or unknown. try: @@ -1047,9 +1047,8 @@ cpdef inline object maybe_get_tz(object tz): Otherwise, just return tz. ''' if isinstance(tz, string_types): - split_tz = tz.split('/', 1) - if split_tz[0] == 'dateutil': - tz = _dateutil_gettz(split_tz[1]) + if tz.startswith('dateutil/'): + tz = _dateutil_gettz(tz[9:]) else: tz = pytz.timezone(tz) return tz
This change fixes broken tests on windows following `ENH: Support dateutil timezones` closes #7337
https://api.github.com/repos/pandas-dev/pandas/pulls/7346
2014-06-04T20:19:04Z
2014-06-04T20:21:00Z
null
2014-07-09T11:44:26Z
COMPAT: tests compat with current timezones (pytz) (GH7339)
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py index b3ae02320037c..5fb1f9db620ae 100644 --- a/pandas/tseries/tests/test_timezones.py +++ b/pandas/tseries/tests/test_timezones.py @@ -403,7 +403,8 @@ def test_with_tz(self): # normalized central = dr.tz_convert(tz) self.assertIs(central.tz, tz) - self.assertIs(central[0].tz, tz) + comp = self.localize(tz, central[0].to_pydatetime().replace(tzinfo=None)).tzinfo + self.assertIs(central[0].tz, comp) # compare vs a localized tz comp = self.localize(tz, dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo
closes #7339
https://api.github.com/repos/pandas-dev/pandas/pulls/7343
2014-06-04T18:16:06Z
2014-06-04T20:17:29Z
2014-06-04T20:17:29Z
2014-07-12T06:28:16Z
PERF: better dtype inference for perf gains (GH7332)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index bb4ecddd58f16..2cc08abf9c57c 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -75,6 +75,10 @@ Enhancements Performance ~~~~~~~~~~~ +- Improvements in dtype inference for numeric operations involving yielding performance gains +for dtypes: ``int64``, ``timedelta64``, ``datetime64`` (:issue:`7223`) + + Experimental ~~~~~~~~~~~~ diff --git a/pandas/core/common.py b/pandas/core/common.py index d993112933fa9..e9ae26d0c7c81 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1753,7 +1753,7 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False): elif is_timedelta64: from pandas.tseries.timedeltas import \ _possibly_cast_to_timedelta - value = _possibly_cast_to_timedelta(value, coerce='compat') + value = _possibly_cast_to_timedelta(value, coerce='compat', dtype=dtype) except: pass diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 72a31296ba456..0f19634cb5a38 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -333,7 +333,7 @@ def _convert_to_array(self, values, name=None, other=None): values = values.to_series() elif inferred_type in ('timedelta', 'timedelta64'): # have a timedelta, convert to to ns here - values = _possibly_cast_to_timedelta(values, coerce=coerce) + values = _possibly_cast_to_timedelta(values, coerce=coerce, dtype='timedelta64[ns]') elif inferred_type == 'integer': # py3 compat where dtype is 'm' but is an integer if values.dtype.kind == 'm': diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 34060d0c57a4e..3aa71ad02ba6a 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -17,29 +17,47 @@ def is_complex(object obj): return util.is_complex_object(obj) _TYPE_MAP = { - np.int8: 'integer', - np.int16: 'integer', - np.int32: 'integer', - np.int64: 'integer', - np.uint8: 'integer', - np.uint16: 'integer', - np.uint32: 'integer', - np.uint64: 'integer', - np.float32: 'floating', - np.float64: 
'floating', - np.complex128: 'complex', - np.complex128: 'complex', - np.string_: 'string', - np.unicode_: 'unicode', - np.bool_: 'boolean', - np.datetime64 : 'datetime64', - np.timedelta64 : 'timedelta64' + 'int8': 'integer', + 'int16': 'integer', + 'int32': 'integer', + 'int64': 'integer', + 'i' : 'integer', + 'uint8': 'integer', + 'uint16': 'integer', + 'uint32': 'integer', + 'uint64': 'integer', + 'u' : 'integer', + 'float32': 'floating', + 'float64': 'floating', + 'f' : 'floating', + 'complex128': 'complex', + 'c' : 'complex', + 'string': 'string', + 'S' : 'string', + 'unicode': 'unicode', + 'U' : 'unicode', + 'bool': 'boolean', + 'b' : 'boolean', + 'datetime64[ns]' : 'datetime64', + 'M' : 'datetime64', + 'timedelta64[ns]' : 'timedelta64', + 'm' : 'timedelta64', } +# types only exist on certain platform try: - _TYPE_MAP[np.float128] = 'floating' - _TYPE_MAP[np.complex256] = 'complex' - _TYPE_MAP[np.float16] = 'floating' + np.float128 + _TYPE_MAP['float128'] = 'floating' +except AttributeError: + pass +try: + np.complex256 + _TYPE_MAP['complex256'] = 'complex' +except AttributeError: + pass +try: + np.float16 + _TYPE_MAP['float16'] = 'floating' except AttributeError: pass @@ -60,7 +78,10 @@ def infer_dtype(object _values): values = getattr(values, 'values', values) - val_kind = values.dtype.type + val_name = values.dtype.name + if val_name in _TYPE_MAP: + return _TYPE_MAP[val_name] + val_kind = values.dtype.kind if val_kind in _TYPE_MAP: return _TYPE_MAP[val_kind] diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index 0a5693cc55466..b812c0637b0ad 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -156,12 +156,13 @@ def convert(r=None, unit=None, m=m): # no converter raise ValueError("cannot create timedelta string converter for [{0}]".format(r)) -def _possibly_cast_to_timedelta(value, coerce=True): +def _possibly_cast_to_timedelta(value, coerce=True, dtype=None): """ try to cast to timedelta64, if already a 
timedeltalike, then make sure that we are [ns] (as numpy 1.6.2 is very buggy in this regards, don't force the conversion unless coerce is True if coerce='compat' force a compatibilty coercerion (to timedeltas) if needeed + if dtype is passed then this is the target dtype """ # coercion compatability @@ -201,10 +202,16 @@ def convert(td, dtype): return np.array([ convert(v,dtype) for v in value ], dtype='m8[ns]') # deal with numpy not being able to handle certain timedelta operations - if isinstance(value, (ABCSeries, np.ndarray)) and value.dtype.kind == 'm': - if value.dtype != 'timedelta64[ns]': + if isinstance(value, (ABCSeries, np.ndarray)): + + # i8 conversions + if value.dtype == 'int64' and np.dtype(dtype) == 'timedelta64[ns]': value = value.astype('timedelta64[ns]') - return value + return value + elif value.dtype.kind == 'm': + if value.dtype != 'timedelta64[ns]': + value = value.astype('timedelta64[ns]') + return value # we don't have a timedelta, but we want to try to convert to one (but # don't force it) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index e7385400e5962..491997d680ce7 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -148,7 +148,7 @@ cdef inline bint _is_fixed_offset(object tz): else: return 0 return 1 - + _zero_time = datetime_time(0, 0) @@ -340,7 +340,7 @@ class Timestamp(_Timestamp): @property def is_year_end(self): return self._get_start_end_field('is_year_end') - + def tz_localize(self, tz): """ Convert naive Timestamp to local time zone @@ -994,7 +994,7 @@ cdef inline void _localize_tso(_TSObject obj, object tz): pandas_datetime_to_datetimestruct(obj.value + deltas[0], PANDAS_FR_ns, &obj.dts) else: - pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts) + pandas_datetime_to_datetimestruct(obj.value, PANDAS_FR_ns, &obj.dts) obj.tzinfo = tz elif _treat_tz_as_pytz(tz): inf = tz._transition_info[pos] @@ -1044,7 +1044,7 @@ cdef inline object _get_zone(object tz): cpdef inline object maybe_get_tz(object tz): ''' 
(Maybe) Construct a timezone object from a string. If tz is a string, use it to construct a timezone object. - Otherwise, just return tz. + Otherwise, just return tz. ''' if isinstance(tz, string_types): split_tz = tz.split('/', 1) @@ -1338,7 +1338,7 @@ def array_to_timedelta64(ndarray[object] values, coerce=False): def convert_to_timedelta(object ts, object unit='ns', coerce=False): return convert_to_timedelta64(ts, unit, coerce) -cdef convert_to_timedelta64(object ts, object unit, object coerce): +cdef inline convert_to_timedelta64(object ts, object unit, object coerce): """ Convert an incoming object to a timedelta64 if possible @@ -1953,9 +1953,9 @@ cdef inline bint _treat_tz_as_dateutil(object tz): cdef inline object _tz_cache_key(object tz): """ Return the key in the cache for the timezone info object or None if unknown. - + The key is currently the tz string for pytz timezones, the filename for dateutil timezones. - + Notes ===== This cannot just be the hash of a timezone object. 
Unfortunately, the hashes of two dateutil tz objects @@ -2137,7 +2137,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, bint infer_dst=False): # right side idx_shifted = _ensure_int64( np.maximum(0, trans.searchsorted(vals + DAY_NS, side='right') - 1)) - + for i in range(n): v = vals[i] - deltas[idx_shifted[i]] pos = bisect_right_i8(tdata, v, ntrans) - 1 @@ -2517,7 +2517,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts) dom = dts.day - + if dom == 1: out[i] = 1 return out.view(bool) @@ -2535,7 +2535,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N doy = mo_off + dom ldom = _month_offset[isleap, dts.month] dow = ts_dayofweek(ts) - + if (ldom == doy and dow < 5) or (dow == 4 and (ldom - doy <= 2)): out[i] = 1 return out.view(bool) @@ -2549,9 +2549,9 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N dom = dts.day doy = mo_off + dom ldom = _month_offset[isleap, dts.month] - + if ldom == doy: - out[i] = 1 + out[i] = 1 return out.view(bool) elif field == 'is_quarter_start': @@ -2565,7 +2565,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N dow = ts_dayofweek(ts) if ((dts.month - start_month) % 3 == 0) and ((dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): - out[i] = 1 + out[i] = 1 return out.view(bool) else: for i in range(count): @@ -2573,9 +2573,9 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts) dom = dts.day - + if ((dts.month - start_month) % 3 == 0) and dom == 1: - out[i] = 1 + out[i] = 1 return out.view(bool) elif field == 'is_quarter_end': @@ -2591,9 +2591,9 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N doy = mo_off + dom ldom = _month_offset[isleap, dts.month] dow = ts_dayofweek(ts) - + if ((dts.month - 
end_month) % 3 == 0) and ((ldom == doy and dow < 5) or (dow == 4 and (ldom - doy <= 2))): - out[i] = 1 + out[i] = 1 return out.view(bool) else: for i in range(count): @@ -2605,9 +2605,9 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N dom = dts.day doy = mo_off + dom ldom = _month_offset[isleap, dts.month] - + if ((dts.month - end_month) % 3 == 0) and (ldom == doy): - out[i] = 1 + out[i] = 1 return out.view(bool) elif field == 'is_year_start': @@ -2621,7 +2621,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N dow = ts_dayofweek(ts) if (dts.month == start_month) and ((dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): - out[i] = 1 + out[i] = 1 return out.view(bool) else: for i in range(count): @@ -2649,7 +2649,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N ldom = _month_offset[isleap, dts.month] if (dts.month == end_month) and ((ldom == doy and dow < 5) or (dow == 4 and (ldom - doy <= 2))): - out[i] = 1 + out[i] = 1 return out.view(bool) else: for i in range(count): @@ -2666,7 +2666,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, object freqstr=N if (dts.month == end_month) and (ldom == doy): out[i] = 1 return out.view(bool) - + raise ValueError("Field %s not supported" % field) diff --git a/vb_suite/inference.py b/vb_suite/inference.py new file mode 100644 index 0000000000000..8855f7e654bb1 --- /dev/null +++ b/vb_suite/inference.py @@ -0,0 +1,36 @@ +from vbench.api import Benchmark +from datetime import datetime +import sys + +# from GH 7332 + +setup = """from pandas_vb_common import * +import pandas as pd +N = 500000 +df_int64 = DataFrame(dict(A = np.arange(N,dtype='int64'), B = np.arange(N,dtype='int64'))) +df_int32 = DataFrame(dict(A = np.arange(N,dtype='int32'), B = np.arange(N,dtype='int32'))) +df_uint32 = DataFrame(dict(A = np.arange(N,dtype='uint32'), B = np.arange(N,dtype='uint32'))) +df_float64 = DataFrame(dict(A = 
np.arange(N,dtype='float64'), B = np.arange(N,dtype='float64'))) +df_float32 = DataFrame(dict(A = np.arange(N,dtype='float32'), B = np.arange(N,dtype='float32'))) +df_datetime64 = DataFrame(dict(A = pd.to_datetime(np.arange(N,dtype='int64'),unit='ms'), + B = pd.to_datetime(np.arange(N,dtype='int64'),unit='ms'))) +df_timedelta64 = DataFrame(dict(A = df_datetime64['A']-df_datetime64['B'], + B = df_datetime64['B'])) +""" + +dtype_infer_int64 = Benchmark('df_int64["A"] + df_int64["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_int32 = Benchmark('df_int32["A"] + df_int32["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_uint32 = Benchmark('df_uint32["A"] + df_uint32["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_float64 = Benchmark('df_float64["A"] + df_float64["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_float32 = Benchmark('df_float32["A"] + df_float32["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_datetime64 = Benchmark('df_datetime64["A"] - df_datetime64["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_timedelta64_1 = Benchmark('df_timedelta64["A"] + df_timedelta64["B"]', setup, + start_date=datetime(2014, 1, 1)) +dtype_infer_timedelta64_2 = Benchmark('df_timedelta64["A"] + df_timedelta64["A"]', setup, + start_date=datetime(2014, 1, 1)) diff --git a/vb_suite/suite.py b/vb_suite/suite.py index a1b38e8509e4e..be9aa03801641 100644 --- a/vb_suite/suite.py +++ b/vb_suite/suite.py @@ -12,6 +12,7 @@ 'index_object', 'indexing', 'io_bench', + 'inference', 'hdfstore_bench', 'join_merge', 'miscellaneous',
closes #7332 for some reason `int64` was not being looked up correctly in the _TYPE_MAP table. changing to work on name/kind makes this more efficient. separately better inference of timedelta conversions from i8 were not being done so that yields a big boost ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- dtype_infer_timedelta64_2 | 3.9814 | 1765.3640 | 0.0023 | dtype_infer_datetime64 | 4.8450 | 1825.3207 | 0.0027 | dtype_infer_int64 | 0.7904 | 18.5820 | 0.0425 | dtype_infer_float64 | 0.8090 | 1.0493 | 0.7710 | dtype_infer_float32 | 0.3430 | 0.3823 | 0.8971 | dtype_infer_int32 | 0.3897 | 0.4017 | 0.9701 | dtype_infer_uint32 | 0.5450 | 0.5583 | 0.9762 | dtype_infer_timedelta64_1 | 118.2277 | 117.6037 | 1.0053 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [e2f5a50] : PERF: use names/kinds for dtype inference on known types Base [87660ef] : Merge pull request #6968 from ahlmss/AHLMSS_0.13.1_ahl1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7342
2014-06-04T17:02:55Z
2014-06-04T20:30:39Z
2014-06-04T20:30:39Z
2014-06-21T02:59:17Z
FIX: resample with fill_method and how #2073
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 20620f15944f0..b1e1f77d0b833 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -89,6 +89,7 @@ Bug Fixes ~~~~~~~~~ - Bug in ``Index.min`` and ``max`` doesn't handle ``nan`` and ``NaT`` properly (:issue:`7261`) +- Bug in ``resample`` where ``fill_method`` was ignored if you passed ``how`` (:issue:`7261`) - Bug in ``TimeGrouper`` doesn't exclude column specified by ``key`` (:issue:`7227`) - Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` and ``left`` keyword is specified (:issue:`7226`) diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index dd72a5245e7b2..812dd5aba71e0 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -252,6 +252,11 @@ def _resample_timestamps(self): # downsample grouped = obj.groupby(grouper, axis=self.axis) result = grouped.aggregate(self._agg_method) + # GH2073 + if self.fill_method is not None: + result = result.fillna(method=self.fill_method, + limit=self.limit) + else: # upsampling shortcut if self.axis: diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 45d17052d904b..88bacb3d7b8ab 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -869,6 +869,14 @@ def test_monthly_upsample(self): expected = expected.asfreq(targ, 'ffill').to_period() assert_series_equal(result, expected) + def test_fill_method_and_how_upsample(self): + # GH2073 + s = Series(range(9), + index=date_range('2010-01-01', periods=9, freq='Q')) + last = s.resample('M', fill_method='ffill') + both = s.resample('M', how='last', fill_method='ffill').astype('int64') + assert_series_equal(last, both) + def test_weekly_upsample(self): targets = ['D', 'B']
closes #2073
https://api.github.com/repos/pandas-dev/pandas/pulls/7341
2014-06-04T16:46:05Z
2014-06-04T19:47:32Z
2014-06-04T19:47:32Z
2014-07-12T06:26:14Z
add class=„pandas-empty“ to NaN-cells’ HTML
diff --git a/pandas/core/format.py b/pandas/core/format.py index c2f439877ca00..f344d4b009df9 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -711,6 +711,8 @@ def write_th(self, s, indent=0, tags=None): return self._write_cell(s, kind='th', indent=indent, tags=tags) def write_td(self, s, indent=0, tags=None): + if s == self.fmt.na_rep: + tags = (tags or "") + ' class="pandas-empty"' return self._write_cell(s, kind='td', indent=indent, tags=tags) def _write_cell(self, s, kind='td', indent=0, tags=None):
This PR is related to #5330, but is much simpler. There is no new API, this just allows styling empty cells using CSS, e.g. using ``` css td.pandas-empty { color: #eee } ``` to de-emphasize, or ``` css td.pandas-empty { background-color: yellow } ``` to let empty cells stand out.
https://api.github.com/repos/pandas-dev/pandas/pulls/7338
2014-06-04T14:33:26Z
2014-06-05T16:04:24Z
null
2014-06-14T15:47:54Z
BUG: Series.map fails when keys are tuples of different lengths (#7333)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index ff35ce9ca3069..60dbaf9d7427e 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -116,3 +116,4 @@ Bug Fixes - Bug in ``CustomBusinessDay.apply`` raiases ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) - Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) - Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) +- Bug in ``Series.map`` when mapping a dict with tuple keys of different lengths (:issue:`7333`) diff --git a/pandas/core/series.py b/pandas/core/series.py index ea3656662ab06..b66b74a011c4d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1940,7 +1940,7 @@ def map_f(values, f): if isinstance(arg, (dict, Series)): if isinstance(arg, dict): - arg = self._constructor(arg) + arg = self._constructor(arg, index=arg.keys()) indexer = arg.index.get_indexer(values) new_values = com.take_1d(arg.values, indexer) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 44587248e6d51..6421986d75f61 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4860,6 +4860,25 @@ def test_map_na_exclusion(self): exp = s * 2 assert_series_equal(result, exp) + def test_map_dict_with_tuple_keys(self): + ''' + Due to new MultiIndex-ing behaviour in v0.14.0, + dicts with tuple keys passed to map were being + converted to a multi-index, preventing tuple values + from being mapped properly. + ''' + df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]}) + label_mappings = { + (1,): 'A', + (2,): 'B', + (3, 4): 'A', + (5, 6): 'B' + } + df['labels'] = df['a'].map(label_mappings) + df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index) + # All labels should be filled now + tm.assert_series_equal(df['labels'], df['expected_labels']) + def test_apply(self): assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
closes #7333 seems to be due to the new behaviour of `Series`, which will automatically create a MultiIndex out of the dict's keys when creating a series to map the values. The MultiIndex doesn't match the values for the shorter tuples, so they fail to map. The fix is fairly simple, just pass the dict keys explicitly as the index. I've added a test case for this specific issue, but could add some more if others can see related issues that would arise from this. I'll try to finalize the PR (squashing into a single commit, rebasing if necessary) tomorrow if it looks good.
https://api.github.com/repos/pandas-dev/pandas/pulls/7336
2014-06-04T13:22:59Z
2014-06-05T17:57:28Z
2014-06-05T17:57:28Z
2014-06-16T14:59:08Z
ENH group attribute access for HDFStore
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 214994a6fc185..b776b9afe4417 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -889,6 +889,8 @@ def to_hdf(self, path_or_buf, key, **kwargs): in the store wherever possible fletcher32 : bool, default False If applying compression use the fletcher32 checksum + attrs : dict, default None + Also store each value inside an attribute of the group """ diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index cee1867e73179..9f3882d90ddf2 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -40,6 +40,7 @@ import pandas.tslib as tslib from contextlib import contextmanager +from collections import namedtuple from distutils.version import LooseVersion # versioning attribute @@ -65,6 +66,7 @@ def _ensure_encoding(encoding): Term = Expr +_raise_attribute = object() def _ensure_term(where, scope_level): """ @@ -341,6 +343,20 @@ def read_hdf(path_or_buf, key, **kwargs): # a passed store; user controls open/close f(path_or_buf, False) +def get_attrs(path_or_buf, key, attrs, default=_raise_attribute, mode='r'): + if isinstance(path_or_buf, string_types): + with get_store(path_or_buf, mode=mode) as store: + return store.get_attrs(key, attrs, default) + else: + return path_or_buf.get_attrs(key, attrs, default) + +def set_attrs(path_or_buf, key, mode='a', **attrs): + if isinstance(path_or_buf, string_types): + with get_store(path_or_buf, mode=mode) as store: + store.set_attrs(key, **attrs) + else: + path_or_buf.set_attrs(key, **attrs) + class HDFStore(StringMixin): @@ -578,6 +594,58 @@ def is_open(self): return False return bool(self._handle.isopen) + def get_attrs(self, key, attrs, default=_raise_attribute): + """ + get values from a groups attributes + + Paramters + --------- + key : object + attrs : list or str + specifies attributes to return as namedtuple + default : object + optinal default value when an attribute is not found + + Returns + ------- + nametuple with requested 
attributes or single value when attrs is a + string specifing a single attribute + + Exceptions + ---------- + raises KeyError when group is not found + raises ValueError when attrs argument can't be used as namedtuple + raises AttributeError when an attribute is not in group and no default + value is given + """ + node = self.get_node(key) + if node is None: + raise KeyError('No group named %s in the file' % key) + return _get_attrs(node, attrs, default) + + def set_attrs(self, key, **attrs): + """ + sets attributes of a node + + Note that the size of the metastore for a group inside a hdf5 file is + limited and already used for internal metadata, so be carefull about + storing large objects inside attributes. + + Paramters + --------- + key : object + attrs : kwargs + attribute values to set + + Exceptions + ---------- + raises KeyError when node is not found + """ + node = self.get_node(key) + if node is None: + raise KeyError('No group named %s in the file' % key) + return _set_attrs(node, **attrs) + def flush(self, fsync=False): """ Force all buffered modifications to be written to disk. 
@@ -811,6 +879,8 @@ def put(self, key, value, format=None, append=False, **kwargs): encoding : default None, provide an encoding for strings dropna : boolean, default True, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' + attrs : dict, default None + Also store each value inside an attribute of the group """ if format is None: format = get_option("io.hdf.default_format") or 'fixed' @@ -893,6 +963,8 @@ def append(self, key, value, format=None, append=True, columns=None, encoding : default None, provide an encoding for strings dropna : boolean, default True, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' + attrs : dict, default None + Also store each value inside an attribute of the group Notes ----- Does *not* check if data being appended overlaps with existing @@ -949,6 +1021,10 @@ def append_to_multiple(self, d, value, selector, data_columns=None, "append_to_multiple requires a selector that is in passed dict" ) + if 'attrs' in kwargs: + raise TypeError('attrs argument not allowed for append_to_multiple, ' + 'use set_attrs manually') + # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] @@ -1218,7 +1294,7 @@ def error(t): error('_TABLE_MAP') def _write_to_group(self, key, value, format, index=True, append=False, - complib=None, encoding=None, **kwargs): + complib=None, encoding=None, attrs=None, **kwargs): group = self.get_node(key) # remove the node if we are not appending @@ -1272,12 +1348,39 @@ def _write_to_group(self, key, value, format, index=True, append=False, if s.is_table and index: s.create_index(columns=index) + if attrs: + _set_attrs(s.group, **attrs) + def _read_group(self, group, **kwargs): s = self._create_storer(group) s.infer_axes() return s.read(**kwargs) +def _get_attrs(node, attrs, default=_raise_attribute): + Attrs = namedtuple('GroupAttrs', attrs) + + if default==_raise_attribute: + def 
get(attr): + return getattr(node._v_attrs, attr) + else: + def get(attr): + try: + return getattr(node._v_attrs, attr) + except AttributeError: + return default + + vals = [get(a) for a in Attrs._fields] + if len(vals) == 1 and isinstance(attrs, string_types): + return vals[0] + else: + return Attrs(*vals) + +def _set_attrs(node, **attrs): + for attr,val in attrs.items(): + node._v_attrs[attr] = val + + class TableIterator(object): """ define the iteration interface on a table diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index dd30527b1f82d..30fe12a4912c1 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -288,6 +288,95 @@ def test_api(self): self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo') self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar') + def test_attrs(self): + df = tm.makeDataFrame() + + attrs = {'val': 42, + 'test': True, + 'meta': dict(name='pandas', what='rocks')} + over = {'val': 13, 'meta': dict(foo='bar'), 'more': 37} + mix = attrs.copy() + mix.update(over) + + def assert_attrs(st, key, attrs=attrs): + access = st.get_node(key)._v_attrs + for name, val in attrs.items(): + self.assertEqual(access[name], val) + + with ensure_clean_store(self.path) as st: + st.put('grp/df', df) + st.set_attrs('grp/df', **attrs) + assert_attrs(st, 'grp/df') + st.set_attrs('grp/df', **over) + assert_attrs(st, 'grp/df', attrs=mix) + + st.set_attrs('grp', **attrs) + assert_attrs(st, 'grp') + + res1 = st.get_attrs('grp/df', ['val', 'test', 'meta', 'more']) + self.assertEqual(res1._fields, ('val', 'test', 'meta', 'more')) + self.assertEqual(list(res1), [13, True, dict(foo='bar'), 37]) + + res2 = st.get_attrs('grp/df', 'val test meta more') + self.assertEqual(list(res2), [13, True, dict(foo='bar'), 37]) + self.assertEqual(res2._fields, ('val', 'test', 'meta', 'more')) + + res3 = st.get_attrs('grp/df', 'val') + self.assertEqual(res3, 13) + + res4 = 
st.get_attrs('grp/df', ['val']) + self.assertEqual(list(res4), [13]) + + with tm.assertRaises(KeyError): + st.set_attrs('nil', val=42) + with tm.assertRaises(KeyError): + st.get_attrs('nil', 'val') + with tm.assertRaises(AttributeError): + st.get_attrs('grp', 'val nil test') + with tm.assertRaises(ValueError): + st.get_attrs('grp', 'valid and in-valid!') + + res4 = st.get_attrs('grp', 'val nil test', None) + self.assertEqual(list(res4), [42, None, True]) + + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'tbl', format='t', attrs=attrs) + df.to_hdf(path, 'fix', format='f', attrs=attrs) + with get_store(path) as st: + assert_attrs(st, 'tbl') + assert_attrs(st, 'fix') + # also check if data is stored + assert_frame_equal(st.tbl, df) + assert_frame_equal(st.fix, df) + + from pandas.io.pytables import set_attrs, get_attrs + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'df') + set_attrs(path, 'df', **attrs) + with get_store(path) as st: + assert_attrs(st, 'df') + res = get_attrs(path, 'df', 'val nil test', None) + self.assertEqual(list(res), [42, None, True]) + + with ensure_clean_store(self.path) as st: + st.put('fix', df, attrs=attrs) + assert_attrs(st, 'fix') + st.append('tbl', df, attrs=attrs) + assert_attrs(st, 'tbl') + st.append('fst', df.iloc[:10], attrs=attrs) + st.append('fst', df.iloc[10:]) + assert_attrs(st, 'fst') + st.append('snd', df.iloc[:10]) + st.append('snd', df.iloc[10:], attrs=attrs) + assert_attrs(st, 'snd') + st.append('mix', df.iloc[:10], attrs=attrs) + st.append('mix', df.iloc[10:], attrs=over) + assert_attrs(st, 'mix', attrs=mix) + + with tm.assertRaises(TypeError): + st.append_to_multiple({'a': ['A', 'B'], 'b': ['C', 'D']}, df, attrs=attrs) + + def test_api_default_format(self): # default_format option
Summary: - `store.set_attrs(key, **attrs)` to set attributes of a group - `store.get_attrs(key, attrs, default)` returns namedtuple with values from a groups attributes or single attribute directly if attrs is specified as a single string - `to_hdf`, `put` and `append` have optional `attrs=dict(...)` argument to update attributes when storing an object Examples: ``` from pandas.io.pytables import get_attrs df.to_hdf('h5', 'df', attrs=dict(a=1, b=2)) a,b = get_attrs('h5', 'df', ['a', 'b']) df = read_hdf('h5', 'df') ``` ``` st.put('df', df) st.set_attrs('df', a=1, b=2) a,b,c = st.get_attrs('df', 'a b c', default=None) a = st.get_attrs('df', 'a') b, = st.get_attrs('df', ['b']) ``` ``` st.append_to_mutple({'grp/a': ['A1', 'A2'], 'grp/b': ['B1', 'B2']}, 'grp/a') st.set_attrs('grp', a=1, b=2) attrs = st.get_attrs('group', ['a', 'b']) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7334
2014-06-04T11:36:56Z
2015-01-18T21:36:57Z
null
2015-01-18T21:36:57Z
BUG: Bug in broadcasting with .div, integer dtypes and divide-by-zero (GH7325)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index d4598e0aece37..608c368c79ed4 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -87,3 +87,4 @@ Bug Fixes (:issue:`7315`). - Bug in inferred_freq results in None for eastern hemisphere timezones (:issue:`7310`) - Bug in ``Easter`` returns incorrect date when offset is negative (:issue:`7195`) +- Bug in broadcasting with ``.div``, integer dtypes and divide-by-zero (:issue:`7325`) diff --git a/pandas/core/common.py b/pandas/core/common.py index afa376a14d4da..d993112933fa9 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1244,20 +1244,21 @@ def _fill_zeros(result, x, y, name, fill): if is_integer_dtype(y): - mask = y.ravel() == 0 - if mask.any(): + if (y.ravel() == 0).any(): shape = result.shape result = result.ravel().astype('float64') + # GH 7325, mask and nans must be broadcastable signs = np.sign(result) - nans = np.isnan(x.ravel()) - np.putmask(result, mask & ~nans, fill) + mask = ((y == 0) & ~np.isnan(x)).ravel() + + np.putmask(result, mask, fill) # if we have a fill of inf, then sign it # correctly # GH 6178 if np.isinf(fill): - np.putmask(result,signs<0 & mask & ~nans,-fill) + np.putmask(result,signs<0 & mask, -fill) result = result.reshape(shape) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 170a64aa58482..358d9d82403f6 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5240,6 +5240,17 @@ def test_arith_flex_series(self): assert_frame_equal(df.div(row), df / row) assert_frame_equal(df.div(col, axis=0), (df.T / col).T) + # broadcasting issue in GH7325 + df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64') + expected = DataFrame([[np.inf,np.inf],[1.0,1.5],[1.0,1.25]]) + result = df.div(df[0],axis='index') + assert_frame_equal(result,expected) + + df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64') + expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]]) + result = 
df.div(df[0],axis='index') + assert_frame_equal(result,expected) + def test_arith_non_pandas_object(self): df = self.simple
closes #7325
https://api.github.com/repos/pandas-dev/pandas/pulls/7328
2014-06-03T23:39:45Z
2014-06-04T00:03:36Z
2014-06-04T00:03:36Z
2014-06-16T02:31:33Z
BUG: already mixed indexes should not sort
diff --git a/pandas/core/index.py b/pandas/core/index.py index 146b7cd0eb503..8b2c7bde44782 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2,7 +2,6 @@ import datetime import warnings from functools import partial -import warnings from pandas.compat import range, zip, lrange, lzip, u, reduce from pandas import compat import numpy as np @@ -29,6 +28,9 @@ __all__ = ['Index'] +_unsortable_types = frozenset(('mixed', 'mixed-integer')) + + def _try_get_item(x): try: return x.item() @@ -1011,7 +1013,10 @@ def union(self, other): warnings.warn("%s, sort order is undefined for " "incomparable objects" % e, RuntimeWarning) else: - result.sort() + types = frozenset((self.inferred_type, + other.inferred_type)) + if not types & _unsortable_types: + result.sort() else: result = self.values diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 5def2039c5ee8..29aed792bfe11 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -793,7 +793,7 @@ def test_tuple_union_bug(self): # union broken union_idx = idx1.union(idx2) - expected = pandas.Index(sorted(set(idx1) | set(idx2))) + expected = idx2 self.assertEqual(union_idx.ndim, 1) self.assertTrue(union_idx.equals(expected))
cc @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/7327
2014-06-03T22:58:25Z
2014-06-04T20:53:41Z
2014-06-04T20:53:41Z
2014-07-07T05:52:08Z
BUG: fix read_sql delegation for queries without select statement (GH7324)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3a53b5629c6e3..0a89806c899a4 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -117,3 +117,6 @@ Bug Fixes - Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) - Bug in ``.loc`` with a list of indexers on a single-multi index level (that is not nested) (:issue:`7349`) - Bug in ``Series.map`` when mapping a dict with tuple keys of different lengths (:issue:`7333`) +- Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) +- Fix delegation of `read_sql` to `read_sql_query` when query does not contain + 'select' (:issue:`7324`). diff --git a/pandas/io/sql.py b/pandas/io/sql.py index aa08c95c4f1c3..0ac4b5f3fcc2b 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -374,26 +374,19 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, """ pandas_sql = pandasSQL_builder(con) - if 'select' in sql.lower(): - try: - if pandas_sql.has_table(sql): - return pandas_sql.read_table( - sql, index_col=index_col, coerce_float=coerce_float, - parse_dates=parse_dates, columns=columns) - except: - pass - + if isinstance(pandas_sql, PandasSQLLegacy): return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) - else: - if isinstance(pandas_sql, PandasSQLLegacy): - raise ValueError("Reading a table with read_sql is not supported " - "for a DBAPI2 connection. 
Use an SQLAlchemy " - "engine or specify an sql query") + + if pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) + else: + return pandas_sql.read_sql( + sql, index_col=index_col, params=params, + coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 2796ab48ec894..a34f278fc5a96 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -671,7 +671,7 @@ def test_read_sql_delegate(self): "read_sql and read_sql_query have not the same" " result with a query") - self.assertRaises(ValueError, sql.read_sql, 'iris', self.conn) + self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn) def test_safe_names_warning(self): # GH 6798 @@ -1078,6 +1078,36 @@ def test_default_type_conversion(self): self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating), "BoolColWithNull loaded with incorrect type") + def test_read_procedure(self): + # see GH7324. Although it is more an api test, it is added to the + # mysql tests as sqlite does not have stored procedures + df = DataFrame({'a': [1, 2, 3], 'b':[0.1, 0.2, 0.3]}) + df.to_sql('test_procedure', self.conn, index=False) + + proc = """DROP PROCEDURE IF EXISTS get_testdb; + + CREATE PROCEDURE get_testdb () + + BEGIN + SELECT * FROM test_procedure; + END""" + + connection = self.conn.connect() + trans = connection.begin() + try: + r1 = connection.execute(proc) + trans.commit() + except: + trans.rollback() + raise + + res1 = sql.read_sql_query("CALL get_testdb();", self.conn) + tm.assert_frame_equal(df, res1) + + # test delegation to read_sql_query + res2 = sql.read_sql("CALL get_testdb();", self.conn) + tm.assert_frame_equal(df, res2) + class TestPostgreSQLAlchemy(_TestSQLAlchemy): """
Closes #7324. The `if 'select' in ...'` check was introduced for mysql legacy compatibility, but introduced problem if the query did not contain a select statement (eg a stored procedure). This should solve this, and passes the test suite (locally).
https://api.github.com/repos/pandas-dev/pandas/pulls/7326
2014-06-03T22:10:49Z
2014-06-05T21:25:09Z
2014-06-05T21:25:09Z
2014-08-24T10:28:40Z
UNI/HTML/WIP: add encoding argument to read_html
https://github.com/pandas-dev/pandas/pull/7323.diff
closes #7220
https://api.github.com/repos/pandas-dev/pandas/pulls/7323
2014-06-03T15:31:29Z
2014-06-04T14:03:24Z
2014-06-04T14:03:24Z
2014-06-15T10:03:30Z
BUG: xlim on plots with shared axes (GH2960, GH3490)
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 42041cceeb81b..9f9a87f642a4d 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -270,6 +270,9 @@ Bug Fixes - Bug in non-monotonic ``Index.union`` may preserve ``name`` incorrectly (:issue:`7458`) - Bug in ``DatetimeIndex.intersection`` doesn't preserve timezone (:issue:`4690`) +- Bug with last plotted timeseries dictating ``xlim`` (:issue:`2960`) +- Bug with ``secondary_y`` axis not being considered for timeseries ``xlim`` (:issue:`3490`) + - Bug in ``Float64Index`` assignment with a non scalar indexer (:issue:`7586`) - Bug in ``pandas.core.strings.str_contains`` does not properly match in a case insensitive fashion when ``regex=False`` and ``case=False`` (:issue:`7505`) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 9e7badc836054..2b02523c143b4 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1563,6 +1563,8 @@ def _make_plot(self): kwds = self.kwds.copy() self._maybe_add_color(colors, kwds, style, i) + lines += _get_all_lines(ax) + errors = self._get_errorbars(label=label, index=i) kwds = dict(kwds, **errors) @@ -3064,6 +3066,20 @@ def _flatten(axes): return axes +def _get_all_lines(ax): + lines = ax.get_lines() + + # check for right_ax, which can oddly sometimes point back to ax + if hasattr(ax, 'right_ax') and ax.right_ax != ax: + lines += ax.right_ax.get_lines() + + # no such risk with left_ax + if hasattr(ax, 'left_ax'): + lines += ax.left_ax.get_lines() + + return lines + + def _get_xlim(lines): left, right = np.inf, -np.inf for l in lines: diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index 9eecfc21be189..e390607a0e7e2 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -22,6 +22,8 @@ from pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator, TimeSeries_DateFormatter) +from pandas.tools.plotting import _get_all_lines + 
#---------------------------------------------------------------------- # Plotting functions and monkey patches @@ -78,7 +80,7 @@ def tsplot(series, plotf, **kwargs): # set date formatter, locators and rescale limits format_dateaxis(ax, ax.freq) - left, right = _get_xlim(ax.get_lines()) + left, right = _get_xlim(_get_all_lines(ax)) ax.set_xlim(left, right) # x and y coord info @@ -115,7 +117,7 @@ def _get_ax_freq(ax): if ax_freq is None: if hasattr(ax, 'left_ax'): ax_freq = getattr(ax.left_ax, 'freq', None) - if hasattr(ax, 'right_ax'): + elif hasattr(ax, 'right_ax'): ax_freq = getattr(ax.right_ax, 'freq', None) return ax_freq diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index f0641b6389ebf..84c131ad0ffc6 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -919,6 +919,84 @@ def test_mpl_nopandas(self): assert_array_equal(np.array([x.toordinal() for x in dates]), line2.get_xydata()[:, 0]) + @slow + def test_irregular_ts_shared_ax_xlim(self): + # GH 2960 + ts = tm.makeTimeSeries()[:20] + ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + # plot the left section of the irregular series, then the right section + ax = ts_irregular[:5].plot() + ts_irregular[5:].plot(ax=ax) + + # check that axis limits are correct + left, right = ax.get_xlim() + self.assertEqual(left, ts_irregular.index.min().toordinal()) + self.assertEqual(right, ts_irregular.index.max().toordinal()) + + @slow + def test_secondary_y_non_ts_xlim(self): + # GH 3490 - non-timeseries with secondary y + index_1 = [1, 2, 3, 4] + index_2 = [5, 6, 7, 8] + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + ax = s1.plot() + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + self.assertEqual(left_before, left_after) + self.assertLess(right_before, right_after) + + @slow + def test_secondary_y_regular_ts_xlim(self): + # 
GH 3490 - regular-timeseries with secondary y + index_1 = date_range(start='2000-01-01', periods=4, freq='D') + index_2 = date_range(start='2000-01-05', periods=4, freq='D') + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + ax = s1.plot() + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + self.assertEqual(left_before, left_after) + self.assertLess(right_before, right_after) + + @slow + def test_secondary_y_mixed_freq_ts_xlim(self): + # GH 3490 - mixed frequency timeseries with secondary y + rng = date_range('2000-01-01', periods=10000, freq='min') + ts = Series(1, index=rng) + + ax = ts.plot() + left_before, right_before = ax.get_xlim() + ts.resample('D').plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + # a downsample should not have changed either limit + self.assertEqual(left_before, left_after) + self.assertEqual(right_before, right_after) + + @slow + def test_secondary_y_irregular_ts_xlim(self): + # GH 3490 - irregular-timeseries with secondary y + ts = tm.makeTimeSeries()[:20] + ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + ax = ts_irregular[:5].plot() + # plot higher-x values on secondary axis + ts_irregular[5:].plot(secondary_y=True, ax=ax) + # ensure secondary limits aren't overwritten by plot on primary + ts_irregular[:5].plot(ax=ax) + + left, right = ax.get_xlim() + self.assertEqual(left, ts_irregular.index.min().toordinal()) + self.assertEqual(right, ts_irregular.index.max().toordinal()) + def _check_plot_works(f, freq=None, series=None, *args, **kwargs): import matplotlib.pyplot as plt
Fixes #2960 Fixes #3490. Summary of changes: - for irregular timeseries plots, considers all lines already plotted when calculating xlim - for plots with secondary_y axis, considers lines on both left and right axes when calculating xlim. There are tests for non-timeseries plots (always worked, but now a test to confirm this), irregular timeseries plots, and regular timeseries plots. This does not fix #6608, which I now think is a fairly specific/rare case that could always be manually avoided by `x_compat=True`. I will still try and fix that, but it looks a bit uglier and I didn't want to hold back these more important changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/7322
2014-06-03T15:28:12Z
2014-07-01T23:59:35Z
2014-07-01T23:59:35Z
2014-07-03T20:56:46Z
BUG: Revisit Index.delete preserves freq
diff --git a/pandas/core/index.py b/pandas/core/index.py index 8b2c7bde44782..9ccc2e694f92f 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1774,9 +1774,7 @@ def delete(self, loc): ------- new_index : Index """ - return self._simple_new(np.delete(self, loc), self.name, - freq=getattr(self, 'freq', None), - tz=getattr(self, 'tz', None)) + return np.delete(self, loc) def insert(self, loc, item): """ diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 34b0045b4983b..42cc80cc5dc63 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1608,6 +1608,34 @@ def insert(self, loc, item): return self.asobject.insert(loc, item) raise TypeError("cannot insert DatetimeIndex with incompatible label") + def delete(self, loc): + """ + Make new DatetimeIndex with passed location deleted + Returns + + loc: int, slice or array of ints + Indicate which sub-arrays to remove. + + ------- + new_index : DatetimeIndex + """ + new_dates = np.delete(self.asi8, loc) + + freq = None + if lib.is_integer(loc): + if loc in (0, -len(self), -1, len(self) - 1): + freq = self.freq + else: + if com.is_list_like(loc): + loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc))) + if isinstance(loc, slice) and loc.step in (1, None): + if (loc.start in (0, None) or loc.stop in (len(self), None)): + freq = self.freq + + if self.tz is not None: + new_dates = tslib.date_normalize(new_dates, self.tz) + return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz) + def _view_like(self, ndarray): result = ndarray.view(type(self)) result.offset = self.offset diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 610b5687b9fdf..83cc5dcc7485f 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2293,33 +2293,82 @@ def test_insert(self): self.assertEqual(result.freqstr, 'M') def test_delete(self): - idx = date_range(start='2000-01-01', periods=4, 
freq='M', name='idx') + idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx') - expected = date_range(start='2000-02-01', periods=3, freq='M', name='idx') - result = idx.delete(0) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, 'M') + # prserve freq + expected_0 = date_range(start='2000-02-01', periods=4, freq='M', name='idx') + expected_4 = date_range(start='2000-01-01', periods=4, freq='M', name='idx') - expected = date_range(start='2000-01-01', periods=3, freq='M', name='idx') - result = idx.delete(-1) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, 'M') + # reset freq to None + expected_1 = DatetimeIndex(['2000-01-31', '2000-03-31', '2000-04-30', + '2000-05-31'], freq=None, name='idx') + + cases ={0: expected_0, -5: expected_0, + -1: expected_4, 4: expected_4, + 1: expected_1} + for n, expected in compat.iteritems(cases): + result = idx.delete(n) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) with tm.assertRaises((IndexError, ValueError)): # either depeidnig on numpy version result = idx.delete(5) - idx = date_range(start='2000-01-01', periods=4, - freq='M', name='idx', tz='US/Pacific') + idx = date_range(start='2000-01-01', periods=5, + freq='D', name='idx', tz='US/Pacific') - expected = date_range(start='2000-02-01', periods=3, - freq='M', name='idx', tz='US/Pacific') + expected = date_range(start='2000-01-02', periods=4, + freq='D', name='idx', tz='US/Pacific') result = idx.delete(0) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, 'M') + self.assertEqual(result.freqstr, 'D') + self.assertEqual(result.tz, expected.tz) + + def test_delete_slice(self): + idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx') + + # 
prserve freq + expected_0_2 = date_range(start='2000-01-04', periods=7, freq='D', name='idx') + expected_7_9 = date_range(start='2000-01-01', periods=7, freq='D', name='idx') + + # reset freq to None + expected_3_5 = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03', + '2000-01-07', '2000-01-08', '2000-01-09', + '2000-01-10'], freq=None, name='idx') + + cases ={(0, 1, 2): expected_0_2, + (7, 8, 9): expected_7_9, + (3, 4, 5): expected_3_5} + for n, expected in compat.iteritems(cases): + result = idx.delete(n) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + + result = idx.delete(slice(n[0], n[-1] + 1)) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + + ts = pd.Series(1, index=pd.date_range('2000-01-01', periods=10, + freq='D', name='idx')) + # preserve freq + result = ts.drop(ts.index[:5]).index + expected = pd.date_range('2000-01-06', periods=5, freq='D', name='idx') + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) + + # reset freq to None + result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index + expected = DatetimeIndex(['2000-01-01', '2000-01-03', '2000-01-05', + '2000-01-07', '2000-01-09'], freq=None, name='idx') + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) def test_map_bug_1677(self): index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
Fix the issue that freq is preserved inappropriately, caused by #7302. Fix preserves freq if edge element is being deleted (same as #7299). CC: @rosnfeld
https://api.github.com/repos/pandas-dev/pandas/pulls/7320
2014-06-03T13:38:41Z
2014-06-05T16:01:00Z
2014-06-05T16:01:00Z
2014-07-12T06:20:06Z
BUG: inferred_freq results in None with eastern hemisphere timezones
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 45e0e15d311b4..48eac7fb1b761 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -85,3 +85,4 @@ Bug Fixes - Bug in ``isnull()`` when ``mode.use_inf_as_null == True`` where isnull wouldn't test ``True`` when it encountered an ``inf``/``-inf`` (:issue:`7315`). +- Bug in inferred_freq results in None for eastern hemisphere timezones (:issue:`7310`) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index e3c933e116987..70eab4dde8c1f 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -668,6 +668,9 @@ class _FrequencyInferer(object): def __init__(self, index, warn=True): self.index = index self.values = np.asarray(index).view('i8') + if index.tz is not None: + self.values = tslib.date_normalize(self.values, index.tz) + self.warn = warn if len(index) < 3: diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index 076b0e06cdddf..e2e30fcb69440 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -256,6 +256,22 @@ def test_infer_freq(self): rng = Index(rng.to_timestamp('D', how='e').asobject) self.assertEqual(rng.inferred_freq, 'Q-OCT') + def test_infer_freq_tz(self): + + # GH 7310 + for tz in [None, 'Asia/Tokyo', 'US/Pacific', 'Europe/Paris']: + dates = ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'] + idx = DatetimeIndex(dates) + self.assertEqual(idx.inferred_freq, 'M') + + dates = ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'] + idx = DatetimeIndex(dates) + self.assertEqual(idx.inferred_freq, 'D') + + dates = ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00'] + idx = DatetimeIndex(dates) + self.assertEqual(idx.inferred_freq, 'H') + def test_not_monotonic(self): rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) rng = rng[::-1]
Closes #7310
https://api.github.com/repos/pandas-dev/pandas/pulls/7318
2014-06-03T11:25:36Z
2014-06-03T13:16:57Z
2014-06-03T13:16:57Z
2014-07-01T15:08:30Z
CLN/DOC: Remove trailing whitespace from .rst files in doc folder
diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst index 3d8b85e9460c4..4d0a2b80c9949 100644 --- a/doc/source/comparison_with_sql.rst +++ b/doc/source/comparison_with_sql.rst @@ -3,11 +3,11 @@ Comparison with SQL ******************** -Since many potential pandas users have some familiarity with -`SQL <http://en.wikipedia.org/wiki/SQL>`_, this page is meant to provide some examples of how +Since many potential pandas users have some familiarity with +`SQL <http://en.wikipedia.org/wiki/SQL>`_, this page is meant to provide some examples of how various SQL operations would be performed using pandas. -If you're new to pandas, you might want to first read through :ref:`10 Minutes to Pandas<10min>` +If you're new to pandas, you might want to first read through :ref:`10 Minutes to Pandas<10min>` to familiarize yourself with the library. As is customary, we import pandas and numpy as follows: @@ -17,8 +17,8 @@ As is customary, we import pandas and numpy as follows: import pandas as pd import numpy as np -Most of the examples will utilize the ``tips`` dataset found within pandas tests. We'll read -the data into a DataFrame called `tips` and assume we have a database table of the same name and +Most of the examples will utilize the ``tips`` dataset found within pandas tests. We'll read +the data into a DataFrame called `tips` and assume we have a database table of the same name and structure. .. ipython:: python @@ -44,7 +44,7 @@ With pandas, column selection is done by passing a list of column names to your tips[['total_bill', 'tip', 'smoker', 'time']].head(5) -Calling the DataFrame without the list of column names would display all columns (akin to SQL's +Calling the DataFrame without the list of column names would display all columns (akin to SQL's ``*``). WHERE @@ -58,14 +58,14 @@ Filtering in SQL is done via a WHERE clause. 
WHERE time = 'Dinner' LIMIT 5; -DataFrames can be filtered in multiple ways; the most intuitive of which is using +DataFrames can be filtered in multiple ways; the most intuitive of which is using `boolean indexing <http://pandas.pydata.org/pandas-docs/stable/indexing.html#boolean-indexing>`_. .. ipython:: python tips[tips['time'] == 'Dinner'].head(5) -The above statement is simply passing a ``Series`` of True/False objects to the DataFrame, +The above statement is simply passing a ``Series`` of True/False objects to the DataFrame, returning all rows with True. .. ipython:: python @@ -74,7 +74,7 @@ returning all rows with True. is_dinner.value_counts() tips[is_dinner].head(5) -Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame using | (OR) and & +Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame using | (OR) and & (AND). .. code-block:: sql @@ -101,16 +101,16 @@ Just like SQL's OR and AND, multiple conditions can be passed to a DataFrame usi # tips by parties of at least 5 diners OR bill total was more than $45 tips[(tips['size'] >= 5) | (tips['total_bill'] > 45)] -NULL checking is done using the :meth:`~pandas.Series.notnull` and :meth:`~pandas.Series.isnull` +NULL checking is done using the :meth:`~pandas.Series.notnull` and :meth:`~pandas.Series.isnull` methods. .. ipython:: python - + frame = pd.DataFrame({'col1': ['A', 'B', np.NaN, 'C', 'D'], 'col2': ['F', np.NaN, 'G', 'H', 'I']}) frame -Assume we have a table of the same structure as our DataFrame above. We can see only the records +Assume we have a table of the same structure as our DataFrame above. We can see only the records where ``col2`` IS NULL with the following query: .. code-block:: sql @@ -138,12 +138,12 @@ Getting items where ``col1`` IS NOT NULL can be done with :meth:`~pandas.Series. GROUP BY -------- -In pandas, SQL's GROUP BY operations performed using the similarly named -:meth:`~pandas.DataFrame.groupby` method. 
:meth:`~pandas.DataFrame.groupby` typically refers to a +In pandas, SQL's GROUP BY operations performed using the similarly named +:meth:`~pandas.DataFrame.groupby` method. :meth:`~pandas.DataFrame.groupby` typically refers to a process where we'd like to split a dataset into groups, apply some function (typically aggregation) , and then combine the groups together. -A common SQL operation would be getting the count of records in each group throughout a dataset. +A common SQL operation would be getting the count of records in each group throughout a dataset. For instance, a query getting us the number of tips left by sex: .. code-block:: sql @@ -163,23 +163,23 @@ The pandas equivalent would be: tips.groupby('sex').size() -Notice that in the pandas code we used :meth:`~pandas.DataFrameGroupBy.size` and not -:meth:`~pandas.DataFrameGroupBy.count`. This is because :meth:`~pandas.DataFrameGroupBy.count` +Notice that in the pandas code we used :meth:`~pandas.DataFrameGroupBy.size` and not +:meth:`~pandas.DataFrameGroupBy.count`. This is because :meth:`~pandas.DataFrameGroupBy.count` applies the function to each column, returning the number of ``not null`` records within each. .. ipython:: python tips.groupby('sex').count() -Alternatively, we could have applied the :meth:`~pandas.DataFrameGroupBy.count` method to an +Alternatively, we could have applied the :meth:`~pandas.DataFrameGroupBy.count` method to an individual column: .. ipython:: python tips.groupby('sex')['total_bill'].count() -Multiple functions can also be applied at once. For instance, say we'd like to see how tip amount -differs by day of the week - :meth:`~pandas.DataFrameGroupBy.agg` allows you to pass a dictionary +Multiple functions can also be applied at once. For instance, say we'd like to see how tip amount +differs by day of the week - :meth:`~pandas.DataFrameGroupBy.agg` allows you to pass a dictionary to your grouped DataFrame, indicating which functions to apply to specific columns. .. 
code-block:: sql @@ -198,7 +198,7 @@ to your grouped DataFrame, indicating which functions to apply to specific colum tips.groupby('day').agg({'tip': np.mean, 'day': np.size}) -Grouping by more than one column is done by passing a list of columns to the +Grouping by more than one column is done by passing a list of columns to the :meth:`~pandas.DataFrame.groupby` method. .. code-block:: sql @@ -207,7 +207,7 @@ Grouping by more than one column is done by passing a list of columns to the FROM tip GROUP BY smoker, day; /* - smoker day + smoker day No Fri 4 2.812500 Sat 45 3.102889 Sun 57 3.167895 @@ -226,16 +226,16 @@ Grouping by more than one column is done by passing a list of columns to the JOIN ---- -JOINs can be performed with :meth:`~pandas.DataFrame.join` or :meth:`~pandas.merge`. By default, -:meth:`~pandas.DataFrame.join` will join the DataFrames on their indices. Each method has -parameters allowing you to specify the type of join to perform (LEFT, RIGHT, INNER, FULL) or the +JOINs can be performed with :meth:`~pandas.DataFrame.join` or :meth:`~pandas.merge`. By default, +:meth:`~pandas.DataFrame.join` will join the DataFrames on their indices. Each method has +parameters allowing you to specify the type of join to perform (LEFT, RIGHT, INNER, FULL) or the columns to join on (column names or indices). .. ipython:: python df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'], 'value': np.random.randn(4)}) - df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'], + df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'], 'value': np.random.randn(4)}) Assume we have two database tables of the same name and structure as our DataFrames. @@ -256,7 +256,7 @@ INNER JOIN # merge performs an INNER JOIN by default pd.merge(df1, df2, on='key') -:meth:`~pandas.merge` also offers parameters for cases when you'd like to join one DataFrame's +:meth:`~pandas.merge` also offers parameters for cases when you'd like to join one DataFrame's column with another DataFrame's index. .. 
ipython:: python @@ -296,7 +296,7 @@ RIGHT JOIN FULL JOIN ~~~~~~~~~ -pandas also allows for FULL JOINs, which display both sides of the dataset, whether or not the +pandas also allows for FULL JOINs, which display both sides of the dataset, whether or not the joined columns find a match. As of writing, FULL JOINs are not supported in all RDBMS (MySQL). .. code-block:: sql @@ -364,7 +364,7 @@ SQL's UNION is similar to UNION ALL, however UNION will remove duplicate rows. Los Angeles 5 */ -In pandas, you can use :meth:`~pandas.concat` in conjunction with +In pandas, you can use :meth:`~pandas.concat` in conjunction with :meth:`~pandas.DataFrame.drop_duplicates`. .. ipython:: python @@ -377,4 +377,4 @@ UPDATE DELETE ------- \ No newline at end of file +------ diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 7b064c69c721c..d5dcacf53ec23 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -244,7 +244,7 @@ accept the following arguments: is min for ``rolling_min``, max for ``rolling_max``, median for ``rolling_median``, and mean for all other rolling functions. See :meth:`DataFrame.resample`'s how argument for more information. - + These functions can be applied to ndarrays or Series objects: .. ipython:: python diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index e76f58f023619..a927bcec683f5 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -100,7 +100,7 @@ index, not membership among the values. 2 in s 'b' in s -If this behavior is surprising, keep in mind that using ``in`` on a Python +If this behavior is surprising, keep in mind that using ``in`` on a Python dictionary tests keys, not values, and Series are dict-like. 
To test for membership in the values, use the method :func:`~pandas.Series.isin`: diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 1d25a395f74a9..b90ae05c62895 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -216,9 +216,9 @@ new column. sa dfa.A = list(range(len(dfa.index))) # ok if A already exists dfa - dfa['A'] = list(range(len(dfa.index))) # use this form to create a new column + dfa['A'] = list(range(len(dfa.index))) # use this form to create a new column dfa - + .. warning:: - You can use this access only if the index element is a valid python identifier, e.g. ``s.1`` is not allowed. diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index ac5c8a4463b39..6dac071a5b2bb 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -598,7 +598,7 @@ You can also operate on the DataFrame in place .. warning:: - When replacing multiple ``bool`` or ``datetime64`` objects, the first + When replacing multiple ``bool`` or ``datetime64`` objects, the first argument to ``replace`` (``to_replace``) must match the type of the value being replaced type. For example, @@ -669,4 +669,3 @@ However, these can be filled in using **fillna** and it will work fine: reindexed[crit.fillna(False)] reindexed[crit.fillna(True)] - diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 4d891d38f77a1..8e47466385e77 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -99,7 +99,7 @@ resources for development through the end of 2011, and continues to contribute bug reports today. Since January 2012, `Lambda Foundry <http://www.lambdafoundry.com>`__, has -been providing development resources, as well as commercial support, +been providing development resources, as well as commercial support, training, and consulting for pandas. 
pandas is only made possible by a group of people around the world like you @@ -114,8 +114,8 @@ collection of developers focused on the improvement of Python's data libraries. The core team that coordinates development can be found on `Github <http://github.com/pydata>`__. If you're interested in contributing, please visit the `project website <http://pandas.pydata.org>`__. - + License ------- -.. literalinclude:: ../../LICENSE \ No newline at end of file +.. literalinclude:: ../../LICENSE diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst index 5af5685ed1f56..98fc4edfd5816 100644 --- a/doc/source/r_interface.rst +++ b/doc/source/r_interface.rst @@ -22,9 +22,9 @@ rpy2 / R interface If your computer has R and rpy2 (> 2.2) installed (which will be left to the reader), you will be able to leverage the below functionality. On Windows, doing this is quite an ordeal at the moment, but users on Unix-like systems -should find it quite easy. rpy2 evolves in time, and is currently reaching +should find it quite easy. rpy2 evolves in time, and is currently reaching its release 2.3, while the current interface is -designed for the 2.2.x series. We recommend to use 2.2.x over other series +designed for the 2.2.x series. We recommend to use 2.2.x over other series unless you are prepared to fix parts of the code, yet the rpy2-2.3.0 introduces improvements such as a better R-Python bridge memory management layer so it might be a good idea to bite the bullet and submit patches for diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 436055ffe37d1..db68c0eb224e2 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -266,7 +266,7 @@ It takes a number of arguments - ``values``: a column or a list of columns to aggregate - ``index``: a column, Grouper, array which has the same length as data, or list of them. Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. 
-- ``columns``: a column, Grouper, array which has the same length as data, or list of them. +- ``columns``: a column, Grouper, array which has the same length as data, or list of them. Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. - ``aggfunc``: function to use for aggregation, defaulting to ``numpy.mean`` @@ -456,4 +456,3 @@ handling of NaN: pd.factorize(x, sort=True) np.unique(x, return_inverse=True)[::-1] - diff --git a/doc/source/rplot.rst b/doc/source/rplot.rst index 12ade83261fb7..cdecee39d8d1e 100644 --- a/doc/source/rplot.rst +++ b/doc/source/rplot.rst @@ -45,7 +45,7 @@ We import the rplot API: Examples -------- -RPlot is a flexible API for producing Trellis plots. These plots allow you to arrange data in a rectangular grid by values of certain attributes. +RPlot is a flexible API for producing Trellis plots. These plots allow you to arrange data in a rectangular grid by values of certain attributes. .. ipython:: python diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index 65ff95a905c14..dafb9200cab1c 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -22,8 +22,8 @@ are examples with real-world data, and all the bugs and weirdness that that entails. Here are links to the v0.1 release. For an up-to-date table of contents, see the `pandas-cookbook GitHub -repository <http://github.com/jvns/pandas-cookbook>`_. To run the examples in this tutorial, you'll need to -clone the GitHub repository and get IPython Notebook running. +repository <http://github.com/jvns/pandas-cookbook>`_. To run the examples in this tutorial, you'll need to +clone the GitHub repository and get IPython Notebook running. See `How to use this cookbook <https://github.com/jvns/pandas-cookbook#how-to-use-this-cookbook>`_. 
- `A quick tour of the IPython Notebook: <http://nbviewer.ipython.org/github/jvns/pandas-c|%2055ookbook/blob/v0.1/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_
https://api.github.com/repos/pandas-dev/pandas/pulls/7317
2014-06-03T02:35:47Z
2014-06-03T10:10:20Z
2014-06-03T10:10:20Z
2014-06-26T13:18:58Z
BUG: isnull doesn't properly check for inf when requested
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 248fa098c7269..45e0e15d311b4 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -82,3 +82,6 @@ Bug Fixes (:issue:`7140`). - Bug in ``StringMethods.extract()`` where a single match group Series would use the matcher's name instead of the group name (:issue:`7313`). +- Bug in ``isnull()`` when ``mode.use_inf_as_null == True`` where isnull + wouldn't test ``True`` when it encountered an ``inf``/``-inf`` + (:issue:`7315`). diff --git a/pandas/src/util.pxd b/pandas/src/util.pxd index 7a30f018e623e..cc1921e6367c5 100644 --- a/pandas/src/util.pxd +++ b/pandas/src/util.pxd @@ -76,8 +76,7 @@ cdef inline bint _checknull_old(object val): cdef double INF = <double> np.inf cdef double NEGINF = -INF try: - return bool(val is None or val != val and val != INF - and val != NEGINF) + return val is None or val != val or val == INF or val == NEGINF except ValueError: return False diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 2385f9ef514fc..bc12cc5aaaa3b 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2941,6 +2941,16 @@ def test_raise_on_info(self): with tm.assertRaises(AttributeError): s.info() + def test_isnull_for_inf(self): + s = Series(['a', np.inf, np.nan, 1.0]) + with pd.option_context('mode.use_inf_as_null', True): + r = s.isnull() + dr = s.dropna() + e = Series([False, True, True, False]) + de = Series(['a', 1.0], index=[0, 3]) + tm.assert_series_equal(r, e) + tm.assert_series_equal(dr, de) + # TimeSeries-specific
closes #7314
https://api.github.com/repos/pandas-dev/pandas/pulls/7315
2014-06-02T17:47:44Z
2014-06-03T00:44:37Z
2014-06-03T00:44:37Z
2014-06-24T04:07:16Z
BUG: single group series should preserve group name
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index adaffa1fca1be..248fa098c7269 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -80,3 +80,5 @@ Bug Fixes - Bug in ``Float64Index`` which didn't allow duplicates (:issue:`7149`). - Bug in ``DataFrame.replace()`` where truthy values were being replaced (:issue:`7140`). +- Bug in ``StringMethods.extract()`` where a single match group Series + would use the matcher's name instead of the group name (:issue:`7313`). diff --git a/pandas/core/strings.py b/pandas/core/strings.py index b52e3ba1dbf60..2176e0300f25f 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -906,8 +906,9 @@ def _wrap_result(self, result): if not hasattr(result, 'ndim'): return result elif result.ndim == 1: + name = getattr(result, 'name', None) return Series(result, index=self.series.index, - name=self.series.name) + name=name or self.series.name) else: assert result.ndim < 3 return DataFrame(result, index=self.series.index) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 3806553004edb..e50b2ef2289c5 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -578,6 +578,13 @@ def check_index(index): tm.makeDateIndex, tm.makePeriodIndex ]: check_index(index()) + def test_extract_single_series_name_is_preserved(self): + s = Series(['a3', 'b3', 'c2'], name='bob') + r = s.str.extract(r'(?P<sue>[a-z])') + e = Series(['a', 'b', 'c'], name='sue') + tm.assert_series_equal(r, e) + self.assertEqual(r.name, e.name) + def test_get_dummies(self): s = Series(['a|b', 'a|c', np.nan]) result = s.str.get_dummies('|')
Extract currently doesn't preserve group names when a series is returned. This PR fixes that.
https://api.github.com/repos/pandas-dev/pandas/pulls/7313
2014-06-02T15:17:23Z
2014-06-02T20:51:23Z
2014-06-02T20:51:23Z
2014-06-13T04:48:46Z
BUG: quantile ignores axis kwarg
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 20620f15944f0..df2e86ed34d7e 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -111,3 +111,4 @@ Bug Fixes - Bug in ``CustomBusinessDay.apply`` raiases ``NameError`` when ``np.datetime64`` object is passed (:issue:`7196`) - Bug in ``MultiIndex.append``, ``concat`` and ``pivot_table`` don't preserve timezone (:issue:`6606`) - Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) +- Bug in ``quantile`` ignoring the axis keyword argument (:issue`7306`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fb39aced6ec1d..72ecddc29646b 100755 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4181,6 +4181,8 @@ def f(arr, per): return _quantile(values, per) data = self._get_numeric_data() if numeric_only else self + if axis == 1: + data = data.T # need to know which cols are timestamp going in so that we can # map timestamp over them after getting the quantile. diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 358d9d82403f6..794563a219840 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11103,6 +11103,26 @@ def test_quantile(self): xp = df.median() assert_series_equal(rs, xp) + # axis + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + result = df.quantile(.5, axis=1) + expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3]) + assert_series_equal(result, expected) + + result = df.quantile([.5, .75], axis=1) + expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75], + 3: [3.5, 3.75]}, index=["0.5", "0.75"]) + assert_frame_equal(result, expected) + + # We may want to break API in the future to change this + # so that we exclude non-numeric along the same axis + # See GH #7312 + df = DataFrame([[1, 2, 3], + ['a', 'b', 4]]) + result = df.quantile(.5, axis=1) + expected = Series([3., 4.], index=[0, 1]) + assert_series_equal(result, expected) + def test_quantile_multi(self): df = DataFrame([[1, 1, 1], 
[2, 2, 2], [3, 3, 3]], columns=['a', 'b', 'c']) @@ -11141,6 +11161,20 @@ def test_quantile_datetime(self): index=[.5], columns=['a', 'b']) assert_frame_equal(result, expected) + # axis = 1 + df['c'] = pd.to_datetime(['2011', '2012']) + result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False) + expected = Series([Timestamp('2010-07-02 12:00:00'), + Timestamp('2011-07-02 12:00:00')], + index=[0, 1]) + assert_series_equal(result, expected) + + result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False) + expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), + Timestamp('2011-07-02 12:00:00')]], + index=[0.5], columns=[0, 1]) + assert_frame_equal(result, expected) + def test_cumsum(self): self.tsframe.ix[5:10, 0] = nan self.tsframe.ix[10:15, 1] = nan
Closes https://github.com/pydata/pandas/issues/7306 We may want to wait on this till later today while I figure out what will happen in https://github.com/pydata/pandas/issues/7308
https://api.github.com/repos/pandas-dev/pandas/pulls/7312
2014-06-02T14:54:00Z
2014-06-11T15:10:21Z
2014-06-11T15:10:21Z
2016-11-03T12:38:03Z
BUG/TST: should skip openpyxl before testing version compat
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 5928e99ed055c..b45897dff9aa2 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1217,9 +1217,9 @@ def test_ExcelWriter_dispatch(self): import xlsxwriter writer_klass = _XlsxWriter except ImportError: + _skip_if_no_openpyxl() if not openpyxl_compat.is_compat(): raise nose.SkipTest('incompatible openpyxl version') - _skip_if_no_openpyxl() writer_klass = _OpenpyxlWriter with ensure_clean('.xlsx') as path:
null
https://api.github.com/repos/pandas-dev/pandas/pulls/7311
2014-06-02T14:52:11Z
2014-06-02T16:51:23Z
2014-06-02T16:51:23Z
2014-06-14T21:27:05Z
DOC: Complete R interface section
diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst index 98fc4edfd5816..da33c65f3c81f 100644 --- a/doc/source/r_interface.rst +++ b/doc/source/r_interface.rst @@ -6,6 +6,11 @@ :suppress: from pandas import * + import numpy as np + np.random.seed(123456) + import matplotlib.pyplot as plt + plt.close('all') + options.display.mpl_style = 'default' options.display.max_rows=15 @@ -71,24 +76,26 @@ Converting DataFrames into R objects .. versionadded:: 0.8 Starting from pandas 0.8, there is **experimental** support to convert -DataFrames into the equivalent R object (that is, **data.frame**): +``DataFrame`` into the equivalent R object (that is, **data.frame**) using ``convert_to_r_dataframe`` function: -.. ipython:: python - from pandas import DataFrame +.. ipython:: python df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C':[7,8,9]}, index=["one", "two", "three"]) r_dataframe = com.convert_to_r_dataframe(df) - print(type(r_dataframe)) + type(r_dataframe) print(r_dataframe) + print(r_dataframe.rownames) + print(r_dataframe.colnames) + -The DataFrame's index is stored as the ``rownames`` attribute of the -data.frame instance. +The ``rpy2.robjects.vectors.DataFrame`` index is stored as the ``rownames``, and columns are stored as the +``colnames`` attributes. -You can also use **convert_to_r_matrix** to obtain a ``Matrix`` instance, but +You can also use ``convert_to_r_matrix`` to obtain a ``rpy2.robjects.vectors.Matrix`` instance, but bear in mind that it will only work with homogeneously-typed DataFrames (as R matrices bear no information on the data type): @@ -97,14 +104,186 @@ R matrices bear no information on the data type): r_matrix = com.convert_to_r_matrix(df) - print(type(r_matrix)) + type(r_matrix) print(r_matrix) Calling R functions with pandas objects --------------------------------------- +It is easier to use ``rpy2.robjects`` directly to call R functions. 
+You can retrieve R object (including R function) from R namespace by dictionary access of ``robjects.r``. + +Below example shows to retrieve R's **sum** function and pass ``rpy2.robjects.vector.DataFrame``. +Note that the returned value from R **sum** is stored as ``robjects.vectors.Vectors`` type. +Thus, specify index to get raw values. + +See `RPy2 documentation <http://rpy.sourceforge.net/rpy2/doc-2.2/html/index.html>`__ for more. + + +.. ipython:: python + + import rpy2.robjects as robjects + + rsum = robjects.r['sum'] + rsum_result = rsum(r_dataframe) + + type(rsum_result) + rsum_result[0] + + +Preparing Data for R +-------------------- + +Load Iris dataset and convert it to R **data.frame**. +You can pass ``rpy2.robjects.vectors.DataFrame`` to R namespace using ``rpy2.robjects.r.assign``. +In following examle, `r_iris` DataFrame can be refered as `iris` on R namespace. + + +.. ipython:: python + + iris = com.load_data('iris') + iris.head() + + r_iris = com.convert_to_r_dataframe(iris) + robjects.r.assign('iris', r_iris); + + +You can convert each data type using R functions if required. +Function calling ``objects.r`` will execure a passed formula on R's namespace. +For example, we can check the data type using R's **str** function, +then convert "Species" column to categorical type (Factor) using R's **factor** function. + + +.. ipython:: python + + print(robjects.r('str(iris)')) + + robjects.r('iris$Species <- factor(iris$Species)'); + print(robjects.r('str(iris)')) High-level interface to R estimators ------------------------------------ + +Use "setosa" data in iris data set to perform Linear Regression. +It is much easier to prepare and slice data on pandas side, then convert it to R **data.frame**. + + +.. 
ipython:: python + + setosa = iris[iris['Species'] == 'setosa'] + setosa.head() + + r_setosa = com.convert_to_r_dataframe(setosa) + robjects.r.assign('setosa', r_setosa); + + +Once DataFrame is passed to R namespace, you can execute R formula to perform Liner Regression. + + +.. ipython:: python + + robjects.r('result <- lm(Sepal.Length~Sepal.Width, data=setosa)'); + print(robjects.r('summary(result)')) + + +You can retrieve the result from R namespace to python namespace via ``rpy2.robjects.r``. +If a returned value is R named list, you can check the list of keys via ``names`` attribute. +To get raw values, access each element specifying index. + + +.. ipython:: python + + result = robjects.r['result'] + + print(result.names) + print(result.rx('coefficients')) + + intercept, coef1 = result.rx('coefficients')[0] + intercept + coef1 + + +``convert_robj`` function converts retrieved data to python friendly data type. +In below example, retrieved R **data.frame** of fitted values and confidence interval will be +converted to pandas ``DataFrame``. + + +.. ipython:: python + + robjects.r('predicted <- predict(result, setosa, interval="prediction")'); + print(robjects.r('head(predicted)')) + + predicted = robjects.r['predicted'] + type(predicted) + + predicted = com.convert_robj(predicted) + type(predicted) + predicted.head() + + +Handling Time Series +-------------------- + +Currently, there is no easy way to create R's built-in **ts** object from pandas time series. +Also, ``Series`` cannot be converted using ``convert_to_r_dataframe`` function. +Thus, you must create ``rpy2.robjects.vectors.Vector`` instance manually before calling ``robjects.r.assign``. + +Use corresponding ``Vector`` class depending on the intended data type. +See the rpy2 documentation `Vectors and arrays <http://rpy.sourceforge.net/rpy2/doc-2.2/html/vector.html>`__ for more. + +Once the ``Vector`` is passed to R's namespace, call R's **ts** function to create **ts** object. + + +.. 
ipython:: python + + idx = date_range(start='2013-01-01', freq='M', periods=48) + vts = Series(np.random.randn(48), index=idx).cumsum() + vts + + r_values = robjects.FloatVector(vts.values) + robjects.r.assign('values', r_values); + + robjects.r('vts <- ts(values, start=c(2013, 1, 1), frequency=12)'); + print(robjects.r['vts']) + + +Below example performs Seasonal Decomposition using R's **stl** function, and get the result as `converted` ``DataFrame``. +Because R's **ts** index cannot be retrieved by ``convert_robj``, assign ``DatetimeIndex`` manually after retrieval. + + +.. ipython:: python + + robjects.r('result <- stl(vts, s.window=12)'); + result = robjects.r['result'] + + print(result.names) + + result_ts = result.rx('time.series')[0] + converted = com.convert_robj(result_ts) + converted.head() + + converted.index = idx + converted.head() + + +Now you have pandas ``DataFrame``, you can perform further operation easily. + + +.. ipython:: python + + fig, axes = plt.subplots(4, 1) + + axes[0].set_ylabel('Original'); + ax = vts.plot(ax=axes[0]); + axes[1].set_ylabel('Trend'); + ax = converted['trend'].plot(ax=axes[1]); + + axes[2].set_ylabel('Seasonal'); + ax = converted['seasonal'].plot(ax=axes[2]); + + axes[3].set_ylabel('Residuals'); + @savefig rpy2_timeseries.png + converted['remainder'].plot(ax=axes[3]) +
Currently, "rpy2 R interface" documentation has blank section. Added descriptions to cover required basic topics.
https://api.github.com/repos/pandas-dev/pandas/pulls/7309
2014-06-02T13:49:13Z
2015-03-08T14:39:50Z
null
2015-03-31T13:31:25Z
CLN/DOC: Change instances of "Pandas" to "pandas" in documentation
diff --git a/doc/make.py b/doc/make.py index 88e5a939eef46..8a92654d1378b 100755 --- a/doc/make.py +++ b/doc/make.py @@ -300,7 +300,7 @@ def _get_config(): import argparse argparser = argparse.ArgumentParser(description=""" -Pandas documentation builder +pandas documentation builder """.strip()) # argparser.add_argument('-arg_name', '--arg_name', @@ -325,7 +325,7 @@ def generate_index(api=True, single=False, **kwds): f.write(t.render(api=api,single=single,**kwds)) import argparse -argparser = argparse.ArgumentParser(description="Pandas documentation builder", +argparser = argparse.ArgumentParser(description="pandas documentation builder", epilog="Targets : %s" % funcd.keys()) argparser.add_argument('--no-api', diff --git a/doc/source/10min.rst b/doc/source/10min.rst index c56826c5cd5d4..a9a97ee56813c 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -22,7 +22,7 @@ ******************** -10 Minutes to Pandas +10 Minutes to pandas ******************** This is a short introduction to pandas, geared mainly for new users. @@ -344,7 +344,7 @@ A ``where`` operation with setting. Missing Data ------------ -Pandas primarily uses the value ``np.nan`` to represent missing data. It is by +pandas primarily uses the value ``np.nan`` to represent missing data. It is by default not included in computations. See the :ref:`Missing Data section <missing_data>` @@ -445,7 +445,7 @@ Merge Concat ~~~~~~ -Pandas provides various facilities for easily combining together Series, +pandas provides various facilities for easily combining together Series, DataFrame, and Panel objects with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations. 
@@ -585,7 +585,7 @@ We can produce pivot tables from this data very easily: Time Series ----------- -Pandas has simple, powerful, and efficient functionality for performing +pandas has simple, powerful, and efficient functionality for performing resampling operations during frequency conversion (e.g., converting secondly data into 5-minutely data). This is extremely common in, but not limited to, financial applications. See the :ref:`Time Series section <timeseries>` diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 336f932656787..02df98313071e 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -93,7 +93,7 @@ unlike the axis labels, cannot be assigned to. Accelerated operations ---------------------- -Pandas has support for accelerating certain types of binary numerical and boolean operations using +pandas has support for accelerating certain types of binary numerical and boolean operations using the ``numexpr`` library (starting in 0.11.0) and the ``bottleneck`` libraries. These libraries are especially useful when dealing with large data sets, and provide large @@ -1556,7 +1556,7 @@ Working with package options .. _basics.working_with_options: .. versionadded:: 0.10.1 -Pandas has an options system that let's you customize some aspects of it's behaviour, +pandas has an options system that let's you customize some aspects of it's behaviour, display-related options being those the user is must likely to adjust. Options have a full "dotted-style", case-insensitive name (e.g. ``display.max_rows``), diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst index 4d0a2b80c9949..371875d9996f9 100644 --- a/doc/source/comparison_with_sql.rst +++ b/doc/source/comparison_with_sql.rst @@ -7,7 +7,7 @@ Since many potential pandas users have some familiarity with `SQL <http://en.wikipedia.org/wiki/SQL>`_, this page is meant to provide some examples of how various SQL operations would be performed using pandas. 
-If you're new to pandas, you might want to first read through :ref:`10 Minutes to Pandas<10min>` +If you're new to pandas, you might want to first read through :ref:`10 Minutes to pandas<10min>` to familiarize yourself with the library. As is customary, we import pandas and numpy as follows: diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 7099f9dbccd77..2548f2d88c5d9 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -551,7 +551,7 @@ Storing Attributes to a group node Binary Files ~~~~~~~~~~~~ -Pandas readily accepts numpy record arrays, if you need to read in a binary +pandas readily accepts numpy record arrays, if you need to read in a binary file consisting of an array of C structs. For example, given this C program in a file called ``main.c`` compiled with ``gcc main.c -std=gnu99`` on a 64-bit machine, diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 3b409ec918bb8..e5009aeb1c6f6 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -1,7 +1,7 @@ .. _ecosystem: **************** -Pandas Ecosystem +pandas Ecosystem **************** Increasingly, packages are being built on top of pandas to address specific needs @@ -89,4 +89,3 @@ Domain Specific Geopandas extends pandas data objects to include geographic information which support geometric operations. If your work entails maps and geographical coordinates, and you love pandas, you should take a close look at Geopandas. - diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 6460662553d6b..6154d21e12336 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -30,7 +30,7 @@ Frequently Asked Questions (FAQ) How do I control the way my DataFrame is displayed? --------------------------------------------------- -Pandas users rely on a variety of environments for using pandas: scripts, terminal, +pandas users rely on a variety of environments for using pandas: scripts, terminal, IPython qtconsole/ notebook, (IDLE, spyder, etc'). 
Each environment has it's own capabilities and limitations: HTML support, horizontal scrolling, auto-detection of width/height. @@ -64,10 +64,10 @@ options automatically when starting up. .. _ref-monkey-patching: -Adding Features to your Pandas Installation +Adding Features to your pandas Installation ------------------------------------------- -Pandas is a powerful tool and already has a plethora of data manipulation +pandas is a powerful tool and already has a plethora of data manipulation operations implemented, most of them are very fast as well. It's very possible however that certain functionality that would make your life easier is missing. In that case you have several options: diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index a927bcec683f5..0078ffb506cc9 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -18,10 +18,10 @@ Caveats and Gotchas .. _gotchas.truth: -Using If/Truth Statements with Pandas +Using If/Truth Statements with pandas ------------------------------------- -Pandas follows the numpy convention of raising an error when you try to convert something to a ``bool``. +pandas follows the numpy convention of raising an error when you try to convert something to a ``bool``. This happens in a ``if`` or when using the boolean operations, ``and``, ``or``, or ``not``. It is not clear what the result of diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index 01f654192b549..905e76aee88eb 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -1,4 +1,4 @@ -.. Pandas documentation master file, created by +.. 
pandas documentation master file, created by ********************************************* pandas: powerful Python data analysis toolkit diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index b90ae05c62895..84736d4989f6f 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -60,7 +60,7 @@ Different Choices for Indexing (``loc``, ``iloc``, and ``ix``) .. versionadded:: 0.11.0 Object selection has had a number of user-requested additions in order to -support more explicit location based indexing. Pandas now supports three types +support more explicit location based indexing. pandas now supports three types of multi-axis indexing. - ``.loc`` is strictly label based, will raise ``KeyError`` when the items are @@ -275,7 +275,7 @@ Selection By Label This is sometimes called ``chained assignment`` and should be avoided. See :ref:`Returning a View versus Copy <indexing.view_versus_copy>` -Pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol. +pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol. **ALL** of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! When slicing, the start bound is *included*, **AND** the stop bound is *included*. Integers are valid labels, but they refer to the label **and not the position**. The ``.loc`` attribute is the primary access method. The following are valid inputs: @@ -346,7 +346,7 @@ Selection By Position This is sometimes called ``chained assignment`` and should be avoided. See :ref:`Returning a View versus Copy <indexing.view_versus_copy>` -Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely python and numpy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. 
Trying to use a non-integer, even a **valid** label will raise a ``IndexError``. +pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely python and numpy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise a ``IndexError``. The ``.iloc`` attribute is the primary access method. The following are valid inputs: @@ -1158,7 +1158,7 @@ Advanced Indexing with ``.ix`` The recent addition of ``.loc`` and ``.iloc`` have enabled users to be quite explicit about indexing choices. ``.ix`` allows a great flexibility to - specify indexing locations by *label* and/or *integer position*. Pandas will + specify indexing locations by *label* and/or *integer position*. pandas will attempt to use any passed *integer* as *label* locations first (like what ``.loc`` would do, then to fall back on *positional* indexing, like what ``.iloc`` would do). See :ref:`Fallback Indexing <indexing.fallback>` for @@ -1509,10 +1509,10 @@ Fallback indexing .. _indexing.fallback: Float indexes should be used only with caution. If you have a float indexed -``DataFrame`` and try to select using an integer, the row that Pandas returns -might not be what you expect. Pandas first attempts to use the *integer* +``DataFrame`` and try to select using an integer, the row that pandas returns +might not be what you expect. pandas first attempts to use the *integer* as a *label* location, but fails to find a match (because the types -are not equal). Pandas then falls back to back to positional indexing. +are not equal). pandas then falls back to back to positional indexing. .. 
ipython:: python diff --git a/doc/source/install.rst b/doc/source/install.rst index 7cce761445c51..56ab7b70407bc 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -29,7 +29,7 @@ _____________ Stable installers available on `PyPI <http://pypi.python.org/pypi/pandas>`__ -Preliminary builds and installers on the `Pandas download page <http://pandas.pydata.org/getpandas.html>`__ . +Preliminary builds and installers on the `pandas download page <http://pandas.pydata.org/getpandas.html>`__ . Overview ___________ diff --git a/doc/source/io.rst b/doc/source/io.rst index f10dac9d3f921..bc58b04de4473 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -29,7 +29,7 @@ IO Tools (Text, CSV, HDF5, ...) ******************************* -The Pandas I/O api is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` +The pandas I/O api is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` object. * :ref:`read_csv<io.read_csv_table>` @@ -515,7 +515,7 @@ If you have ``parse_dates`` enabled for some or all of your columns, and your datetime strings are all formatted the same way, you may get a large speed up by setting ``infer_datetime_format=True``. If set, pandas will attempt to guess the format of your datetime strings, and then use a faster means -of parsing the strings. 5-10x parsing speeds have been observed. Pandas +of parsing the strings. 5-10x parsing speeds have been observed. pandas will fallback to the usual parsing if either the format cannot be guessed or the format that was guessed cannot properly parse the entire column of strings. 
So in general, ``infer_datetime_format`` should not have any @@ -1438,7 +1438,7 @@ Dates written in nanoseconds need to be read back in nanoseconds: dfju = pd.read_json(json, date_unit='ms') dfju - # Let Pandas detect the correct precision + # Let pandas detect the correct precision dfju = pd.read_json(json) dfju @@ -1518,7 +1518,7 @@ Normalization .. versionadded:: 0.13.0 -Pandas provides a utility function to take a dict or list of dicts and *normalize* this semi-structured data +pandas provides a utility function to take a dict or list of dicts and *normalize* this semi-structured data into a flat table. .. ipython:: python @@ -2174,7 +2174,7 @@ for some advanced strategies .. note:: ``PyTables`` 3.0.0 was recently released to enable support for Python 3. - Pandas should be fully compatible (and previously written stores should be + pandas should be fully compatible (and previously written stores should be backwards compatible) with all ``PyTables`` >= 2.3. For ``python >= 3.2``, ``pandas >= 0.12.0`` is required for compatibility. @@ -3627,4 +3627,3 @@ And here's the code def test_csv_read(): pd.read_csv('test.csv',index_col=0) - diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 6dac071a5b2bb..9263eb2cedf9b 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -94,7 +94,7 @@ Datetimes For datetime64[ns] types, ``NaT`` represents missing values. This is a pseudo-native sentinel value that can be represented by numpy in a singular dtype (datetime64[ns]). -Pandas objects provide intercompatibility between ``NaT`` and ``NaN``. +pandas objects provide intercompatibility between ``NaT`` and ``NaN``. .. 
ipython:: python diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst index 16edf64802908..b0cd96cac6f5f 100644 --- a/doc/source/remote_data.rst +++ b/doc/source/remote_data.rst @@ -102,7 +102,7 @@ Dataset names are listed at `Fama/French Data Library World Bank ---------- -``Pandas`` users can easily access thousands of panel data series from the +``pandas`` users can easily access thousands of panel data series from the `World Bank's World Development Indicators <http://data.worldbank.org>`__ by using the ``wb`` I/O functions. @@ -170,7 +170,7 @@ contracts around the world. 4027 IT.MOB.COV.ZS Population coverage of mobile cellular telepho... Notice that this second search was much faster than the first one because -``Pandas`` now has a cached list of available data series. +``pandas`` now has a cached list of available data series. .. code-block:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 8f3d2fe8eb079..5897b1a43054f 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -736,7 +736,7 @@ in pandas. Legacy Aliases ~~~~~~~~~~~~~~ -Note that prior to v0.8.0, time rules had a slightly different look. Pandas +Note that prior to v0.8.0, time rules had a slightly different look. pandas will continue to support the legacy time rules for the time being but it is strongly recommended that you switch to using the new offset aliases. @@ -1509,7 +1509,7 @@ Numpy < 1.7 Compatibility ~~~~~~~~~~~~~~~~~~~~~~~~~ Numpy < 1.7 has a broken ``timedelta64`` type that does not correctly work -for arithmetic. Pandas bypasses this, but for frequency conversion as above, +for arithmetic. pandas bypasses this, but for frequency conversion as above, you need to create the divisor yourself. The ``np.timetimedelta64`` type only has 1 argument, the number of **micro** seconds. @@ -1524,4 +1524,3 @@ The following are equivalent statements in the two versions of numpy. 
else: y / np.timedelta64(1,'D') y / np.timedelta64(1,'s') - diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index dafb9200cab1c..421304bb89541 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -9,11 +9,11 @@ This is a guide to many pandas tutorials, geared mainly for new users. Internal Guides --------------- -Pandas own :ref:`10 Minutes to Pandas<10min>` +pandas own :ref:`10 Minutes to pandas<10min>` More complex recipes are in the :ref:`Cookbook<cookbook>` -Pandas Cookbook +pandas Cookbook --------------- The goal of this cookbook (by `Julia Evans <http://jvns.ca>`_) is to @@ -54,7 +54,7 @@ See `How to use this cookbook <https://github.com/jvns/pandas-cookbook#how-to-us to be really easy. -Lessons for New Pandas Users +Lessons for New pandas Users ---------------------------- For more resources, please visit the main `repository <https://bitbucket.org/hrojas/learn-pandas>`_. @@ -117,7 +117,7 @@ Excel charts with pandas, vincent and xlsxwriter Various Tutorials ----------------- -- `Wes McKinney's (Pandas BDFL) blog <http://blog.wesmckinney.com/>`_ +- `Wes McKinney's (pandas BDFL) blog <http://blog.wesmckinney.com/>`_ - `Statistical analysis made easy in Python with SciPy and pandas DataFrames, by Randal Olson <http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/>`_ - `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 <http://conference.scipy.org/scipy2013/tutorial_detail.php?id=109>`_ - `Financial analysis in python, by Thomas Wiecki <http://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 6064d02cae89f..630e40c4ebfa2 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -854,7 +854,7 @@ with "(right)" in the legend. 
To turn off the automatic marking, use the Suppressing Tick Resolution Adjustment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Pandas includes automatically tick resolution adjustment for regular frequency +pandas includes automatically tick resolution adjustment for regular frequency time-series data. For limited cases where pandas cannot infer the frequency information (e.g., in an externally created ``twinx``), you can choose to suppress this behavior for alignment purposes. @@ -1144,4 +1144,3 @@ when plotting a large number of points. :suppress: plt.close('all') -
https://api.github.com/repos/pandas-dev/pandas/pulls/7305
2014-06-02T01:25:39Z
2014-06-03T13:03:14Z
2014-06-03T13:03:14Z
2014-07-02T11:30:45Z
BUG: replace() alters unrelated values
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index b430d2eed5a10..cda800b0f0111 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -77,3 +77,5 @@ Bug Fixes sign were being treated as temporaries attempting to be deleted (:issue:`7300`). - Bug in ``Float64Index`` which didn't allow duplicates (:issue:`7149`). +- Bug in ``DataFrame.replace()`` where truthy values were being replaced + (:issue:`7140`). diff --git a/pandas/core/internals.py b/pandas/core/internals.py index de93330f10271..52db14d43fe05 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -3,18 +3,17 @@ import re import operator from datetime import datetime, timedelta -from collections import defaultdict, deque +from collections import defaultdict import numpy as np from pandas.core.base import PandasObject -from pandas.hashtable import Factorizer -from pandas.core.common import (_possibly_downcast_to_dtype, isnull, notnull, +from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like, ABCSparseSeries, _infer_dtype_from_scalar, _is_null_datelike_scalar, is_timedelta64_dtype, is_datetime64_dtype,) -from pandas.core.index import Index, Int64Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_convert_indices, _length_of_indexer) import pandas.core.common as com from pandas.sparse.array import _maybe_to_sparse, SparseArray @@ -25,12 +24,10 @@ from pandas.tslib import Timestamp from pandas import compat -from pandas.compat import (range, lrange, lmap, callable, map, zip, u, - OrderedDict) +from pandas.compat import range, map, zip, u from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type - from pandas.lib import BlockPlacement @@ -1020,6 +1017,7 @@ def equals(self, other): left, right = self.values, other.values return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + class 
FloatBlock(FloatOrComplexBlock): __slots__ = () is_float = True @@ -1212,7 +1210,7 @@ def replace(self, to_replace, value, inplace=False, filter=None, regex=False): to_replace_values = np.atleast_1d(to_replace) if not np.can_cast(to_replace_values, bool): - to_replace = to_replace_values + return self return super(BoolBlock, self).replace(to_replace, value, inplace=inplace, filter=filter, regex=regex) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7e87c07911353..170a64aa58482 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -8351,6 +8351,12 @@ def test_replace_with_dict_with_bool_keys(self): with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'): df.replace({'asdf': 'asdb', True: 'yes'}) + def test_replace_truthy(self): + df = DataFrame({'a': [True, True]}) + r = df.replace([np.inf, -np.inf], np.nan) + e = df + tm.assert_frame_equal(r, e) + def test_replace_int_to_int_chain(self): df = DataFrame({'a': lrange(1, 5)}) with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"): diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index a3217e2fe8b04..5962e2447fbc9 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -4,7 +4,7 @@ import numpy as np from pandas import Index, MultiIndex, DataFrame, Series -from pandas.compat import OrderedDict +from pandas.compat import OrderedDict, lrange from pandas.sparse.array import SparseArray from pandas.core.internals import * import pandas.core.internals as internals @@ -835,8 +835,6 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, # reindex_indexer(new_labels, indexer, axis) - - class TestBlockPlacement(tm.TestCase): _multiprocess_can_split_ = True
closes #7140
https://api.github.com/repos/pandas-dev/pandas/pulls/7304
2014-06-01T21:05:19Z
2014-06-02T14:43:50Z
2014-06-02T14:43:50Z
2014-06-17T04:56:54Z
BUG: do not remove temporaries from eval/query scope
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 8d2275b2f51c5..c19a3951ac359 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -71,3 +71,6 @@ Bug Fixes and ``left`` keyword is specified (:issue:`7226`) - BUG in ``DataFrame.hist`` raises ``TypeError`` when it contains non numeric column (:issue:`7277`) - BUG in ``Index.delete`` does not preserve ``name`` and ``freq`` attributes (:issue:`7302`) +- Bug in ``DataFrame.query()``/``eval`` where local string variables with the @ + sign were being treated as temporaries attempting to be deleted + (:issue:`7300`). diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py index 353c58c23febd..b6a1fcbec8339 100644 --- a/pandas/computation/expr.py +++ b/pandas/computation/expr.py @@ -340,12 +340,10 @@ def _rewrite_membership_op(self, node, left, right): # pop the string variable out of locals and replace it with a list # of one string, kind of a hack if right_str: - self.env.remove_tmp(right.name) name = self.env.add_tmp([right.value]) right = self.term_type(name, self.env) if left_str: - self.env.remove_tmp(left.name) name = self.env.add_tmp([left.value]) left = self.term_type(name, self.env) diff --git a/pandas/computation/scope.py b/pandas/computation/scope.py index 24be71f96b282..875aaa959b264 100644 --- a/pandas/computation/scope.py +++ b/pandas/computation/scope.py @@ -278,16 +278,6 @@ def add_tmp(self, value): # only increment if the variable gets put in the scope return name - def remove_tmp(self, name): - """Remove a temporary variable from this scope - - Parameters - ---------- - name : str - The name of a temporary to be removed - """ - del self.temps[name] - @property def ntemps(self): """The number of temporary variables in this scope""" diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 84bed9be76939..9b727d5752097 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -13685,6 +13685,18 @@ def 
test_query_single_element_booleans(self): for parser, engine in product(PARSERS, ENGINES): yield self.check_query_single_element_booleans, parser, engine + def check_query_string_scalar_variable(self, parser, engine): + tm.skip_if_no_ne(engine) + df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'], + 'Price': [109.70, 109.72, 183.30, 183.35]}) + e = df[df.Symbol == 'BUD US'] + symb = 'BUD US' + r = df.query('Symbol == @symb', parser=parser, engine=engine) + tm.assert_frame_equal(e, r) + + def test_query_string_scalar_variable(self): + for parser, engine in product(['pandas'], ENGINES): + yield self.check_query_string_scalar_variable, parser, engine class TestDataFrameEvalNumExprPandas(tm.TestCase):
closes #7300
https://api.github.com/repos/pandas-dev/pandas/pulls/7303
2014-06-01T13:25:57Z
2014-06-01T21:14:22Z
2014-06-01T21:14:22Z
2014-06-16T19:22:42Z
BUG: Index.delete doesnt preserve name and other attrs
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 5e1d237b2b559..8d2275b2f51c5 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -70,3 +70,4 @@ Bug Fixes - Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` and ``left`` keyword is specified (:issue:`7226`) - BUG in ``DataFrame.hist`` raises ``TypeError`` when it contains non numeric column (:issue:`7277`) +- BUG in ``Index.delete`` does not preserve ``name`` and ``freq`` attributes (:issue:`7302`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 02d6e983f5183..6ac0b7004af6c 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1770,7 +1770,9 @@ def delete(self, loc): ------- new_index : Index """ - return np.delete(self, loc) + return self._simple_new(np.delete(self, loc), self.name, + freq=getattr(self, 'freq', None), + tz=getattr(self, 'tz', None)) def insert(self, loc, item): """ diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 751e6f57a3332..9681a606c7c57 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -265,6 +265,23 @@ def test_insert(self): self.assertTrue(Index(['a']).equals( null_index.insert(0, 'a'))) + def test_delete(self): + idx = Index(['a', 'b', 'c', 'd'], name='idx') + + expected = Index(['b', 'c', 'd'], name='idx') + result = idx.delete(0) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + + expected = Index(['a', 'b', 'c'], name='idx') + result = idx.delete(-1) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + + with tm.assertRaises((IndexError, ValueError)): + # either depeidnig on numpy version + result = idx.delete(5) + def test_identical(self): # index diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index d83022c814d0a..3612b9dbeafb3 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1608,17 +1608,6 @@ def insert(self, loc, 
item): return self.asobject.insert(loc, item) raise TypeError("cannot insert DatetimeIndex with incompatible label") - def delete(self, loc): - """ - Make new DatetimeIndex with passed location deleted - - Returns - ------- - new_index : DatetimeIndex - """ - arr = np.delete(self.values, loc) - return DatetimeIndex(arr, tz=self.tz) - def _view_like(self, ndarray): result = ndarray.view(type(self)) result.offset = self.offset diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 0bdcff58a0b30..068883423015e 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -4,8 +4,6 @@ import os import operator -from distutils.version import LooseVersion - import nose import numpy as np @@ -2094,6 +2092,35 @@ def test_insert(self): result = idx.insert(3, datetime(2000, 4, 30)) self.assertEqual(result.freqstr, 'M') + def test_delete(self): + idx = date_range(start='2000-01-01', periods=4, freq='M', name='idx') + + expected = date_range(start='2000-02-01', periods=3, freq='M', name='idx') + result = idx.delete(0) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, 'M') + + expected = date_range(start='2000-01-01', periods=3, freq='M', name='idx') + result = idx.delete(-1) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, 'M') + + with tm.assertRaises((IndexError, ValueError)): + # either depeidnig on numpy version + result = idx.delete(5) + + idx = date_range(start='2000-01-01', periods=4, + freq='M', name='idx', tz='US/Pacific') + + expected = date_range(start='2000-02-01', periods=3, + freq='M', name='idx', tz='US/Pacific') + result = idx.delete(0) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, 'M') + def test_map_bug_1677(self): index = DatetimeIndex(['2012-04-25 
09:30:00.393000']) f = index.asof
Made `Index.delete` to preserve `name`, `tz` and `freq` attributes.
https://api.github.com/repos/pandas-dev/pandas/pulls/7302
2014-05-31T23:12:44Z
2014-06-01T16:37:17Z
2014-06-01T16:37:17Z
2014-06-16T10:54:28Z
BUG: DatetimeIndex.insert doesnt preserve name and tz
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3e06a705487df..20067b75755bb 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -123,3 +123,5 @@ Bug Fixes - Bug where a string column name assignment to a ``DataFrame`` with a ``Float64Index`` raised a ``TypeError`` during a call to ``np.isnan`` (:issue:`7366`). +- BUG in ``DatetimeIndex.insert`` doesn't preserve ``name`` and ``tz`` (:issue:`7299`) +- BUG in ``DatetimeIndex.asobject`` doesn't preserve ``name`` (:issue:`7299`) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 42cc80cc5dc63..f23a7a6fd20ac 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -784,7 +784,7 @@ def tolist(self): def _get_object_index(self): boxfunc = lambda x: Timestamp(x, offset=self.offset, tz=self.tz) boxed_values = lib.map_infer(self.asi8, boxfunc) - return Index(boxed_values, dtype=object) + return Index(boxed_values, dtype=object, name=self.name) def to_pydatetime(self): """ @@ -1594,15 +1594,30 @@ def insert(self, loc, item): ------- new_index : Index """ + freq = None if isinstance(item, datetime): + zone = tslib.get_timezone(self.tz) + izone = tslib.get_timezone(getattr(item, 'tzinfo', None)) + if zone != izone: + raise ValueError('Passed item and index have different timezone') + + # check freq can be preserved on edge cases + if self.freq is not None: + if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]: + freq = self.freq + elif (loc == len(self)) and item - self.freq == self[-1]: + freq = self.freq + item = _to_m8(item, tz=self.tz) + try: - new_index = np.concatenate((self[:loc].asi8, - [item.view(np.int64)], - self[loc:].asi8)) - return DatetimeIndex(new_index, freq='infer') - except (AttributeError, TypeError): + new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)], + self[loc:].asi8)) + if self.tz is not None: + new_dates = tslib.date_normalize(new_dates, self.tz) + return DatetimeIndex(new_dates, name=self.name, freq=freq, 
tz=self.tz) + except (AttributeError, TypeError): # fall back to object index if isinstance(item,compat.string_types): return self.asobject.insert(loc, item) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 83cc5dcc7485f..52e813aaeffda 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2273,24 +2273,101 @@ def test_order(self): self.assertTrue(ordered[::-1].is_monotonic) self.assert_numpy_array_equal(dexer, [0, 2, 1]) + def test_asobject(self): + idx = date_range(start='2013-01-01', periods=4, freq='M', name='idx') + expected = Index([Timestamp('2013-01-31'), Timestamp('2013-02-28'), + Timestamp('2013-03-31'), Timestamp('2013-04-30')], + dtype=object, name='idx') + + result = idx.asobject + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + def test_insert(self): - idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) + idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'], name='idx') result = idx.insert(2, datetime(2000, 1, 5)) - exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05', - '2000-01-02']) - self.assertTrue(result.equals(exp)) + expected = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05', + '2000-01-02'], name='idx') + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) # insertion of non-datetime should coerce to object index result = idx.insert(1, 'inserted') expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1), - datetime(2000, 1, 2)]) + datetime(2000, 1, 2)], name='idx') self.assertNotIsInstance(result, DatetimeIndex) tm.assert_index_equal(result, expected) + self.assertEqual(result.name, expected.name) + + idx = date_range('1/1/2000', periods=3, freq='M', name='idx') + + # preserve freq + expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29', + '2000-03-31'], name='idx', freq='M') + expected_3 = 
DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', + '2000-04-30'], name='idx', freq='M') + + # reset freq to None + expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31', '2000-02-29', + '2000-03-31'], name='idx', freq=None) + expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', + '2000-01-02'], name='idx', freq=None) + + cases = [(0, datetime(1999, 12, 31), expected_0), + (-3, datetime(1999, 12, 31), expected_0), + (3, datetime(2000, 4, 30), expected_3), + (1, datetime(2000, 1, 31), expected_1_nofreq), + (3, datetime(2000, 1, 2), expected_3_nofreq)] + + for n, d, expected in cases: + result = idx.insert(n, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freq, expected.freq) - idx = date_range('1/1/2000', periods=3, freq='M') - result = idx.insert(3, datetime(2000, 4, 30)) - self.assertEqual(result.freqstr, 'M') + # reset freq to None + result = idx.insert(3, datetime(2000, 1, 2)) + expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', + '2000-01-02'], name='idx', freq=None) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertTrue(result.freq is None) + + # GH 7299 + _skip_if_no_pytz() + import pytz + + idx = date_range('1/1/2000', periods=3, freq='D', tz='US/Pacific', name='idx') + with tm.assertRaises(ValueError): + result = idx.insert(3, pd.Timestamp('2000-01-04')) + with tm.assertRaises(ValueError): + result = idx.insert(3, datetime(2000, 1, 4)) + with tm.assertRaises(ValueError): + result = idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern')) + with tm.assertRaises(ValueError): + result = idx.insert(3, datetime(2000, 1, 4, tzinfo=pytz.timezone('US/Eastern'))) + + # preserve freq + expected = date_range('1/1/2000', periods=4, freq='D', tz='US/Pacific', name='idx') + for d in [pd.Timestamp('2000-01-04', tz='US/Pacific'), + datetime(2000, 1, 4, tzinfo=pytz.timezone('US/Pacific'))]: + + 
result = idx.insert(3, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertEqual(result.freqstr, 'D') + + expected = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03', + '2000-01-02'], name='idx', + tz='US/Pacific', freq=None) + # reset freq to None + for d in [pd.Timestamp('2000-01-02', tz='US/Pacific'), + datetime(2000, 1, 2, tzinfo=pytz.timezone('US/Pacific'))]: + result = idx.insert(3, d) + self.assertTrue(result.equals(expected)) + self.assertEqual(result.name, expected.name) + self.assertTrue(result.freq is None) def test_delete(self): idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')
`DatetimeIndex.insert` doesn't preserve `name` and `tz` attributes. Also modified `DatetimeIndex.asobject` to return an object `Index` which has the same name as original to cover the case when the result is being object `Index`. ``` # normal Index preserves its name after insertion idx = pd.Index([1, 2, 3], name='normal') inserted = idx.insert(0, 1) inserted.name # normal # But DatetimeIndex doesn't dtidx = pd.DatetimeIndex([datetime.datetime(2011, 1, 3), datetime.datetime(2011, 1, 4)], freq='M', name='dtidx') inserted = dtidx.insert(0, datetime.datetime(2011, 1, 5)) inserted.name # None ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7299
2014-05-31T22:06:03Z
2014-06-06T12:58:23Z
2014-06-06T12:58:23Z
2014-06-12T06:09:49Z
TST: fix intermittent dict_complex test failures
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py index 46d3124a06a0b..9633f567ab098 100644 --- a/pandas/io/tests/test_packers.py +++ b/pandas/io/tests/test_packers.py @@ -149,8 +149,9 @@ def test_dict_float(self): def test_dict_complex(self): x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j} x_rec = self.encode_decode(x) - self.assertTrue(all(map(lambda x, y: x == y, x.values(), x_rec.values())) and - all(map(lambda x, y: type(x) == type(y), x.values(), x_rec.values()))) + self.assertEqual(x, x_rec) + for key in x: + self.assertEqual(type(x[key]), type(x_rec[key])) def test_dict_numpy_float(self): x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)} @@ -158,12 +159,12 @@ def test_dict_numpy_float(self): tm.assert_almost_equal(x,x_rec) def test_dict_numpy_complex(self): - x = {'foo': np.complex128( - 1.0 + 1.0j), 'bar': np.complex128(2.0 + 2.0j)} + x = {'foo': np.complex128(1.0 + 1.0j), + 'bar': np.complex128(2.0 + 2.0j)} x_rec = self.encode_decode(x) - self.assertTrue(all(map(lambda x, y: x == y, x.values(), x_rec.values())) and - all(map(lambda x, y: type(x) == type(y), x.values(), x_rec.values()))) - + self.assertEqual(x, x_rec) + for key in x: + self.assertEqual(type(x[key]), type(x_rec[key])) def test_numpy_array_float(self):
`test_dict_complex` and `test_dict_numpy_complex` would give occasional test failures on Python 3.4. The test was written to depend on the order of values from `dict.values()`; remove that dependency. Closes gh-7293.
https://api.github.com/repos/pandas-dev/pandas/pulls/7298
2014-05-31T20:38:43Z
2014-05-31T21:38:13Z
2014-05-31T21:38:13Z
2014-07-01T00:16:02Z
CLN/TST remove compat.scipy
diff --git a/pandas/compat/scipy.py b/pandas/compat/scipy.py deleted file mode 100644 index 06da8799d0c96..0000000000000 --- a/pandas/compat/scipy.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Shipping functions from SciPy to reduce dependency on having SciPy installed -""" - -from pandas.compat import range, lrange -import numpy as np - - -def rankdata(a): - """ - Ranks the data, dealing with ties appropriately. - - Equal values are assigned a rank that is the average of the ranks that - would have been otherwise assigned to all of the values within that set. - Ranks begin at 1, not 0. - - Parameters - ---------- - a : array_like - This array is first flattened. - - Returns - ------- - rankdata : ndarray - An array of length equal to the size of `a`, containing rank scores. - - Examples - -------- - >>> stats.rankdata([0, 2, 2, 3]) - array([ 1. , 2.5, 2.5, 4. ]) - - """ - a = np.ravel(a) - n = len(a) - svec, ivec = fastsort(a) - sumranks = 0 - dupcount = 0 - newarray = np.zeros(n, float) - for i in range(n): - sumranks += i - dupcount += 1 - if i == n - 1 or svec[i] != svec[i + 1]: - averank = sumranks / float(dupcount) + 1 - for j in range(i - dupcount + 1, i + 1): - newarray[ivec[j]] = averank - sumranks = 0 - dupcount = 0 - return newarray - - -def fastsort(a): - """ - Sort an array and provide the argsort. - - Parameters - ---------- - a : array_like - Input array. - - Returns - ------- - fastsort : ndarray of type int - sorted indices into the original array - - """ - # TODO: the wording in the docstring is nonsense. - it = np.argsort(a) - as_ = a[it] - return as_, it - - -def percentileofscore(a, score, kind='rank'): - """The percentile rank of a score relative to a list of scores. - - A `percentileofscore` of, for example, 80% means that 80% of the - scores in `a` are below the given score. In the case of gaps or - ties, the exact definition depends on the optional keyword, `kind`. 
- - Parameters - ---------- - a: array like - Array of scores to which `score` is compared. - score: int or float - Score that is compared to the elements in `a`. - kind: {'rank', 'weak', 'strict', 'mean'}, optional - This optional parameter specifies the interpretation of the - resulting score: - - - "rank": Average percentage ranking of score. In case of - multiple matches, average the percentage rankings of - all matching scores. - - "weak": This kind corresponds to the definition of a cumulative - distribution function. A percentileofscore of 80% - means that 80% of values are less than or equal - to the provided score. - - "strict": Similar to "weak", except that only values that are - strictly less than the given score are counted. - - "mean": The average of the "weak" and "strict" scores, often used in - testing. See - - http://en.wikipedia.org/wiki/Percentile_rank - - Returns - ------- - pcos : float - Percentile-position of score (0-100) relative to `a`. - - Examples - -------- - Three-quarters of the given values lie below a given score: - - >>> percentileofscore([1, 2, 3, 4], 3) - 75.0 - - With multiple matches, note how the scores of the two matches, 0.6 - and 0.8 respectively, are averaged: - - >>> percentileofscore([1, 2, 3, 3, 4], 3) - 70.0 - - Only 2/5 values are strictly less than 3: - - >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') - 40.0 - - But 4/5 values are less than or equal to 3: - - >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') - 80.0 - - The average between the weak and the strict scores is - - >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') - 60.0 - - """ - a = np.array(a) - n = len(a) - - if kind == 'rank': - if not(np.any(a == score)): - a = np.append(a, score) - a_len = np.array(lrange(len(a))) - else: - a_len = np.array(lrange(len(a))) + 1.0 - - a = np.sort(a) - idx = [a == score] - pct = (np.mean(a_len[idx]) / n) * 100.0 - return pct - - elif kind == 'strict': - return sum(a < score) / float(n) * 100 - elif 
kind == 'weak': - return sum(a <= score) / float(n) * 100 - elif kind == 'mean': - return (sum(a < score) + sum(a <= score)) * 50 / float(n) - else: - raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'") diff --git a/pandas/stats/misc.py b/pandas/stats/misc.py index c79bae34f20c4..ef663b25e9ca0 100644 --- a/pandas/stats/misc.py +++ b/pandas/stats/misc.py @@ -42,6 +42,94 @@ def correl_ts(frame1, frame2): def correl_xs(frame1, frame2): return correl_ts(frame1.T, frame2.T) +def percentileofscore(a, score, kind='rank'): + """The percentile rank of a score relative to a list of scores. + + A `percentileofscore` of, for example, 80% means that 80% of the + scores in `a` are below the given score. In the case of gaps or + ties, the exact definition depends on the optional keyword, `kind`. + + Parameters + ---------- + a: array like + Array of scores to which `score` is compared. + score: int or float + Score that is compared to the elements in `a`. + kind: {'rank', 'weak', 'strict', 'mean'}, optional + This optional parameter specifies the interpretation of the + resulting score: + + - "rank": Average percentage ranking of score. In case of + multiple matches, average the percentage rankings of + all matching scores. + - "weak": This kind corresponds to the definition of a cumulative + distribution function. A percentileofscore of 80% + means that 80% of values are less than or equal + to the provided score. + - "strict": Similar to "weak", except that only values that are + strictly less than the given score are counted. + - "mean": The average of the "weak" and "strict" scores, often used in + testing. See + + http://en.wikipedia.org/wiki/Percentile_rank + + Returns + ------- + pcos : float + Percentile-position of score (0-100) relative to `a`. 
+ + Examples + -------- + Three-quarters of the given values lie below a given score: + + >>> percentileofscore([1, 2, 3, 4], 3) + 75.0 + + With multiple matches, note how the scores of the two matches, 0.6 + and 0.8 respectively, are averaged: + + >>> percentileofscore([1, 2, 3, 3, 4], 3) + 70.0 + + Only 2/5 values are strictly less than 3: + + >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') + 40.0 + + But 4/5 values are less than or equal to 3: + + >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') + 80.0 + + The average between the weak and the strict scores is + + >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') + 60.0 + + """ + a = np.array(a) + n = len(a) + + if kind == 'rank': + if not(np.any(a == score)): + a = np.append(a, score) + a_len = np.array(lrange(len(a))) + else: + a_len = np.array(lrange(len(a))) + 1.0 + + a = np.sort(a) + idx = [a == score] + pct = (np.mean(a_len[idx]) / n) * 100.0 + return pct + + elif kind == 'strict': + return sum(a < score) / float(n) * 100 + elif kind == 'weak': + return sum(a <= score) / float(n) * 100 + elif kind == 'mean': + return (sum(a < score) + sum(a <= score)) * 50 / float(n) + else: + raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'") def percentileRank(frame, column=None, kind='mean'): """ @@ -76,7 +164,6 @@ def percentileRank(frame, column=None, kind='mean'): ------- TimeSeries or DataFrame, depending on input """ - from pandas.compat.scipy import percentileofscore fun = lambda xs, score: percentileofscore(remove_na(xs), score, kind=kind) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4e5b00a6db765..84bed9be76939 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11142,7 +11142,8 @@ def test_cumprod(self): df.cumprod(1) def test_rank(self): - from pandas.compat.scipy import rankdata + _skip_if_no_scipy() + from scipy.stats import rankdata self.frame['A'][::2] = np.nan self.frame['B'][::3] = np.nan @@ -11235,7 +11236,8 
@@ def test_rank2(self): def test_rank_na_option(self): - from pandas.compat.scipy import rankdata + _skip_if_no_scipy() + from scipy.stats import rankdata self.frame['A'][::2] = np.nan self.frame['B'][::3] = np.nan diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6c732fe352d6a..2385f9ef514fc 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4063,7 +4063,8 @@ def test_nsmallest_nlargest(self): assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]]) def test_rank(self): - from pandas.compat.scipy import rankdata + _skip_if_no_scipy() + from scipy.stats import rankdata self.ts[::2] = np.nan self.ts[:10][::3] = 4. diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index 0bf3f1bec9706..c1316f068de29 100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -9,6 +9,12 @@ import pandas.algos as algos from datetime import datetime +def _skip_if_no_scipy(): + try: + import scipy.stats + except ImportError: + raise nose.SkipTest("scipy not installed") + class TestTseriesUtil(tm.TestCase): _multiprocess_can_split_ = True @@ -335,7 +341,8 @@ def test_convert_objects_complex_number(): def test_rank(): - from pandas.compat.scipy import rankdata + _skip_if_no_scipy() + from scipy.stats import rankdata def _check(arr): mask = ~np.isfinite(arr)
re-branch of https://github.com/pydata/pandas/pull/7253#issue-34432164 -Move `percentileofscore` to `stats.misc`; Update `percentileRank` to reflect the move. -Change testing references to `compat.scipy.rankdata()` , to use `scipy.stats.rankdata` directly -Delete compat.scipy
https://api.github.com/repos/pandas-dev/pandas/pulls/7296
2014-05-31T17:50:48Z
2014-06-01T02:51:45Z
2014-06-01T02:51:45Z
2014-07-02T11:30:39Z
fix string format for Python 2.6
diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 36af8b1da0e30..82ad17258113f 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -622,7 +622,7 @@ def _convert_to_style(cls, style_dict): if openpyxl_compat.is_compat(): register_writer(_OpenpyxlWriter) else: - warn('Installed openpyxl is not supported at this time. Use >={} and <{}.' + warn('Installed openpyxl is not supported at this time. Use >={0} and <{1}.' .format(openpyxl_compat.start_ver, openpyxl_compat.stop_ver))
This PR fixes a minor string formatting problem when running under Python 2.6
https://api.github.com/repos/pandas-dev/pandas/pulls/7291
2014-05-31T05:07:04Z
2014-05-31T08:47:29Z
null
2014-06-18T15:50:56Z
TST: Use ISO8601 format for DTI partial slicing
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index b5034993f34fd..e0aea9a1a29b1 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1973,8 +1973,8 @@ def test_range_slice_outofbounds(self): tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2]) tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty) tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty) - tm.assert_frame_equal(df['2013/06':'2013/09'], empty) - tm.assert_frame_equal(df['2013/11':'2013/12'], empty) + tm.assert_frame_equal(df['2013-06':'2013-09'], empty) + tm.assert_frame_equal(df['2013-11':'2013-12'], empty) def test_pindex_qaccess(self): pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
Closes #7289.
https://api.github.com/repos/pandas-dev/pandas/pulls/7290
2014-05-31T04:16:41Z
2014-05-31T08:36:26Z
2014-05-31T08:36:26Z
2014-06-28T10:58:37Z
BUG/CI/TST: fix format strings
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt index d90774e159ccc..4ed488795fb07 100644 --- a/ci/requirements-2.6.txt +++ b/ci/requirements-2.6.txt @@ -10,6 +10,6 @@ sqlalchemy==0.8.1 scipy==0.11.0 statsmodels==0.4.3 xlwt==0.7.5 -openpyxl==1.6.2 +openpyxl==2.0.3 xlsxwriter==0.4.6 xlrd==0.9.2 diff --git a/ci/requirements-3.2.txt b/ci/requirements-3.2.txt index c15871730fa16..40a5310af0c6f 100644 --- a/ci/requirements-3.2.txt +++ b/ci/requirements-3.2.txt @@ -1,6 +1,5 @@ python-dateutil==2.1 pytz==2013b -openpyxl==1.6.2 xlsxwriter==0.4.6 xlrd==0.9.2 numpy==1.7.1 diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0d4dbae07413a..b430d2eed5a10 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -27,6 +27,8 @@ users upgrade to this version. API changes ~~~~~~~~~~~ +- Openpyxl now raises a ValueError on construction of the openpyxl writer + instead of warning on pandas import (:issue:`7284`). .. _whatsnew_0141.prior_deprecations: diff --git a/pandas/compat/openpyxl_compat.py b/pandas/compat/openpyxl_compat.py index 4a48cdac98dd2..25ba83d58aaed 100644 --- a/pandas/compat/openpyxl_compat.py +++ b/pandas/compat/openpyxl_compat.py @@ -9,18 +9,16 @@ start_ver = '1.6.1' stop_ver = '2.0.0' + def is_compat(): - """ - Detect whether the installed version of openpyxl is supported - Returns True/False if openpyxl is installed, None otherwise - """ - try: - import openpyxl - except ImportError: - return None + """Detect whether the installed version of openpyxl is supported. + Returns + ------- + compat : bool + ``True`` if openpyxl is installed and is between versions 1.6.1 and + 2.0.0, ``False`` otherwise. 
+ """ + import openpyxl ver = LooseVersion(openpyxl.__version__) - if ver < LooseVersion(start_ver) or LooseVersion(stop_ver) <= ver: - return False - - return True + return LooseVersion(start_ver) < ver <= LooseVersion(stop_ver) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 36af8b1da0e30..6372d83f50051 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -523,6 +523,11 @@ class _OpenpyxlWriter(ExcelWriter): supported_extensions = ('.xlsx', '.xlsm') def __init__(self, path, engine=None, **engine_kwargs): + if not openpyxl_compat.is_compat(): + raise ValueError('Installed openpyxl is not supported at this ' + 'time. Use >={0} and ' + '<{1}.'.format(openpyxl_compat.start_ver, + openpyxl_compat.stop_ver)) # Use the openpyxl module as the Excel writer. from openpyxl.workbook import Workbook @@ -618,12 +623,7 @@ def _convert_to_style(cls, style_dict): return xls_style - -if openpyxl_compat.is_compat(): - register_writer(_OpenpyxlWriter) -else: - warn('Installed openpyxl is not supported at this time. Use >={} and <{}.' 
- .format(openpyxl_compat.start_ver, openpyxl_compat.stop_ver)) +register_writer(_OpenpyxlWriter) class _XlwtWriter(ExcelWriter): diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index ef7e57076c908..5928e99ed055c 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -5,6 +5,8 @@ import os from distutils.version import LooseVersion +import operator +import functools import nose from numpy import nan @@ -45,10 +47,6 @@ def _skip_if_no_openpyxl(): except ImportError: raise nose.SkipTest('openpyxl not installed, skipping') - if not openpyxl_compat.is_compat(): - raise nose.SkipTest('need %s <= openpyxl < %s, skipping' % - (openpyxl_compat.start_ver, openpyxl_compat.stop_ver)) - def _skip_if_no_xlsxwriter(): try: @@ -884,7 +882,6 @@ def test_to_excel_output_encoding(self): result = read_excel(filename, 'TestSheet', encoding='utf8') tm.assert_frame_equal(result, df) - def test_to_excel_unicode_filename(self): _skip_if_no_xlrd() with ensure_clean(u('\u0192u.') + self.ext) as filename: @@ -1094,13 +1091,36 @@ def test_swapped_columns(self): tm.assert_series_equal(write_frame['B'], read_frame['B']) +def raise_wrapper(orig_method): + @functools.wraps(orig_method) + def wrapped(self, *args, **kwargs): + _skip_if_no_openpyxl() + if openpyxl_compat.is_compat(): + orig_method(self, *args, **kwargs) + else: + msg = 'Installed openpyxl is not supported at this time\. 
Use.+' + with tm.assertRaisesRegexp(ValueError, msg): + orig_method(self, *args, **kwargs) + return wrapped + + +def raise_on_incompat_version(cls): + methods = filter(operator.methodcaller('startswith', 'test_'), dir(cls)) + for method in methods: + setattr(cls, method, raise_wrapper(getattr(cls, method))) + return cls + + +@raise_on_incompat_version class OpenpyxlTests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl' - check_skip = staticmethod(_skip_if_no_openpyxl) + check_skip = staticmethod(lambda *args, **kwargs: None) def test_to_excel_styleconverter(self): _skip_if_no_openpyxl() + if not openpyxl_compat.is_compat(): + raise nose.SkipTest('incompatiable openpyxl version') import openpyxl @@ -1114,17 +1134,17 @@ def test_to_excel_styleconverter(self): xlsx_style = _OpenpyxlWriter._convert_to_style(hstyle) self.assertTrue(xlsx_style.font.bold) self.assertEqual(openpyxl.style.Border.BORDER_THIN, - xlsx_style.borders.top.border_style) + xlsx_style.borders.top.border_style) self.assertEqual(openpyxl.style.Border.BORDER_THIN, - xlsx_style.borders.right.border_style) + xlsx_style.borders.right.border_style) self.assertEqual(openpyxl.style.Border.BORDER_THIN, - xlsx_style.borders.bottom.border_style) + xlsx_style.borders.bottom.border_style) self.assertEqual(openpyxl.style.Border.BORDER_THIN, - xlsx_style.borders.left.border_style) + xlsx_style.borders.left.border_style) self.assertEqual(openpyxl.style.Alignment.HORIZONTAL_CENTER, - xlsx_style.alignment.horizontal) + xlsx_style.alignment.horizontal) self.assertEqual(openpyxl.style.Alignment.VERTICAL_TOP, - xlsx_style.alignment.vertical) + xlsx_style.alignment.vertical) class XlwtTests(ExcelWriterBase, tm.TestCase): @@ -1160,6 +1180,7 @@ class XlsxWriterTests(ExcelWriterBase, tm.TestCase): check_skip = staticmethod(_skip_if_no_xlsxwriter) +@raise_on_incompat_version class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl' @@ -1196,6 +1217,8 @@ def 
test_ExcelWriter_dispatch(self): import xlsxwriter writer_klass = _XlsxWriter except ImportError: + if not openpyxl_compat.is_compat(): + raise nose.SkipTest('incompatible openpyxl version') _skip_if_no_openpyxl() writer_klass = _OpenpyxlWriter @@ -1246,6 +1269,7 @@ def check_called(func): check_called(lambda: df.to_excel('something.xls', engine='dummy')) set_option('io.excel.xlsx.writer', val) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
Also test against an unsupported version of openpyxl (2.0.3) closes #7284
https://api.github.com/repos/pandas-dev/pandas/pulls/7285
2014-05-30T15:54:36Z
2014-06-02T11:29:30Z
2014-06-02T11:29:30Z
2014-06-12T12:04:55Z
ENH/TST: add anonymous reading of s3 for public buckets
diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index 01b321e6fdd6e..2e0e20b047ee0 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -20,3 +20,4 @@ scipy==0.13.3 beautifulsoup4==4.2.1 statsmodels==0.5.0 bigquery==2.0.17 +boto==2.26.1 diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index b430d2eed5a10..4c71fd111dca5 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -51,6 +51,7 @@ Known Issues Enhancements ~~~~~~~~~~~~ +- Tests for basic reading of public S3 buckets now exist (:issue:`7281`). .. _whatsnew_0141.performance: diff --git a/pandas/io/common.py b/pandas/io/common.py index d6b2827f94d36..daf441f2cdb8c 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -126,7 +126,12 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None): # Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY # are environment variables parsed_url = parse_url(filepath_or_buffer) - conn = boto.connect_s3() + + try: + conn = boto.connect_s3() + except boto.exception.NoAuthHandlerFound: + conn = boto.connect_s3(anon=True) + b = conn.get_bucket(parsed_url.netloc) k = boto.s3.key.Key(b) k.key = parsed_url.path diff --git a/pandas/io/tests/data/tips.csv b/pandas/io/tests/data/tips.csv new file mode 100644 index 0000000000000..856a65a69e647 --- /dev/null +++ b/pandas/io/tests/data/tips.csv @@ -0,0 +1,245 @@ +total_bill,tip,sex,smoker,day,time,size +16.99,1.01,Female,No,Sun,Dinner,2 +10.34,1.66,Male,No,Sun,Dinner,3 +21.01,3.5,Male,No,Sun,Dinner,3 +23.68,3.31,Male,No,Sun,Dinner,2 +24.59,3.61,Female,No,Sun,Dinner,4 +25.29,4.71,Male,No,Sun,Dinner,4 +8.77,2.0,Male,No,Sun,Dinner,2 +26.88,3.12,Male,No,Sun,Dinner,4 +15.04,1.96,Male,No,Sun,Dinner,2 +14.78,3.23,Male,No,Sun,Dinner,2 +10.27,1.71,Male,No,Sun,Dinner,2 +35.26,5.0,Female,No,Sun,Dinner,4 +15.42,1.57,Male,No,Sun,Dinner,2 +18.43,3.0,Male,No,Sun,Dinner,4 +14.83,3.02,Female,No,Sun,Dinner,2 +21.58,3.92,Male,No,Sun,Dinner,2 +10.33,1.67,Female,No,Sun,Dinner,3 
+16.29,3.71,Male,No,Sun,Dinner,3 +16.97,3.5,Female,No,Sun,Dinner,3 +20.65,3.35,Male,No,Sat,Dinner,3 +17.92,4.08,Male,No,Sat,Dinner,2 +20.29,2.75,Female,No,Sat,Dinner,2 +15.77,2.23,Female,No,Sat,Dinner,2 +39.42,7.58,Male,No,Sat,Dinner,4 +19.82,3.18,Male,No,Sat,Dinner,2 +17.81,2.34,Male,No,Sat,Dinner,4 +13.37,2.0,Male,No,Sat,Dinner,2 +12.69,2.0,Male,No,Sat,Dinner,2 +21.7,4.3,Male,No,Sat,Dinner,2 +19.65,3.0,Female,No,Sat,Dinner,2 +9.55,1.45,Male,No,Sat,Dinner,2 +18.35,2.5,Male,No,Sat,Dinner,4 +15.06,3.0,Female,No,Sat,Dinner,2 +20.69,2.45,Female,No,Sat,Dinner,4 +17.78,3.27,Male,No,Sat,Dinner,2 +24.06,3.6,Male,No,Sat,Dinner,3 +16.31,2.0,Male,No,Sat,Dinner,3 +16.93,3.07,Female,No,Sat,Dinner,3 +18.69,2.31,Male,No,Sat,Dinner,3 +31.27,5.0,Male,No,Sat,Dinner,3 +16.04,2.24,Male,No,Sat,Dinner,3 +17.46,2.54,Male,No,Sun,Dinner,2 +13.94,3.06,Male,No,Sun,Dinner,2 +9.68,1.32,Male,No,Sun,Dinner,2 +30.4,5.6,Male,No,Sun,Dinner,4 +18.29,3.0,Male,No,Sun,Dinner,2 +22.23,5.0,Male,No,Sun,Dinner,2 +32.4,6.0,Male,No,Sun,Dinner,4 +28.55,2.05,Male,No,Sun,Dinner,3 +18.04,3.0,Male,No,Sun,Dinner,2 +12.54,2.5,Male,No,Sun,Dinner,2 +10.29,2.6,Female,No,Sun,Dinner,2 +34.81,5.2,Female,No,Sun,Dinner,4 +9.94,1.56,Male,No,Sun,Dinner,2 +25.56,4.34,Male,No,Sun,Dinner,4 +19.49,3.51,Male,No,Sun,Dinner,2 +38.01,3.0,Male,Yes,Sat,Dinner,4 +26.41,1.5,Female,No,Sat,Dinner,2 +11.24,1.76,Male,Yes,Sat,Dinner,2 +48.27,6.73,Male,No,Sat,Dinner,4 +20.29,3.21,Male,Yes,Sat,Dinner,2 +13.81,2.0,Male,Yes,Sat,Dinner,2 +11.02,1.98,Male,Yes,Sat,Dinner,2 +18.29,3.76,Male,Yes,Sat,Dinner,4 +17.59,2.64,Male,No,Sat,Dinner,3 +20.08,3.15,Male,No,Sat,Dinner,3 +16.45,2.47,Female,No,Sat,Dinner,2 +3.07,1.0,Female,Yes,Sat,Dinner,1 +20.23,2.01,Male,No,Sat,Dinner,2 +15.01,2.09,Male,Yes,Sat,Dinner,2 +12.02,1.97,Male,No,Sat,Dinner,2 +17.07,3.0,Female,No,Sat,Dinner,3 +26.86,3.14,Female,Yes,Sat,Dinner,2 +25.28,5.0,Female,Yes,Sat,Dinner,2 +14.73,2.2,Female,No,Sat,Dinner,2 +10.51,1.25,Male,No,Sat,Dinner,2 +17.92,3.08,Male,Yes,Sat,Dinner,2 
+27.2,4.0,Male,No,Thur,Lunch,4 +22.76,3.0,Male,No,Thur,Lunch,2 +17.29,2.71,Male,No,Thur,Lunch,2 +19.44,3.0,Male,Yes,Thur,Lunch,2 +16.66,3.4,Male,No,Thur,Lunch,2 +10.07,1.83,Female,No,Thur,Lunch,1 +32.68,5.0,Male,Yes,Thur,Lunch,2 +15.98,2.03,Male,No,Thur,Lunch,2 +34.83,5.17,Female,No,Thur,Lunch,4 +13.03,2.0,Male,No,Thur,Lunch,2 +18.28,4.0,Male,No,Thur,Lunch,2 +24.71,5.85,Male,No,Thur,Lunch,2 +21.16,3.0,Male,No,Thur,Lunch,2 +28.97,3.0,Male,Yes,Fri,Dinner,2 +22.49,3.5,Male,No,Fri,Dinner,2 +5.75,1.0,Female,Yes,Fri,Dinner,2 +16.32,4.3,Female,Yes,Fri,Dinner,2 +22.75,3.25,Female,No,Fri,Dinner,2 +40.17,4.73,Male,Yes,Fri,Dinner,4 +27.28,4.0,Male,Yes,Fri,Dinner,2 +12.03,1.5,Male,Yes,Fri,Dinner,2 +21.01,3.0,Male,Yes,Fri,Dinner,2 +12.46,1.5,Male,No,Fri,Dinner,2 +11.35,2.5,Female,Yes,Fri,Dinner,2 +15.38,3.0,Female,Yes,Fri,Dinner,2 +44.3,2.5,Female,Yes,Sat,Dinner,3 +22.42,3.48,Female,Yes,Sat,Dinner,2 +20.92,4.08,Female,No,Sat,Dinner,2 +15.36,1.64,Male,Yes,Sat,Dinner,2 +20.49,4.06,Male,Yes,Sat,Dinner,2 +25.21,4.29,Male,Yes,Sat,Dinner,2 +18.24,3.76,Male,No,Sat,Dinner,2 +14.31,4.0,Female,Yes,Sat,Dinner,2 +14.0,3.0,Male,No,Sat,Dinner,2 +7.25,1.0,Female,No,Sat,Dinner,1 +38.07,4.0,Male,No,Sun,Dinner,3 +23.95,2.55,Male,No,Sun,Dinner,2 +25.71,4.0,Female,No,Sun,Dinner,3 +17.31,3.5,Female,No,Sun,Dinner,2 +29.93,5.07,Male,No,Sun,Dinner,4 +10.65,1.5,Female,No,Thur,Lunch,2 +12.43,1.8,Female,No,Thur,Lunch,2 +24.08,2.92,Female,No,Thur,Lunch,4 +11.69,2.31,Male,No,Thur,Lunch,2 +13.42,1.68,Female,No,Thur,Lunch,2 +14.26,2.5,Male,No,Thur,Lunch,2 +15.95,2.0,Male,No,Thur,Lunch,2 +12.48,2.52,Female,No,Thur,Lunch,2 +29.8,4.2,Female,No,Thur,Lunch,6 +8.52,1.48,Male,No,Thur,Lunch,2 +14.52,2.0,Female,No,Thur,Lunch,2 +11.38,2.0,Female,No,Thur,Lunch,2 +22.82,2.18,Male,No,Thur,Lunch,3 +19.08,1.5,Male,No,Thur,Lunch,2 +20.27,2.83,Female,No,Thur,Lunch,2 +11.17,1.5,Female,No,Thur,Lunch,2 +12.26,2.0,Female,No,Thur,Lunch,2 +18.26,3.25,Female,No,Thur,Lunch,2 +8.51,1.25,Female,No,Thur,Lunch,2 
+10.33,2.0,Female,No,Thur,Lunch,2 +14.15,2.0,Female,No,Thur,Lunch,2 +16.0,2.0,Male,Yes,Thur,Lunch,2 +13.16,2.75,Female,No,Thur,Lunch,2 +17.47,3.5,Female,No,Thur,Lunch,2 +34.3,6.7,Male,No,Thur,Lunch,6 +41.19,5.0,Male,No,Thur,Lunch,5 +27.05,5.0,Female,No,Thur,Lunch,6 +16.43,2.3,Female,No,Thur,Lunch,2 +8.35,1.5,Female,No,Thur,Lunch,2 +18.64,1.36,Female,No,Thur,Lunch,3 +11.87,1.63,Female,No,Thur,Lunch,2 +9.78,1.73,Male,No,Thur,Lunch,2 +7.51,2.0,Male,No,Thur,Lunch,2 +14.07,2.5,Male,No,Sun,Dinner,2 +13.13,2.0,Male,No,Sun,Dinner,2 +17.26,2.74,Male,No,Sun,Dinner,3 +24.55,2.0,Male,No,Sun,Dinner,4 +19.77,2.0,Male,No,Sun,Dinner,4 +29.85,5.14,Female,No,Sun,Dinner,5 +48.17,5.0,Male,No,Sun,Dinner,6 +25.0,3.75,Female,No,Sun,Dinner,4 +13.39,2.61,Female,No,Sun,Dinner,2 +16.49,2.0,Male,No,Sun,Dinner,4 +21.5,3.5,Male,No,Sun,Dinner,4 +12.66,2.5,Male,No,Sun,Dinner,2 +16.21,2.0,Female,No,Sun,Dinner,3 +13.81,2.0,Male,No,Sun,Dinner,2 +17.51,3.0,Female,Yes,Sun,Dinner,2 +24.52,3.48,Male,No,Sun,Dinner,3 +20.76,2.24,Male,No,Sun,Dinner,2 +31.71,4.5,Male,No,Sun,Dinner,4 +10.59,1.61,Female,Yes,Sat,Dinner,2 +10.63,2.0,Female,Yes,Sat,Dinner,2 +50.81,10.0,Male,Yes,Sat,Dinner,3 +15.81,3.16,Male,Yes,Sat,Dinner,2 +7.25,5.15,Male,Yes,Sun,Dinner,2 +31.85,3.18,Male,Yes,Sun,Dinner,2 +16.82,4.0,Male,Yes,Sun,Dinner,2 +32.9,3.11,Male,Yes,Sun,Dinner,2 +17.89,2.0,Male,Yes,Sun,Dinner,2 +14.48,2.0,Male,Yes,Sun,Dinner,2 +9.6,4.0,Female,Yes,Sun,Dinner,2 +34.63,3.55,Male,Yes,Sun,Dinner,2 +34.65,3.68,Male,Yes,Sun,Dinner,4 +23.33,5.65,Male,Yes,Sun,Dinner,2 +45.35,3.5,Male,Yes,Sun,Dinner,3 +23.17,6.5,Male,Yes,Sun,Dinner,4 +40.55,3.0,Male,Yes,Sun,Dinner,2 +20.69,5.0,Male,No,Sun,Dinner,5 +20.9,3.5,Female,Yes,Sun,Dinner,3 +30.46,2.0,Male,Yes,Sun,Dinner,5 +18.15,3.5,Female,Yes,Sun,Dinner,3 +23.1,4.0,Male,Yes,Sun,Dinner,3 +15.69,1.5,Male,Yes,Sun,Dinner,2 +19.81,4.19,Female,Yes,Thur,Lunch,2 +28.44,2.56,Male,Yes,Thur,Lunch,2 +15.48,2.02,Male,Yes,Thur,Lunch,2 +16.58,4.0,Male,Yes,Thur,Lunch,2 +7.56,1.44,Male,No,Thur,Lunch,2 
+10.34,2.0,Male,Yes,Thur,Lunch,2 +43.11,5.0,Female,Yes,Thur,Lunch,4 +13.0,2.0,Female,Yes,Thur,Lunch,2 +13.51,2.0,Male,Yes,Thur,Lunch,2 +18.71,4.0,Male,Yes,Thur,Lunch,3 +12.74,2.01,Female,Yes,Thur,Lunch,2 +13.0,2.0,Female,Yes,Thur,Lunch,2 +16.4,2.5,Female,Yes,Thur,Lunch,2 +20.53,4.0,Male,Yes,Thur,Lunch,4 +16.47,3.23,Female,Yes,Thur,Lunch,3 +26.59,3.41,Male,Yes,Sat,Dinner,3 +38.73,3.0,Male,Yes,Sat,Dinner,4 +24.27,2.03,Male,Yes,Sat,Dinner,2 +12.76,2.23,Female,Yes,Sat,Dinner,2 +30.06,2.0,Male,Yes,Sat,Dinner,3 +25.89,5.16,Male,Yes,Sat,Dinner,4 +48.33,9.0,Male,No,Sat,Dinner,4 +13.27,2.5,Female,Yes,Sat,Dinner,2 +28.17,6.5,Female,Yes,Sat,Dinner,3 +12.9,1.1,Female,Yes,Sat,Dinner,2 +28.15,3.0,Male,Yes,Sat,Dinner,5 +11.59,1.5,Male,Yes,Sat,Dinner,2 +7.74,1.44,Male,Yes,Sat,Dinner,2 +30.14,3.09,Female,Yes,Sat,Dinner,4 +12.16,2.2,Male,Yes,Fri,Lunch,2 +13.42,3.48,Female,Yes,Fri,Lunch,2 +8.58,1.92,Male,Yes,Fri,Lunch,1 +15.98,3.0,Female,No,Fri,Lunch,3 +13.42,1.58,Male,Yes,Fri,Lunch,2 +16.27,2.5,Female,Yes,Fri,Lunch,2 +10.09,2.0,Female,Yes,Fri,Lunch,2 +20.45,3.0,Male,No,Sat,Dinner,4 +13.28,2.72,Male,No,Sat,Dinner,2 +22.12,2.88,Female,Yes,Sat,Dinner,2 +24.01,2.0,Male,Yes,Sat,Dinner,4 +15.69,3.0,Male,Yes,Sat,Dinner,3 +11.61,3.39,Male,No,Sat,Dinner,2 +10.77,1.47,Male,No,Sat,Dinner,2 +15.53,3.0,Male,Yes,Sat,Dinner,2 +10.07,1.25,Male,No,Sat,Dinner,2 +12.6,1.0,Male,Yes,Sat,Dinner,2 +32.83,1.17,Male,Yes,Sat,Dinner,2 +35.83,4.67,Female,No,Sat,Dinner,3 +29.03,5.92,Male,No,Sat,Dinner,3 +27.18,2.0,Female,Yes,Sat,Dinner,2 +22.67,2.0,Male,Yes,Sat,Dinner,2 +17.82,1.75,Male,No,Sat,Dinner,2 +18.78,3.0,Female,No,Thur,Dinner,2 diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 6d928a24977f0..16449b317d0b6 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -3414,6 +3414,36 @@ def test_convert_sql_column_decimals(self): assert_same_values_and_dtype(result, expected) +class TestS3(tm.TestCase): + def setUp(self): + try: + import boto + 
except ImportError: + raise nose.SkipTest("boto not installed") + + if compat.PY3: + raise nose.SkipTest("boto incompatible with Python 3") + + @tm.network + def test_parse_public_s3_bucket(self): + import nose.tools as nt + df = pd.read_csv('s3://nyqpug/tips.csv') + nt.assert_true(isinstance(df, pd.DataFrame)) + nt.assert_false(df.empty) + tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df) + + @tm.network + def test_s3_fails(self): + import boto + with tm.assertRaisesRegexp(boto.exception.S3ResponseError, + 'S3ResponseError: 404 Not Found'): + pd.read_csv('s3://nyqpug/asdf.csv') + + with tm.assertRaisesRegexp(boto.exception.S3ResponseError, + 'S3ResponseError: 403 Forbidden'): + pd.read_csv('s3://cant_get_it/tips.csv') + + def assert_same_values_and_dtype(res, exp): tm.assert_equal(res.dtype, exp.dtype) tm.assert_almost_equal(res, exp)
closes #7246
https://api.github.com/repos/pandas-dev/pandas/pulls/7281
2014-05-29T23:51:45Z
2014-06-02T16:33:13Z
2014-06-02T16:33:13Z
2014-06-12T09:23:03Z
FIX DataFrame diff with timedelta
diff --git a/pandas/core/common.py b/pandas/core/common.py index 00fa970c0f77a..884a841f6da7c 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -813,7 +813,8 @@ def diff(arr, n, axis=0): na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None) out_arr[tuple(na_indexer)] = na - if arr.ndim == 2 and arr.dtype.name in _diff_special: + if (arr.ndim == 2 and arr.dtype.name in _diff_special + and dtype != 'timedelta64[ns]'): f = _diff_special[arr.dtype.name] f(arr, out_arr, n, axis) else: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4e5b00a6db765..f67d7bfb811d9 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -9285,6 +9285,16 @@ def test_diff_float_n(self): xp = self.tsframe.diff(1) assert_frame_equal(rs, xp) + def test_diff_timedelta(self): + df = DataFrame(dict(time=[Timestamp('20130101 9:01'), + Timestamp('20130101 9:02')], + value=[1.0,2.0])) + res = df.diff() + exp = DataFrame([[pd.NaT, np.nan], + [np.timedelta64(int(6e10), 'ns'), 1]], + columns=['time', 'value']) + assert_frame_equal(res, exp) + def test_pct_change(self): rs = self.tsframe.pct_change(fill_method=None) assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
fixes #4533 _need to fix test in numpy 1.6..._
https://api.github.com/repos/pandas-dev/pandas/pulls/7280
2014-05-29T21:40:05Z
2014-09-09T23:25:57Z
null
2014-09-09T23:30:37Z
BUG: Index.min and max doesnt handle nan and NaT properly
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0aa30e536ef48..ef8de452d1d38 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -65,3 +65,4 @@ There are no experimental changes in 0.14.1 Bug Fixes ~~~~~~~~~ +- Bug in ``Index.min`` and ``max`` doesn't handle ``nan`` and ``NaT`` properly (:issue:`7261`) diff --git a/pandas/core/base.py b/pandas/core/base.py index 0e7bc0fee8a48..6bbcc33c2271b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -236,11 +236,13 @@ def _wrap_access_object(self, obj): def max(self): """ The maximum value of the object """ - return self.values.max() + import pandas.core.nanops + return pandas.core.nanops.nanmax(self.values) def min(self): """ The minimum value of the object """ - return self.values.min() + import pandas.core.nanops + return pandas.core.nanops.nanmin(self.values) def value_counts(self, normalize=False, sort=True, ascending=False, bins=None): diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 927e096f8d769..4aaab3b2c52a5 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1,5 +1,5 @@ import re -from datetime import timedelta +from datetime import datetime, timedelta import numpy as np import pandas.compat as compat import pandas as pd @@ -210,6 +210,39 @@ def test_ops(self): expected = expected.astype('M8[ns]').astype('int64') self.assertEqual(result.value, expected) + def test_nanops(self): + # GH 7261 + for op in ['max','min']: + for klass in [Index, Series]: + + obj = klass([np.nan, 2.0]) + self.assertEqual(getattr(obj, op)(), 2.0) + + obj = klass([np.nan]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + obj = klass([]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + obj = klass([pd.NaT, datetime(2011, 11, 1)]) + # check DatetimeIndex monotonic path + self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1)) + + obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT]) + # check DatetimeIndex non-monotonic path + 
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1)) + + # explicitly create DatetimeIndex + obj = DatetimeIndex([]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + obj = DatetimeIndex([pd.NaT]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) + self.assertTrue(pd.isnull(getattr(obj, op)())) + + def test_value_counts_unique_nunique(self): for o in self.objs: klass = type(o) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 0e18509d9bc26..d83022c814d0a 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1758,20 +1758,28 @@ def min(self, axis=None): """ Overridden ndarray.min to return a Timestamp """ - if self.is_monotonic: - return self[0] + mask = self.asi8 == tslib.iNaT + masked = self[~mask] + if len(masked) == 0: + return tslib.NaT + elif self.is_monotonic: + return masked[0] else: - min_stamp = self.asi8.min() + min_stamp = masked.asi8.min() return Timestamp(min_stamp, tz=self.tz) def max(self, axis=None): """ Overridden ndarray.max to return a Timestamp """ - if self.is_monotonic: - return self[-1] + mask = self.asi8 == tslib.iNaT + masked = self[~mask] + if len(masked) == 0: + return tslib.NaT + elif self.is_monotonic: + return masked[-1] else: - max_stamp = self.asi8.max() + max_stamp = masked.asi8.max() return Timestamp(max_stamp, tz=self.tz) def to_julian_date(self):
Closes #7261.
https://api.github.com/repos/pandas-dev/pandas/pulls/7279
2014-05-29T20:05:50Z
2014-05-30T14:24:07Z
2014-05-30T14:24:07Z
2014-06-29T10:25:42Z
BUG: hist raises TypeError when df contains non numeric column
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0aa30e536ef48..f9df328e7f3a3 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -65,3 +65,5 @@ There are no experimental changes in 0.14.1 Bug Fixes ~~~~~~~~~ +- BUG in ``DataFrame.hist`` raises ``TypeError`` when it contains non numeric column (:issue:`7277`) + diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 1b9691257347b..2aced69bc3e54 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1464,14 +1464,12 @@ def test_kde(self): @slow def test_hist(self): - df = DataFrame(randn(100, 4)) - _check_plot_works(df.hist) - _check_plot_works(df.hist, grid=False) + _check_plot_works(self.hist_df.hist) # make sure layout is handled df = DataFrame(randn(100, 3)) - _check_plot_works(df.hist) - axes = df.hist(grid=False) + axes = _check_plot_works(df.hist, grid=False) + self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) self.assertFalse(axes[1, 1].get_visible()) df = DataFrame(randn(100, 1)) @@ -1479,7 +1477,8 @@ def test_hist(self): # make sure layout is handled df = DataFrame(randn(100, 6)) - _check_plot_works(df.hist) + axes = _check_plot_works(df.hist, layout=(4, 2)) + self._check_axes_shape(axes, axes_num=6, layout=(4, 2)) # make sure sharex, sharey is handled _check_plot_works(df.hist, sharex=True, sharey=True) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 028334afbd62c..475f5085d55f5 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4086,7 +4086,8 @@ def test_series_groupby_plotting_nominally_works(self): n = 10 weight = Series(np.random.normal(166, 20, size=n)) height = Series(np.random.normal(60, 10, size=n)) - gender = tm.choice(['male', 'female'], size=n) + with tm.RNGContext(42): + gender = tm.choice(['male', 'female'], size=n) weight.groupby(gender).plot() tm.close() @@ -4118,7 +4119,8 @@ def test_frame_groupby_plot_boxplot(self): n = 10 weight = 
Series(np.random.normal(166, 20, size=n)) height = Series(np.random.normal(60, 10, size=n)) - gender = tm.choice(['male', 'female'], size=n) + with tm.RNGContext(42): + gender = tm.choice(['male', 'female'], size=n) df = DataFrame({'height': height, 'weight': weight, 'gender': gender}) gb = df.groupby('gender') @@ -4136,11 +4138,6 @@ def test_frame_groupby_plot_boxplot(self): res = df.groupby('gender').hist() tm.close() - df2 = df.copy() - df2['gender2'] = df['gender'] - with tm.assertRaisesRegexp(TypeError, '.*str.+float'): - df2.groupby('gender').hist() - @slow def test_frame_groupby_hist(self): _skip_if_mpl_not_installed() @@ -4152,7 +4149,8 @@ def test_frame_groupby_hist(self): n = 10 weight = Series(np.random.normal(166, 20, size=n)) height = Series(np.random.normal(60, 10, size=n)) - gender_int = tm.choice([0, 1], size=n) + with tm.RNGContext(42): + gender_int = tm.choice([0, 1], size=n) df_int = DataFrame({'height': height, 'weight': weight, 'gender': gender_int}) gb = df_int.groupby('gender') diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 00f7a61870369..25c7f5a7e6c0a 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2536,6 +2536,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, if not isinstance(column, (list, np.ndarray)): column = [column] data = data[column] + data = data._get_numeric_data() naxes = len(data.columns) nrows, ncols = _get_layout(naxes, layout=layout)
`DataFrame.hist` without `by` keyword raises `TypeError` when it contains non-numeric column. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas.util.testing as tm n=100 gender = tm.choice(['Male', 'Female'], size=n) classroom = tm.choice(['A', 'B', 'C'], size=n) single = tm.choice(['S'], size=n) df = pd.DataFrame({'gender': gender, 'classroom': classroom, 'height': np.random.normal(66, 4, size=n), 'weight': np.random.normal(161, 32, size=n), 'category': np.random.randint(4, size=n)}) df.hist(by='gender') # This works, even though 'classroom' (str) column in still contained df.hist() # TypeError: cannot concatenate 'str' and 'float' objects ```
https://api.github.com/repos/pandas-dev/pandas/pulls/7277
2014-05-29T13:54:32Z
2014-05-30T19:02:13Z
null
2014-06-13T16:34:47Z
BUG: TimeGrouper doesnt exclude the column specified by key
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0aa30e536ef48..1bd15bc1ace02 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -65,3 +65,4 @@ There are no experimental changes in 0.14.1 Bug Fixes ~~~~~~~~~ +- Bug in ``TimeGrouper`` doesn't exclude column specified by ``key`` (:issue:`7227`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 258005c8a08a9..1b07e2fb0aeab 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1927,7 +1927,10 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True): # a passed in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj) - return grouper, [], obj + if key.key is None: + return grouper, [], obj + else: + return grouper, set([key.key]), obj # already have a BaseGrouper, just return it elif isinstance(key, BaseGrouper): diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index e85b9887bb671..45d17052d904b 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -699,6 +699,32 @@ def test_resample_consistency(self): assert_series_equal(s10_2, r10_2) assert_series_equal(s10_2, rl) + def test_resample_timegrouper(self): + # GH 7227 + dates = [datetime(2014, 10, 1), datetime(2014, 9, 3), + datetime(2014, 11, 5), datetime(2014, 9, 5), + datetime(2014, 10, 8), datetime(2014, 7, 15)] + + df = DataFrame(dict(A=dates, B=np.arange(len(dates)))) + result = df.set_index('A').resample('M', how='count') + exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', '2014-09-30', + '2014-10-31', '2014-11-30'], freq='M', name='A') + expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx) + assert_frame_equal(result, expected) + + result = df.groupby(pd.Grouper(freq='M', key='A')).count() + assert_frame_equal(result, expected) + + df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates)))) + result = 
df.set_index('A').resample('M', how='count') + expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]}, + index=exp_idx, columns=['B', 'C']) + assert_frame_equal(result, expected) + + result = df.groupby(pd.Grouper(freq='M', key='A')).count() + assert_frame_equal(result, expected) + + def _simple_ts(start, end, freq='D'): rng = date_range(start, end, freq=freq) return Series(np.random.randn(len(rng)), index=rng)
This solves latter exclude column issue described in #7227.
https://api.github.com/repos/pandas-dev/pandas/pulls/7276
2014-05-29T13:10:18Z
2014-05-30T15:35:32Z
2014-05-30T15:35:32Z
2014-06-13T19:38:11Z
DOC/RLS: remove duplicate content in release.rst and move remaining to whatsnew
diff --git a/doc/source/release.rst b/doc/source/release.rst index fa541baa4e058..14b5741a81712 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -50,486 +50,24 @@ pandas 0.14.0 **Release date:** (May 31, 2014) -New features -~~~~~~~~~~~~ - -- Officially support Python 3.4 -- ``Index`` returns a MultiIndex if passed a list of tuples - ``DataFrame(dict)`` and ``Series(dict)`` create ``MultiIndex`` - columns and index where applicable (:issue:`3323`) -- Hexagonal bin plots from ``DataFrame.plot`` with ``kind='hexbin'`` (:issue:`5478`) -- Pie plots from ``Series.plot`` and ``DataFrame.plot`` with ``kind='pie'`` (:issue:`6976`) -- Added the ``sym_diff`` method to ``Index`` (:issue:`5543`) -- Added ``to_julian_date`` to ``TimeStamp`` and ``DatetimeIndex``. The Julian - Date is used primarily in astronomy and represents the number of days from - noon, January 1, 4713 BC. Because nanoseconds are used to define the time - in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) -- Added error bar support to the ``.plot`` method of ``DataFrame`` and ``Series`` (:issue:`3796`, :issue:`6834`) -- Implemented ``Panel.pct_change`` (:issue:`6904`) -- The SQL reading and writing functions now support more database flavors - through SQLAlchemy (:issue:`2717`, :issue:`4163`, :issue:`5950`, :issue:`6292`). - -API Changes -~~~~~~~~~~~ - -- ``read_excel`` uses 0 as the default sheet (:issue:`6573`) -- ``iloc`` will now accept out-of-bounds indexers, e.g. a value that exceeds the length of the object being - indexed. These will be excluded. This will make pandas conform more with pandas/numpy indexing of out-of-bounds - values. 
A single indexer that is out-of-bounds and drops the dimensions of the object will still raise - ``IndexError`` (:issue:`6296`) -- In ``HDFStore``, ``select_as_multiple`` will always raise a ``KeyError``, when a key or the selector is not found (:issue:`6177`) -- ``df['col'] = value`` and ``df.loc[:,'col'] = value`` are now completely equivalent; - previously the ``.loc`` would not necessarily coerce the dtype of the resultant series (:issue:`6149`) -- ``dtypes`` and ``ftypes`` now return a series with ``dtype=object`` on empty containers (:issue:`5740`) -- ``df.to_csv`` will now return a string of the CSV data if neither a target path nor a buffer is provided - (:issue:`6061`) -- ``df.to_html`` will now print out the header of an empty dataframe (:issue:`6062`) -- The ``interpolate`` ``downcast`` keyword default has been changed from ``infer`` to - ``None``. This is to preseve the original dtype unless explicitly requested otherwise (:issue:`6290`). -- ``Series`` and ``Index`` now internall share more common operations, e.g. ``factorize(),nunique(),value_counts()`` are - now supported on ``Index`` types as well. The ``Series.weekday`` property from is removed - from Series for API consistency. Using a ``DatetimeIndex/PeriodIndex`` method on a Series will now raise a ``TypeError``. - (:issue:`4551`, :issue:`4056`, :issue:`5519`, :issue:`6380`, :issue:`7206`). 
- -- Add ``is_month_start``, ``is_month_end``, ``is_quarter_start``, ``is_quarter_end``, - ``is_year_start``, ``is_year_end`` accessors for ``DateTimeIndex`` / ``Timestamp`` which return a boolean array - of whether the timestamp(s) are at the start/end of the month/quarter/year defined by the - frequency of the ``DateTimeIndex`` / ``Timestamp`` (:issue:`4565`, :issue:`6998`)) - -- ``pd.infer_freq()`` will now raise a ``TypeError`` if given an invalid ``Series/Index`` - type (:issue:`6407`, :issue:`6463`) - -- Local variable usage has changed in - :func:`pandas.eval`/:meth:`DataFrame.eval`/:meth:`DataFrame.query` - (:issue:`5987`). For the :class:`~pandas.DataFrame` methods, two things have - changed - - - Column names are now given precedence over locals - - Local variables must be referred to explicitly. This means that even if - you have a local variable that is *not* a column you must still refer to - it with the ``'@'`` prefix. - - You can have an expression like ``df.query('@a < a')`` with no complaints - from ``pandas`` about ambiguity of the name ``a``. - - The top-level :func:`pandas.eval` function does not allow you use the - ``'@'`` prefix and provides you with an error message telling you so. - - ``NameResolutionError`` was removed because it isn't necessary anymore. - -- ``concat`` will now concatenate mixed Series and DataFrames using the Series name - or numbering columns as needed (:issue:`2385`) -- Slicing and advanced/boolean indexing operations on ``Index`` classes as well - as :meth:`Index.delete` and :meth:`Index.drop` methods will no longer change the type of the - resulting index (:issue:`6440`, :issue:`7040`) -- ``set_index`` no longer converts MultiIndexes to an Index of tuples (:issue:`6459`). 
-- Slicing with negative start, stop & step values handles corner cases better (:issue:`6531`): - - - ``df.iloc[:-len(df)]`` is now empty - - ``df.iloc[len(df)::-1]`` now enumerates all elements in reverse - -- Better propagation/preservation of Series names when performing groupby - operations: - - - ``SeriesGroupBy.agg`` will ensure that the name attribute of the original - series is propagated to the result (:issue:`6265`). - - If the function provided to ``GroupBy.apply`` returns a named series, the - name of the series will be kept as the name of the column index of the - DataFrame returned by ``GroupBy.apply`` (:issue:`6124`). This facilitates - ``DataFrame.stack`` operations where the name of the column index is used as - the name of the inserted column containing the pivoted data. - -- Allow specification of a more complex groupby, via ``pd.Grouper`` (:issue:`3794`) -- A tuple passed to ``DataFame.sort_index`` will be interpreted as the levels of - the index, rather than requiring a list of tuple (:issue:`4370`) -- Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`) -- Following keywords are now acceptable for :meth:`DataFrame.plot` with ``kind='bar'`` and ``kind='barh'``: - - - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) - - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to - matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center). 
(:issue:`6604`) - -- Define and document the order of column vs index names in query/eval (:issue:`6676`) -- ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) -- ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers - to a non-unique item in the ``Index`` (previously raised a ``KeyError``). (:issue:`6738`) -- all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) -- ``Series.iteritems()`` is now lazy (returns an iterator rather than a list). This was the documented behavior prior to 0.14. (:issue:`6760`) -- ``to_excel`` now converts ``np.inf`` into a string representation, - customizable by the ``inf_rep`` keyword argument (Excel has no native inf - representation) (:issue:`6782`) -- Arithmetic ops on bool dtype arrays/scalars now give a warning indicating - that they are evaluated in Python space (:issue:`6762`, :issue:`7210`). -- Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) - -- ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``. The ``table`` keyword can receive the following values. - - - ``False``: Do nothing (default). - - ``True``: Draw a table using the ``DataFrame`` or ``Series`` called ``plot`` method. Data will be transposed to meet matplotlib's default layout. - - ``DataFrame`` or ``Series``: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). - Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. 
- -- drop unused order argument from ``Series.sort``; args now in the same orders as ``Series.order``; - add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) -- default sorting algorithm for ``Series.order`` is now ``quicksort``, to conform with ``Series.sort`` - (and numpy defaults) -- add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) - -- Replace ``pandas.compat.scipy.scoreatpercentile`` with ``numpy.percentile`` (:issue:`6810`) -- ``.quantile`` on a ``datetime[ns]`` series now returns ``Timestamp`` instead - of ``np.datetime64`` objects (:issue:`6810`) -- change ``AssertionError`` to ``TypeError`` for invalid types passed to ``concat`` (:issue:`6583`) -- Add :class:`~pandas.io.parsers.ParserWarning` class for fallback and option - validation warnings in :func:`read_csv`/:func:`read_table` (:issue:`6607`) -- Raise a ``TypeError`` when ``DataFrame`` is passed an iterator as the - ``data`` argument (:issue:`5357`) -- groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`, :issue:`6732`), - as its already the index -- ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`) -- Line plot can be stacked by ``stacked=True``. 
(:issue:`6656`) -- Raise ``ValueError`` when ``sep`` specified with - ``delim_whitespace=True`` in :func:`read_csv`/:func:`read_table` - (:issue:`6607`) -- Raise ``ValueError`` when ``engine='c'`` specified with unsupported - options in :func:`read_csv`/:func:`read_table` (:issue:`6607`) -- Raise ``ValueError`` when fallback to python parser causes options to be - ignored (:issue:`6607`) -- Produce :class:`~pandas.io.parsers.ParserWarning` on fallback to python - parser when no options are ignored (:issue:`6607`) -- Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) -- :meth:`DataFrame.describe` on a DataFrame with a mix of Timestamp and string like objects - returns a different Index (:issue:`7088`). Previously the index was unintentionally sorted. -- arithmetic operations with **only** ``bool`` dtypes now raise an error - (:issue:`7011`, :issue:`6762`, :issue:`7015`) -- :meth:`DataFrame.boxplot` has a new keyword argument, `return_type`. It accepts ``'dict'``, - ``'axes'``, or ``'both'``, in which case a namedtuple with the matplotlib - axes and a dict of matplotlib Lines is returned. - -Known Issues -~~~~~~~~~~~~ - -- OpenPyXL 2.0.0 breaks backwards compatibility (:issue:`7169`) - -Deprecations -~~~~~~~~~~~~ - -- The :func:`pivot_table`/:meth:`DataFrame.pivot_table` and :func:`crosstab` functions - now take arguments ``index`` and ``columns`` instead of ``rows`` and ``cols``. A - ``FutureWarning`` is raised to alert that the old ``rows`` and ``cols`` arguments - will not be supported in a future release (:issue:`5505`) - -- The :meth:`DataFrame.drop_duplicates` and :meth:`DataFrame.duplicated` methods - now take argument ``subset`` instead of ``cols`` to better align with - :meth:`DataFrame.dropna`. 
A ``FutureWarning`` is raised to alert that the old - ``cols`` arguments will not be supported in a future release (:issue:`6680`) - -- The :meth:`DataFrame.to_csv` and :meth:`DataFrame.to_excel` functions - now takes argument ``columns`` instead of ``cols``. A - ``FutureWarning`` is raised to alert that the old ``cols`` arguments - will not be supported in a future release (:issue:`6645`) - -- Indexers will warn ``FutureWarning`` when used with a scalar indexer and - a non-floating point Index (:issue:`4892`, :issue:`6960`) - -- Numpy 1.9 compat w.r.t. deprecation warnings (:issue:`6960`) - -- :meth:`Panel.shift` now has a function signature that matches :meth:`DataFrame.shift`. - The old positional argument ``lags`` has been changed to a keyword argument - ``periods`` with a default value of 1. A ``FutureWarning`` is raised if the - old argument ``lags`` is used by name. (:issue:`6910`) - -- The ``order`` keyword argument of :func:`factorize` will be removed. (:issue:`6926`). - -- Remove the ``copy`` keyword from :meth:`DataFrame.xs`, :meth:`Panel.major_xs`, :meth:`Panel.minor_xs`. A view will be - returned if possible, otherwise a copy will be made. Previously the user could think that ``copy=False`` would - ALWAYS return a view. (:issue:`6894`) - -- The :func:`parallel_coordinates` function now takes argument ``color`` - instead of ``colors``. A ``FutureWarning`` is raised to alert that - the old ``colors`` argument will not be supported in a future release. (:issue:`6956`) - -- The :func:`parallel_coordinates` and :func:`andrews_curves` functions now take - positional argument ``frame`` instead of ``data``. A ``FutureWarning`` is - raised if the old ``data`` argument is used by name. (:issue:`6956`) - -- The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. - MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). 
- -- The following ``io.sql`` functions have been deprecated: ``tquery``, ``uquery``, ``read_frame``, ``frame_query``, ``write_frame``. +This is a major release from 0.13.1 and includes a number of API changes, several new features, enhancements, and +performance improvements along with a large number of bug fixes. -- The `percentile_width` keyword argument in :meth:`~DataFrame.describe` has been deprecated. - Use the `percentiles` keyword instead, which takes a list of percentiles to display. The - default output is unchanged. +Highlights include: -- The default return type of :func:`boxplot` will change from a dict to a matpltolib Axes - in a future release. You can use the future behavior now by passing ``return_type='axes'`` - to boxplot. - -Prior Version Deprecations/Changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- Remove :class:`DateRange` in favor of :class:`DatetimeIndex` (:issue:`6816`) - -- Remove ``column`` keyword from ``DataFrame.sort`` (:issue:`4370`) - -- Remove ``precision`` keyword from :func:`set_eng_float_format` (:issue:`395`) - -- Remove ``force_unicode`` keyword from :meth:`DataFrame.to_string`, - :meth:`DataFrame.to_latex`, and :meth:`DataFrame.to_html`; these function - encode in unicode by default (:issue:`2224`, :issue:`2225`) - -- Remove ``nanRep`` keyword from :meth:`DataFrame.to_csv` and - :meth:`DataFrame.to_string` (:issue:`275`) - -- Remove ``unique`` keyword from :meth:`HDFStore.select_column` (:issue:`3256`) - -- Remove ``inferTimeRule`` keyword from :func:`Timestamp.offset` (:issue:`391`) - -- Remove ``name`` keyword from :func:`get_data_yahoo` and - :func:`get_data_google` ( `commit b921d1a <https://github.com/pydata/pandas/commit/b921d1a2>`__ ) - -- Remove ``offset`` keyword from :class:`DatetimeIndex` constructor - ( `commit 3136390 <https://github.com/pydata/pandas/commit/3136390>`__ ) - -- Remove ``time_rule`` from several rolling-moment statistical functions, such - as :func:`rolling_sum` (:issue:`1042`) - -- Removed neg 
``-`` boolean operations on numpy arrays in favor of inv ``~``, as this is going to - be deprecated in numpy 1.9 (:issue:`6960`) - -Experimental Features -~~~~~~~~~~~~~~~~~~~~~ - - -Improvements to existing features -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- pd.read_clipboard will, if the keyword ``sep`` is unspecified, try to detect data copied from a spreadsheet - and parse accordingly. (:issue:`6223`) -- pd.expanding_apply and pd.rolling_apply now take args and kwargs that are passed on to - the func (:issue:`6289`) -- ``plot(legend='reverse')`` will now reverse the order of legend labels for most plot kinds. - (:issue:`6014`) -- Allow multi-index slicers (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :issue:`5641`, :issue:`7106`) -- improve performance of slice indexing on Series with string keys (:issue:`6341`, :issue:`6372`) -- implement joining a single-level indexed DataFrame on a matching column of a multi-indexed DataFrame (:issue:`3662`) -- Performance improvement in indexing into a multi-indexed Series (:issue:`5567`) -- Testing statements updated to use specialized asserts (:issue:`6175`) -- ``DataFrame.rank()`` now has a percentage rank option (:issue:`5971`) -- ``Series.rank()`` now has a percentage rank option (:issue:`5971`) -- ``Series.rank()`` and ``DataFrame.rank()`` now accept ``method='dense'`` for ranks without gaps (:issue:`6514`) -- ``quotechar``, ``doublequote``, and ``escapechar`` can now be specified when - using ``DataFrame.to_csv`` (:issue:`5414`, :issue:`4528`) -- perf improvements in DataFrame construction with certain offsets, by removing faulty caching - (e.g. 
MonthEnd,BusinessMonthEnd), (:issue:`6479`) -- perf improvements in single-dtyped indexing (:issue:`6484`) -- ``StataWriter`` and ``DataFrame.to_stata`` accept time stamp and data labels (:issue:`6545`) -- offset/freq info now in Timestamp __repr__ (:issue:`4553`) -- Support passing ``encoding`` with xlwt (:issue:`3710`) -- Performance improvement when converting ``DatetimeIndex`` to floating ordinals - using ``DatetimeConverter`` (:issue:`6636`) -- Performance improvement for ``DataFrame.shift`` (:issue:`5609`) -- Performance improvements in timedelta conversions for integer dtypes (:issue:`6754`) -- Performance improvement for ``DataFrame.from_records`` when reading a - specified number of rows from an iterable (:issue:`6700`) -- :ref:`Holidays and holiday calendars<timeseries.holiday>` are now available and can be used with CustomBusinessDay (:issue:`6719`) -- ``Float64Index`` is now backed by a ``float64`` dtype ndarray instead of an - ``object`` dtype array (:issue:`6471`). -- Add option to turn off escaping in ``DataFrame.to_latex`` (:issue:`6472`) -- Added ``how`` option to rolling-moment functions to dictate how to handle resampling; :func:``rolling_max`` defaults to max, - :func:``rolling_min`` defaults to min, and all others default to mean (:issue:`6297`) -- ``pd.stats.moments.rolling_var`` now uses Welford's method for increased numerical stability (:issue:`6817`) -- Translate ``sep='\s+'`` to ``delim_whitespace=True`` in - :func:`read_csv`/:func:`read_table` if no other C-unsupported options - specified (:issue:`6607`) -- ``read_excel`` can now read milliseconds in Excel dates and times with xlrd >= 0.9.3. (:issue:`5945`) -- ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) -- Improved performance of compatible pickles (:issue:`6899`) -- Refactor Block classes removing `Block.items` attributes to avoid duplication - in item handling (:issue:`6745`, :issue:`6988`). 
-- Improve performance in certain reindexing operations by optimizing ``take_2d`` (:issue:`6749`) -- Arrays of strings can be wrapped to a specified width (``str.wrap``) (:issue:`6999`) -- ``GroupBy.count()`` is now implemented in Cython and is much faster for large - numbers of groups (:issue:`7016`). -- ``boxplot`` now supports ``layout`` keyword (:issue:`6769`) -- Regression in the display of a MultiIndexed Series with ``display.max_rows`` is less than the - length of the series (:issue:`7101`) -- :meth:`~DataFrame.describe` now accepts an array of percentiles to include in the summary statistics (:issue:`4196`) -- allow option ``'truncate'`` for ``display.show_dimensions`` to only show the dimensions if the - frame is truncated (:issue:`6547`) - -.. _release.bug_fixes-0.14.0: - -Bug Fixes -~~~~~~~~~ +- Officially support Python 3.4 +- SQL interfaces updated to use ``sqlalchemy``, see :ref:`here<whatsnew_0140.sql>`. +- Display interface changes, see :ref:`here<whatsnew_0140.display>` +- MultiIndexing using Slicers, see :ref:`here<whatsnew_0140.slicers>`. +- Ability to join a singly-indexed DataFrame with a multi-indexed DataFrame, see :ref:`here <merging.join_on_mi>` +- More consistency in groupby results and more flexible groupby specifications, see :ref:`here<whatsnew_0140.groupby>` +- Holiday calendars are now supported in ``CustomBusinessDay``, see :ref:`here <timeseries.holiday>` +- Several improvements in plotting functions, including: hexbin, area and pie plots, see :ref:`here<whatsnew_0140.plotting>`. +- Performance doc section on I/O operations, see :ref:`here <io.perf>` + +See the :ref:`v0.14.0 Whatsnew <whatsnew_0140>` overview or the issue tracker on GitHub for an extensive list +of all API changes, enhancements and bugs that have been fixed in 0.14.0. 
-- Bug in Series ValueError when index doesn't match data (:issue:`6532`) -- Prevent segfault due to MultiIndex not being supported in HDFStore table - format (:issue:`1848`) -- Bug in ``pd.DataFrame.sort_index`` where mergesort wasn't stable when ``ascending=False`` (:issue:`6399`) -- Bug in ``pd.tseries.frequencies.to_offset`` when argument has leading zeroes (:issue:`6391`) -- Bug in version string gen. for dev versions with shallow clones / install from tarball (:issue:`6127`) -- Inconsistent tz parsing ``Timestamp`` / ``to_datetime`` for current year (:issue:`5958`) -- Indexing bugs with reordered indexes (:issue:`6252`, :issue:`6254`) -- Bug in ``.xs`` with a Series multiindex (:issue:`6258`, :issue:`5684`) -- Bug in conversion of a string types to a DatetimeIndex with a specified frequency (:issue:`6273`, :issue:`6274`) -- Bug in ``eval`` where type-promotion failed for large expressions (:issue:`6205`) -- Bug in interpolate with ``inplace=True`` (:issue:`6281`) -- ``HDFStore.remove`` now handles start and stop (:issue:`6177`) -- ``HDFStore.select_as_multiple`` handles start and stop the same way as ``select`` (:issue:`6177`) -- ``HDFStore.select_as_coordinates`` and ``select_column`` works with a ``where`` clause that results in filters (:issue:`6177`) -- Regression in join of non_unique_indexes (:issue:`6329`) -- Issue with groupby ``agg`` with a single function and a a mixed-type frame (:issue:`6337`) -- Bug in ``DataFrame.replace()`` when passing a non- ``bool`` - ``to_replace`` argument (:issue:`6332`) -- Raise when trying to align on different levels of a multi-index assignment (:issue:`3738`) -- Bug in setting complex dtypes via boolean indexing (:issue:`6345`) -- Bug in TimeGrouper/resample when presented with a non-monotonic DatetimeIndex that would return invalid results. (:issue:`4161`) -- Bug in index name propogation in TimeGrouper/resample (:issue:`4161`) -- TimeGrouper has a more compatible API to the rest of the groupers (e.g. 
``groups`` was missing) (:issue:`3881`) -- Bug in multiple grouping with a TimeGrouper depending on target column order (:issue:`6764`) -- Bug in ``pd.eval`` when parsing strings with possible tokens like ``'&'`` - (:issue:`6351`) -- Bug correctly handle placements of ``-inf`` in Panels when dividing by integer 0 (:issue:`6178`) -- ``DataFrame.shift`` with ``axis=1`` was raising (:issue:`6371`) -- Disabled clipboard tests until release time (run locally with ``nosetests -A disabled``) (:issue:`6048`). -- Bug in ``DataFrame.replace()`` when passing a nested ``dict`` that contained - keys not in the values to be replaced (:issue:`6342`) -- ``str.match`` ignored the na flag (:issue:`6609`). -- Bug in take with duplicate columns that were not consolidated (:issue:`6240`) -- Bug in interpolate changing dtypes (:issue:`6290`) -- Bug in ``Series.get``, was using a buggy access method (:issue:`6383`) -- Bug in hdfstore queries of the form ``where=[('date', '>=', datetime(2013,1,1)), ('date', '<=', datetime(2014,1,1))]`` (:issue:`6313`) -- Bug in ``DataFrame.dropna`` with duplicate indices (:issue:`6355`) -- Regression in chained getitem indexing with embedded list-like from 0.12 (:issue:`6394`) -- ``Float64Index`` with nans not comparing correctly (:issue:`6401`) -- ``eval``/``query`` expressions with strings containing the ``@`` character - will now work (:issue:`6366`). -- Bug in ``Series.reindex`` when specifying a ``method`` with some nan values was inconsistent (noted on a resample) (:issue:`6418`) -- Bug in :meth:`DataFrame.replace` where nested dicts were erroneously - depending on the order of dictionary keys and values (:issue:`5338`). 
-- Perf issue in concatting with empty objects (:issue:`3259`) -- Clarify sorting of ``sym_diff`` on ``Index`` objects with ``NaN`` values (:issue:`6444`) -- Regression in ``MultiIndex.from_product`` with a ``DatetimeIndex`` as input (:issue:`6439`) -- Bug in ``str.extract`` when passed a non-default index (:issue:`6348`) -- Bug in ``str.split`` when passed ``pat=None`` and ``n=1`` (:issue:`6466`) -- Bug in ``io.data.DataReader`` when passed ``"F-F_Momentum_Factor"`` and ``data_source="famafrench"`` (:issue:`6460`) -- Bug in ``sum`` of a ``timedelta64[ns]`` series (:issue:`6462`) -- Bug in ``resample`` with a timezone and certain offsets (:issue:`6397`) -- Bug in ``iat/iloc`` with duplicate indices on a Series (:issue:`6493`) -- Bug in ``read_html`` where nan's were incorrectly being used to indicate - missing values in text. Should use the empty string for consistency with the - rest of pandas (:issue:`5129`). -- Bug in ``read_html`` tests where redirected invalid URLs would make one test - fail (:issue:`6445`). -- Bug in multi-axis indexing using ``.loc`` on non-unique indices (:issue:`6504`) -- Bug that caused _ref_locs corruption when slice indexing across columns axis of a DataFrame (:issue:`6525`) -- Regression from 0.13 in the treatment of numpy ``datetime64`` non-ns dtypes in Series creation (:issue:`6529`) -- ``.names`` attribute of MultiIndexes passed to ``set_index`` are now preserved (:issue:`6459`). 
-- Bug in setitem with a duplicate index and an alignable rhs (:issue:`6541`) -- Bug in setitem with ``.loc`` on mixed integer Indexes (:issue:`6546`) -- Bug in ``pd.read_stata`` which would use the wrong data types and missing values (:issue:`6327`) -- Bug in ``DataFrame.to_stata`` that lead to data loss in certain cases, and could be exported using the - wrong data types and missing values (:issue:`6335`) -- ``StataWriter`` replaces missing values in string columns by empty string (:issue:`6802`) -- Inconsistent types in ``Timestamp`` addition/subtraction (:issue:`6543`) -- Bug in preserving frequency across Timestamp addition/subtraction (:issue:`4547`) -- Bug in empty list lookup caused ``IndexError`` exceptions (:issue:`6536`, :issue:`6551`) -- ``Series.quantile`` raising on an ``object`` dtype (:issue:`6555`) -- Bug in ``.xs`` with a ``nan`` in level when dropped (:issue:`6574`) -- Bug in fillna with ``method='bfill/ffill'`` and ``datetime64[ns]`` dtype (:issue:`6587`) -- Bug in sql writing with mixed dtypes possibly leading to data loss (:issue:`6509`) -- Bug in ``Series.pop`` (:issue:`6600`) -- Bug in ``iloc`` indexing when positional indexer matched ``Int64Index`` of the corresponding axis and no reordering happened (:issue:`6612`) -- Bug in ``fillna`` with ``limit`` and ``value`` specified -- Bug in ``DataFrame.to_stata`` when columns have non-string names (:issue:`4558`) -- Bug in compat with ``np.compress``, surfaced in (:issue:`6658`) -- Bug in binary operations with a rhs of a Series not aligning (:issue:`6681`) -- Bug in ``DataFrame.to_stata`` which incorrectly handles nan values and ignores ``with_index`` keyword argument (:issue:`6685`) -- Bug in resample with extra bins when using an evenly divisible frequency (:issue:`4076`) -- Bug in consistency of groupby aggregation when passing a custom function (:issue:`6715`) -- Bug in resample when ``how=None`` resample freq is the same as the axis frequency (:issue:`5955`) -- Bug in downcasting inference 
with empty arrays (:issue:`6733`) -- Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) -- Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) -- Bug in ``DataFrame.replace()`` where regex metacharacters were being treated - as regexs even when ``regex=False`` (:issue:`6777`). -- Bug in timedelta ops on 32-bit platforms (:issue:`6808`) -- Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) -- Bug in expressions.py where numexpr would try to evaluate arithmetic ops - (:issue:`6762`). -- Bug in Makefile where it didn't remove Cython generated C files with ``make - clean`` (:issue:`6768`) -- Bug with numpy < 1.7.2 when reading long strings from ``HDFStore`` (:issue:`6166`) -- Bug in ``DataFrame._reduce`` where non bool-like (0/1) integers were being - coverted into bools. (:issue:`6806`) -- Regression from 0.13 with ``fillna`` and a Series on datetime-like (:issue:`6344`) -- Bug in adding ``np.timedelta64`` to ``DatetimeIndex`` with timezone outputs incorrect results (:issue:`6818`) -- Bug in ``DataFrame.replace()`` where changing a dtype through replacement - would only replace the first occurrence of a value (:issue:`6689`) -- Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) -- Bug in ``Series.__unicode__`` when ``max_rows=None`` and the Series has more than 1000 rows. 
(:issue:`6863`) -- Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) -- Bug in ``groupBy.get_group`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`6914`) -- Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` converting ``NaT`` incorrectly (:issue:`5546`) -- Bug in arithmetic operations affecting ``NaT`` (:issue:`6873`) -- Bug in ``Series.str.extract`` where the resulting ``Series`` from a single - group match wasn't renamed to the group name -- Bug in ``DataFrame.to_csv`` where setting ``index=False`` ignored the - ``header`` kwarg (:issue:`6186`) -- Bug in ``DataFrame.plot`` and ``Series.plot``, where the legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) -- Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) -- accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) -- Bug in C parser with leading whitespace (:issue:`3374`) -- Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines -- Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) -- Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) -- Bug in ``DataFrame.apply`` with functions that used \*args`` or \*\*kwargs and returned - an empty result (:issue:`6952`) -- Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) -- Moved ``Panel.shift`` to ``NDFrame.slice_shift`` and fixed to respect multiple dtypes. 
(:issue:`6959`) -- Bug in enabling ``subplots=True`` in ``DataFrame.plot`` only has single column raises ``TypeError``, and ``Series.plot`` raises ``AttributeError`` (:issue:`6951`) -- Bug in ``DataFrame.plot`` draws unnecessary axes when enabling ``subplots`` and ``kind=scatter`` (:issue:`6951`) -- Bug in ``read_csv`` from a filesystem with non-utf-8 encoding (:issue:`6807`) -- Bug in ``iloc`` when setting / aligning (:issue:`6766`) -- Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) -- Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) -- Bug surfaced in ``groupby.plot`` when using a ``Float64Index`` (:issue:`7025`) -- Stopped tests from failing if options data isn't able to be downloaded from Yahoo (:issue:`7034`) -- Bug in ``parallel_coordinates`` and ``radviz`` where reordering of class column - caused possible color/class mismatch (:issue:`6956`) -- Bug in ``radviz`` and ``andrews_curves`` where multiple values of 'color' - were being passed to plotting method (:issue:`6956`) -- Bug in ``Float64Index.isin()`` where containing ``nan`` s would make indices - claim that they contained all the things (:issue:`7066`). -- Bug in ``DataFrame.boxplot`` where it failed to use the axis passed as the ``ax`` argument (:issue:`3578`) -- Bug in the ``XlsxWriter`` and ``XlwtWriter`` implementations that resulted in datetime columns being formatted without the time (:issue:`7075`) - were being passed to plotting method -- :func:`read_fwf` treats ``None`` in ``colspec`` like regular python slices. 
It now reads from the beginning - or until the end of the line when ``colspec`` contains a ``None`` (previously raised a ``TypeError``) -- Bug in cache coherence with chained indexing and slicing; add ``_is_view`` property to ``NDFrame`` to correctly predict - views; mark ``is_copy`` on ``xs`` only if its an actual copy (and not a view) (:issue:`7084`) -- Bug in DatetimeIndex creation from string ndarray with ``dayfirst=True`` (:issue:`5917`) -- Bug in ``MultiIndex.from_arrays`` created from ``DatetimeIndex`` doesn't preserve ``freq`` and ``tz`` (:issue:`7090`) -- Bug in ``unstack`` raises ``ValueError`` when ``MultiIndex`` contains ``PeriodIndex`` (:issue:`4342`) -- Bug in ``boxplot`` and ``hist`` draws unnecessary axes (:issue:`6769`) -- Regression in ``groupby.nth()`` for out-of-bounds indexers (:issue:`6621`) -- Bug in ``quantile`` with datetime values (:issue:`6965`) -- Bug in ``Dataframe.set_index``, ``reindex`` and ``pivot`` don't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`3950`, :issue:`5878`, :issue:`6631`) -- Bug in ``MultiIndex.get_level_values`` doesn't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`7092`) -- Bug in ``Groupby`` doesn't preserve ``tz`` (:issue:`3950`) -- Bug in ``PeriodIndex`` partial string slicing (:issue:`6716`) -- Bug in the HTML repr of a truncated Series or DataFrame not showing the class name with the `large_repr` set to 'info' - (:issue:`7105`) -- Bug in ``DatetimeIndex`` specifying ``freq`` raises ``ValueError`` when passed value is too short (:issue:`7098`) -- Fixed a bug with the `info` repr not honoring the `display.max_info_columns` setting (:issue:`6939`) -- Bug ``PeriodIndex`` string slicing with out of bounds values (:issue:`5407`) -- Fixed a memory error in the hashtable implementation/factorizer on resizing of large tables (:issue:`7157`) -- Bug in ``isnull`` when applied to 0-dimensional object arrays (:issue:`7176`) -- Bug in ``query``/``eval`` where global constants were 
not looked up correctly - (:issue:`7178`) -- Bug in recognizing out-of-bounds positional list indexers with ``iloc`` and a multi-axis tuple indexer (:issue:`7189`) -- Bug in setitem with a single value, multi-index and integer indices (:issue:`7190`, :issue:`7218`) -- Bug in expressions evaluation with reversed ops, showing in series-dataframe ops (:issue:`7198`, :issue:`7192`) -- Bug in multi-axis indexing with > 2 ndim and a multi-index (:issue:`7199`) pandas 0.13.1 ------------- diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index c4e3fb672aef2..96ab3d1e58d5c 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -16,7 +16,7 @@ users upgrade to this version. - Ability to join a singly-indexed DataFrame with a multi-indexed DataFrame, see :ref:`Here <merging.join_on_mi>` - More consistency in groupby results and more flexible groupby specifications, See :ref:`Here<whatsnew_0140.groupby>` - Holiday calendars are now supported in ``CustomBusinessDay``, see :ref:`Here <timeseries.holiday>` - - Updated plotting options, See :ref:`Here<whatsnew_0140.plotting>`. + - Several improvements in plotting functions, including: hexbin, area and pie plots, see :ref:`Here<whatsnew_0140.plotting>`. - Performance doc section on I/O operations, See :ref:`Here <io.perf>` - :ref:`Other Enhancements <whatsnew_0140.enhancements>` @@ -35,7 +35,7 @@ users upgrade to this version. - :ref:`Known Issues <whatsnew_0140.knownissues>` -- :ref:`Bug Fixes <release.bug_fixes-0.14.0>` +- :ref:`Bug Fixes <whatsnew_0140.bug_fixes>` .. warning:: @@ -51,7 +51,7 @@ API changes - ``read_excel`` uses 0 as the default sheet (:issue:`6573`) - ``iloc`` will now accept out-of-bounds indexers for slices, e.g. a value that exceeds the length of the object being indexed. These will be excluded. This will make pandas conform more with python/numpy indexing of out-of-bounds - values. A single indexer / list of indexers that is out-of-bounds will still raise + values. 
A single indexer that is out-of-bounds and drops the dimensions of the object will still raise ``IndexError`` (:issue:`6296`, :issue:`6299`). This could result in an empty axis (e.g. an empty DataFrame being returned) .. ipython:: python @@ -72,6 +72,10 @@ API changes dfl.iloc[:,4] IndexError: single positional indexer is out-of-bounds +- Slicing with negative start, stop & step values handles corner cases better (:issue:`6531`): + + - ``df.iloc[:-len(df)]`` is now empty + - ``df.iloc[len(df)::-1]`` now enumerates all elements in reverse - The :meth:`DataFrame.interpolate` keyword ``downcast`` default has been changed from ``infer`` to ``None``. This is to preseve the original dtype unless explicitly requested otherwise (:issue:`6290`). @@ -99,6 +103,7 @@ API changes ``'@'`` prefix and provides you with an error message telling you so. - ``NameResolutionError`` was removed because it isn't necessary anymore. +- Define and document the order of column vs index names in query/eval (:issue:`6676`) - ``concat`` will now concatenate mixed Series and DataFrames using the Series name or numbering columns as needed (:issue:`2385`). See :ref:`the docs <merging.mixed_ndims>` - Slicing and advanced/boolean indexing operations on ``Index`` classes as well @@ -175,18 +180,20 @@ API changes - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) - ``stack`` and ``unstack`` now raise a ``ValueError`` when the ``level`` keyword refers - to a non-unique item in the ``Index`` (previously raised a ``KeyError``). + to a non-unique item in the ``Index`` (previously raised a ``KeyError``). 
(:issue:`6738`) - drop unused order argument from ``Series.sort``; args now are in the same order as ``Series.order``; add ``na_position`` arg to conform to ``Series.order`` (:issue:`6847`) - default sorting algorithm for ``Series.order`` is now ``quicksort``, to conform with ``Series.sort`` (and numpy defaults) - add ``inplace`` keyword to ``Series.order/sort`` to make them inverses (:issue:`6859`) +- ``DataFrame.sort`` now places NaNs at the beginning or end of the sort according to the ``na_position`` parameter. (:issue:`3917`) - accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`), this was a regression from 0.13.1 - Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) - ``describe`` on a DataFrame with a mix of Timestamp and string like objects returns a different Index (:issue:`7088`). Previously the index was unintentionally sorted. -- arithmetic operations with **only** ``bool`` dtypes warn for ``+``, ``-``, +- Arithmetic operations with **only** ``bool`` dtypes now give a warning indicating + that they are evaluated in Python space for ``+``, ``-``, and ``*`` operations and raise for all others (:issue:`7011`, :issue:`6762`, :issue:`7015`, :issue:`7210`) @@ -199,6 +206,26 @@ API changes NotImplementedError: operator '/' not implemented for bool dtypes +- In ``HDFStore``, ``select_as_multiple`` will always raise a ``KeyError``, when a key or the selector is not found (:issue:`6177`) +- ``df['col'] = value`` and ``df.loc[:,'col'] = value`` are now completely equivalent; + previously the ``.loc`` would not necessarily coerce the dtype of the resultant series (:issue:`6149`) +- ``dtypes`` and ``ftypes`` now return a series with ``dtype=object`` on empty containers (:issue:`5740`) +- ``df.to_csv`` will now return a string of the CSV data if neither a target path nor a buffer is provided + (:issue:`6061`) +- ``pd.infer_freq()`` will now raise a ``TypeError`` if 
given an invalid ``Series/Index`` + type (:issue:`6407`, :issue:`6463`) +- A tuple passed to ``DataFrame.sort_index`` will be interpreted as the levels of + the index, rather than requiring a list of tuples (:issue:`4370`) +- all offset operations now return ``Timestamp`` types (rather than datetime), Business/Week frequencies were incorrect (:issue:`4069`) +- ``to_excel`` now converts ``np.inf`` into a string representation, + customizable by the ``inf_rep`` keyword argument (Excel has no native inf + representation) (:issue:`6782`) +- Replace ``pandas.compat.scipy.scoreatpercentile`` with ``numpy.percentile`` (:issue:`6810`) +- ``.quantile`` on a ``datetime[ns]`` series now returns ``Timestamp`` instead + of ``np.datetime64`` objects (:issue:`6810`) +- change ``AssertionError`` to ``TypeError`` for invalid types passed to ``concat`` (:issue:`6583`) +- Raise a ``TypeError`` when ``DataFrame`` is passed an iterator as the + ``data`` argument (:issue:`5357`) .. _whatsnew_0140.display: @@ -253,6 +280,7 @@ Display Changes ``display.max_info_columns``. The global setting can be overriden with ``verbose=True`` or ``verbose=False``. - Fixed a bug with the `info` repr not honoring the `display.max_info_columns` setting (:issue:`6939`) +- Offset/freq info now in Timestamp __repr__ (:issue:`4553`) .. _whatsnew_0140.parsing: @@ -270,6 +298,9 @@ Text Parsing API Changes ignored (:issue:`6607`) - Produce :class:`~pandas.io.parsers.ParserWarning` on fallback to python parser when no options are ignored (:issue:`6607`) +- Translate ``sep='\s+'`` to ``delim_whitespace=True`` in + :func:`read_csv`/:func:`read_table` if no other C-unsupported options + specified (:issue:`6607`) .. _whatsnew_0140.groupby: @@ -341,6 +372,18 @@ More consistent behaviour for some groupby methods: - Allow specification of a more complex groupby via ``pd.Grouper``, such as grouping by a Time and a string field simultaneously. See :ref:`the docs <groupby.specify>`. 
(:issue:`3794`) +- Better propagation/preservation of Series names when performing groupby + operations: + + - ``SeriesGroupBy.agg`` will ensure that the name attribute of the original + series is propagated to the result (:issue:`6265`). + - If the function provided to ``GroupBy.apply`` returns a named series, the + name of the series will be kept as the name of the column index of the + DataFrame returned by ``GroupBy.apply`` (:issue:`6124`). This facilitates + ``DataFrame.stack`` operations where the name of the column index is used as + the name of the inserted column containing the pivoted data. + + .. _whatsnew_0140.sql: SQL @@ -529,12 +572,18 @@ Plotting - ``DataFrame.plot`` and ``Series.plot`` now supports area plot with specifying ``kind='area'`` (:issue:`6656`), See :ref:`the docs<visualization.area_plot>` - Pie plots from ``Series.plot`` and ``DataFrame.plot`` with ``kind='pie'`` (:issue:`6976`), See :ref:`the docs<visualization.pie>`. - Plotting with Error Bars is now supported in the ``.plot`` method of ``DataFrame`` and ``Series`` objects (:issue:`3796`, :issue:`6834`), See :ref:`the docs<visualization.errorbars>`. -- ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``, See :ref:`the docs<visualization.table>`. +- ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``, See :ref:`the docs<visualization.table>`. The ``table`` keyword can receive the following values. + + - ``False``: Do nothing (default). + - ``True``: Draw a table using the ``DataFrame`` or ``Series`` called ``plot`` method. Data will be transposed to meet matplotlib's default layout. + - ``DataFrame`` or ``Series``: Draw matplotlib.table using the passed data. The data will be drawn as displayed in print method (not transposed automatically). 
+ Also, helper function ``pandas.tools.plotting.table`` is added to create a table from ``DataFrame`` and ``Series``, and add it to an ``matplotlib.Axes``. + - ``plot(legend='reverse')`` will now reverse the order of legend labels for most plot kinds. (:issue:`6014`) - Line plot and area plot can be stacked by ``stacked=True`` (:issue:`6656`) -- Following keywords are now acceptable for :meth:`DataFrame.plot(kind='bar')` and :meth:`DataFrame.plot(kind='barh')`. +- Following keywords are now acceptable for :meth:`DataFrame.plot` with ``kind='bar'`` and ``kind='barh'``: - `width`: Specify the bar width. In previous versions, static value 0.5 was passed to matplotlib and it cannot be overwritten. (:issue:`6604`) - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) @@ -641,10 +690,18 @@ Deprecations returned if possible, otherwise a copy will be made. Previously the user could think that ``copy=False`` would ALWAYS return a view. (:issue:`6894`) +- The :func:`parallel_coordinates` function now takes argument ``color`` + instead of ``colors``. A ``FutureWarning`` is raised to alert that + the old ``colors`` argument will not be supported in a future release. (:issue:`6956`) + +- The :func:`parallel_coordinates` and :func:`andrews_curves` functions now take + positional argument ``frame`` instead of ``data``. A ``FutureWarning`` is + raised if the old ``data`` argument is used by name. (:issue:`6956`) + - The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). - - The following ``io.sql`` functions have been deprecated: ``tquery``, ``uquery``, ``read_frame``, ``frame_query``, ``write_frame``. 
+- The following ``io.sql`` functions have been deprecated: ``tquery``, ``uquery``, ``read_frame``, ``frame_query``, ``write_frame``. - The `percentile_width` keyword argument in :meth:`~DataFrame.describe` has been deprecated. Use the `percentiles` keyword instead, which takes a list of percentiles to display. The @@ -679,7 +736,9 @@ Enhancements ('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8}, ('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}}) +- Added the ``sym_diff`` method to ``Index`` (:issue:`5543`) - ``DataFrame.to_latex`` now takes a longtable keyword, which if True will return a table in a longtable environment. (:issue:`6617`) +- Add option to turn off escaping in ``DataFrame.to_latex`` (:issue:`6472`) - ``pd.read_clipboard`` will, if the keyword ``sep`` is unspecified, try to detect data copied from a spreadsheet and parse accordingly. (:issue:`6223`) - Joining a singly-indexed DataFrame with a multi-indexed DataFrame (:issue:`3662`) @@ -710,8 +769,10 @@ Enhancements using ``DataFrame.to_csv`` (:issue:`5414`, :issue:`4528`) - Partially sort by only the specified levels of a MultiIndex with the ``sort_remaining`` boolean kwarg. (:issue:`3984`) -- Added a ``to_julian_date`` function to ``TimeStamp`` and ``DatetimeIndex`` - to convert to the Julian Date used primarily in astronomy. (:issue:`4041`) +- Added ``to_julian_date`` to ``Timestamp`` and ``DatetimeIndex``. The Julian + Date is used primarily in astronomy and represents the number of days from + noon, January 1, 4713 BC. Because nanoseconds are used to define the time + in pandas the actual range of dates that you can use is 1678 AD to 2262 AD. (:issue:`4041`) - ``DataFrame.to_stata`` will now check data for compatibility with Stata data types and will upcast when needed. 
When it is not possible to losslessly upcast, a warning is issued (:issue:`6327`) @@ -750,7 +811,7 @@ Enhancements columns=Grouper(freq='M', key='PayDay'), values='Quantity', aggfunc=np.sum) -- str.wrap implemented (:issue:`6999`) +- Arrays of strings can be wrapped to a specified width (``str.wrap``) (:issue:`6999`) - Add :meth:`~Series.nsmallest` and :meth:`Series.nlargest` methods to Series, See :ref:`the docs <basics.nsorted>` (:issue:`3960`) - `PeriodIndex` fully supports partial string indexing like `DatetimeIndex` (:issue:`7043`) @@ -762,15 +823,36 @@ Enhancements ps ps['2013-01-02'] +- ``read_excel`` can now read milliseconds in Excel dates and times with xlrd >= 0.9.3. (:issue:`5945`) +- ``pd.stats.moments.rolling_var`` now uses Welford's method for increased numerical stability (:issue:`6817`) +- pd.expanding_apply and pd.rolling_apply now take args and kwargs that are passed on to + the func (:issue:`6289`) +- ``DataFrame.rank()`` now has a percentage rank option (:issue:`5971`) +- ``Series.rank()`` now has a percentage rank option (:issue:`5971`) +- ``Series.rank()`` and ``DataFrame.rank()`` now accept ``method='dense'`` for ranks without gaps (:issue:`6514`) +- Support passing ``encoding`` with xlwt (:issue:`3710`) +- Refactor Block classes removing `Block.items` attributes to avoid duplication + in item handling (:issue:`6745`, :issue:`6988`). +- Testing statements updated to use specialized asserts (:issue:`6175`) + + + .. _whatsnew_0140.performance: Performance ~~~~~~~~~~~ +- Performance improvement when converting ``DatetimeIndex`` to floating ordinals + using ``DatetimeConverter`` (:issue:`6636`) +- Performance improvement for ``DataFrame.shift`` (:issue:`5609`) +- Performance improvement in indexing into a multi-indexed Series (:issue:`5567`) +- Performance improvements in single-dtyped indexing (:issue:`6484`) - Improve performance of DataFrame construction with certain offsets, by removing faulty caching (e.g. 
MonthEnd,BusinessMonthEnd), (:issue:`6479`) - Improve performance of ``CustomBusinessDay`` (:issue:`6584`) - improve performance of slice indexing on Series with string keys (:issue:`6341`, :issue:`6372`) +- Performance improvement for ``DataFrame.from_records`` when reading a + specified number of rows from an iterable (:issue:`6700`) - Performance improvements in timedelta conversions for integer dtypes (:issue:`6754`) - Improved performance of compatible pickles (:issue:`6899`) - Improve performance in certain reindexing operations by optimizing ``take_2d`` (:issue:`6749`) @@ -782,11 +864,179 @@ Experimental There are no experimental changes in 0.14.0 + +.. _whatsnew_0140.bug_fixes: + Bug Fixes ~~~~~~~~~ -See :ref:`V0.14.0 Bug Fixes<release.bug_fixes-0.14.0>` for an extensive list of bugs that have been fixed in 0.14.0. - -See the :ref:`full release notes -<release>` or issue tracker -on GitHub for a complete list of all API changes, Enhancements and Bug Fixes. +- Bug in Series ValueError when index doesn't match data (:issue:`6532`) +- Prevent segfault due to MultiIndex not being supported in HDFStore table + format (:issue:`1848`) +- Bug in ``pd.DataFrame.sort_index`` where mergesort wasn't stable when ``ascending=False`` (:issue:`6399`) +- Bug in ``pd.tseries.frequencies.to_offset`` when argument has leading zeroes (:issue:`6391`) +- Bug in version string gen. 
for dev versions with shallow clones / install from tarball (:issue:`6127`) +- Inconsistent tz parsing ``Timestamp`` / ``to_datetime`` for current year (:issue:`5958`) +- Indexing bugs with reordered indexes (:issue:`6252`, :issue:`6254`) +- Bug in ``.xs`` with a Series multiindex (:issue:`6258`, :issue:`5684`) +- Bug in conversion of a string types to a DatetimeIndex with a specified frequency (:issue:`6273`, :issue:`6274`) +- Bug in ``eval`` where type-promotion failed for large expressions (:issue:`6205`) +- Bug in interpolate with ``inplace=True`` (:issue:`6281`) +- ``HDFStore.remove`` now handles start and stop (:issue:`6177`) +- ``HDFStore.select_as_multiple`` handles start and stop the same way as ``select`` (:issue:`6177`) +- ``HDFStore.select_as_coordinates`` and ``select_column`` works with a ``where`` clause that results in filters (:issue:`6177`) +- Regression in join of non_unique_indexes (:issue:`6329`) +- Issue with groupby ``agg`` with a single function and a a mixed-type frame (:issue:`6337`) +- Bug in ``DataFrame.replace()`` when passing a non- ``bool`` + ``to_replace`` argument (:issue:`6332`) +- Raise when trying to align on different levels of a multi-index assignment (:issue:`3738`) +- Bug in setting complex dtypes via boolean indexing (:issue:`6345`) +- Bug in TimeGrouper/resample when presented with a non-monotonic DatetimeIndex that would return invalid results. (:issue:`4161`) +- Bug in index name propogation in TimeGrouper/resample (:issue:`4161`) +- TimeGrouper has a more compatible API to the rest of the groupers (e.g. 
``groups`` was missing) (:issue:`3881`) +- Bug in multiple grouping with a TimeGrouper depending on target column order (:issue:`6764`) +- Bug in ``pd.eval`` when parsing strings with possible tokens like ``'&'`` + (:issue:`6351`) +- Bug correctly handle placements of ``-inf`` in Panels when dividing by integer 0 (:issue:`6178`) +- ``DataFrame.shift`` with ``axis=1`` was raising (:issue:`6371`) +- Disabled clipboard tests until release time (run locally with ``nosetests -A disabled``) (:issue:`6048`). +- Bug in ``DataFrame.replace()`` when passing a nested ``dict`` that contained + keys not in the values to be replaced (:issue:`6342`) +- ``str.match`` ignored the na flag (:issue:`6609`). +- Bug in take with duplicate columns that were not consolidated (:issue:`6240`) +- Bug in interpolate changing dtypes (:issue:`6290`) +- Bug in ``Series.get``, was using a buggy access method (:issue:`6383`) +- Bug in hdfstore queries of the form ``where=[('date', '>=', datetime(2013,1,1)), ('date', '<=', datetime(2014,1,1))]`` (:issue:`6313`) +- Bug in ``DataFrame.dropna`` with duplicate indices (:issue:`6355`) +- Regression in chained getitem indexing with embedded list-like from 0.12 (:issue:`6394`) +- ``Float64Index`` with nans not comparing correctly (:issue:`6401`) +- ``eval``/``query`` expressions with strings containing the ``@`` character + will now work (:issue:`6366`). +- Bug in ``Series.reindex`` when specifying a ``method`` with some nan values was inconsistent (noted on a resample) (:issue:`6418`) +- Bug in :meth:`DataFrame.replace` where nested dicts were erroneously + depending on the order of dictionary keys and values (:issue:`5338`). 
+- Perf issue in concatting with empty objects (:issue:`3259`) +- Clarify sorting of ``sym_diff`` on ``Index`` objects with ``NaN`` values (:issue:`6444`) +- Regression in ``MultiIndex.from_product`` with a ``DatetimeIndex`` as input (:issue:`6439`) +- Bug in ``str.extract`` when passed a non-default index (:issue:`6348`) +- Bug in ``str.split`` when passed ``pat=None`` and ``n=1`` (:issue:`6466`) +- Bug in ``io.data.DataReader`` when passed ``"F-F_Momentum_Factor"`` and ``data_source="famafrench"`` (:issue:`6460`) +- Bug in ``sum`` of a ``timedelta64[ns]`` series (:issue:`6462`) +- Bug in ``resample`` with a timezone and certain offsets (:issue:`6397`) +- Bug in ``iat/iloc`` with duplicate indices on a Series (:issue:`6493`) +- Bug in ``read_html`` where nan's were incorrectly being used to indicate + missing values in text. Should use the empty string for consistency with the + rest of pandas (:issue:`5129`). +- Bug in ``read_html`` tests where redirected invalid URLs would make one test + fail (:issue:`6445`). +- Bug in multi-axis indexing using ``.loc`` on non-unique indices (:issue:`6504`) +- Bug that caused _ref_locs corruption when slice indexing across columns axis of a DataFrame (:issue:`6525`) +- Regression from 0.13 in the treatment of numpy ``datetime64`` non-ns dtypes in Series creation (:issue:`6529`) +- ``.names`` attribute of MultiIndexes passed to ``set_index`` are now preserved (:issue:`6459`). 
+- Bug in setitem with a duplicate index and an alignable rhs (:issue:`6541`) +- Bug in setitem with ``.loc`` on mixed integer Indexes (:issue:`6546`) +- Bug in ``pd.read_stata`` which would use the wrong data types and missing values (:issue:`6327`) +- Bug in ``DataFrame.to_stata`` that lead to data loss in certain cases, and could be exported using the + wrong data types and missing values (:issue:`6335`) +- ``StataWriter`` replaces missing values in string columns by empty string (:issue:`6802`) +- Inconsistent types in ``Timestamp`` addition/subtraction (:issue:`6543`) +- Bug in preserving frequency across Timestamp addition/subtraction (:issue:`4547`) +- Bug in empty list lookup caused ``IndexError`` exceptions (:issue:`6536`, :issue:`6551`) +- ``Series.quantile`` raising on an ``object`` dtype (:issue:`6555`) +- Bug in ``.xs`` with a ``nan`` in level when dropped (:issue:`6574`) +- Bug in fillna with ``method='bfill/ffill'`` and ``datetime64[ns]`` dtype (:issue:`6587`) +- Bug in sql writing with mixed dtypes possibly leading to data loss (:issue:`6509`) +- Bug in ``Series.pop`` (:issue:`6600`) +- Bug in ``iloc`` indexing when positional indexer matched ``Int64Index`` of the corresponding axis and no reordering happened (:issue:`6612`) +- Bug in ``fillna`` with ``limit`` and ``value`` specified +- Bug in ``DataFrame.to_stata`` when columns have non-string names (:issue:`4558`) +- Bug in compat with ``np.compress``, surfaced in (:issue:`6658`) +- Bug in binary operations with a rhs of a Series not aligning (:issue:`6681`) +- Bug in ``DataFrame.to_stata`` which incorrectly handles nan values and ignores ``with_index`` keyword argument (:issue:`6685`) +- Bug in resample with extra bins when using an evenly divisible frequency (:issue:`4076`) +- Bug in consistency of groupby aggregation when passing a custom function (:issue:`6715`) +- Bug in resample when ``how=None`` resample freq is the same as the axis frequency (:issue:`5955`) +- Bug in downcasting inference 
with empty arrays (:issue:`6733`) +- Bug in ``obj.blocks`` on sparse containers dropping all but the last items of same for dtype (:issue:`6748`) +- Bug in unpickling ``NaT (NaTType)`` (:issue:`4606`) +- Bug in ``DataFrame.replace()`` where regex metacharacters were being treated + as regexs even when ``regex=False`` (:issue:`6777`). +- Bug in timedelta ops on 32-bit platforms (:issue:`6808`) +- Bug in setting a tz-aware index directly via ``.index`` (:issue:`6785`) +- Bug in expressions.py where numexpr would try to evaluate arithmetic ops + (:issue:`6762`). +- Bug in Makefile where it didn't remove Cython generated C files with ``make + clean`` (:issue:`6768`) +- Bug with numpy < 1.7.2 when reading long strings from ``HDFStore`` (:issue:`6166`) +- Bug in ``DataFrame._reduce`` where non bool-like (0/1) integers were being + coverted into bools. (:issue:`6806`) +- Regression from 0.13 with ``fillna`` and a Series on datetime-like (:issue:`6344`) +- Bug in adding ``np.timedelta64`` to ``DatetimeIndex`` with timezone outputs incorrect results (:issue:`6818`) +- Bug in ``DataFrame.replace()`` where changing a dtype through replacement + would only replace the first occurrence of a value (:issue:`6689`) +- Better error message when passing a frequency of 'MS' in ``Period`` construction (GH5332) +- Bug in ``Series.__unicode__`` when ``max_rows=None`` and the Series has more than 1000 rows. 
(:issue:`6863`) +- Bug in ``groupby.get_group`` where a datetlike wasn't always accepted (:issue:`5267`) +- Bug in ``groupBy.get_group`` created by ``TimeGrouper`` raises ``AttributeError`` (:issue:`6914`) +- Bug in ``DatetimeIndex.tz_localize`` and ``DatetimeIndex.tz_convert`` converting ``NaT`` incorrectly (:issue:`5546`) +- Bug in arithmetic operations affecting ``NaT`` (:issue:`6873`) +- Bug in ``Series.str.extract`` where the resulting ``Series`` from a single + group match wasn't renamed to the group name +- Bug in ``DataFrame.to_csv`` where setting ``index=False`` ignored the + ``header`` kwarg (:issue:`6186`) +- Bug in ``DataFrame.plot`` and ``Series.plot``, where the legend behave inconsistently when plotting to the same axes repeatedly (:issue:`6678`) +- Internal tests for patching ``__finalize__`` / bug in merge not finalizing (:issue:`6923`, :issue:`6927`) +- accept ``TextFileReader`` in ``concat``, which was affecting a common user idiom (:issue:`6583`) +- Bug in C parser with leading whitespace (:issue:`3374`) +- Bug in C parser with ``delim_whitespace=True`` and ``\r``-delimited lines +- Bug in python parser with explicit multi-index in row following column header (:issue:`6893`) +- Bug in ``Series.rank`` and ``DataFrame.rank`` that caused small floats (<1e-13) to all receive the same rank (:issue:`6886`) +- Bug in ``DataFrame.apply`` with functions that used \*args`` or \*\*kwargs and returned + an empty result (:issue:`6952`) +- Bug in sum/mean on 32-bit platforms on overflows (:issue:`6915`) +- Moved ``Panel.shift`` to ``NDFrame.slice_shift`` and fixed to respect multiple dtypes. 
(:issue:`6959`) +- Bug in enabling ``subplots=True`` in ``DataFrame.plot`` only has single column raises ``TypeError``, and ``Series.plot`` raises ``AttributeError`` (:issue:`6951`) +- Bug in ``DataFrame.plot`` draws unnecessary axes when enabling ``subplots`` and ``kind=scatter`` (:issue:`6951`) +- Bug in ``read_csv`` from a filesystem with non-utf-8 encoding (:issue:`6807`) +- Bug in ``iloc`` when setting / aligning (:issue:`6766`) +- Bug causing UnicodeEncodeError when get_dummies called with unicode values and a prefix (:issue:`6885`) +- Bug in timeseries-with-frequency plot cursor display (:issue:`5453`) +- Bug surfaced in ``groupby.plot`` when using a ``Float64Index`` (:issue:`7025`) +- Stopped tests from failing if options data isn't able to be downloaded from Yahoo (:issue:`7034`) +- Bug in ``parallel_coordinates`` and ``radviz`` where reordering of class column + caused possible color/class mismatch (:issue:`6956`) +- Bug in ``radviz`` and ``andrews_curves`` where multiple values of 'color' + were being passed to plotting method (:issue:`6956`) +- Bug in ``Float64Index.isin()`` where containing ``nan`` s would make indices + claim that they contained all the things (:issue:`7066`). +- Bug in ``DataFrame.boxplot`` where it failed to use the axis passed as the ``ax`` argument (:issue:`3578`) +- Bug in the ``XlsxWriter`` and ``XlwtWriter`` implementations that resulted in datetime columns being formatted without the time (:issue:`7075`) + were being passed to plotting method +- :func:`read_fwf` treats ``None`` in ``colspec`` like regular python slices. 
It now reads from the beginning + or until the end of the line when ``colspec`` contains a ``None`` (previously raised a ``TypeError``) +- Bug in cache coherence with chained indexing and slicing; add ``_is_view`` property to ``NDFrame`` to correctly predict + views; mark ``is_copy`` on ``xs`` only if its an actual copy (and not a view) (:issue:`7084`) +- Bug in DatetimeIndex creation from string ndarray with ``dayfirst=True`` (:issue:`5917`) +- Bug in ``MultiIndex.from_arrays`` created from ``DatetimeIndex`` doesn't preserve ``freq`` and ``tz`` (:issue:`7090`) +- Bug in ``unstack`` raises ``ValueError`` when ``MultiIndex`` contains ``PeriodIndex`` (:issue:`4342`) +- Bug in ``boxplot`` and ``hist`` draws unnecessary axes (:issue:`6769`) +- Regression in ``groupby.nth()`` for out-of-bounds indexers (:issue:`6621`) +- Bug in ``quantile`` with datetime values (:issue:`6965`) +- Bug in ``Dataframe.set_index``, ``reindex`` and ``pivot`` don't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`3950`, :issue:`5878`, :issue:`6631`) +- Bug in ``MultiIndex.get_level_values`` doesn't preserve ``DatetimeIndex`` and ``PeriodIndex`` attributes (:issue:`7092`) +- Bug in ``Groupby`` doesn't preserve ``tz`` (:issue:`3950`) +- Bug in ``PeriodIndex`` partial string slicing (:issue:`6716`) +- Bug in the HTML repr of a truncated Series or DataFrame not showing the class name with the `large_repr` set to 'info' + (:issue:`7105`) +- Bug in ``DatetimeIndex`` specifying ``freq`` raises ``ValueError`` when passed value is too short (:issue:`7098`) +- Fixed a bug with the `info` repr not honoring the `display.max_info_columns` setting (:issue:`6939`) +- Bug ``PeriodIndex`` string slicing with out of bounds values (:issue:`5407`) +- Fixed a memory error in the hashtable implementation/factorizer on resizing of large tables (:issue:`7157`) +- Bug in ``isnull`` when applied to 0-dimensional object arrays (:issue:`7176`) +- Bug in ``query``/``eval`` where global constants were 
not looked up correctly + (:issue:`7178`) +- Bug in recognizing out-of-bounds positional list indexers with ``iloc`` and a multi-axis tuple indexer (:issue:`7189`) +- Bug in setitem with a single value, multi-index and integer indices (:issue:`7190`, :issue:`7218`) +- Bug in expressions evaluation with reversed ops, showing in series-dataframe ops (:issue:`7198`, :issue:`7192`) +- Bug in multi-axis indexing with > 2 ndim and a multi-index (:issue:`7199`) +- Fix a bug where invalid eval/query operations would blow the stack (:issue:`5198`)
Related to discussion on mailing list https://mail.python.org/pipermail/pandas-dev/2014-May/000272.html So I gave it a try, moved all content of release.rst to the whatsnew file (main chunk were the bug fixes, and for the rest some api changes and enhancements that were not listed in whatsnew). All content that was in release is not in whatsnew. Can you see what you think of this?
https://api.github.com/repos/pandas-dev/pandas/pulls/7275
2014-05-29T12:44:26Z
2014-05-30T10:38:22Z
2014-05-30T10:38:22Z
2014-06-26T09:08:58Z
ENH add sample #2419
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index a97e29add71be..273ada4bc867b 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -50,6 +50,8 @@ Known Issues Enhancements ~~~~~~~~~~~~ +- Add a sample method to NDFrame (:issue:`2419`) + .. _whatsnew_0141.performance: Performance diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ed0d92683ad54..a543eb4c7eca2 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1241,6 +1241,45 @@ def take(self, indices, axis=0, convert=True, is_copy=True): return result + def sample(self, size, replace=True): + """Take a sample from the object, analogue of numpy.random.choice + + Parameters + ---------- + size : int, size of sample to take + replace : bool, default True, whether to sample with replacements + + Returns + ------- + type of caller + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4, 5]) + >>> s.sample(3, replace=False) + 2 3 + 0 1 + 3 4 + dtype: int64 + >>> s.sample(3, replace=True) + 1 2 + 3 4 + 1 2 + dtype: int64 + + Note + ---- + If you are sampling without replacement over a larger sample size than + the object you're sampling a ValueError will be raised. + + """ + try: + from numpy.random import choice + except ImportError: + from pandas.stats.misc import choice + msk = choice(len(self), size, replace=replace) + return self.iloc[msk] + def xs(self, key, axis=0, level=None, copy=None, drop_level=True): """ Returns a cross-section (row(s) or column(s)) from the Series/DataFrame. diff --git a/pandas/stats/misc.py b/pandas/stats/misc.py index c79bae34f20c4..991b4cd4b9d95 100644 --- a/pandas/stats/misc.py +++ b/pandas/stats/misc.py @@ -297,3 +297,20 @@ def _bucket_labels(series, k): mat[v] = i return mat + 1 + + +def choice(arr, size, replace): + """Partial implementation of numpy.random.choice which is new to 1.7 + + Note: unlike numpy's version size must be a scalar. 
+ """ + if replace: + pos = (np.random.sample(size) * len(arr)).astype('int64') + return arr[pos] + else: + if size > len(arr): + raise ValueError("Cannot take a larger sample than " + "population when 'replace=False'") + shuffle = np.arange(len(arr)) + np.random.shuffle(shuffle) + return arr[shuffle[:size]] diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4e5b00a6db765..10446e880a1af 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -8441,6 +8441,12 @@ def test_truncate_copy(self): truncated.values[:] = 5. self.assertFalse((self.tsframe.values[5:11] == 5).any()) + def test_sample(self): + df = DataFrame([[1, 2], [2, 3]], columns=['A', 'B']) + res = df.sample(5) + self.assertEqual(len(res), 5) + assert(res.index.isin(df.index).all()) + def test_xs(self): idx = self.frame.index[5] xs = self.frame.xs(idx) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 2ed24832c3270..65eacf17b539c 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -547,6 +547,12 @@ def test_xs(self): result = self.panel.xs('D', axis=2) self.assertIsNotNone(result.is_copy) + def test_sample(self): + p = self.panel + res = p.sample(5) + self.assertEqual(len(res), 5) + assert(res.major_axis.isin(p.major_axis).all()) + def test_getitem_fancy_labels(self): p = self.panel diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6c732fe352d6a..888850b516768 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1608,6 +1608,12 @@ def test_mask(self): rs = s.where(cond, np.nan) assert_series_equal(rs, s.mask(~cond)) + def test_sample(self): + s = Series([1, 2, 2, 3]) + res = s.sample(5) + self.assertEqual(len(res), 5) + assert(res.index.isin(s.index).all()) + def test_drop(self): # unique
fixes #2419 Hmmm `np.random.choice` not available on numpy < 1.7.
https://api.github.com/repos/pandas-dev/pandas/pulls/7274
2014-05-29T07:52:03Z
2015-01-18T21:36:35Z
null
2015-01-21T02:22:04Z
FIX resample with fill_method and how #2073
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index a97e29add71be..73d1167001063 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -66,6 +66,7 @@ Bug Fixes ~~~~~~~~~ - Bug in ``Index.min`` and ``max`` doesn't handle ``nan`` and ``NaT`` properly (:issue:`7261`) +- Bug in ``resample`` where ``fill_method`` was ignored if you passed ``how`` (:issue:`7261`) - Bug in ``TimeGrouper`` doesn't exclude column specified by ``key`` (:issue:`7227`) - Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` and ``left`` keyword is specified (:issue:`7226`) diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index dd72a5245e7b2..812dd5aba71e0 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -252,6 +252,11 @@ def _resample_timestamps(self): # downsample grouped = obj.groupby(grouper, axis=self.axis) result = grouped.aggregate(self._agg_method) + # GH2073 + if self.fill_method is not None: + result = result.fillna(method=self.fill_method, + limit=self.limit) + else: # upsampling shortcut if self.axis: diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 45d17052d904b..88bacb3d7b8ab 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -869,6 +869,14 @@ def test_monthly_upsample(self): expected = expected.asfreq(targ, 'ffill').to_period() assert_series_equal(result, expected) + def test_fill_method_and_how_upsample(self): + # GH2073 + s = Series(range(9), + index=date_range('2010-01-01', periods=9, freq='Q')) + last = s.resample('M', fill_method='ffill') + both = s.resample('M', how='last', fill_method='ffill').astype('int64') + assert_series_equal(last, both) + def test_weekly_upsample(self): targets = ['D', 'B']
fixes #2073
https://api.github.com/repos/pandas-dev/pandas/pulls/7273
2014-05-29T05:08:19Z
2014-06-04T16:46:44Z
null
2014-07-02T11:24:26Z
BF: reading stata files - unpack read value describing stored byte order
diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 187a5f5d55533..b67a1be8d43d6 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -561,7 +561,7 @@ def _read_header(self): raise ValueError("Version of given Stata file is not 104, " "105, 108, 113 (Stata 8/9), 114 (Stata " "10/11), 115 (Stata 12) or 117 (Stata 13)") - self.byteorder = self.path_or_buf.read(1) == 0x1 and '>' or '<' + self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[0] == 0x1 and '>' or '<' self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0] self.path_or_buf.read(1) # unused diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index a83f8b3a9521f..b4be08c6b1106 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -19,9 +19,6 @@ from pandas.util.misc import is_little_endian from pandas import compat -if not is_little_endian(): - raise nose.SkipTest("known failure of test_stata on non-little endian") - class TestStata(tm.TestCase): def setUp(self):
Otherwise comparison would always result in "low endian" . Seems to resolve all the gory stata files reading tests on sparc at least closes #5781
https://api.github.com/repos/pandas-dev/pandas/pulls/7272
2014-05-29T03:20:08Z
2014-05-29T11:10:05Z
2014-05-29T11:10:05Z
2014-07-01T00:07:43Z
ENH: check for __array__ instead of ndarray subclasses when creating an Index
diff --git a/pandas/core/common.py b/pandas/core/common.py index 00fa970c0f77a..afa376a14d4da 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2038,7 +2038,8 @@ def intersection(*seqs): def _asarray_tuplesafe(values, dtype=None): from pandas.core.index import Index - if not isinstance(values, (list, tuple, np.ndarray)): + if not (isinstance(values, (list, tuple)) + or hasattr(values, '__array__')): values = list(values) elif isinstance(values, Index): return values.values diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 105fdbb32ab22..2d3effc251e0b 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -173,6 +173,18 @@ def test_constructor_from_series(self): result = pd.infer_freq(df['date']) self.assertEqual(result,'MS') + def test_constructor_ndarray_like(self): + # GH 5460#issuecomment-44474502 + # it should be possible to convert any object that satisfies the numpy + # ndarray interface directly into an Index + class ArrayLike(object): + def __array__(self, dtype=None): + return np.arange(5) + + expected = pd.Index(np.arange(5)) + result = pd.Index(ArrayLike()) + self.assertTrue(result.equals(expected)) + def test_index_ctor_infer_periodindex(self): from pandas import period_range, PeriodIndex xp = period_range('2012-1-1', freq='M', periods=3)
This allows custom ndarray-like objects which aren't actual ndarrays to be smoothly cast to a pandas.Index: https://github.com/pydata/pandas/issues/5460#issuecomment-44474502
https://api.github.com/repos/pandas-dev/pandas/pulls/7270
2014-05-29T00:00:06Z
2014-05-29T18:03:43Z
2014-05-29T18:03:43Z
2014-09-22T14:42:25Z
added functionality to allow negative loc values to work with Index.insert
diff --git a/pandas/core/index.py b/pandas/core/index.py index 3cbefbf141491..02d6e983f5183 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1774,7 +1774,8 @@ def delete(self, loc): def insert(self, loc, item): """ - Make new Index inserting new item at location + Make new Index inserting new item at location. Follows + Python list.append semantics for negative values Parameters ---------- diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 2d3effc251e0b..d840e04b8132c 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -242,6 +242,26 @@ def test_equals(self): # Must also be an Index self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c'])) + def test_insert(self): + result = Index(['b', 'c', 'd']) + + #test 0th element + self.assertTrue(Index(['a', 'b', 'c', 'd']).equals( + result.insert(0, 'a'))) + + #test Nth element that follows Python list behavior + self.assertTrue(Index(['b', 'c', 'e', 'd']).equals( + result.insert(-1, 'e'))) + + #test loc +/- neq (0, -1) + self.assertTrue(result.insert(1, 'z').equals( + result.insert(-2, 'z'))) + + #test empty + null_index = Index([]) + self.assertTrue(Index(['a']).equals( + null_index.insert(0, 'a'))) + def test_identical(self): # index
closes #7256
https://api.github.com/repos/pandas-dev/pandas/pulls/7268
2014-05-28T18:42:04Z
2014-05-30T23:32:19Z
null
2014-07-03T03:41:24Z
TST: disable stata tests on big-endian (GH5781) / fix datetime64[ns] comparison on big-endian (GH7265)
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 72bea8c458f9e..a83f8b3a9521f 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -19,9 +19,8 @@ from pandas.util.misc import is_little_endian from pandas import compat -def skip_if_not_little_endian(): - if not is_little_endian(): - raise nose.SkipTest("known failure of test on non-little endian") +if not is_little_endian(): + raise nose.SkipTest("known failure of test_stata on non-little endian") class TestStata(tm.TestCase): @@ -198,8 +197,6 @@ def test_read_dta4(self): tm.assert_frame_equal(parsed_117, expected) def test_read_write_dta5(self): - # skip_if_not_little_endian() - original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) @@ -212,8 +209,6 @@ def test_read_write_dta5(self): original) def test_write_dta6(self): - # skip_if_not_little_endian() - original = self.read_csv(self.csv3) original.index.name = 'index' original.index = original.index.astype(np.int32) @@ -245,8 +240,6 @@ def test_read_dta9(self): tm.assert_frame_equal(parsed, expected) def test_read_write_dta10(self): - # skip_if_not_little_endian() - original = DataFrame(data=[["string", "object", 1, 1.1, np.datetime64('2003-12-25')]], columns=['string', 'object', 'integer', 'floating', @@ -284,8 +277,6 @@ def test_encoding(self): self.assertIsInstance(result, unicode) def test_read_write_dta11(self): - # skip_if_not_little_endian() - original = DataFrame([(1, 2, 3, 4)], columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______']) formatted = DataFrame([(1, 2, 3, 4)], @@ -303,8 +294,6 @@ def test_read_write_dta11(self): tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted) def test_read_write_dta12(self): - # skip_if_not_little_endian() - original = DataFrame([(1, 2, 3, 4, 5, 6)], columns=['astringwithmorethan32characters_1', 
'astringwithmorethan32characters_2', diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 39d8eb8360244..927e096f8d769 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -246,7 +246,11 @@ def test_value_counts_unique_nunique(self): # Unable to assign None continue - values[0:2] = null_obj + # special assign to the numpy array + if o.values.dtype == 'datetime64[ns]': + values[0:2] = pd.tslib.iNaT + else: + values[0:2] = null_obj # create repeated values, 'n'th element is repeated by n+1 times if isinstance(o, PeriodIndex):
related #5781 closes #7265
https://api.github.com/repos/pandas-dev/pandas/pulls/7266
2014-05-28T18:18:23Z
2014-05-28T18:56:10Z
2014-05-28T18:56:10Z
2014-06-13T03:05:58Z
DOC/TST: small change to 10min.rst / fixes for (GH7252, GH7263)
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index cbd2f60ddbcd3..438f812039fa6 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -291,12 +291,10 @@ Using the :func:`~Series.isin` method for filtering: .. ipython:: python - df['E']=['one', 'one','two','three','four','three'] - df - good_numbers=['two','four'] - df[df['E'].isin(good_numbers)] - - df.drop('E', inplace=True, axis=1) + df2 = df.copy() + df2['E']=['one', 'one','two','three','four','three'] + df2 + df2[df2['E'].isin(['two','four']) Setting ~~~~~~~ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 75aaf68b4dd0a..187a5f5d55533 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -99,7 +99,7 @@ def _stata_elapsed_date_to_datetime(date, fmt): #TODO: IIRC relative delta doesn't play well with np.datetime? #TODO: When pandas supports more than datetime64[ns], this should be improved to use correct range, e.g. datetime[Y] for yearly if np.isnan(date): - return np.datetime64('nat') + return NaT date = int(date) stata_epoch = datetime.datetime(1960, 1, 1) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 6b235b0d6b665..0bdcff58a0b30 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2705,7 +2705,7 @@ def test_class_ops(self): import pytz def compare(x,y): - self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9)) + self.assertEqual(int(np.round(Timestamp(x).value/1e9)), int(np.round(Timestamp(y).value/1e9))) compare(Timestamp.now(),datetime.now()) compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
TST: fix stata issue with NaT (GH7252) closes #7252 closes #7263
https://api.github.com/repos/pandas-dev/pandas/pulls/7264
2014-05-28T15:50:21Z
2014-05-28T16:58:23Z
2014-05-28T16:58:23Z
2014-06-19T12:45:44Z
TST/BUG: Fix grouped_box_return_type
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index de726e670d958..1b9691257347b 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -2272,7 +2272,7 @@ def test_grouped_box_return_type(self): columns2 = 'X B C D A G Y N Q O'.split() df2 = DataFrame(random.randn(50, 10), columns=columns2) categories2 = 'A B C D E F G H I J'.split() - df2['category'] = tm.choice(categories2, size=50) + df2['category'] = categories2 * 5 types = {'dict': dict, 'axes': matplotlib.axes.Axes, 'both': tuple} for t, klass in iteritems(types):
Closes #7254.
https://api.github.com/repos/pandas-dev/pandas/pulls/7260
2014-05-28T13:30:58Z
2014-05-28T14:57:34Z
2014-05-28T14:57:34Z
2014-07-16T09:07:45Z
DOC add v0.14.1.txt release notes stub
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt new file mode 100644 index 0000000000000..9677abe2e6d2c --- /dev/null +++ b/doc/source/v0.14.1.txt @@ -0,0 +1,84 @@ +.. _whatsnew_0141: + +v0.14.1 (??, 2014) +----------------------- + +This is a minor release from 0.14.1 and includes a small number of API changes, several new +features, enhancements, and performance improvements along with a large number of bug fixes. +We recommend that all users upgrade to this version. + +- Highlights include: + + - ... + +- :ref:`Other Enhancements <whatsnew_0141.enhancements>` + +- :ref:`API Changes <whatsnew_0141.api>` + +- :ref:`Text Parsing API Changes <whatsnew_0141.parsing>` + +- :ref:`Groupby API Changes <whatsnew_0141.groupby>` + +- :ref:`Performance Improvements <whatsnew_0141.performance>` + +- :ref:`Prior Deprecations <whatsnew_0141.prior_deprecations>` + +- :ref:`Deprecations <whatsnew_0141.deprecations>` + +- :ref:`Known Issues <whatsnew_0141.knownissues>` + +- :ref:`Bug Fixes <release.bug_fixes-0.14.1>` + +.. _whatsnew_0141.api: + +API changes +~~~~~~~~~~~ + + +.. _whatsnew_0141.display: + +Display Changes +~~~~~~~~~~~~~~~ + +.. _whatsnew_0141.plotting: + +Plotting +~~~~~~~~ + +.. _whatsnew_0141.prior_deprecations: + +Prior Version Deprecations/Changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0141.deprecations: + +Deprecations +~~~~~~~~~~~~ + +.. _whatsnew_0141.knownissues: + +Known Issues +~~~~~~~~~~~~ + +.. _whatsnew_0141.enhancements: + +Enhancements +~~~~~~~~~~~~ +.. _whatsnew_0141.performance: + +Performance +~~~~~~~~~~~ + +Experimental +~~~~~~~~~~~~ + +There are no experimental changes in 0.14.1 + +Bug Fixes +~~~~~~~~~ + +See :ref:`V0.14.1 Bug Fixes<release.bug_fixes-0.14.1>` for an extensive list of bugs that have been fixed in 0.14.1. + +See the :ref:`full release notes +<release>` or issue tracker +on GitHub for a complete list of all API changes, Enhancements and Bug Fixes.
Just a stub for 0.14.1 (?) release notes. Not in index yet. This look ok @jreback ? cc #6318
https://api.github.com/repos/pandas-dev/pandas/pulls/7255
2014-05-28T05:30:28Z
2014-05-28T14:52:10Z
null
2014-07-19T06:21:48Z
COMPAT: remove compat.scipy
diff --git a/pandas/compat/scipy.py b/pandas/compat/scipy.py index 06da8799d0c96..612304f27b5ea 100644 --- a/pandas/compat/scipy.py +++ b/pandas/compat/scipy.py @@ -54,16 +54,20 @@ def fastsort(a): Parameters ---------- - a : array_like - Input array. + a : 1-d array_like object (i.e np.array or pd.Series) Returns ------- - fastsort : ndarray of type int - sorted indices into the original array + fastsort : returns a 2-tuple of array_like objects consisting of + 'a', sorted ascending, and an array_like object containing + indicies of 'a', sorted ascending on 'a'. + + Example + ------- + >>>fastsort(np.array([4,6,4,5])) + (array([4, 4, 5, 6]), array([0, 2, 3, 1], dtype=int32)) """ - # TODO: the wording in the docstring is nonsense. it = np.argsort(a) as_ = a[it] return as_, it
https://api.github.com/repos/pandas-dev/pandas/pulls/7253
2014-05-28T03:15:40Z
2014-06-01T02:53:00Z
null
2014-07-05T16:34:05Z
TST/CLN: remove prints (esp of unicode), replacing with com.pprint_thing (GH7247)
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index a413893309582..cf7c906a273b1 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -318,7 +318,6 @@ def tearDownClass(cls): @network def test_get_options_data_warning(self): with assert_produces_warning(): - print('month: {0}, year: {1}'.format(self.month, self.year)) try: self.aapl.get_options_data(month=self.month, year=self.year) except RemoteDataError as e: @@ -327,7 +326,6 @@ def test_get_options_data_warning(self): @network def test_get_near_stock_price_warning(self): with assert_produces_warning(): - print('month: {0}, year: {1}'.format(self.month, self.year)) try: calls_near, puts_near = self.aapl.get_near_stock_price(call=True, put=True, @@ -339,7 +337,6 @@ def test_get_near_stock_price_warning(self): @network def test_get_call_data_warning(self): with assert_produces_warning(): - print('month: {0}, year: {1}'.format(self.month, self.year)) try: self.aapl.get_call_data(month=self.month, year=self.year) except RemoteDataError as e: @@ -348,7 +345,6 @@ def test_get_call_data_warning(self): @network def test_get_put_data_warning(self): with assert_produces_warning(): - print('month: {0}, year: {1}'.format(self.month, self.year)) try: self.aapl.get_put_data(month=self.month, year=self.year) except RemoteDataError as e: diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 9a5b0d7fc60ca..77555ad81a45b 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1506,7 +1506,7 @@ def test_big_table_frame(self): recons = store.select('df') assert isinstance(recons, DataFrame) - print("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x)) + com.pprint_thing("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x)) def test_big_table2_frame(self): # this is a really big table: 1m rows x 60 float columns, 20 string, 20 datetime @@ -1514,7 +1514,7 @@ def test_big_table2_frame(self): raise 
nose.SkipTest('no big table2 frame') # create and write a big table - print("\nbig_table2 start") + com.pprint_thing("\nbig_table2 start") import time start_time = time.time() df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int( @@ -1524,7 +1524,7 @@ def test_big_table2_frame(self): for x in range(20): df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0) - print("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f" + com.pprint_thing("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)) def f(chunksize): @@ -1535,15 +1535,15 @@ def f(chunksize): for c in [10000, 50000, 250000]: start_time = time.time() - print("big_table2 frame [chunk->%s]" % c) + com.pprint_thing("big_table2 frame [chunk->%s]" % c) rows = f(c) - print("big_table2 frame [rows->%s,chunk->%s] -> %5.2f" - % (rows, c, time.time() - start_time)) + com.pprint_thing("big_table2 frame [rows->%s,chunk->%s] -> %5.2f" + % (rows, c, time.time() - start_time)) def test_big_put_frame(self): raise nose.SkipTest('no big put frame') - print("\nbig_put start") + com.pprint_thing("\nbig_put start") import time start_time = time.time() df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int( @@ -1553,7 +1553,7 @@ def test_big_put_frame(self): for x in range(20): df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0) - print("\nbig_put frame (creation of df) [rows->%s] -> %5.2f" + com.pprint_thing("\nbig_put frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)) with ensure_clean_store(self.path, mode='w') as store: @@ -1561,8 +1561,8 @@ def test_big_put_frame(self): store = HDFStore(self.path, mode='w') store.put('df', df) - print(df.get_dtype_counts()) - print("big_put frame [shape->%s] -> %5.2f" + com.pprint_thing(df.get_dtype_counts()) + com.pprint_thing("big_put frame [shape->%s] -> %5.2f" % (df.shape, time.time() - start_time)) def test_big_table_panel(self): @@ -1588,7 +1588,7 @@ def 
test_big_table_panel(self): recons = store.select('wp') assert isinstance(recons, Panel) - print("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x)) + com.pprint_thing("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x)) def test_append_diff_item_order(self): @@ -3538,9 +3538,9 @@ def test_string_select(self): expected = df[df.x != 'none'] assert_frame_equal(result,expected) except Exception as detail: - print("[{0}]".format(detail)) - print(store) - print(expected) + com.pprint_thing("[{0}]".format(detail)) + com.pprint_thing(store) + com.pprint_thing(expected) df2 = df.copy() df2.loc[df2.x=='','x'] = np.nan diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 09fc991dc1726..777acdf30f1a0 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -91,7 +91,7 @@ def run_arithmetic_test(self, df, other, assert_func, check_dtype=False, assert expected.dtype.kind == 'f' assert_func(expected, result) except Exception: - print("Failed test with operator %r" % op.__name__) + com.pprint_thing("Failed test with operator %r" % op.__name__) raise def test_integer_arithmetic(self): @@ -131,8 +131,8 @@ def run_binary_test(self, df, other, assert_func, assert not used_numexpr, "Used numexpr unexpectedly." 
assert_func(expected, result) except Exception: - print("Failed test with operation %r" % arith) - print("test_flex was %r" % test_flex) + com.pprint_thing("Failed test with operation %r" % arith) + com.pprint_thing("test_flex was %r" % test_flex) raise def run_frame(self, df, other, binary_comp=None, run_binary=True, diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index dddfcc976c4a6..8e405dc98f3da 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -18,6 +18,7 @@ import pandas.core.format as fmt import pandas.util.testing as tm +import pandas.core.common as com from pandas.util.terminal import get_terminal_size import pandas import pandas.tslib as tslib @@ -288,7 +289,7 @@ def mkframe(n): df = mkframe((term_width // 7) - 2) self.assertFalse(has_expanded_repr(df)) df = mkframe((term_width // 7) + 2) - print( df._repr_fits_horizontal_()) + com.pprint_thing(df._repr_fits_horizontal_()) self.assertTrue(has_expanded_repr(df)) def test_to_string_repr_unicode(self): @@ -411,8 +412,6 @@ def test_to_string_truncate_indices(self): self.assertFalse(has_vertically_truncated_repr(df)) with option_context("display.max_columns", 15): if w == 20: - print(df) - print(repr(df)) self.assertTrue(has_horizontally_truncated_repr(df)) else: self.assertFalse(has_horizontally_truncated_repr(df)) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7d040b2ede0f7..4e5b00a6db765 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4963,7 +4963,7 @@ def test_arith_flex_frame(self): assert_frame_equal(result, exp) _check_mixed_int(result, dtype = dtype) except: - print("Failing operation %r" % op) + com.pprint_thing("Failing operation %r" % op) raise # ndim >= 3 @@ -5792,7 +5792,6 @@ def make_dtnat_arr(n,nnat=None): base = int((chunksize// ncols or 1) or 1) for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2, base-1,base,base+1]: - #print( nrows,ncols) _do_test(mkdf(nrows, ncols),path) 
for nrows in [10,N-2,N-1,N,N+1,N+2]: @@ -5814,7 +5813,6 @@ def make_dtnat_arr(n,nnat=None): base = int(chunksize//ncols) for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2, base-1,base,base+1]: - #print(nrows, ncols) _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2) _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2) _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2), @@ -10952,14 +10950,14 @@ def test_mode(self): # outputs in sorted order df["C"] = list(reversed(df["C"])) - print(df["C"]) - print(df["C"].mode()) + com.pprint_thing(df["C"]) + com.pprint_thing(df["C"].mode()) a, b = (df[["A", "B", "C"]].mode(), pd.DataFrame({"A": [12, np.nan], "B": [10, np.nan], "C": [8, 9]})) - print(a) - print(b) + com.pprint_thing(a) + com.pprint_thing(b) assert_frame_equal(a, b) # should work with heterogeneous types df = pd.DataFrame({"A": range(6), @@ -12981,7 +12979,6 @@ def to_series(mi, level): if isinstance(v, Index): assert v.is_(expected[k]) elif isinstance(v, Series): - #print(k) tm.assert_series_equal(v, expected[k]) else: raise AssertionError("object must be a Series or Index") diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index d85e5facfad01..5f07acf25582f 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -163,7 +163,7 @@ def test_nonzero(self): def f(): if obj1: - print("this works and shouldn't") + com.pprint_thing("this works and shouldn't") self.assertRaises(ValueError, f) self.assertRaises(ValueError, lambda : obj1 and obj2) self.assertRaises(ValueError, lambda : obj1 or obj2) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 4310a5947036f..028334afbd62c 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -730,8 +730,8 @@ def test_agg_item_by_item_raise_typeerror(self): df = DataFrame(randint(10, size=(20, 10))) def raiseException(df): - print('----------------------------------------') - print(df.to_string()) + 
com.pprint_thing('----------------------------------------') + com.pprint_thing(df.to_string()) raise TypeError self.assertRaises(TypeError, df.groupby(0).agg, diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 14f2ee6222238..b61b1ab925396 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -162,7 +162,7 @@ def _print(result, error = None): error = str(error) v = "%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" % (name,result,t,o,method1,method2,a,error or '') if _verbose: - print(v) + com.pprint_thing(v) try: diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 2f539bcc6d128..2ed24832c3270 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -330,13 +330,13 @@ def check_op(op, name): try: check_op(getattr(operator, op), op) except: - print("Failing operation: %r" % op) + com.pprint_thing("Failing operation: %r" % op) raise if compat.PY3: try: check_op(operator.truediv, 'div') except: - print("Failing operation: %r" % name) + com.pprint_thing("Failing operation: %r" % name) raise def test_combinePanel(self): @@ -1928,8 +1928,8 @@ def check_drop(drop_val, axis_number, aliases, expected): actual = panel.drop(drop_val, axis=alias) assert_panel_equal(actual, expected) except AssertionError: - print("Failed with axis_number %d and aliases: %s" % - (axis_number, aliases)) + com.pprint_thing("Failed with axis_number %d and aliases: %s" % + (axis_number, aliases)) raise # Items expected = Panel({"One": df}) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index b9db6e8adb634..6c732fe352d6a 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -18,6 +18,7 @@ from pandas.core.index import MultiIndex from pandas.core.indexing import IndexingError from pandas.tseries.index import Timestamp, DatetimeIndex +import pandas.core.common as com import pandas.core.config as cf import pandas.lib 
as lib @@ -73,7 +74,7 @@ def test_copy_index_name_checking(self): self.assertIs(self.ts, self.ts) cp = self.ts.copy() cp.index.name = 'foo' - print(self.ts.index.name) + com.pprint_thing(self.ts.index.name) self.assertIsNone(self.ts.index.name) def test_append_preserve_name(self): @@ -2744,7 +2745,7 @@ def run_ops(ops, get_ser, test_ser): if op is not None: self.assertRaises(TypeError, op, test_ser) except: - print("Failed on op %r" % op) + com.pprint_thing("Failed on op %r" % op) raise ### timedelta64 ### td1 = Series([timedelta(minutes=5,seconds=3)]*3)
closes #7247
https://api.github.com/repos/pandas-dev/pandas/pulls/7248
2014-05-27T16:26:28Z
2014-05-27T17:49:27Z
2014-05-27T17:49:27Z
2014-06-26T12:58:15Z
WRN: let + * and - pass thru on boolean with a warning
diff --git a/doc/source/release.rst b/doc/source/release.rst index ed22348e45c9f..fa541baa4e058 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -156,8 +156,8 @@ API Changes - ``to_excel`` now converts ``np.inf`` into a string representation, customizable by the ``inf_rep`` keyword argument (Excel has no native inf representation) (:issue:`6782`) -- Arithmetic ops are now disallowed when passed two bool dtype Series or - DataFrames (:issue:`6762`). +- Arithmetic ops on bool dtype arrays/scalars now give a warning indicating + that they are evaluated in Python space (:issue:`6762`, :issue:`7210`). - Added ``nunique`` and ``value_counts`` functions to ``Index`` for counting unique elements. (:issue:`6734`) - ``DataFrame.plot`` and ``Series.plot`` now support a ``table`` keyword for plotting ``matplotlib.Table``. The ``table`` keyword can receive the following values. diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 56ea0a361e741..ba67e2cd4d4c8 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -186,17 +186,18 @@ API changes - Added ``factorize`` functions to ``Index`` and ``Series`` to get indexer and unique values (:issue:`7090`) - ``describe`` on a DataFrame with a mix of Timestamp and string like objects returns a different Index (:issue:`7088`). Previously the index was unintentionally sorted. -- arithmetic operations with **only** ``bool`` dtypes now raise an error - (:issue:`7011`, :issue:`6762`, :issue:`7015`) +- arithmetic operations with **only** ``bool`` dtypes warn for ``+``, ``-``, + and ``*`` operations and raise for all others (:issue:`7011`, :issue:`6762`, + :issue:`7015`, :issue:`7210`) .. code-block:: python x = pd.Series(np.random.rand(10) > 0.5) y = True - x * y + x + y # warning generated: should do x | y instead + x / y # this raises because it doesn't make sense - # this now raises for arith ops like ``+``, ``*``, etc. 
- NotImplementedError: operator '*' not implemented for bool dtypes + NotImplementedError: operator '/' not implemented for bool dtypes .. _whatsnew_0140.display: diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py index 4aff00e3a97d9..47d3fce618f89 100644 --- a/pandas/computation/expressions.py +++ b/pandas/computation/expressions.py @@ -6,6 +6,7 @@ """ +import warnings import numpy as np from pandas.core.common import _values_from_object from distutils.version import LooseVersion @@ -170,11 +171,23 @@ def _has_bool_dtype(x): return isinstance(x, (bool, np.bool_)) -def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/', - '//', '**'))): - if op_str in not_allowed and _has_bool_dtype(a) and _has_bool_dtype(b): - raise NotImplementedError("operator %r not implemented for bool " - "dtypes" % op_str) +def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')), + unsupported=None): + if unsupported is None: + unsupported = {'+': '|', '*': '&', '-': '^'} + + if _has_bool_dtype(a) and _has_bool_dtype(b): + if op_str in unsupported: + warnings.warn("evaluating in Python space because the %r operator" + " is not supported by numexpr for the bool " + "dtype, use %r instead" % (op_str, + unsupported[op_str])) + return False + + if op_str in not_allowed: + raise NotImplementedError("operator %r not implemented for bool " + "dtypes" % op_str) + return True def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, @@ -193,7 +206,7 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, return the results use_numexpr : whether to try to use numexpr (default True) """ - _bool_arith_check(op_str, a, b) + use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b) if use_numexpr: return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, **eval_kwargs) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 777acdf30f1a0..8d012b871d8ca 
100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -343,8 +343,8 @@ def testit(): def test_bool_ops_raise_on_arithmetic(self): df = DataFrame({'a': np.random.rand(10) > 0.5, 'b': np.random.rand(10) > 0.5}) - names = 'add', 'mul', 'sub', 'div', 'truediv', 'floordiv', 'pow' - ops = '+', '*', '-', '/', '/', '//', '**' + names = 'div', 'truediv', 'floordiv', 'pow' + ops = '/', '/', '//', '**' msg = 'operator %r not implemented for bool dtypes' for op, name in zip(ops, names): if not compat.PY3 or name != 'div': @@ -369,6 +369,49 @@ def test_bool_ops_raise_on_arithmetic(self): with tm.assertRaisesRegexp(TypeError, err_msg): f(df, True) + def test_bool_ops_warn_on_arithmetic(self): + n = 10 + df = DataFrame({'a': np.random.rand(n) > 0.5, + 'b': np.random.rand(n) > 0.5}) + names = 'add', 'mul', 'sub' + ops = '+', '*', '-' + subs = {'+': '|', '*': '&', '-': '^'} + sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'} + for op, name in zip(ops, names): + f = getattr(operator, name) + fe = getattr(operator, sub_funcs[subs[op]]) + + with tm.use_numexpr(True, min_elements=5): + with tm.assert_produces_warning(): + r = f(df, df) + e = fe(df, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, df.b) + e = fe(df.a, df.b) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df.a, True) + e = fe(df.a, True) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df.a) + e = fe(False, df.a) + tm.assert_series_equal(r, e) + + with tm.assert_produces_warning(): + r = f(False, df) + e = fe(False, df) + tm.assert_frame_equal(r, e) + + with tm.assert_produces_warning(): + r = f(df, True) + e = fe(df, True) + tm.assert_frame_equal(r, e) + if __name__ == '__main__': import nose diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 1235aa3cc89d9..e74cf487e75ac 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -35,6 +35,8 @@ 
raise_with_traceback, httplib ) +from pandas.computation import expressions as expr + from pandas import bdate_range from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex @@ -1576,3 +1578,14 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): np.random.set_state(self.start_state) + + +@contextmanager +def use_numexpr(use, min_elements=expr._MIN_ELEMENTS): + olduse = expr._USE_NUMEXPR + oldmin = expr._MIN_ELEMENTS + expr.set_use_numexpr(use) + expr._MIN_ELEMENTS = min_elements + yield + expr._MIN_ELEMENTS = oldmin + expr.set_use_numexpr(olduse)
closes #7210
https://api.github.com/repos/pandas-dev/pandas/pulls/7245
2014-05-27T14:08:54Z
2014-05-27T19:50:10Z
2014-05-27T19:50:10Z
2014-06-17T04:57:02Z
BUG: string methods on empty series (GH7241)
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 02df98313071e..dd1ea5678698d 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1165,6 +1165,9 @@ Thus, a Series of messy strings can be "converted" into a like-indexed Series or DataFrame of cleaned-up or more useful strings, without necessitating ``get()`` to access tuples or ``re.match`` objects. +The results dtype always is object, even if no match is found and the result +only contains ``NaN``. + Named groups like .. ipython:: python diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 48eac7fb1b761..fda4432e070a0 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -30,6 +30,10 @@ API changes - Openpyxl now raises a ValueError on construction of the openpyxl writer instead of warning on pandas import (:issue:`7284`). +- For ``StringMethods.extract``, when no match is found, the result - only + containing ``NaN`` values - now also has ``dtype=object`` instead of + ``float`` (:issue:`7242`) + .. _whatsnew_0141.prior_deprecations: Prior Version Deprecations/Changes @@ -86,3 +90,4 @@ Bug Fixes wouldn't test ``True`` when it encountered an ``inf``/``-inf`` (:issue:`7315`). 
- Bug in inferred_freq results in None for eastern hemisphere timezones (:issue:`7310`) +- Bug all ``StringMethods`` now work on empty Series (:issue:`7242`) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 2176e0300f25f..ad64d2bf6bdd9 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -12,7 +12,7 @@ def _get_array_list(arr, others): - if isinstance(others[0], (list, np.ndarray)): + if len(others) and isinstance(others[0], (list, np.ndarray)): arrays = [arr] + list(others) else: arrays = [arr, others] @@ -88,12 +88,15 @@ def _length_check(others): return n -def _na_map(f, arr, na_result=np.nan): +def _na_map(f, arr, na_result=np.nan, dtype=object): # should really _check_ for NA - return _map(f, arr, na_mask=True, na_value=na_result) + return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype) -def _map(f, arr, na_mask=False, na_value=np.nan): +def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object): + if not len(arr): + return np.ndarray(0, dtype=dtype) + if isinstance(arr, Series): arr = arr.values if not isinstance(arr, np.ndarray): @@ -108,7 +111,7 @@ def g(x): return f(x) except (TypeError, AttributeError): return na_value - return _map(g, arr) + return _map(g, arr, dtype=dtype) if na_value is not np.nan: np.putmask(result, mask, na_value) if result.dtype == object: @@ -146,7 +149,7 @@ def str_count(arr, pat, flags=0): """ regex = re.compile(pat, flags=flags) f = lambda x: len(regex.findall(x)) - return _na_map(f, arr) + return _na_map(f, arr, dtype=int) def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): @@ -187,7 +190,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): f = lambda x: bool(regex.search(x)) else: f = lambda x: pat in x - return _na_map(f, arr, na) + return _na_map(f, arr, na, dtype=bool) def str_startswith(arr, pat, na=np.nan): @@ -206,7 +209,7 @@ def str_startswith(arr, pat, na=np.nan): startswith : array (boolean) """ f = lambda x: x.startswith(pat) - 
return _na_map(f, arr, na) + return _na_map(f, arr, na, dtype=bool) def str_endswith(arr, pat, na=np.nan): @@ -225,7 +228,7 @@ def str_endswith(arr, pat, na=np.nan): endswith : array (boolean) """ f = lambda x: x.endswith(pat) - return _na_map(f, arr, na) + return _na_map(f, arr, na, dtype=bool) def str_lower(arr): @@ -375,6 +378,7 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False): # and is basically useless, so we will not warn. if (not as_indexer) and regex.groups > 0: + dtype = object def f(x): m = regex.match(x) if m: @@ -383,9 +387,10 @@ def f(x): return [] else: # This is the new behavior of str_match. + dtype = bool f = lambda x: bool(regex.match(x)) - return _na_map(f, arr, na) + return _na_map(f, arr, na, dtype=dtype) def _get_single_group_name(rx): @@ -409,6 +414,9 @@ def str_extract(arr, pat, flags=0): Returns ------- extracted groups : Series (one group) or DataFrame (multiple groups) + Note that dtype of the result is always object, even when no match is + found and the result is a Series or DataFrame containing only NaN + values. 
Examples -------- @@ -461,13 +469,17 @@ def f(x): if regex.groups == 1: result = Series([f(val)[0] for val in arr], name=_get_single_group_name(regex), - index=arr.index) + index=arr.index, dtype=object) else: names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) columns = [names.get(1 + i, i) for i in range(regex.groups)] - result = DataFrame([f(val) for val in arr], - columns=columns, - index=arr.index) + if arr.empty: + result = DataFrame(columns=columns, dtype=object) + else: + result = DataFrame([f(val) for val in arr], + columns=columns, + index=arr.index, + dtype=object) return result @@ -536,7 +548,7 @@ def str_len(arr): ------- lengths : array """ - return _na_map(len, arr) + return _na_map(len, arr, dtype=int) def str_findall(arr, pat, flags=0): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index e50b2ef2289c5..55ab906544fc4 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -505,12 +505,12 @@ def test_extract(self): # one group, no matches result = s.str.extract('(_)') - exp = Series([NA, NA, NA]) + exp = Series([NA, NA, NA], dtype=object) tm.assert_series_equal(result, exp) # two groups, no matches result = s.str.extract('(_)(_)') - exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]]) + exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object) tm.assert_frame_equal(result, exp) # one group, some matches @@ -585,6 +585,47 @@ def test_extract_single_series_name_is_preserved(self): tm.assert_series_equal(r, e) self.assertEqual(r.name, e.name) + def test_empty_str_methods(self): + empty_str = empty = Series(dtype=str) + empty_int = Series(dtype=int) + empty_bool = Series(dtype=bool) + empty_list = Series(dtype=list) + empty_bytes = Series(dtype=object) + + # GH7241 + # (extract) on empty series + + tm.assert_series_equal(empty_str, empty.str.cat(empty)) + tm.assert_equal('', empty.str.cat()) + tm.assert_series_equal(empty_str, empty.str.title()) + tm.assert_series_equal(empty_int, 
empty.str.count('a')) + tm.assert_series_equal(empty_bool, empty.str.contains('a')) + tm.assert_series_equal(empty_bool, empty.str.startswith('a')) + tm.assert_series_equal(empty_bool, empty.str.endswith('a')) + tm.assert_series_equal(empty_str, empty.str.lower()) + tm.assert_series_equal(empty_str, empty.str.upper()) + tm.assert_series_equal(empty_str, empty.str.replace('a','b')) + tm.assert_series_equal(empty_str, empty.str.repeat(3)) + tm.assert_series_equal(empty_bool, empty.str.match('^a')) + tm.assert_series_equal(empty_str, empty.str.extract('()')) + tm.assert_frame_equal(DataFrame(columns=[0,1], dtype=str), empty.str.extract('()()')) + tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies()) + tm.assert_series_equal(empty_str, empty_list.str.join('')) + tm.assert_series_equal(empty_int, empty.str.len()) + tm.assert_series_equal(empty_list, empty_list.str.findall('a')) + tm.assert_series_equal(empty_str, empty.str.pad(42)) + tm.assert_series_equal(empty_str, empty.str.center(42)) + tm.assert_series_equal(empty_list, empty.str.split('a')) + tm.assert_series_equal(empty_str, empty.str.slice(stop=1)) + tm.assert_series_equal(empty_str, empty.str.strip()) + tm.assert_series_equal(empty_str, empty.str.lstrip()) + tm.assert_series_equal(empty_str, empty.str.rstrip()) + tm.assert_series_equal(empty_str, empty.str.rstrip()) + tm.assert_series_equal(empty_str, empty.str.wrap(42)) + tm.assert_series_equal(empty_str, empty.str.get(0)) + tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii')) + tm.assert_series_equal(empty_bytes, empty.str.encode('ascii')) + def test_get_dummies(self): s = Series(['a|b', 'a|c', np.nan]) result = s.str.get_dummies('|')
closes #7241
https://api.github.com/repos/pandas-dev/pandas/pulls/7242
2014-05-27T07:42:41Z
2014-06-04T03:33:43Z
null
2014-06-16T22:44:39Z
DOC: GH3850, add .isin to 10min.rst
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 4bca2f4a9d4c8..cbd2f60ddbcd3 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -287,6 +287,17 @@ A ``where`` operation for getting. df[df > 0] +Using the :func:`~Series.isin` method for filtering: + +.. ipython:: python + + df['E']=['one', 'one','two','three','four','three'] + df + good_numbers=['two','four'] + df[df['E'].isin(good_numbers)] + + df.drop('E', inplace=True, axis=1) + Setting ~~~~~~~ diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index ba67e2cd4d4c8..c4e3fb672aef2 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -200,6 +200,7 @@ API changes NotImplementedError: operator '/' not implemented for bool dtypes + .. _whatsnew_0140.display: Display Changes
closes #3850. Updates 10min.rst to demonstrate isin
https://api.github.com/repos/pandas-dev/pandas/pulls/7239
2014-05-26T20:20:26Z
2014-05-28T13:30:23Z
2014-05-28T13:30:23Z
2014-06-14T12:09:50Z
DOC: clean-up docstrings of option functions
diff --git a/doc/source/api.rst b/doc/source/api.rst index 2b12da9a7f92f..c037dfa8d7acf 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -489,7 +489,6 @@ Serialization / IO / Conversion Series.to_frame Series.to_hdf Series.to_sql - Series.to_gbq Series.to_msgpack Series.to_json Series.to_sparse @@ -1200,6 +1199,9 @@ Indexing, iteration .. currentmodule:: pandas +.. autosummary:: + :toctree: generated/ + Grouper .. currentmodule:: pandas.core.groupby @@ -1226,8 +1228,11 @@ Computations / Descriptive Stats .. currentmodule:: pandas +General utility functions +------------------------- + Working with options --------------------- +~~~~~~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ @@ -1238,6 +1243,7 @@ Working with options set_option option_context + .. HACK - see github issue #4539. To ensure old links remain valid, include here the autosummaries with previous currentmodules as a comment and add diff --git a/pandas/core/config.py b/pandas/core/config.py index ebac3d40221e7..9b74ef0d9d3c0 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -224,45 +224,55 @@ def __doc__(self): opts_list=opts_list) _get_option_tmpl = """ -get_option(pat) - Retrieves the value of the specified option +get_option(pat) + +Retrieves the value of the specified option. Available options: + {opts_list} Parameters ---------- -pat - str/regexp which should match a single option. - -Note: partial matches are supported for convenience, but unless you use the -full option name (e.g. x.y.z.option_name), your code may break in future -versions if new options with similar names are introduced. +pat : str + Regexp which should match a single option. + Note: partial matches are supported for convenience, but unless you use the + full option name (e.g. x.y.z.option_name), your code may break in future + versions if new options with similar names are introduced. 
Returns ------- -result - the value of the option +result : the value of the option Raises ------ -OptionError if no such option exists +OptionError : if no such option exists + +Notes +----- +The available options with its descriptions: {opts_desc} """ _set_option_tmpl = """ -set_option(pat,value) - Sets the value of the specified option +set_option(pat, value) + +Sets the value of the specified option. Available options: + {opts_list} Parameters ---------- -pat - str/regexp which should match a single option. - -Note: partial matches are supported for convenience, but unless you use the -full option name (e.g. x.y.z.option_name), your code may break in future -versions if new options with similar names are introduced. - -value - new value of option. +pat : str + Regexp which should match a single option. + Note: partial matches are supported for convenience, but unless you use the + full option name (e.g. x.y.z.option_name), your code may break in future + versions if new options with similar names are introduced. +value : + new value of option. Returns ------- @@ -272,55 +282,72 @@ def __doc__(self): ------ OptionError if no such option exists +Notes +----- +The available options with its descriptions: + {opts_desc} """ _describe_option_tmpl = """ -describe_option(pat,_print_desc=False) Prints the description -for one or more registered options. +describe_option(pat, _print_desc=False) + +Prints the description for one or more registered options. Call with not arguments to get a listing for all registered options. Available options: + {opts_list} Parameters ---------- -pat - str, a regexp pattern. All matching keys will have their - description displayed. - -_print_desc - if True (default) the description(s) will be printed - to stdout otherwise, the description(s) will be returned - as a unicode string (for testing). +pat : str + Regexp pattern. All matching keys will have their description displayed. 
+_print_desc : bool, default True + If True (default) the description(s) will be printed to stdout. + Otherwise, the description(s) will be returned as a unicode string + (for testing). Returns ------- None by default, the description(s) as a unicode string if _print_desc is False +Notes +----- +The available options with its descriptions: + {opts_desc} """ _reset_option_tmpl = """ -reset_option(pat) - Reset one or more options to their default value. +reset_option(pat) + +Reset one or more options to their default value. Pass "all" as argument to reset all options. Available options: + {opts_list} Parameters ---------- -pat - str/regex if specified only options matching `prefix`* will be reset - -Note: partial matches are supported for convenience, but unless you use the -full option name (e.g. x.y.z.option_name), your code may break in future -versions if new options with similar names are introduced. +pat : str/regex + If specified only options matching `prefix*` will be reset. + Note: partial matches are supported for convenience, but unless you + use the full option name (e.g. x.y.z.option_name), your code may break + in future versions if new options with similar names are introduced. Returns ------- None +Notes +----- +The available options with its descriptions: + {opts_desc} """ @@ -337,6 +364,18 @@ def __doc__(self): class option_context(object): + """ + Context manager to temporarily set options in the `with` statement context. + + You need to invoke as ``option_context(pat, val, [(pat, val), ...])``. + + Examples + -------- + + >>> with option_context('display.max_rows', 10, 'display.max_columns', 5): + ... 
+ + """ def __init__(self, *args): if not (len(args) % 2 == 0 and len(args) >= 2): @@ -589,13 +628,13 @@ def _build_option_description(k): o = _get_registered_option(k) d = _get_deprecated_option(k) - s = u('%s: ') % k + s = u('%s : ') % k if o: s += u('[default: %s] [currently: %s]') % (o.defval, _get_option(k, True)) if o.doc: - s += '\n' + '\n '.join(o.doc.strip().split('\n')) + s += '\n '.join(o.doc.strip().split('\n')) else: s += 'No description available.\n' @@ -604,7 +643,7 @@ def _build_option_description(k): s += (u(', use `%s` instead.') % d.rkey if d.rkey else '') s += u(')\n') - s += '\n' + s += '\n\n' return s @@ -615,9 +654,9 @@ def pp_options_list(keys, width=80, _print=False): from itertools import groupby def pp(name, ks): - pfx = (name + '.[' if name else '') + pfx = ('- ' + name + '.[' if name else '') ls = wrap(', '.join(ks), width, initial_indent=pfx, - subsequent_indent=' ' * len(pfx), break_long_words=False) + subsequent_indent=' ', break_long_words=False) if ls and ls[-1] and name: ls[-1] = ls[-1] + ']' return ls diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index f8c09acaef1fb..f9f3b0da22843 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -97,7 +97,6 @@ a string with the desired format of the number. This is used in some places like SeriesFormatter. See core.format.EngFormatter for an example. - """ max_colwidth_doc = """ @@ -162,7 +161,6 @@ pc_max_seq_items = """ : int or None - when pretty-printing a long sequence, no more then `max_seq_items` will be printed. If items are omitted, they will be denoted by the addition of "..." to the resulting string. @@ -179,7 +177,6 @@ pc_large_repr_doc = """ : 'truncate'/'info' - For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can show a truncated table (the default from 0.13), or switch to the view from df.info() (the behaviour in earlier versions of pandas). 
@@ -187,7 +184,6 @@ pc_mpl_style_doc = """ : bool - Setting this to 'default' will modify the rcParams used by matplotlib to give plots a more pleasing visual style by default. Setting this to None/False restores the values to their initial value.
Some clean-up after my last additions to api.rst to get rid off all warnings: - some clean up of api.rst itself - some clean-up of the docstrings of the option functions so they render nicely with sphinx
https://api.github.com/repos/pandas-dev/pandas/pulls/7236
2014-05-26T14:33:11Z
2014-05-26T20:22:27Z
2014-05-26T20:22:26Z
2014-06-12T21:41:35Z
BUG: grouped hist raises AttributeError with single group
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 3159bbfc34e7d..a92cb54b9077d 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -280,3 +280,5 @@ Bug Fixes - Bug in ``pandas.core.strings.str_contains`` does not properly match in a case insensitive fashion when ``regex=False`` and ``case=False`` (:issue:`7505`) - Bug in ``expanding_cov``, ``expanding_corr``, ``rolling_cov``, and ``rolling_corr`` for two arguments with mismatched index (:issue:`7512`) +- Bug in grouped `hist` doesn't handle `rot` kw and `sharex` kw properly (:issue:`7234`) + diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 2f631e28bf1e8..ddd0477a4691e 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -2272,6 +2272,8 @@ def test_time_series_plot_color_with_empty_kwargs(self): def test_grouped_hist(self): df = DataFrame(randn(500, 2), columns=['A', 'B']) df['C'] = np.random.randint(0, 4, 500) + df['D'] = ['X'] * 500 + axes = plotting.grouped_hist(df.A, by=df.C) self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) @@ -2279,15 +2281,25 @@ def test_grouped_hist(self): axes = df.hist(by=df.C) self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) + tm.close() + # group by a key with single value + axes = df.hist(by='D', rot=30) + self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) + self._check_ticks_props(axes, xrot=30) + tm.close() # make sure kwargs to hist are handled + xf, yf = 20, 18 + xrot, yrot = 30, 40 axes = plotting.grouped_hist(df.A, by=df.C, normed=True, - cumulative=True, bins=4) - + cumulative=True, bins=4, + xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) # height of last bin (index 5) must be 1.0 for ax in axes.ravel(): height = ax.get_children()[5].get_height() self.assertAlmostEqual(height, 1.0) + self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot, + ylabelsize=yf, yrot=yrot) tm.close() axes = plotting.grouped_hist(df.A, by=df.C, log=True) diff --git 
a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 98c802ac1087a..f4e9b1a0f7d26 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2501,23 +2501,12 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, kwds : other plotting keyword arguments To be passed to hist function """ - import matplotlib.pyplot as plt if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, layout=layout, bins=bins, + xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, **kwds) - - for ax in axes.ravel(): - if xlabelsize is not None: - plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) - if xrot is not None: - plt.setp(ax.get_xticklabels(), rotation=xrot) - if ylabelsize is not None: - plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) - if yrot is not None: - plt.setp(ax.get_yticklabels(), rotation=yrot) - return axes if column is not None: @@ -2533,21 +2522,12 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, for i, col in enumerate(com._try_sort(data.columns)): ax = axes[i // ncols, i % ncols] - ax.xaxis.set_visible(True) - ax.yaxis.set_visible(True) ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid) - if xlabelsize is not None: - plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) - if xrot is not None: - plt.setp(ax.get_xticklabels(), rotation=xrot) - if ylabelsize is not None: - plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) - if yrot is not None: - plt.setp(ax.get_yticklabels(), rotation=yrot) - + _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, + ylabelsize=ylabelsize, yrot=yrot) fig.subplots_adjust(wspace=0.3, hspace=0.3) return axes @@ -2607,23 +2587,18 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, ax.hist(values, bins=bins, **kwds) ax.grid(grid) axes = np.array([ax]) + + _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, + 
ylabelsize=ylabelsize, yrot=yrot) + else: if 'figure' in kwds: raise ValueError("Cannot pass 'figure' when using the " "'by' argument, since a new 'Figure' instance " "will be created") - axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, - bins=bins, **kwds) - - for ax in axes.ravel(): - if xlabelsize is not None: - plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) - if xrot is not None: - plt.setp(ax.get_xticklabels(), rotation=xrot) - if ylabelsize is not None: - plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) - if yrot is not None: - plt.setp(ax.get_yticklabels(), rotation=yrot) + axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins, + xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, + **kwds) if axes.ndim == 1 and len(axes) == 1: return axes[0] @@ -2632,6 +2607,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, layout=None, sharex=False, sharey=False, rot=90, grid=True, + xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, **kwargs): """ Grouped histogram @@ -2658,9 +2634,15 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, def plot_group(group, ax): ax.hist(group.dropna().values, bins=bins, **kwargs) + xrot = xrot or rot + fig, axes = _grouped_plot(plot_group, data, column=column, by=by, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, rot=rot) + + _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, + ylabelsize=ylabelsize, yrot=yrot) + fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3) return axes @@ -3094,6 +3076,22 @@ def _get_xlim(lines): return left, right +def _set_ticks_props(axes, xlabelsize=None, xrot=None, + ylabelsize=None, yrot=None): + import matplotlib.pyplot as plt + + for ax in _flatten(axes): + if xlabelsize is not None: + plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) + if xrot is not 
None: + plt.setp(ax.get_xticklabels(), rotation=xrot) + if ylabelsize is not None: + plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) + if yrot is not None: + plt.setp(ax.get_yticklabels(), rotation=yrot) + return axes + + if __name__ == '__main__': # import pandas.rpy.common as com # sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
Includes 3 minor fixes for `hist`. - `DataFrame.hist` raises `AttributeError` when the target column specifiedwith `by` kw only contains a single value. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas.util.testing as tm n=100 gender = tm.choice(['Male', 'Female'], size=n) classroom = tm.choice(['A', 'B', 'C'], size=n) single = tm.choice(['S'], size=n) df = pd.DataFrame({'gender': gender, 'classroom': classroom, 'single': single, 'height': np.random.normal(66, 4, size=n), 'weight': np.random.normal(161, 32, size=n), 'category': np.random.randint(4, size=n)}) df.hist(by='single') # AttributeError: 'AxesSubplot' object has no attribute 'ravel' ``` - `hist` can accept `rot` kw only when `by` is specified, but `rot` actually does nothing. I understand `rot` value should be used for `xticklabels` rotation if `xrot` is not specified? ``` df.hist(rot=45) # AttributeError: Unknown property rot # -> This is OK because we can use xrot kw df.hist(by='classroom', rot=45) # It raises no error, but ticks are NOT rotated. ``` - `hist` always displays `xticklabels` on all axes even if specifying `sharex=True`. Other plots only draws `xticklabels` on bottom axes if `sharex` is True.
https://api.github.com/repos/pandas-dev/pandas/pulls/7234
2014-05-24T22:12:03Z
2014-07-07T19:27:56Z
2014-07-07T19:27:56Z
2014-07-09T12:37:50Z
BUG: multi-index output formatting is buggy (GH7174)
diff --git a/pandas/core/format.py b/pandas/core/format.py index 83be9eb57b79c..c2f439877ca00 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -780,6 +780,7 @@ def write_result(self, buf): def _write_header(self, indent): truncate_h = self.fmt.truncate_h + row_levels = self.frame.index.nlevels if not self.fmt.header: # write nothing return indent @@ -819,13 +820,49 @@ def _column_header(): sentinel = None levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False) - level_lengths = _get_level_lengths(levels, sentinel) - - row_levels = self.frame.index.nlevels - + inner_lvl = len(level_lengths) - 1 for lnum, (records, values) in enumerate(zip(level_lengths, levels)): + if truncate_h: + # modify the header lines + ins_col = self.fmt.tr_col_num + if self.fmt.sparsify: + recs_new = {} + # Increment tags after ... col. + for tag,span in list(records.items()): + if tag >= ins_col: + recs_new[tag + 1] = span + elif tag + span > ins_col: + recs_new[tag] = span + 1 + if lnum == inner_lvl: + values = values[:ins_col] + (u('...'),) + \ + values[ins_col:] + else: # sparse col headers do not receive a ... + values = values[:ins_col] + \ + (values[ins_col - 1],) + values[ins_col:] + else: + recs_new[tag] = span + # if ins_col lies between tags, all col headers get ... 
+ if tag + span == ins_col: + recs_new[ins_col] = 1 + values = values[:ins_col] + (u('...'),) + \ + values[ins_col:] + records = recs_new + inner_lvl = len(level_lengths) - 1 + if lnum == inner_lvl: + records[ins_col] = 1 + else: + recs_new = {} + for tag,span in list(records.items()): + if tag >= ins_col: + recs_new[tag + 1] = span + else: + recs_new[tag] = span + recs_new[ins_col] = 1 + records = recs_new + values = values[:ins_col] + [u('...')] + values[ins_col:] + name = self.columns.names[lnum] row = [''] * (row_levels - 1) + ['' if name is None else com.pprint_thing(name)] @@ -839,17 +876,6 @@ def _column_header(): continue j += 1 row.append(v) - if truncate_h: - if self.fmt.sparsify and lnum == 0: - ins_col = row_levels + self.fmt.tr_col_num - 1 - row.insert(ins_col, '...') - - for tag in list(tags.keys()): - if tag >= ins_col: - tags[tag+1] = tags.pop(tag) - else: - row.insert(row_levels + self.fmt.tr_col_num, '...') - self.write_tr(row, indent, self.indent_delta, tags=tags, header=True) else: @@ -857,7 +883,8 @@ def _column_header(): align = self.fmt.justify if truncate_h: - col_row.insert(self.fmt.tr_col_num + 1, '...') + ins_col = row_levels + self.fmt.tr_col_num + col_row.insert(ins_col, '...') self.write_tr(col_row, indent, self.indent_delta, header=True, align=align) @@ -866,6 +893,9 @@ def _column_header(): row = [ x if x is not None else '' for x in self.frame.index.names ] + [''] * min(len(self.columns), self.max_cols) + if truncate_h: + ins_col = row_levels + self.fmt.tr_col_num + row.insert(ins_col, '') self.write_tr(row, indent, self.indent_delta, header=True) indent -= self.indent_delta @@ -948,12 +978,36 @@ def _write_hierarchical_rows(self, fmt_values, indent): adjoin=False, names=False) level_lengths = _get_level_lengths(levels, sentinel) + inner_lvl = len(level_lengths) - 1 + if truncate_v: + # Insert ... row and adjust idx_values and + # level_lengths to take this into account. 
+ ins_row = self.fmt.tr_row_num + for lnum,records in enumerate(level_lengths): + rec_new = {} + for tag,span in list(records.items()): + if tag >= ins_row: + rec_new[tag + 1] = span + elif tag + span > ins_row: + rec_new[tag] = span + 1 + dot_row = list(idx_values[ins_row - 1]) + dot_row[-1] = u('...') + idx_values.insert(ins_row,tuple(dot_row)) + else: + rec_new[tag] = span + # If ins_row lies between tags, all cols idx cols receive ... + if tag + span == ins_row: + rec_new[ins_row] = 1 + if lnum == 0: + idx_values.insert(ins_row,tuple([u('...')]*len(level_lengths))) + level_lengths[lnum] = rec_new + + level_lengths[inner_lvl][ins_row] = 1 + for ix_col in range(len(fmt_values)): + fmt_values[ix_col].insert(ins_row,'...') + nrows += 1 for i in range(nrows): - if truncate_v and i == (self.fmt.tr_row_num): - str_sep_row = [ '...' ] * (len(row) + sparse_offset) - self.write_tr(str_sep_row, indent, self.indent_delta, tags=None) - row = [] tags = {} diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 4eab5786e8fbf..dddfcc976c4a6 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -953,8 +953,8 @@ def test_to_html_truncate_multi_index(self): <td> NaN</td> </tr> <tr> - <td>...</td> - <td>...</td> + <th>...</th> + <th>...</th> <td>...</td> <td>...</td> <td>...</td>
Closes #7174 Adjusts the truncation in the notebook for multiindex dfs. Both rows and columns are affected. http://nbviewer.ipython.org/gist/bjonen/492fea9559fd73edf579
https://api.github.com/repos/pandas-dev/pandas/pulls/7232
2014-05-24T20:17:51Z
2014-05-25T16:06:45Z
2014-05-25T16:06:44Z
2014-06-27T11:31:39Z
BUG: bar plot can now handle bottom and left kw properly
diff --git a/doc/source/v0.14.1.txt b/doc/source/v0.14.1.txt index 0aa30e536ef48..b8201556a86f8 100644 --- a/doc/source/v0.14.1.txt +++ b/doc/source/v0.14.1.txt @@ -65,3 +65,4 @@ There are no experimental changes in 0.14.1 Bug Fixes ~~~~~~~~~ +- Bug in ``DataFrame`` and ``Series`` bar and barh plot raises ``TypeError`` when ``bottom`` and ``left`` keyword is specified (:issue:`7226`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 1b9691257347b..7281b3cf685c8 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1134,6 +1134,35 @@ def test_bar_barwidth_position(self): self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9, position=0.2) self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9, position=0.2) + @slow + def test_bar_bottom_left(self): + df = DataFrame(rand(5, 5)) + ax = df.plot(kind='bar', stacked=False, bottom=1) + result = [p.get_y() for p in ax.patches] + self.assertEqual(result, [1] * 25) + + ax = df.plot(kind='bar', stacked=True, bottom=[-1, -2, -3, -4, -5]) + result = [p.get_y() for p in ax.patches[:5]] + self.assertEqual(result, [-1, -2, -3, -4, -5]) + + ax = df.plot(kind='barh', stacked=False, left=np.array([1, 1, 1, 1, 1])) + result = [p.get_x() for p in ax.patches] + self.assertEqual(result, [1] * 25) + + ax = df.plot(kind='barh', stacked=True, left=[1, 2, 3, 4, 5]) + result = [p.get_x() for p in ax.patches[:5]] + self.assertEqual(result, [1, 2, 3, 4, 5]) + + axes = df.plot(kind='bar', subplots=True, bottom=-1) + for ax in axes: + result = [p.get_y() for p in ax.patches] + self.assertEqual(result, [-1] * 5) + + axes = df.plot(kind='barh', subplots=True, left=np.array([1, 1, 1, 1, 1])) + for ax in axes: + result = [p.get_x() for p in ax.patches] + self.assertEqual(result, [1] * 5) + @slow def test_plot_scatter(self): df = DataFrame(randn(6, 4), diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 00f7a61870369..adbed61699dda 100644 --- 
a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1789,6 +1789,9 @@ def __init__(self, data, **kwargs): kwargs['align'] = kwargs.pop('align', 'center') self.tick_pos = np.arange(len(data)) + self.bottom = kwargs.pop('bottom', None) + self.left = kwargs.pop('left', None) + self.log = kwargs.pop('log',False) MPLPlot.__init__(self, data, **kwargs) @@ -1808,13 +1811,21 @@ def _args_adjust(self): if self.rot is None: self.rot = self._default_rot[self.kind] - @property - def bar_f(self): + if com.is_list_like(self.bottom): + self.bottom = np.array(self.bottom) + if com.is_list_like(self.left): + self.left = np.array(self.left) + + def _get_plot_function(self): if self.kind == 'bar': def f(ax, x, y, w, start=None, **kwds): + if self.bottom is not None: + start = start + self.bottom return ax.bar(x, y, w, bottom=start,log=self.log, **kwds) elif self.kind == 'barh': def f(ax, x, y, w, start=None, log=self.log, **kwds): + if self.left is not None: + start = start + self.left return ax.barh(x, y, w, left=start, **kwds) else: raise NotImplementedError @@ -1830,10 +1841,8 @@ def _make_plot(self): colors = self._get_colors() ncolors = len(colors) - bar_f = self.bar_f - + bar_f = self._get_plot_function() pos_prior = neg_prior = np.zeros(len(self.data)) - K = self.nseries for i, (label, y) in enumerate(self._iter_data()):
Actually not a bug, but allows `bar` and `barh` plot to accept `bottom` and `left` kw respectively to specify the starting point. Currently, passing `bottom` or `left` keyword results in `TypeError` ``` import pandas as pd import numpy as np df = pd.DataFrame(np.random.randn(5, 5)) df.plot(kind='bar', bottom=1, ax=axes[0]) # TypeError: bar() got multiple values for keyword argument 'bottom' ``` ### After fix ``` import matplotlib.pyplot as plt fig, axes = plt.subplots(1, 2) df = pd.DataFrame(np.random.randn(5, 5)) df.plot(kind='bar', bottom=1, ax=axes[0]) df2 = pd.DataFrame(np.random.rand(5, 5)) df2.plot(kind='barh', left=[1, 2, 3, 4, 5], stacked=True, ax=axes[1]) ``` ![figure_1](https://cloud.githubusercontent.com/assets/1696302/3074746/24f8bebe-e351-11e3-9506-ac7de60db7fd.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/7226
2014-05-24T14:40:20Z
2014-05-30T18:12:20Z
null
2014-06-13T03:13:25Z
BUG: boxplot returns incorrect dict
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 39067096cfd25..de726e670d958 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -10,7 +10,8 @@ from datetime import datetime, date from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range -from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip +from pandas.compat import (range, lrange, StringIO, lmap, lzip, u, zip, + iteritems, OrderedDict) from pandas.util.decorators import cache_readonly import pandas.core.common as com import pandas.util.testing as tm @@ -2245,6 +2246,48 @@ def test_grouped_hist(self): with tm.assertRaises(AttributeError): plotting.grouped_hist(df.A, by=df.C, foo='bar') + def _check_box_dict(self, returned, return_type, + expected_klass, expected_keys): + self.assertTrue(isinstance(returned, OrderedDict)) + self.assertEqual(sorted(returned.keys()), sorted(expected_keys)) + for key, value in iteritems(returned): + self.assertTrue(isinstance(value, expected_klass)) + # check returned dict has correct mapping + if return_type == 'axes': + self.assertEqual(value.get_title(), key) + elif return_type == 'both': + self.assertEqual(value.ax.get_title(), key) + elif return_type == 'dict': + line = value['medians'][0] + self.assertEqual(line.get_axes().get_title(), key) + else: + raise AssertionError + + @slow + def test_grouped_box_return_type(self): + import matplotlib.axes + + df = self.hist_df + + columns2 = 'X B C D A G Y N Q O'.split() + df2 = DataFrame(random.randn(50, 10), columns=columns2) + categories2 = 'A B C D E F G H I J'.split() + df2['category'] = tm.choice(categories2, size=50) + + types = {'dict': dict, 'axes': matplotlib.axes.Axes, 'both': tuple} + for t, klass in iteritems(types): + returned = df.groupby('classroom').boxplot(return_type=t) + self._check_box_dict(returned, t, klass, ['A', 'B', 'C']) + + returned = df.boxplot(by='classroom', return_type=t) + self._check_box_dict(returned, t, 
klass, ['height', 'weight', 'category']) + + returned = df2.groupby('category').boxplot(return_type=t) + self._check_box_dict(returned, t, klass, categories2) + + returned = df2.boxplot(by='category', return_type=t) + self._check_box_dict(returned, t, klass, columns2) + @slow def test_grouped_box_layout(self): df = self.hist_df diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 4d5d8d773a469..00f7a61870369 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2363,12 +2363,17 @@ def plot_group(grouped, ax): if return_type is None: ret = axes if return_type == 'axes': - ret = dict((k, ax) for k, ax in zip(d.keys(), axes)) + ret = compat.OrderedDict() + axes = _flatten(axes)[:len(d)] + for k, ax in zip(d.keys(), axes): + ret[k] = ax elif return_type == 'dict': ret = d elif return_type == 'both': - ret = dict((k, BP(ax=ax, lines=line)) for - (k, line), ax in zip(d.items(), axes)) + ret = compat.OrderedDict() + axes = _flatten(axes)[:len(d)] + for (k, line), ax in zip(d.items(), axes): + ret[k] = BP(ax=ax, lines=line) else: if layout is not None: raise ValueError("The 'layout' keyword is not supported when " @@ -2723,7 +2728,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, sharex=False, sharey=True) axes = _flatten(axes) - ret = {} + ret = compat.OrderedDict() for (key, group), ax in zip(grouped, axes): d = group.boxplot(ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds) @@ -2804,7 +2809,6 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, ravel_axes = _flatten(axes) out_dict = compat.OrderedDict() - for i, col in enumerate(columns): ax = ravel_axes[i] gp_col = grouped[col]
#7096 returns incorrect `dict` when number of columns and subplots are different. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas.util.testing as tm n=100 gender = tm.choice(['Male', 'Female'], size=n) classroom = tm.choice(['A', 'B', 'C'], size=n) df = pd.DataFrame({'gender': gender, 'classroom': classroom, 'height': np.random.normal(66, 4, size=n), 'weight': np.random.normal(161, 32, size=n), 'category': np.random.randint(4, size=n)}) df.boxplot(by='classroom', return_type='axes') # {'category': array([<matplotlib.axes.AxesSubplot object at 0x104700e90>, # <matplotlib.axes.AxesSubplot object at 0x10671ca90>], dtype=object), # 'height': array([<matplotlib.axes.AxesSubplot object at 0x106744dd0>, # <matplotlib.axes.AxesSubplot object at 0x106766b50>], dtype=object)} # This must be a dict with 3 keys ('category', 'height' and 'weight') which value is AxesSubplot (not numpy.array). ``` Also, `boxplot` sometimes return `OrderedDict` and sometimes `dict` inconsistently. ``` df.boxplot(by='classroom', return_type='dict') # OrderedDict([('category', {'medians': [<matplotlib.lines.Line2D object at 0x106fe8c10>, # ... ``` The fix makes `boxplot` to always return `OrderedDict` which has correct mapping of keys and values
https://api.github.com/repos/pandas-dev/pandas/pulls/7225
2014-05-24T13:45:55Z
2014-05-27T17:50:52Z
2014-05-27T17:50:52Z
2014-06-13T14:48:24Z
TST: tests for reindex_like consistency (GH7179)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index c3c7ae9bacbcd..b9db6e8adb634 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5187,6 +5187,18 @@ def test_reindex_like(self): assert_series_equal(self.ts.reindex(other.index), self.ts.reindex_like(other)) + # GH 7179 + day1 = datetime(2013,3,5) + day2 = datetime(2013,5,5) + day3 = datetime(2014,3,5) + + series1 = Series([5, None, None],[day1, day2, day3]) + series2 = Series([None, None], [day1, day3]) + + result = series1.reindex_like(series2, method='pad') + expected = Series([5, np.nan], index=[day1, day3]) + assert_series_equal(result, expected) + def test_reindex_fill_value(self): #------------------------------------------------------------ # floats
closes #7179
https://api.github.com/repos/pandas-dev/pandas/pulls/7221
2014-05-23T16:23:41Z
2014-05-23T16:43:09Z
2014-05-23T16:43:09Z
2014-07-16T09:07:25Z
BUG: bug in setitem with multi-index and a 0-dim ndarray (GH7218)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 03867e3bf6543..e0ca9854262f1 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -522,7 +522,7 @@ Bug Fixes - Bug in ``query``/``eval`` where global constants were not looked up correctly (:issue:`7178`) - Bug in recognizing out-of-bounds positional list indexers with ``iloc`` and a multi-axis tuple indexer (:issue:`7189`) -- Bug in setitem with a single value, multi-index and integer indices (:issue:`7190`) +- Bug in setitem with a single value, multi-index and integer indices (:issue:`7190`, :issue:`7218`) - Bug in expressions evaluation with reversed ops, showing in series-dataframe ops (:issue:`7198`, :issue:`7192`) - Bug in multi-axis indexing with > 2 ndim and a multi-index (:issue:`7199`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index df4e5cb738899..518879105aa8b 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -339,7 +339,7 @@ def _setitem_with_indexer(self, indexer, value): # require that we are setting the right number of values that # we are indexing - if is_list_like(value) and lplane_indexer != len(value): + if is_list_like(value) and np.iterable(value) and lplane_indexer != len(value): if len(obj[idx]) != len(value): raise ValueError( @@ -386,7 +386,7 @@ def setter(item, v): def can_do_equal_len(): """ return True if we have an equal len settable """ - if not len(labels) == 1: + if not len(labels) == 1 or not np.iterable(value): return False l = len(value) diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index f5bae7b2d2c82..14f2ee6222238 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -619,14 +619,20 @@ def test_loc_setitem_multiindex(self): self.assertEqual(result, 0) df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index) - df.loc[(t,n),'X'] = 0 + df.loc[(t,n),'X'] = 1 result = df.loc[(t,n),'X'] - self.assertEqual(result, 0) + 
self.assertEqual(result, 1) df = DataFrame(columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index) - df.loc[(t,n),'X'] = 0 + df.loc[(t,n),'X'] = 2 result = df.loc[(t,n),'X'] - self.assertEqual(result, 0) + self.assertEqual(result, 2) + + # GH 7218, assinging with 0-dim arrays + df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index) + df.loc[(t,n), 'X'] = np.array(3) + result = df.loc[(t,n),'X'] + self.assertEqual(result,3) def test_loc_setitem_dups(self):
closes #7218
https://api.github.com/repos/pandas-dev/pandas/pulls/7219
2014-05-23T13:52:11Z
2014-05-23T14:18:45Z
2014-05-23T14:18:45Z
2014-06-25T08:58:13Z
WIP: categoricals as an internal CategoricalBlock GH5313
diff --git a/.gitignore b/.gitignore index 92a7e4d3edbf6..d1567afef699b 100644 --- a/.gitignore +++ b/.gitignore @@ -88,3 +88,5 @@ doc/source/vbench doc/source/vbench.rst doc/source/index.rst doc/build/html/index.html +# Windows specific leftover: +doc/tmp.sv diff --git a/doc/source/api.rst b/doc/source/api.rst index c3cccca3251e4..88aab0ced8420 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -429,7 +429,7 @@ Time series-related Series.tz_localize String handling -~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~ ``Series.str`` can be used to access the values of the series as strings and apply several methods to it. Due to implementation details the methods show up here as methods of the @@ -468,6 +468,60 @@ details the methods show up here as methods of the StringMethods.upper StringMethods.get_dummies +.. _api.categorical: + +Categorical +~~~~~~~~~~~ + +.. currentmodule:: pandas.core.categorical + +If the Series is of dtype ``category``, ``Series.cat`` can be used to access the the underlying +``Categorical``. This data type is similar to the otherwise underlying numpy array +and has the following usable methods and properties (all available as +``Series.cat.<method_or_property>``). + + +.. autosummary:: + :toctree: generated/ + + Categorical + Categorical.from_codes + Categorical.levels + Categorical.ordered + Categorical.reorder_levels + Categorical.remove_unused_levels + Categorical.min + Categorical.max + Categorical.mode + Categorical.describe + +``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts +the Categorical back to a numpy array, so levels and order information is not preserved! + +.. autosummary:: + :toctree: generated/ + + Categorical.__array__ + +To create compatibility with `pandas.Series` and `numpy` arrays, the following (non-API) methods +are also introduced. + +.. 
autosummary:: + :toctree: generated/ + + Categorical.from_array + Categorical.get_values + Categorical.copy + Categorical.dtype + Categorical.ndim + Categorical.sort + Categorical.equals + Categorical.unique + Categorical.order + Categorical.argsort + Categorical.fillna + + Plotting ~~~~~~~~ .. currentmodule:: pandas diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 681d25fed1209..32c0a78e394c5 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1574,7 +1574,8 @@ dtypes: 'float64': np.arange(4.0, 7.0), 'bool1': [True, False, True], 'bool2': [False, True, False], - 'dates': pd.date_range('now', periods=3).values}) + 'dates': pd.date_range('now', periods=3).values}), + 'category': pd.Categorical(list("ABC)) df['tdeltas'] = df.dates.diff() df['uint64'] = np.arange(3, 6).astype('u8') df['other_dates'] = pd.date_range('20130101', periods=3).values @@ -1630,6 +1631,11 @@ All numpy dtypes are subclasses of ``numpy.generic``: subdtypes(np.generic) +.. note:: + + Pandas also defines an additional ``category`` dtype, which is not integrated into the normal + numpy hierarchy and wont show up with the above function. + .. note:: The ``include`` and ``exclude`` parameters must be non-string sequences. diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst new file mode 100644 index 0000000000000..87b59dc735969 --- /dev/null +++ b/doc/source/categorical.rst @@ -0,0 +1,759 @@ +.. _categorical: + +.. currentmodule:: pandas + +.. ipython:: python + :suppress: + + import numpy as np + import random + import os + np.random.seed(123456) + from pandas import options + import pandas as pd + np.set_printoptions(precision=4, suppress=True) + options.display.mpl_style='default' + options.display.max_rows=15 + + +*********** +Categorical +*********** + +.. versionadded:: 0.15 + +.. note:: + While there was in `pandas.Categorical` in earlier versions, the ability to use + `Categorical` data in `Series` and `DataFrame` is new. 
+ + +This is a introduction to pandas :class:`pandas.Categorical` type, including a short comparison +with R's `factor`. + +`Categoricals` are a pandas data type, which correspond to categorical variables in +statistics: a variable, which can take on only a limited, and usually fixed, +number of possible values (commonly called `levels`). Examples are gender, social class, +blood types, country affiliations, observation time or ratings via Likert scales. + +In contrast to statistical categorical variables, a `Categorical` might have an order (e.g. +'strongly agree' vs 'agree' or 'first observation' vs. 'second observation'), but numerical +operations (additions, divisions, ...) are not possible. + +All values of the `Categorical` are either in `levels` or `np.nan`. Order is defined by +the order of the `levels`, not lexical order of the values. Internally, the data structure +consists of a levels array and an integer array of `codes` which point to the real value in the +levels array. + +`Categoricals` are useful in the following cases: + +* A string variable consisting of only a few different values. Converting such a string + variable to a categorical variable will save some memory. +* The lexical order of a variable is not the same as the logical order ("one", "two", "three"). + By converting to a categorical and specifying an order on the levels, sorting and + min/max will use the logical order instead of the lexical order. +* As a signal to other python libraries that this column should be treated as a categorical + variable (e.g. to use suitable statistical methods or plot types) + +See also the :ref:`API docs on Categoricals<api.categorical>`. + +Object Creation +--------------- + +Categorical `Series` or columns in a `DataFrame` can be crated in several ways: + +By passing a `Categorical` object to a `Series` or assigning it to a `DataFrame`: + +.. 
ipython:: python + + raw_cat = pd.Categorical(["a","b","c","a"]) + s = pd.Series(raw_cat) + s + df = pd.DataFrame({"A":["a","b","c","a"]}) + df["B"] = raw_cat + df + +By converting an existing `Series` or column to a ``category`` type: + +.. ipython:: python + + df = pd.DataFrame({"A":["a","b","c","a"]}) + df["B"] = df["A"].astype('category') + df + +By using some special functions: + +.. ipython:: python + + df = pd.DataFrame({'value': np.random.randint(0, 100, 20)}) + labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10) ] + + df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels) + df.head(10) + + +`Categoricals` have a specific ``category`` :ref:`dtype <basics.dtypes>`: + +.. ipython:: python + + df.dtypes + +.. note:: + + In contrast to R's `factor` function, a `Categorical` is not converting input values to + string and levels will end up the same data type as the original values. + +.. note:: + + I contrast to R's `factor` function, there is currently no way to assign/change labels at + creation time. Use `levels` to change the levels after creation time. + +To get back to the original Series or `numpy` array, use ``Series.astype(original_dtype)`` or +``np.asarray(categorical)``: + +.. ipython:: python + + s = pd.Series(["a","b","c","a"]) + s + s2 = s.astype('category') + s2 + s3 = s2.astype('string') + s3 + np.asarray(s2.cat) + +If you have already `codes` and `levels`, you can use the :func:`~pandas.Categorical.from_codes` +constructor to save the factorize step during normal constructor mode: + +.. ipython:: python + + splitter = np.random.choice([0,1], 5, p=[0.5,0.5]) + pd.Categorical.from_codes(splitter, levels=["train", "test"]) + +Description +----------- + +Using ``.describe()`` on a ``Categorical(...)`` or a ``Series(Categorical(...))`` will show +different output. + + +As part of a `Dataframe` or as a `Series` a similar output as for a `Series` of type ``string`` is +shown. 
Calling ``Categorical.describe()`` will show the frequencies for each level, with NA for +unused levels. + +.. ipython:: python + + cat = pd.Categorical(["a","c","c",np.nan], levels=["b","a","c",np.nan] ) + df = pd.DataFrame({"cat":cat, "s":["a","c","c",np.nan]}) + df.describe() + cat.describe() + +Working with levels +------------------- + +`Categoricals` have a `levels` property, which list their possible values. If you don't +manually specify levels, they are inferred from the passed in values. `Series` of type +``category`` expose the same interface via their `cat` property. + +.. ipython:: python + + raw_cat = pd.Categorical(["a","b","c","a"]) + raw_cat.levels + raw_cat.ordered + # Series of type "category" also expose these interface via the .cat property: + s = pd.Series(raw_cat) + s.cat.levels + s.cat.ordered + +.. note:: + New `Categorical` are automatically ordered if the passed in values are sortable or a + `levels` argument is supplied. This is a difference to R's `factors`, which are unordered + unless explicitly told to be ordered (``ordered=TRUE``). + +It's also possible to pass in the levels in a specific order: + +.. ipython:: python + + raw_cat = pd.Categorical(["a","b","c","a"], levels=["c","b","a"]) + s = pd.Series(raw_cat) + s.cat.levels + s.cat.ordered + +.. note:: + + Passing in a `levels` argument implies ``ordered=True``. You can of course overwrite that by + passing in an explicit ``ordered=False``. + +Any value omitted in the levels argument will be replaced by `np.nan`: + +.. ipython:: python + + raw_cat = pd.Categorical(["a","b","c","a"], levels=["a","b"]) + s = pd.Series(raw_cat) + s.cat.levels + s + +Renaming levels is done by assigning new values to the ``Category.levels`` or +``Series.cat.levels`` property: + +.. ipython:: python + + s = pd.Series(pd.Categorical(["a","b","c","a"])) + s + s.cat.levels = ["Group %s" % g for g in s.cat.levels] + s + s.cat.levels = [1,2,3] + s + +.. 
note:: + + I contrast to R's `factor`, a `Categorical` can have levels of other types than string. + +Levels must be unique or a `ValueError` is raised: + +.. ipython:: python + + try: + s.cat.levels = [1,1,1] + except ValueError as e: + print("ValueError: " + str(e)) + +Appending levels can be done by assigning a levels list longer than the current levels: + +.. ipython:: python + + s.cat.levels = [1,2,3,4] + s.cat.levels + s + +.. note:: + Adding levels in other positions can be done with ``.reorder_levels(<levels_including_new>)``. + +Removing a level is also possible, but only the last level(s) can be removed by assigning a +shorter list than current levels. Values which are omitted are replaced by `np.nan`. + +.. ipython:: python + + s.levels = [1,2] + s + +.. note:: + + It's only possible to remove or add a level at the last position. If that's not where you want + to remove an old or add a new level, use ``Category.reorder_levels(new_order)`` or + ``Series.cat.reorder_levels(new_order)`` methods before or after. + +Removing unused levels can also be done: + +.. ipython:: python + + raw = pd.Categorical(["a","b","a"], levels=["a","b","c","d"]) + c = pd.Series(raw) + raw + raw.remove_unused_levels() + raw + c.cat.remove_unused_levels() + c + +.. note:: + + In contrast to R's `factor` function, passing a `Categorical` as the sole input to the + `Categorical` constructor will *not* remove unused levels but create a new `Categorical` + which is equal to the passed in one! + + +Ordered or not... +----------------- + +If a `Categoricals` is ordered (``cat.ordered == True``), then the order of the levels has a +meaning and certain operations are possible. If the categorical is unordered, a `TypeError` is +raised. + +.. 
ipython:: python + + s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=False)) + try: + s.sort() + except TypeError as e: + print("TypeError: " + str(e)) + s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=True)) + s.sort() + s + print(s.min(), s.max()) + +.. note:: + ``ordered=True`` is not necessary needed in the second case, as lists of strings are sortable + and so the resulting `Categorical` is ordered. + +Sorting will use the order defined by levels, not any lexical order present on the data type. +This is even true for strings and numeric data: + +.. ipython:: python + + s = pd.Series(pd.Categorical([1,2,3,1])) + s.cat.levels = [2,3,1] + s + s.sort() + s + print(s.min(), s.max()) + +Reordering the levels is possible via the ``Categorical.reorder_levels(new_levels)`` or +``Series.cat.reorder_levels(new_levels)`` methods. All old levels must be included in the new +levels. + +.. ipython:: python + + s2 = pd.Series(pd.Categorical([1,2,3,1])) + s2.cat.reorder_levels([2,3,1]) + s2 + s2.sort() + s2 + print(s2.min(), s2.max()) + + +.. note:: + Note the difference between assigning new level names and reordering the levels: the first + renames levels and therefore the individual values in the `Series`, but if the first + position was sorted last, the renamed value will still be sorted last. Reordering means that the + way values are sorted is different afterwards, but not that individual values in the + `Series` are changed. + +You can also add new levels with :func:`Categorical.reorder_levels`, as long as you include all +old levels: + +.. ipython:: python + + s3 = pd.Series(pd.Categorical(["a","b","d"])) + s3.cat.reorder_levels(["a","b","c",d"]) + s3 + + +Operations +---------- + +The following operations are possible with categorical data: + +Getting the minimum and maximum, if the categorical is ordered: + +.. ipython:: python + + s = pd.Series(pd.Categorical(["a","b","c","a"], levels=["c","a","b","d"])) + print(s.min(), s.max()) + +.. 
note:: + + If the `Categorical` is not ordered, ``Categorical.min()`` and ``Categorical.max()`` and the + corresponding operations on `Series` will raise `TypeError`. + +The mode: + +.. ipython:: python + + raw_cat = pd.Categorical(["a","b","c","c"], levels=["c","a","b","d"]) + s = pd.Series(raw_cat) + raw_cat.mode() + s.mode() + +.. note:: + + Numeric operations like ``+``, ``-``, ``*``, ``/`` and operations based on them (e.g. + ``.median()``, which would need to compute the mean between two values if the length of an + array is even) do not work and raise a `TypeError`. + +`Series` methods like `Series.value_counts()` will use all levels, even if some levels are not +present in the data: + +.. ipython:: python + + s = pd.Series(pd.Categorical(["a","b","c","c"], levels=["c","a","b","d"])) + s.value_counts() + +Groupby will also show "unused" levels: + +.. ipython:: python + + cats = pd.Categorical(["a","b","b","b","c","c","c"], levels=["a","b","c","d"]) + df = pd.DataFrame({"cats":cats,"values":[1,2,2,2,3,4,5]}) + df.groupby("cats").mean() + + cats2 = pd.Categorical(["a","a","b","b"], levels=["a","b","c"]) + df2 = pd.DataFrame({"cats":cats2,"B":["c","d","c","d"], "values":[1,2,3,4]}) + df2.groupby(["cats","B"]).mean() + + +Pivot tables: + +.. ipython:: python + + raw_cat = pd.Categorical(["a","a","b","b"], levels=["a","b","c"]) + df = pd.DataFrame({"A":raw_cat,"B":["c","d","c","d"], "values":[1,2,3,4]}) + pd.pivot_table(df, values='values', index=['A', 'B']) + +Data munging +------------ + +The optimized pandas data access methods ``.loc``, ``.iloc``, ``.ix`` ``.at``, and ``.iat``, +work as normal, the only difference is the return type (for getting) and +that only values already in the levels can be assigned. + +Getting +~~~~~~~ + +If the slicing operation returns either a `DataFrame` or a a column of type `Series`, +the ``category`` dtype is preserved. + +.. 
ipython:: python + + cats = pd.Categorical(["a","b","b","b","c","c","c"], levels=["a","b","c"]) + idx = pd.Index(["h","i","j","k","l","m","n",]) + values= [1,2,2,2,3,4,5] + df = pd.DataFrame({"cats":cats,"values":values}, index=idx) + df.iloc[2:4,:] + df.iloc[2:4,:].dtypes + df.loc["h":"j","cats"] + df.ix["h":"j",0:1] + df[df["cats"] == "b"] + +An example where the `Categorical` is not preserved is if you take one single row: the +resulting `Series` is of dtype ``object``: + +.. ipython:: python + + # get the complete "h" row as a Series + df.loc["h", :] + +Returning a single item from a `Categorical` will also return the value, not a `Categorical` +of length "1". + +.. ipython:: python + + df.iat[0,0] + df["cats"].cat.levels = ["x","y","z"] + df.at["h","cats"] # returns a string + +.. note:: + This is a difference to R's `factor` function, where ``factor(c(1,2,3))[1]`` + returns a single value `factor`. + +To get a single value `Series` of type ``category`` pass in a single value list: + +.. ipython:: python + + df.loc[["h"],"cats"] + +Setting +~~~~~~~ + +Setting values in a categorical column (or `Series`) works as long as the value is included in the +`levels`: + +.. ipython:: python + + cats = pd.Categorical(["a","a","a","a","a","a","a"], levels=["a","b"]) + idx = pd.Index(["h","i","j","k","l","m","n"]) + values = [1,1,1,1,1,1,1] + df = pd.DataFrame({"cats":cats,"values":values}, index=idx) + + df.iloc[2:4,:] = [["b",2],["b",2]] + df + try: + df.iloc[2:4,:] = [["c",3],["c",3]] + except ValueError as e: + print("ValueError: " + str(e)) + +Setting values by assigning a `Categorical` will also check that the `levels` match: + +.. ipython:: python + + df.loc["j":"k","cats"] = pd.Categorical(["a","a"], levels=["a","b"]) + df + try: + df.loc["j":"k","cats"] = pd.Categorical(["b","b"], levels=["a","b","c"]) + except ValueError as e: + print("ValueError: " + str(e)) + +Assigning a `Categorical` to parts of a column of other types will use the values: + +.. 
ipython:: python + + df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]}) + df.loc[1:2,"a"] = pd.Categorical(["b","b"], levels=["a","b"]) + df.loc[2:3,"b"] = pd.Categorical(["b","b"], levels=["a","b"]) + df + df.dtypes + + +Merging +~~~~~~~ + +You can concat two `DataFrames` containing categorical data together, +but the levels of these `Categoricals` need to be the same: + +.. ipython:: python + + cat = pd.Categorical(["a","b"], levels=["a","b"]) + vals = [1,2] + df = pd.DataFrame({"cats":cat, "vals":vals}) + res = pd.concat([df,df]) + res + res.dtypes + + df_different = df.copy() + df_different["cats"].cat.levels = ["a","b","c"] + + try: + pd.concat([df,df]) + except ValueError as e: + print("ValueError: " + str(e)) + +The same applies to ``df.append(df)``. + +Getting Data In/Out +------------------- + +Writing data (`Series`, `Frames`) to a HDF store and reading it in entirety works. Querying the hdf +store does not yet work. + +.. ipython:: python + :suppress: + + hdf_file = "test.h5" + +.. ipython:: python + + hdf_file = "test.h5" + s = pd.Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c'], levels=['a','b','c','d'])) + df = pd.DataFrame({"s":s, "vals":[1,2,3,4,5,6]}) + df.to_hdf(hdf_file, "frame") + df2 = pd.read_hdf(hdf_file, "frame") + df2 + try: + pd.read_hdf(hdf_file, "frame", where = ['index>2']) + except TypeError as e: + print("TypeError: " + str(e)) + +.. ipython:: python + :suppress: + + try: + os.remove(hdf_file) + except: + pass + + +Writing to a csv file will convert the data, effectively removing any information about the +`Categorical` (levels and ordering). So if you read back the csv file you have to convert the +relevant columns back to `category` and assign the right levels and level ordering. + +.. ipython:: python + :suppress: + + from pandas.compat import StringIO + +.. 
ipython:: python + + s = pd.Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'd'])) + # rename the levels + s.cat.levels = ["very good", "good", "bad"] + # reorder the levels and add missing levels + s.cat.reorder_levels(["very bad", "bad", "medium", "good", "very good"]) + df = pd.DataFrame({"cats":s, "vals":[1,2,3,4,5,6]}) + csv = StringIO() + df.to_csv(csv) + df2 = pd.read_csv(StringIO(csv.getvalue())) + df2.dtypes + df2["cats"] + # Redo the category + df2["cats"] = df2["cats"].astype("category") + df2["cats"].cat.reorder_levels(["very bad", "bad", "medium", "good", "very good"]) + df2.dtypes + df2["cats"] + + +Missing Data +------------ + +pandas primarily uses the value `np.nan` to represent missing data. It is by +default not included in computations. See the :ref:`Missing Data section +<missing_data>` + +There are two ways a `np.nan` can be represented in `Categorical`: either the value is not +available or `np.nan` is a valid level. + +.. ipython:: python + + s = pd.Series(pd.Categorical(["a","b",np.nan,"a"])) + s + # only two levels + s.cat.levels + s2 = pd.Series(pd.Categorical(["a","b","c","a"])) + s2.cat.levels = [1,2,np.nan] + s2 + # three levels, np.nan included + # Note: as int arrays can't hold NaN the levels were converted to float + s2.cat.levels + +Gotchas +------- + +`Categorical` is not a `numpy` array +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Currently, `Categorical` and the corresponding ``category`` `Series` is implemented as a python +object and not as a low level `numpy` array dtype. This leads to some problems. + +`numpy` itself doesn't know about the new `dtype`: + +.. 
ipython:: python + + try: + np.dtype("category") + except TypeError as e: + print("TypeError: " + str(e)) + + dtype = pd.Categorical(["a"]).dtype + try: + np.dtype(dtype) + except TypeError as e: + print("TypeError: " + str(e)) + + # dtype comparisons work: + dtype == np.str_ + np.str_ == dtype + +Using `numpy` functions on a `Series` of type ``category`` should not work as `Categoricals` +are not numeric data (even in the case that ``.levels`` is numeric). + +.. ipython:: python + + s = pd.Series(pd.Categorical([1,2,3,4])) + try: + np.sum(s) + #same with np.log(s),.. + except TypeError as e: + print("TypeError: " + str(e)) + +.. note:: + If such a function works, please file a bug at https://github.com/pydata/pandas! + + +Side effects +~~~~~~~~~~~~ + +Constructing a `Series` from a `Categorical` will not copy the input `Categorical`. This +means that changes to the `Series` will in most cases change the original `Categorical`: + +.. ipython:: python + + cat = pd.Categorical([1,2,3,10], levels=[1,2,3,4,10]) + s = pd.Series(cat, name="cat") + cat + s.iloc[0:2] = 10 + cat + df = pd.DataFrame(s) + df["cat"].cat.levels = [1,2,3,4,5] + cat + +Use ``copy=True`` to prevent such a behaviour: + +.. ipython:: python + + cat = pd.Categorical([1,2,3,10], levels=[1,2,3,4,10]) + s = pd.Series(cat, name="cat", copy=True) + cat + s.iloc[0:2] = 10 + cat + +.. note:: + This also happens in some cases when you supply a `numpy` array instea dof a `Categorical`: + using an int array (e.g. ``np.array([1,2,3,4])``) will exhibit the same behaviour, but using + a string array (e.g. ``np.array(["a","b","c","a"])``) will not. + + +Danger of confusion +~~~~~~~~~~~~~~~~~~~ + +Both `Series` and `Categorical` have a method ``.reorder_levels()`` but for different things. For +Series of type ``category`` this means that there is some danger to confuse both methods. + +.. 
ipython:: python + + s = pd.Series(pd.Categorical([1,2,3,4])) + print(s.cat.levels) + # wrong and raises an error: + try: + s.reorder_levels([4,3,2,1]) + except Exception as e: + print("Exception: " + str(e)) + # right + s.cat.reorder_levels([4,3,2,1]) + print(s.cat.levels) + +See also the API documentation for :func:`pandas.Series.reorder_levels` and +:func:`pandas.Categorical.reorder_levels` + +Old style constructor usage +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +I earlier versions, a `Categorical` could be constructed by passing in precomputed `codes` +(called then `labels`) instead of values with levels. The `codes` are interpreted as pointers +to the levels with `-1` as `NaN`. This usage is now deprecated and not available unless +``compat=True`` is passed to the constructor of `Categorical`. + +.. ipython:: python + :okwarning: + + # This raises a FutureWarning: + cat = pd.Categorical([1,2], levels=[1,2,3], compat=True) + cat.get_values() + +In the default case (``compat=False``) the first argument is interpreted as values. + +.. ipython:: python + + cat = pd.Categorical([1,2], levels=[1,2,3], compat=False) + cat.get_values() + +.. warning:: + Using Categorical with precomputed codes and levels is deprecated and a `FutureWarning` + is raised. Please change your code to use the :func:`~pandas.Categorical.from_codes` + constructor instead of adding ``compat=False``. + +No categorical index +~~~~~~~~~~~~~~~~~~~~ + +There is currently no index of type ``category``, so setting the index to a `Categorical` will +convert the `Categorical` to a normal `numpy` array first and therefore remove any custom +ordering of the levels: + +.. ipython:: python + + cats = pd.Categorical([1,2,3,4], levels=[4,2,3,1]) + strings = ["a","b","c","d"] + values = [4,2,3,1] + df = pd.DataFrame({"strings":strings, "values":values}, index=cats) + df.index + # This should sort by levels but does not as there is no CategoricalIndex! + df.sort_index() + +.. 
note:: + This could change if a `CategoricalIndex` is implemented (see + https://github.com/pydata/pandas/issues/7629) + +dtype in apply +~~~~~~~~~~~~~~ + +Pandas currently does not preserve the dtype in apply functions: If you apply along rows you get +a `Series` of ``object`` `dtype` (same as getting a row -> getting one element will return a +basic type) and applying along columns will also convert to object. + +.. ipython:: python + + df = pd.DataFrame({"a":[1,2,3,4], "b":["a","b","c","d"], "cats":pd.Categorical([1,2,3,2])}) + df.apply(lambda row: type(row["cats"]), axis=1) + df.apply(lambda col: col.dtype, axis=0) + + +Future compatibility +~~~~~~~~~~~~~~~~~~~~ + +As `Categorical` is not a native `numpy` dtype, the implementation details of +`Series.cat` can change if such a `numpy` dtype is implemented. \ No newline at end of file diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index f5352bc1031bc..4e1d2b471d1c0 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -130,6 +130,7 @@ See the package overview for more detail about what's in the library. merging reshaping timeseries + categorical visualization rplot io diff --git a/doc/source/v0.15.0.txt b/doc/source/v0.15.0.txt index d776848de40d0..f305d088e996f 100644 --- a/doc/source/v0.15.0.txt +++ b/doc/source/v0.15.0.txt @@ -9,7 +9,7 @@ users upgrade to this version. - Highlights include: - - Add highlites here + - The ``Categorical`` type was integrated as a first-class pandas type, see here: :ref:`Categorical Changes <whatsnew_0150.cat>` - :ref:`Other Enhancements <whatsnew_0150.enhancements>` @@ -30,13 +30,45 @@ users upgrade to this version. API changes ~~~~~~~~~~~ +.. _whatsnew_0150.cat: +Categoricals in Series/DataFrame +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:class:`~pandas.Categorical` can now be included in `Series` and `DataFrames` and gained new +methods to manipulate. Thanks to Jan Schultz for much of this API/implementation. 
(:issue:`3943`, :issue:`5313`, :issue:`5314`, :issue:`7444`). +.. ipython:: python + df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) + # convert the raw grades to a categorical + df["grade"] = pd.Categorical(df["raw_grade"]) + # Alternative: df["grade"] = df["raw_grade"].astype("category") + df["grade"] + # Rename the levels + df["grade"].cat.levels = ["very good", "good", "very bad"] + + # Reorder the levels and simultaneously add the missing levels + df["grade"].cat.reorder_levels(["very bad", "bad", "medium", "good", "very good"]) + df["grade"] + df.sort("grade") + df.groupby("grade").size() + +See the :ref:`Categorical introduction<_categorical>` and the :ref:`API documentation<api.categorical>`. + +- `pandas.core.group_agg` and `pandas.core.factor_agg` were removed. As an alternative, construct + a dataframe and use `df.groupby(<group>).agg(<func>)`. + +- Supplying "codes/labels and levels" to the `pandas.Categorical` constructor is deprecated and does + not work without supplying ``compat=True``. The default mode now uses "values and levels". + Please change your code to use the ``Categorical.from_codes(...)`` constructor. + +- The `pandas.Categorical.labels` attribute was renamed to `pandas.Categorical.codes` and is read + only. If you want to manipulate the `Categorical`, please use one of the + :ref:`API methods on Categoricals<api.categorical>`. 
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c45256c482e8f..cb6f200b259db 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -93,6 +93,8 @@ def _unique_generic(values, table_type, type_caster): return type_caster(uniques) + + def factorize(values, sort=False, order=None, na_sentinel=-1): """ Encode input values as an enumerated type or categorical variable @@ -160,7 +162,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): if is_datetime: uniques = uniques.astype('M8[ns]') if isinstance(values, Index): - uniques = values._simple_new(uniques, None, freq=getattr(values, 'freq', None), + uniques = values._simple_new(uniques, None, freq=getattr(values, 'freq', None), tz=getattr(values, 'tz', None)) elif isinstance(values, Series): uniques = Index(uniques) @@ -196,13 +198,18 @@ def value_counts(values, sort=True, ascending=False, normalize=False, from pandas.tools.tile import cut values = Series(values).values + is_category = com.is_categorical_dtype(values.dtype) if bins is not None: try: cat, bins = cut(values, bins, retbins=True) except TypeError: raise TypeError("bins argument only works with numeric data.") - values = cat.labels + values = cat.codes + elif is_category: + bins = values.levels + cat = values + values = cat.codes dtype = values.dtype if com.is_integer_dtype(dtype): @@ -232,7 +239,10 @@ def value_counts(values, sort=True, ascending=False, normalize=False, if bins is not None: # TODO: This next line should be more efficient result = result.reindex(np.arange(len(cat.levels)), fill_value=0) - result.index = bins[:-1] + if not is_category: + result.index = bins[:-1] + else: + result.index = cat.levels if sort: result.sort() @@ -258,7 +268,7 @@ def mode(values): constructor = Series dtype = values.dtype - if com.is_integer_dtype(values.dtype): + if com.is_integer_dtype(values): values = com._ensure_int64(values) result = constructor(sorted(htable.mode_int64(values)), dtype=dtype) @@ -267,6 
+277,8 @@ def mode(values): values = values.view(np.int64) result = constructor(sorted(htable.mode_int64(values)), dtype=dtype) + elif com.is_categorical_dtype(values): + result = constructor(values.mode()) else: mask = com.isnull(values) values = com._ensure_object(values) diff --git a/pandas/core/base.py b/pandas/core/base.py index ce078eb91735d..81e13687441de 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -203,7 +203,6 @@ def __unicode__(self): quote_strings=True) return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype) - class IndexOpsMixin(object): """ common ops mixin to support a unified inteface / docs for Series / Index """ @@ -287,7 +286,11 @@ def unique(self): uniques : ndarray """ from pandas.core.nanops import unique1d - return unique1d(self.values) + values = self.values + if hasattr(values,'unique'): + return values.unique() + + return unique1d(values) def nunique(self, dropna=True): """ diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index dfadd34e2d205..d049a6d64aac3 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1,13 +1,16 @@ # pylint: disable=E1101,W0232 import numpy as np +from warnings import warn from pandas import compat from pandas.compat import u -from pandas.core.algorithms import factorize +from pandas.core.algorithms import factorize, unique from pandas.core.base import PandasObject -from pandas.core.index import Index +from pandas.core.index import Index, _ensure_index +from pandas.core.indexing import _is_null_slice +from pandas.tseries.period import PeriodIndex import pandas.core.common as com from pandas.util.terminal import get_terminal_size from pandas.core.config import get_option @@ -23,7 +26,7 @@ def f(self, other): else: if other in self.levels: i = self.levels.get_loc(other) - return getattr(self.labels, op)(i) + return getattr(self._codes, op)(i) else: return np.repeat(False, len(self)) @@ -31,64 +34,246 @@ def f(self, other): return f +def 
_is_categorical(array): + """ return if we are a categorical possibility """ + return isinstance(array, Categorical) or isinstance(array.dtype, com.CategoricalDtype) +def _maybe_to_categorical(array): + """ coerce to a categorical if a series is given """ + if isinstance(array, com.ABCSeries): + return array.values + return array + + +def _get_codes_for_values(values, levels): + from pandas.core.algorithms import _get_data_algo, _hashtables + if values.dtype != levels.dtype: + values = com._ensure_object(values) + levels = com._ensure_object(levels) + (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) + t = hash_klass(len(levels)) + t.map_locations(levels) + return com._ensure_platform_int(t.lookup(values)) + +_codes_doc = """The level codes of this categorical. + +Level codes are an array if integer which are the positions of the real +values in the levels array. + +There is not setter, used the other categorical methods and the item setter on +Categorical to change values in the categorical. +""" + +_levels_doc = """The levels of this categorical. + +Setting assigns new values to each level (effectively a rename of +each individual level). + +The assigned value has to be a list-like object. If the number of +level-items is less than number of level-items in the current level, +all level-items at a higher position are set to NaN. If the number of +level-items is more that the current number of level-items, new +(unused) levels are added at the end. + +To add level-items in between, use `reorder_levels`. + +Raises +------ +ValueError + If the new levels do not validate as levels + +See also +-------- +Categorical.reorder_levels +Categorical.remove_unused_levels +""" class Categorical(PandasObject): """ Represents a categorical variable in classic R / S-plus fashion + `Categoricals` can only take on only a limited, and usually fixed, number + of possible values (`levels`). 
In contrast to statistical categorical + variables, a `Categorical` might have an order, but numerical operations + (additions, divisions, ...) are not possible. + + All values of the `Categorical` are either in `levels` or `np.nan`. + Assigning values outside of `levels` will raise a `ValueError`. Order is + defined by the order of the `levels`, not lexical order of the values. + Parameters ---------- - labels : ndarray of integers - If levels is given, the integer at label `i` is the index of the level - for that label. I.e., the level at labels[i] is levels[labels[i]]. - Otherwise, if levels is None, these are just the labels and the levels - are assumed to be the unique labels. See from_array. + values : list-like + The values of the categorical. If levels are given, values not in levels will + be replaced with NaN. levels : Index-like (unique), optional - The unique levels for each label. If not given, the levels are assumed - to be the unique values of labels. + The unique levels for this categorical. If not given, the levels are assumed + to be the unique values of values. + ordered : boolean, optional + Whether or not this categorical is treated as a ordered categorical. If not given, + the resulting categorical will be ordered if values can be sorted. name : str, optional - Name for the Categorical variable. If levels is None, will attempt - to infer from labels. + Name for the Categorical variable. If name is None, will attempt + to infer from values. 
+ compat : boolean, default=False + Whether to treat values as codes to the levels (old API, deprecated) + + Attributes + ---------- + levels : ndarray + The levels of this categorical + codes : Index + The codes (integer positions, which point to the levels) of this categorical, read only + ordered : boolean + Whether or not this Categorical is ordered + name : string + The name of this Categorical + + Raises + ------ + ValueError + If the levels do not validate + TypeError + If an explicit ``ordered=True`` is given but no `levels` and the `values` are not sortable - Returns - ------- - **Attributes** - * labels : ndarray - * levels : ndarray Examples -------- >>> from pandas import Categorical - >>> Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) - Categorical: - array([1, 2, 3, 1, 2, 3]) - Levels (3): Int64Index([1, 2, 3]) - - >>> Categorical([0,1,2,0,1,2], ['a', 'b', 'c']) - Categorical: - array(['a', 'b', 'c', 'a', 'b', 'c'], dtype=object) - Levels (3): Index(['a', 'b', 'c'], dtype=object) + >>> Categorical([1, 2, 3, 1, 2, 3]) + 1 + 2 + 3 + 1 + 2 + 3 + Levels (3): Int64Index([1, 2, 3], dtype=int64), ordered >>> Categorical(['a', 'b', 'c', 'a', 'b', 'c']) - Categorical: - array(['a', 'b', 'c', 'a', 'b', 'c'], dtype=object) - Levels (3): Index(['a', 'b', 'c'], dtype=object) + a + b + c + a + b + c + Levels (3): Index(['a', 'b', 'c'], dtype=object), ordered + + >>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a']) + >>> a.min() + 'c' + """ + ndim = 1 + """Number of dimensions (always 1!)""" + + dtype = com.CategoricalDtype() + """The dtype (always "category")""" + + ordered = None + """Whether or not this Categorical is ordered. + + Only ordered `Categoricals` can be sorted (according to the order + of the levels) and have a min and max value. 
+ + See also + -------- + Categorical.sort + Categorical.order + Categorical.min + Categorical.max """ - def __init__(self, labels, levels=None, name=None): + def __init__(self, values, levels=None, ordered=None, name=None, fastpath=False, compat=False): + + if fastpath: + # fast path + self._codes = values + self.name = name + self.levels = levels + self.ordered = ordered + return + + if name is None: + name = getattr(values, 'name', None) + + # sanitize input + if com.is_categorical_dtype(values): + + # we are either a Series or a Categorical + cat = values + if isinstance(values, com.ABCSeries): + cat = values.values + if levels is None: + levels = cat.levels + if ordered is None: + ordered = cat.ordered + values = values.__array__() + + elif isinstance(values, Index): + pass + + else: + + # on numpy < 1.6 datetimelike get inferred to all i8 by _sanitize_array + # which is fine, but since factorize does this correctly no need here + # this is an issue because _sanitize_array also coerces np.nan to a string + # under certain versions of numpy as well + inferred = com._possibly_infer_to_datetimelike(values) + if not isinstance(inferred, np.ndarray): + from pandas.core.series import _sanitize_array + values = _sanitize_array(values, None) + if levels is None: - if name is None: - name = getattr(labels, 'name', None) try: - labels, levels = factorize(labels, sort=True) + codes, levels = factorize(values, sort=True) + # If the underlying data structure was sortable, and the user doesn't want to + # "forget" this order, the categorical also is sorted/ordered + if ordered is None: + ordered = True except TypeError: - labels, levels = factorize(labels, sort=False) + codes, levels = factorize(values, sort=False) + if ordered: + # raise, as we don't have a sortable data structure and so the usershould + # give us one by specifying levels + raise TypeError("'values' is not ordered, please explicitly specify the level " + "order by passing in a level argument.") + else: + # 
there are two ways if levels are present + # the old one, where each value is a int pointer to the levels array + # the new one, where each value is also in the level array (or np.nan) + + # make sure that we always have the same type here, no matter what we get passed in + levels = self._validate_levels(levels) + + # There can be two ways: the old which passed in codes and levels directly + # and values have to be inferred and the new one, which passes in values and levels + # and _codes have to be inferred. + + # min and max can be higher and lower if not all levels are in the values + if compat and (com.is_integer_dtype(values) and + (np.min(values) >= -1) and (np.max(values) < len(levels))): + warn("Using 'values' as codes is deprecated.\n" + "'Categorical(... , compat=True)' is only there for historical reasons and " + "should not be used in new code!\n" + "See https://github.com/pydata/pandas/pull/7217", FutureWarning) + codes = values + else: + codes = _get_codes_for_values(values, levels) - self.labels = labels + # if we got levels, we can assume that the order is intended + # if ordered is unspecified + if ordered is None: + ordered = True + + self.ordered = False if ordered is None else ordered + self._codes = codes self.levels = levels self.name = name + def copy(self): + """ Copy constructor. """ + return Categorical(values=self._codes.copy(),levels=self.levels, + name=self.name, ordered=self.ordered, fastpath=True) + @classmethod def from_array(cls, data): """ @@ -102,20 +287,131 @@ def from_array(cls, data): """ return Categorical(data) - _levels = None + @classmethod + def from_codes(cls, codes, levels, ordered=True, name=None): + """ + Make a Categorical type from codes and levels arrays. - def _set_levels(self, levels): - from pandas.core.index import _ensure_index + This constructor is useful if you already have codes and levels and so do not need the + (computation intensive) factorization step, which is usually done on the constructor. 
+ + If your data does not follow this convention, please use the normal constructor. + + Parameters + ---------- + codes : array-like, integers + An integer array, where each integer points to a level in levels or -1 for NaN + levels : index-like + The levels for the categorical. Items need to be unique. + ordered : boolean, optional + Whether or not this categorical is treated as a ordered categorical. If not given, + the resulting categorical will be ordered. + name : str, optional + Name for the Categorical variable. + """ + try: + codes = np.asarray(codes, np.int64) + except: + raise ValueError("codes need to be convertible to an arrays of integers") + + levels = cls._validate_levels(levels) + + if codes.max() >= len(levels) or codes.min() < -1: + raise ValueError("codes need to be between -1 and len(levels)-1") + + + return Categorical(codes, levels=levels, ordered=ordered, name=name, fastpath=True) + + _codes = None + + def _get_codes(self): + """ Get the level codes. + + Returns + ------- + codes : integer array view + A non writable view of the `codes` array. 
+ """ + v = self._codes.view() + v.flags.writeable = False + return v + + def _set_codes(self, codes): + """ + Not settable by the user directly + """ + raise ValueError("cannot set Categorical codes directly") + codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) + + _levels = None + + @classmethod + def _validate_levels(cls, levels): + """" Validates that we have good levels """ levels = _ensure_index(levels) if not levels.is_unique: raise ValueError('Categorical levels must be unique') + return levels + + def _set_levels(self, levels): + """ Sets new levels """ + levels = self._validate_levels(levels) + + if not self._levels is None and len(levels) < len(self._levels): + # remove all _codes which are larger + self._codes[self._codes >= len(levels)] = -1 self._levels = levels def _get_levels(self): + """ Gets the levels """ + # levels is an Index, which is immutable -> no need to copy return self._levels - levels = property(fget=_get_levels, fset=_set_levels) + levels = property(fget=_get_levels, fset=_set_levels, doc=_levels_doc) + + def reorder_levels(self, new_levels, ordered=None): + """ Reorders levels as specified in new_levels. + + `new_levels` must include all old levels but can also include new level items. In + contrast to assigning to `levels`, these new level items can be in arbitrary positions. + + The level reordering is done inplace. + + Raises + ------ + ValueError + If the new levels do not contain all old level items + + Parameters + ---------- + new_levels : Index-like + The levels in new order. must be of same length as the old levels + ordered : boolean, optional + Whether or not the categorical is treated as a ordered categorical. If not given, + do not change the ordered information. 
+ """ + new_levels = self._validate_levels(new_levels) + + if len(new_levels) < len(self._levels) or len(self._levels-new_levels): + raise ValueError('Reordered levels must include all original levels') + values = self.__array__() + self._codes = _get_codes_for_values(values, new_levels) + self._levels = new_levels + if not ordered is None: + self.ordered = ordered + + def remove_unused_levels(self): + """ Removes levels which are not used. + + The level removal is done inplace. + """ + _used = sorted(np.unique(self._codes)) + new_levels = self.levels.take(_used) + new_levels = _ensure_index(new_levels) + self._codes = _get_codes_for_values(self.__array__(), new_levels) + self._levels = new_levels + __eq__ = _cat_compare_op('__eq__') __ne__ = _cat_compare_op('__ne__') @@ -124,11 +420,240 @@ def _get_levels(self): __le__ = _cat_compare_op('__le__') __ge__ = _cat_compare_op('__ge__') + # for Series/ndarray like compat + @property + def shape(self): + """ Shape of the Categorical. + + For internal compatibility with numpy arrays. + + Returns + ------- + shape : tuple + """ + + return tuple([len(self._codes)]) + def __array__(self, dtype=None): - return com.take_1d(self.levels.values, self.labels) + """ The numpy array interface. + + Returns + ------- + values : numpy array + A numpy array of the same dtype as categorical.levels.dtype + """ + return com.take_1d(self.levels.values, self._codes) + + @property + def T(self): + return self + + def get_values(self): + """ Return the values. + + For internal compatibility with pandas formatting. + + Returns + ------- + values : numpy array + A numpy array of the same dtype as categorical.levels.dtype or dtype string if periods + """ + + # if we are a period index, return a string repr + if isinstance(self.levels, PeriodIndex): + return com.take_1d(np.array(self.levels.to_native_types(), dtype=object), + self._codes) + + return np.array(self) + + def argsort(self, ascending=True, **kwargs): + """ Implements ndarray.argsort. 
+ + For internal compatibility with numpy arrays. + + Only ordered Categoricals can be argsorted! + + Returns + ------- + argsorted : numpy array + """ + if not self.ordered: + raise TypeError("Categorical not ordered") + result = np.argsort(self._codes.copy(), **kwargs) + if not ascending: + result = result[::-1] + return result + + def order(self, inplace=False, ascending=True, **kwargs): + """ Sorts the Category by level value returning a new Categorical by default. + + Only ordered Categoricals can be sorted! + + Categorical.sort is the equivalent but sorts the Categorical inplace. + + Parameters + ---------- + ascending : boolean, default True + Sort ascending. Passing False sorts descending + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + inplace : boolean, default False + Do operation in place. + + Returns + ------- + y : Category or None + + See Also + -------- + Category.sort + """ + if not self.ordered: + raise TypeError("Categorical not ordered") + _sorted = np.sort(self._codes.copy()) + if not ascending: + _sorted = _sorted[::-1] + if inplace: + self._codes = _sorted + return + else: + return Categorical(values=_sorted,levels=self.levels, ordered=self.ordered, + name=self.name, fastpath=True) + + + def sort(self, inplace=True, ascending=True, **kwargs): + """ Sorts the Category inplace by level value. + + Only ordered Categoricals can be sorted! + + Catgorical.order is the equivalent but returns a new Categorical. + + Parameters + ---------- + ascending : boolean, default True + Sort ascending. Passing False sorts descending + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + inplace : boolean, default False + Do operation in place. 
+ + Returns + ------- + y : Category or None + + See Also + -------- + Category.order + """ + return self.order(inplace=inplace, ascending=ascending, **kwargs) + + def ravel(self, order='C'): + """ Return a flattened (numpy) array. + + For internal compatibility with numpy arrays. + + Returns + ------- + raveled : numpy array + """ + return np.array(self) + + def view(self): + """Return a view of myself. + + For internal compatibility with numpy arrays. + + Returns + ------- + view : Categorical + Returns `self`! + """ + return self + + def to_dense(self): + """ Return my 'dense' repr """ + return np.asarray(self) + + def fillna(self, fill_value=None, method=None, limit=None, **kwargs): + """ Fill NA/NaN values using the specified method. + + Parameters + ---------- + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + value : scalar + Value to use to fill holes (e.g. 0) + limit : int, default None + Maximum size gap to forward or backward fill (not implemented yet!) 
+ + Returns + ------- + filled : Categorical with NA/NaN filled + """ + + if fill_value is None: + fill_value = np.nan + if limit is not None: + raise NotImplementedError + + values = self._codes + + # pad / bfill + if method is not None: + + values = self.to_dense().reshape(-1,len(self)) + values = com.interpolate_2d( + values, method, 0, None, fill_value).astype(self.levels.dtype)[0] + values = _get_codes_for_values(values, self.levels) + + else: + + if not com.isnull(fill_value) and fill_value not in self.levels: + raise ValueError("fill value must be in levels") + + mask = self._codes==-1 + if mask.any(): + values = self._codes.copy() + values[mask] = self.levels.get_loc(fill_value) + + return Categorical(values, levels=self.levels, ordered=self.ordered, + name=self.name, fastpath=True) + + def take_nd(self, indexer, allow_fill=True, fill_value=None): + """ Take the values by the indexer, fill with the fill_value. """ + if allow_fill and fill_value is None: + fill_value = np.nan + + values = com.take_1d(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value) + result = Categorical(values=values, levels=self.levels, ordered=self.ordered, + name=self.name, fastpath=True) + return result + + take = take_nd + + def _slice(self, slicer): + """ Return a slice of myself. """ + + # only allow 1 dimensional slicing, but can + # in a 2-d case be passd (slice(None),....) 
+ if isinstance(slicer, tuple) and len(slicer) == 2: + if not _is_null_slice(slicer[0]): + raise AssertionError("invalid slicing for a 1-ndim categorical") + slicer = slicer[1] + + _codes = self._codes[slicer] + return Categorical(values=_codes,levels=self.levels, ordered=self.ordered, + name=self.name, fastpath=True) def __len__(self): - return len(self.labels) + return len(self._codes) + + def __iter__(self): + return iter(np.array(self)) def _tidy_repr(self, max_vals=20): num = max_vals // 2 @@ -138,23 +663,52 @@ def _tidy_repr(self, max_vals=20): footer=False) result = '%s\n...\n%s' % (head, tail) - # TODO: tidy_repr for footer since there may be a ton of levels? result = '%s\n%s' % (result, self._repr_footer()) return compat.text_type(result) + def _repr_level_info(self): + """ Returns a string representation of the footer.""" + + max_levels = (10 if get_option("display.max_levels") == 0 + else get_option("display.max_levels")) + level_strs = fmt.format_array(self.levels.get_values(), None) + if len(level_strs) > max_levels: + num = max_levels // 2 + head = level_strs[:num] + tail = level_strs[-(max_levels - num):] + level_strs = head + ["..."] + tail + # Strip all leading spaces, which format_array adds for columns... 
+ level_strs = [x.strip() for x in level_strs] + levheader = "Levels (%d, %s): " % (len(self.levels), + self.levels.dtype) + width, height = get_terminal_size() + max_width = (width if get_option("display.width") == 0 + else get_option("display.width")) + if com.in_ipython_frontend(): + # 0 = no breaks + max_width = 0 + levstring = "" + start = True + cur_col_len = len(levheader) + sep_len, sep = (3, " < ") if self.ordered else (2, ", ") + for val in level_strs: + if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: + levstring += "\n" + (" "* len(levheader)) + cur_col_len = len(levheader) + if not start: + levstring += sep + cur_col_len += len(val) + levstring += val + start = False + # replace to simple save space by + return levheader + "["+levstring.replace(" < ... < ", " ... ")+"]" + def _repr_footer(self): - levheader = 'Levels (%d): ' % len(self.levels) - # TODO: should max_line_width respect a setting? - levstring = np.array_repr(self.levels, max_line_width=60) - indent = ' ' * (levstring.find('[') + len(levheader) + 1) - lines = levstring.split('\n') - levstring = '\n'.join([lines[0]] + - [indent + x.lstrip() for x in lines[1:]]) namestr = "Name: %s, " % self.name if self.name is not None else "" - return u('%s\n%sLength: %d' % (levheader + levstring, namestr, - len(self))) + return u('%sLength: %d\n%s') % (namestr, + len(self), self._repr_level_info()) def _get_repr(self, name=False, length=True, na_rep='NaN', footer=True): formatter = fmt.CategoricalFormatter(self, name=name, @@ -164,39 +718,185 @@ def _get_repr(self, name=False, length=True, na_rep='NaN', footer=True): return compat.text_type(result) def __unicode__(self): + """ Unicode representation. 
""" width, height = get_terminal_size() max_rows = (height if get_option("display.max_rows") == 0 else get_option("display.max_rows")) - if len(self.labels) > (max_rows or 1000): + + if len(self._codes) > (max_rows or 1000): result = self._tidy_repr(min(30, max_rows) - 4) - elif len(self.labels) > 0: + elif len(self._codes) > 0: result = self._get_repr(length=len(self) > 50, name=True) else: result = 'Categorical([], %s' % self._get_repr(name=True, length=False, footer=True, - ) + ).replace("\n",", ") return result def __getitem__(self, key): + """ Return an item. """ if isinstance(key, (int, np.integer)): - i = self.labels[key] + i = self._codes[key] if i == -1: return np.nan else: return self.levels[i] else: - return Categorical(self.labels[key], self.levels) + return Categorical(values=self._codes[key], levels=self.levels, + ordered=self.ordered, fastpath=True) + + def __setitem__(self, key, value): + """ Item assignment. + + + Raises + ------ + ValueError + If (one or more) Value is not in levels or if a assigned `Categorical` has not the + same levels + + """ + + # require identical level set + if isinstance(value, Categorical): + if not value.levels.equals(self.levels): + raise ValueError("cannot set a Categorical with another, without identical levels") + + rvalue = value if com.is_list_like(value) else [value] + to_add = Index(rvalue)-self.levels + if len(to_add): + raise ValueError("cannot setitem on a Categorical with a new level," + " set the levels first") + + # set by position + if isinstance(key, (int, np.integer)): + pass + + # tuple of indexers + elif isinstance(key, tuple): + + # only allow 1 dimensional slicing, but can + # in a 2-d case be passd (slice(None),....) 
+ if len(key) == 2: + if not _is_null_slice(key[0]): + raise AssertionError("invalid slicing for a 1-ndim categorical") + key = key[1] + elif len(key) == 1: + key = key[0] + else: + raise AssertionError("invalid slicing for a 1-ndim categorical") + + else: + key = self._codes[key] + + lindexer = self.levels.get_indexer(rvalue) + self._codes[key] = lindexer + + #### reduction ops #### + def _reduce(self, op, axis=0, skipna=True, numeric_only=None, + filter_type=None, name=None, **kwds): + """ perform the reduction type operation """ + func = getattr(self,name,None) + if func is None: + raise TypeError("Categorical cannot perform the operation {op}".format(op=name)) + return func(numeric_only=numeric_only, **kwds) + + def min(self, numeric_only=None, **kwargs): + """ The minimum value of the object. + + Only ordered `Categoricals` have a minimum! + + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. + + Returns + ------- + min : the minimum of this `Categorical` + """ + if not self.ordered: + raise TypeError("Categorical not ordered") + if numeric_only: + good = self._codes != -1 + pointer = self._codes[good].min(**kwargs) + else: + pointer = self._codes.min(**kwargs) + if pointer == -1: + return np.nan + else: + return self.levels[pointer] + + + def max(self, numeric_only=None, **kwargs): + """ The maximum value of the object. + + Only ordered `Categoricals` have a maximum! + + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. + + Returns + ------- + max : the maximum of this `Categorical` + """ + if not self.ordered: + raise TypeError("Categorical not ordered") + if numeric_only: + good = self._codes != -1 + pointer = self._codes[good].max(**kwargs) + else: + pointer = self._codes.max(**kwargs) + if pointer == -1: + return np.nan + else: + return self.levels[pointer] + + def mode(self): + """ + Returns the mode(s) of the Categorical. + + Empty if nothing occurs at least 2 times. 
Always returns `Categorical` even + if only one value. + + Returns + ------- + modes : `Categorical` (sorted) + """ + + import pandas.hashtable as htable + good = self._codes != -1 + result = Categorical(sorted(htable.mode_int64(com._ensure_int64(self._codes[good]))), + levels=self.levels,ordered=self.ordered, name=self.name, + fastpath=True) + return result + + def unique(self): + """ + Return the unique values. + + This includes all levels, even if one or more is unused. + + Returns + ------- + unique values : array + """ + return self.levels def equals(self, other): """ - Returns True if categorical arrays are equal + Returns True if categorical arrays are equal. + + The name of the `Categorical` is not compared! Parameters ---------- - other : Categorical + other : `Categorical` Returns ------- @@ -204,23 +904,31 @@ def equals(self, other): """ if not isinstance(other, Categorical): return False - - return (self.levels.equals(other.levels) and - np.array_equal(self.labels, other.labels)) + # TODO: should this also test if name is equal? + return (self.levels.equals(other.levels) and self.ordered == other.ordered and + np.array_equal(self._codes, other._codes)) def describe(self): - """ - Returns a dataframe with frequency and counts by level. + """ Describes this Categorical + + Returns + ------- + description: `DataFrame` + A dataframe with frequency and counts by level. """ # Hack? 
from pandas.core.frame import DataFrame counts = DataFrame({ - 'labels' : self.labels, - 'values' : self.labels } - ).groupby('labels').count().squeeze().values + 'codes' : self._codes, + 'values' : self._codes } + ).groupby('codes').count() + + counts.index = self.levels.take(counts.index) + counts = counts.reindex(self.levels) freqs = counts / float(counts.sum()) - return DataFrame({ - 'counts': counts, - 'freqs': freqs, - 'levels': self.levels - }).set_index('levels') + + from pandas.tools.merge import concat + result = concat([counts,freqs],axis=1) + result.index.name = 'levels' + result.columns = ['counts','freqs'] + return result diff --git a/pandas/core/common.py b/pandas/core/common.py index bb7f43511e905..53f7415ac8ef6 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -23,7 +23,6 @@ from pandas.core.config import get_option from pandas.core import array as pa - class PandasError(Exception): pass @@ -107,6 +106,77 @@ class to receive bound method else: setattr(cls, name, func) +class CategoricalDtypeType(type): + """ + the type of CategoricalDtype, this metaclass determines subclass ability + """ + def __init__(cls, name, bases, attrs): + pass + +class CategoricalDtype(object): + __meta__ = CategoricalDtypeType + """ + A np.dtype duck-typed class, suitable for holding a custom categorical dtype. + + THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object + """ + name = 'category' + names = None + type = CategoricalDtypeType + subdtype = None + kind = 'O' + str = '|O08' + num = 100 + shape = tuple() + itemsize = 8 + base = np.dtype('O') + isbuiltin = 0 + isnative = 0 + + def __unicode__(self): + return self.name + + def __str__(self): + """ + Return a string representation for a particular Object + + Invoked by str(df) in both py2/py3. + Yields Bytestring in Py2, Unicode String in py3. 
+ """ + + if compat.PY3: + return self.__unicode__() + return self.__bytes__() + + def __bytes__(self): + """ + Return a string representation for a particular object. + + Invoked by bytes(obj) in py3 only. + Yields a bytestring in both py2/py3. + """ + from pandas.core.config import get_option + + encoding = get_option("display.encoding") + return self.__unicode__().encode(encoding, 'replace') + + def __repr__(self): + """ + Return a string representation for a particular object. + + Yields Bytestring in Py2, Unicode String in py3. + """ + return str(self) + + def __hash__(self): + # make myself hashable + return hash(str(self)) + + def __eq__(self, other): + if isinstance(other, compat.string_types): + return other == self.name + + return isinstance(other, CategoricalDtype) def isnull(obj): """Detect missing values (NaN in numeric arrays, None/NaN in object arrays) @@ -1640,6 +1710,8 @@ def _get_dtype_from_object(dtype): elif isinstance(dtype, compat.string_types): if dtype == 'datetime' or dtype == 'timedelta': dtype += '64' + elif dtype == 'category': + return CategoricalDtypeType try: return _get_dtype_from_object(getattr(np, dtype)) except AttributeError: @@ -1650,10 +1722,6 @@ def _get_dtype_from_object(dtype): return _get_dtype_from_object(np.dtype(dtype)) -_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type, - compat.text_type))) - - def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): @@ -2318,6 +2386,16 @@ def is_bool_dtype(arr_or_dtype): tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.bool_) +def is_categorical_dtype(arr_or_dtype): + if hasattr(arr_or_dtype,'dtype'): + arr_or_dtype = arr_or_dtype.dtype + + if isinstance(arr_or_dtype, CategoricalDtype): + return True + try: + return arr_or_dtype == 'category' + except: + return False def is_complex_dtype(arr_or_dtype): tipo = _get_dtype_type(arr_or_dtype) @@ -2356,6 +2434,10 @@ def 
_is_sequence(x): return False +_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type, + compat.text_type))) + + _ensure_float64 = algos.ensure_float64 _ensure_float32 = algos.ensure_float32 _ensure_int64 = algos.ensure_int64 diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index f9f3b0da22843..9c360f7ca7697 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -51,6 +51,12 @@ 'None' value means unlimited. """ +pc_max_levels_doc = """ +: int + This sets the maximum number of levels pandas should output when printing + out a `Categorical`. +""" + pc_max_info_cols_doc = """ : int max_info_columns is used in DataFrame.info method to decide if @@ -223,6 +229,7 @@ def mpl_style_cb(key): validator=is_instance_factory((int, type(None)))) cf.register_option('max_rows', 60, pc_max_rows_doc, validator=is_instance_factory([type(None), int])) + cf.register_option('max_levels', 8, pc_max_levels_doc, validator=is_int) cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int) cf.register_option('max_columns', 20, pc_max_cols_doc, validator=is_instance_factory([type(None), int])) diff --git a/pandas/core/format.py b/pandas/core/format.py index b11b2e7270271..be4074bdb0ae7 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -93,22 +93,17 @@ def _get_footer(self): footer += ', ' footer += "Length: %d" % len(self.categorical) - levheader = 'Levels (%d): ' % len(self.categorical.levels) - - # TODO: should max_line_width respect a setting? 
- levstring = np.array_repr(self.categorical.levels, max_line_width=60) - indent = ' ' * (levstring.find('[') + len(levheader) + 1) - lines = levstring.split('\n') - levstring = '\n'.join([lines[0]] + - [indent + x.lstrip() for x in lines[1:]]) + level_info = self.categorical._repr_level_info() + + # Levels are added in a newline if footer: - footer += ', ' - footer += levheader + levstring + footer += '\n' + footer += level_info return compat.text_type(footer) def _get_formatted_values(self): - return format_array(np.asarray(self.categorical), None, + return format_array(self.categorical.get_values(), None, float_format=None, na_rep=self.na_rep) @@ -157,7 +152,9 @@ def _get_footer(self): footer += 'Freq: %s' % self.series.index.freqstr if footer and self.series.name is not None: - footer += ', ' + # categories have already a comma + linebreak + if not com.is_categorical_dtype(self.series.dtype): + footer += ', ' series_name = com.pprint_thing(self.series.name, escape_chars=('\t', '\r', '\n')) @@ -169,6 +166,7 @@ def _get_footer(self): footer += ', ' footer += 'Length: %d' % len(self.series) + # TODO: in tidy_repr, with freq index, no dtype is shown -> also include a guard here? 
if self.dtype: name = getattr(self.series.dtype, 'name', None) if name: @@ -176,6 +174,15 @@ def _get_footer(self): footer += ', ' footer += 'dtype: %s' % com.pprint_thing(name) + # level infos are added to the end and in a new line, like it is done for Categoricals + # Only added when we request a name + if self.name and com.is_categorical_dtype(self.series.dtype): + level_info = self.series.cat._repr_level_info() + if footer: + footer += "\n" + footer += level_info + + return compat.text_type(footer) def _get_formatted_index(self): @@ -191,7 +198,7 @@ def _get_formatted_index(self): return fmt_index, have_header def _get_formatted_values(self): - return format_array(self.series.values, None, + return format_array(self.series.get_values(), None, float_format=self.float_format, na_rep=self.na_rep) @@ -829,7 +836,7 @@ def _column_header(): ins_col = self.fmt.tr_col_num if self.fmt.sparsify: recs_new = {} - # Increment tags after ... col. + # Increment tags after ... col. for tag,span in list(records.items()): if tag >= ins_col: recs_new[tag + 1] = span @@ -844,7 +851,7 @@ def _column_header(): else: recs_new[tag] = span # if ins_col lies between tags, all col headers get ... - if tag + span == ins_col: + if tag + span == ins_col: recs_new[ins_col] = 1 values = values[:ins_col] + (u('...'),) + \ values[ins_col:] @@ -895,7 +902,7 @@ def _column_header(): ] + [''] * min(len(self.columns), self.max_cols) if truncate_h: ins_col = row_levels + self.fmt.tr_col_num - row.insert(ins_col, '') + row.insert(ins_col, '') self.write_tr(row, indent, self.indent_delta, header=True) indent -= self.indent_delta @@ -981,7 +988,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): inner_lvl = len(level_lengths) - 1 if truncate_v: # Insert ... row and adjust idx_values and - # level_lengths to take this into account. + # level_lengths to take this into account. 
ins_row = self.fmt.tr_row_num for lnum,records in enumerate(level_lengths): rec_new = {} @@ -999,7 +1006,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): if tag + span == ins_row: rec_new[ins_row] = 1 if lnum == 0: - idx_values.insert(ins_row,tuple([u('...')]*len(level_lengths))) + idx_values.insert(ins_row,tuple([u('...')]*len(level_lengths))) level_lengths[lnum] = rec_new level_lengths[inner_lvl][ins_row] = 1 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b97cb11906e2f..a461dd0e247f2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -36,6 +36,7 @@ create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series +from pandas.core.categorical import Categorical import pandas.computation.expressions as expressions from pandas.computation.eval import eval as _eval from numpy import percentile as _quantile @@ -1539,7 +1540,7 @@ def get_value(self, index, col, takeable=False): series = self._get_item_cache(col) engine = self.index._engine - return engine.get_value(series.values, index) + return engine.get_value(series.get_values(), index) def set_value(self, index, col, value, takeable=False): """ @@ -1567,7 +1568,7 @@ def set_value(self, index, col, value, takeable=False): engine = self.index._engine engine.set_value(series.values, index, value) return self - except KeyError: + except (KeyError, TypeError): # set using a non-recursive method & reset the cache self.loc[index, col] = value @@ -2114,10 +2115,10 @@ def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new # blocks) are always copied - if isinstance(value, (Series, DataFrame)): - is_frame = isinstance(value, DataFrame) + def reindexer(value): + # reindex if necessary + if value.index.equals(self.index) or not len(self.index): - # copy the values value = value.values.copy() else: @@ -2133,10 +2134,18 @@ def _sanitize_column(self, key, value): # other raise 
TypeError('incompatible index of inserted column ' 'with frame index') + return value + + if isinstance(value, Series): + value = reindexer(value) + + elif isinstance(value, DataFrame): + value = reindexer(value).T + + elif isinstance(value, Categorical): + value = value.copy() - if is_frame: - value = value.T - elif isinstance(value, Index) or _is_sequence(value): + elif (isinstance(value, Index) or _is_sequence(value)): if len(value) != len(self.index): raise ValueError('Length of values does not match length of ' 'index') @@ -2160,6 +2169,10 @@ def _sanitize_column(self, key, value): value = np.repeat(value, len(self.index)).astype(dtype) value = com._possibly_cast_to_datetime(value, dtype) + # return categoricals directly + if isinstance(value, Categorical): + return value + # broadcast across multiple columns if necessary if key in self.columns and value.ndim == 1: if not self.columns.is_unique or isinstance(self.columns, @@ -2757,6 +2770,7 @@ def trans(v): % str(by)) if isinstance(ascending, (tuple, list)): ascending = ascending[0] + indexer = _nargsort(k, kind=kind, ascending=ascending, na_position=na_position) @@ -4069,7 +4083,7 @@ def all(self, axis=None, bool_only=None, skipna=True, level=None, numeric_only=bool_only, filter_type='bool') def _reduce(self, op, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + filter_type=None, name=None, **kwds): axis = self._get_axis_number(axis) f = lambda x: op(x, axis=axis, skipna=skipna, **kwds) labels = self._get_agg_axis(axis) @@ -4542,74 +4556,6 @@ def combineMult(self, other): _EMPTY_SERIES = Series([]) - -def group_agg(values, bounds, f): - """ - R-style aggregator - - Parameters - ---------- - values : N-length or N x K ndarray - bounds : B-length ndarray - f : ndarray aggregation function - - Returns - ------- - ndarray with same length as bounds array - """ - if values.ndim == 1: - N = len(values) - result = np.empty(len(bounds), dtype=float) - elif values.ndim == 2: - N, K = values.shape 
- result = np.empty((len(bounds), K), dtype=float) - - testagg = f(values[:min(1, len(values))]) - if isinstance(testagg, np.ndarray) and testagg.ndim == 2: - raise AssertionError('Function must reduce') - - for i, left_bound in enumerate(bounds): - if i == len(bounds) - 1: - right_bound = N - else: - right_bound = bounds[i + 1] - - result[i] = f(values[left_bound:right_bound]) - - return result - - -def factor_agg(factor, vec, func): - """ - Aggregate array based on Categorical - - Parameters - ---------- - factor : Categorical - length n - vec : sequence - length n - func : function - 1D array aggregation function - - Returns - ------- - ndarray corresponding to factor levels - - See Also - -------- - pandas.Categorical - """ - indexer = np.argsort(factor.labels) - unique_labels = np.arange(len(factor.levels)) - - ordered_labels = factor.labels.take(indexer) - ordered_vec = np.asarray(vec).take(indexer) - bounds = ordered_labels.searchsorted(unique_labels) - - return group_agg(ordered_vec, bounds, func) - - def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 59a457229d512..ac8747284f4bd 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3626,11 +3626,11 @@ def describe_numeric_1d(series, percentiles): def describe_categorical_1d(data): names = ['count', 'unique'] objcounts = data.value_counts() - result = [data.count(), len(objcounts)] + result = [data.count(), len(objcounts[objcounts!=0])] if result[1] > 0: top, freq = objcounts.index[0], objcounts.iloc[0] - if data.dtype == object: + if data.dtype == object or com.is_categorical_dtype(data.dtype): names += ['top', 'freq'] result += [top, freq] @@ -3782,7 +3782,8 @@ def stat_func(self, axis=None, skipna=None, level=None, return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) return self._reduce(f, axis=axis, - skipna=skipna, numeric_only=numeric_only) + skipna=skipna, numeric_only=numeric_only, + name=name) stat_func.__name__ = name return stat_func diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 249aa0afdfd64..08d3fbe335f35 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -23,7 +23,8 @@ import pandas.core.common as com from pandas.core.common import(_possibly_downcast_to_dtype, isnull, notnull, _DATELIKE_DTYPES, is_numeric_dtype, - is_timedelta64_dtype, is_datetime64_dtype) + is_timedelta64_dtype, is_datetime64_dtype, + is_categorical_dtype) from pandas import _np_version_under1p7 import pandas.lib as lib @@ -147,8 +148,10 @@ def _last(x): def _count_compat(x, axis=0): - return x.size - + try: + return x.size + except: + return x.count() class Grouper(object): """ @@ -1358,7 +1361,9 @@ def get_group_levels(self): name_list = [] for ping, labels in zip(self.groupings, recons_labels): labels = com._ensure_platform_int(labels) - name_list.append(ping.group_index.take(labels)) + levels = ping.group_index.take(labels) + + name_list.append(levels) return name_list @@ -1704,6 +1709,11 @@ def levels(self): def names(self): return 
[self.binlabels.name] + @property + def groupings(self): + # for compat + return None + def size(self): """ Compute group sizes @@ -1866,7 +1876,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # Is there any way to avoid this? self.grouper = np.asarray(factor) - self._labels = factor.labels + self._labels = factor.codes self._group_index = factor.levels if self.name is None: self.name = factor.name @@ -2629,7 +2639,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): if isinstance(values[0], DataFrame): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) - elif hasattr(self.grouper, 'groupings'): + elif self.grouper.groupings is not None: if len(self.grouper.groupings) > 1: key_index = MultiIndex.from_tuples(keys, names=key_names) @@ -3055,7 +3065,7 @@ def _wrap_aggregated_output(self, output, names=None): if self.axis == 1: result = result.T - return result.convert_objects() + return self._reindex_output(result).convert_objects() def _wrap_agged_blocks(self, items, blocks): if not self.as_index: @@ -3077,7 +3087,27 @@ def _wrap_agged_blocks(self, items, blocks): if self.axis == 1: result = result.T - return result.convert_objects() + return self._reindex_output(result).convert_objects() + + def _reindex_output(self, result): + """ + if we have categorical groupers, then we want to make sure that + we have a fully reindex-output to the levels. These may have not participated in + the groupings (e.g. 
may have all been nan groups) + + This can re-expand the output space + """ + groupings = self.grouper.groupings + if groupings is None: + return result + elif len(groupings) == 1: + return result + elif not any([ping._was_factor for ping in groupings]): + return result + + levels_list = [ ping._group_index for ping in groupings ] + index = MultiIndex.from_product(levels_list, names=self.grouper.names) + return result.reindex(**{ self.obj._get_axis_name(self.axis) : index, 'copy' : False }).sortlevel() def _iterate_column_groupbys(self): for i, colname in enumerate(self._selected_obj.columns): @@ -3419,6 +3449,11 @@ def _nargsort(items, kind='quicksort', ascending=True, na_position='last'): It adds ascending and na_position parameters. GH #6399, #5231 """ + + # specially handle Categorical + if is_categorical_dtype(items): + return items.argsort(ascending=ascending) + items = np.asanyarray(items) idx = np.arange(len(items)) mask = isnull(items) diff --git a/pandas/core/index.py b/pandas/core/index.py index 262305a335d46..6927d5a732440 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2796,7 +2796,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None): cats = [Categorical.from_array(arr) for arr in arrays] levels = [c.levels for c in cats] - labels = [c.labels for c in cats] + labels = [c.codes for c in cats] if names is None: names = [c.name for c in cats] @@ -2888,7 +2888,7 @@ def from_product(cls, iterables, sortorder=None, names=None): from pandas.tools.util import cartesian_product categoricals = [Categorical.from_array(it) for it in iterables] - labels = cartesian_product([c.labels for c in categoricals]) + labels = cartesian_product([c.codes for c in categoricals]) return MultiIndex(levels=[c.levels for c in categoricals], labels=labels, sortorder=sortorder, names=names) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index d387cb647d8c2..367a283958051 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ 
-405,7 +405,9 @@ def can_do_equal_len(): return False - if _is_list_like(value): + # we need an interable, with a ndim of at least 1 + # eg. don't pass thru np.array(0) + if _is_list_like(value) and getattr(value,'ndim',1) > 0: # we have an equal len Frame if isinstance(value, ABCDataFrame) and value.ndim > 1: @@ -1675,7 +1677,7 @@ def _is_label_like(key): def _is_list_like(obj): # Consider namedtuples to be not list like as they are useful as indices - return (np.iterable(obj) + return (hasattr(obj, '__iter__') and not isinstance(obj, compat.string_types) and not (isinstance(obj, tuple) and type(obj) is not tuple)) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 4f7f36dd4a14d..2bd318ec2430f 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -16,6 +16,7 @@ _possibly_infer_to_datetimelike) from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_convert_indices, _length_of_indexer) +from pandas.core.categorical import Categorical, _maybe_to_categorical, _is_categorical import pandas.core.common as com from pandas.sparse.array import _maybe_to_sparse, SparseArray import pandas.lib as lib @@ -49,11 +50,13 @@ class Block(PandasObject): is_timedelta = False is_bool = False is_object = False + is_categorical = False is_sparse = False _can_hold_na = False _downcast_dtype = None _can_consolidate = True _verify_integrity = True + _validate_ndim = True _ftype = 'dense' def __init__(self, values, placement, ndim=None, fastpath=False): @@ -84,6 +87,9 @@ def is_datelike(self): """ return True if I am a non-datelike """ return self.is_datetime or self.is_timedelta + def to_dense(self): + return self.values.view() + @property def fill_value(self): return np.nan @@ -92,7 +98,12 @@ def fill_value(self): def mgr_locs(self): return self._mgr_locs - def make_block_same_class(self, values, placement, copy=False, + @property + def array_dtype(self): + """ the dtype to return if I want to construct 
this block as an array """ + return self.dtype + + def make_block_same_class(self, values, placement, copy=False, fastpath=True, **kwargs): """ Wrap given values in a block of same type as self. @@ -103,7 +114,7 @@ def make_block_same_class(self, values, placement, copy=False, if copy: values = values.copy() return make_block(values, placement, klass=self.__class__, - fastpath=True) + fastpath=fastpath, **kwargs) @mgr_locs.setter def mgr_locs(self, new_mgr_locs): @@ -161,7 +172,7 @@ def getitem_block(self, slicer, new_mgr_locs=None): new_values = self._slice(slicer) - if new_values.ndim != self.ndim: + if self._validate_ndim and new_values.ndim != self.ndim: raise ValueError("Only same dim slicing is allowed") return self.make_block_same_class(new_values, new_mgr_locs) @@ -326,6 +337,15 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None, Coerce to the new type (if copy=True, return a new copy) raise on an except if raise == True """ + + # may need to convert to categorical + # this is only called for non-categoricals + if com.is_categorical_dtype(dtype): + return make_block(Categorical(self.values), + ndim=self.ndim, + placement=self.mgr_locs) + + # astype processing dtype = np.dtype(dtype) if self.dtype == dtype: if copy: @@ -431,6 +451,10 @@ def to_native_types(self, slicer=None, na_rep='', **kwargs): values[mask] = na_rep return values.tolist() + def _validate_merge(self, blocks): + """ validate that we can merge these blocks """ + return True + # block actions #### def copy(self, deep=True): values = self.values @@ -1014,6 +1038,72 @@ def equals(self, other): return np.array_equal(self.values, other.values) +class NonConsolidatableMixIn(object): + """ hold methods for the nonconsolidatable blocks """ + _can_consolidate = False + _verify_integrity = False + _validate_ndim = False + _holder = None + + def __init__(self, values, placement, + ndim=None, fastpath=False,): + + # kludgetastic + if ndim is None: + if len(placement) != 1: + ndim = 
1 + else: + ndim = 2 + self.ndim = ndim + + self.mgr_locs = placement + + if not isinstance(values, self._holder): + raise TypeError("values must be {0}".format(self._holder.__name__)) + + self.values = values + + def get_values(self, dtype=None): + """ need to to_dense myself (and always return a ndim sized object) """ + values = self.values.to_dense() + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def iget(self, col): + + if self.ndim == 2 and isinstance(col, tuple): + col, loc = col + if col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values[loc] + else: + if col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values + + def should_store(self, value): + return isinstance(value, self._holder) + + def set(self, locs, values, check=False): + assert locs.tolist() == [0] + self.values = values + + def get(self, item): + if self.ndim == 1: + loc = self.items.get_loc(item) + return self.values[loc] + else: + return self.values + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.get_values()[slicer] + + def _try_cast_result(self, result, dtype=None): + return result + + class NumericBlock(Block): __slots__ = () is_numeric = True @@ -1336,9 +1426,9 @@ def _try_cast(self, element): return element def should_store(self, value): - return not issubclass(value.dtype.type, + return not (issubclass(value.dtype.type, (np.integer, np.floating, np.complexfloating, - np.datetime64, np.bool_)) + np.datetime64, np.bool_)) or com.is_categorical_dtype(value)) def replace(self, to_replace, value, inplace=False, filter=None, regex=False): @@ -1444,6 +1534,123 @@ def re_replacer(s): make_block(new_values, fastpath=True, placement=self.mgr_locs)] +class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): + __slots__ = () + is_categorical = True + _can_hold_na = True + _holder = Categorical + + def __init__(self, 
values, placement, + fastpath=False, **kwargs): + + # coerce to categorical if we can + super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), + fastpath=True, placement=placement, + **kwargs) + + def to_dense(self): + return self.values.to_dense().view() + + @property + def shape(self): + return (len(self.mgr_locs), len(self.values)) + + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an array """ + return np.object_ + + def _slice(self, slicer): + """ return a slice of my values """ + + # slice the category + # return same dims as we currently have + return self.values._slice(slicer) + + def fillna(self, value, limit=None, inplace=False, downcast=None): + # we may need to upcast our fill to match our dtype + if limit is not None: + raise NotImplementedError + + values = self.values if inplace else self.values.copy() + return [self.make_block_same_class(values=values.fillna(fill_value=value, + limit=limit), + placement=self.mgr_locs)] + + def interpolate(self, method='pad', axis=0, inplace=False, + limit=None, fill_value=None, **kwargs): + + values = self.values if inplace else self.values.copy() + return self.make_block_same_class(values=values.fillna(fill_value=fill_value, + method=method, + limit=limit), + placement=self.mgr_locs) + + def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block.bb + + """ + if fill_tuple is None: + fill_value = None + else: + fill_value = fill_tuple[0] + + # axis doesn't matter; we are really a single-dim object + # but are passed the axis depending on the calling routing + # if its REALLY axis 0, then this will be a reindex and not a take + new_values = self.values.take_nd(indexer, fill_value=fill_value) + + # if we are a 1-dim object, then always place at 0 + if self.ndim == 1: + new_mgr_locs = [0] + else: + if new_mgr_locs is None: + new_mgr_locs = self.mgr_locs + + return 
self.make_block_same_class(new_values, new_mgr_locs) + + def _astype(self, dtype, copy=False, raise_on_error=True, values=None, + klass=None): + """ + Coerce to the new type (if copy=True, return a new copy) + raise on an except if raise == True + """ + + if dtype == com.CategoricalDtype(): + values = self.values + else: + values = np.array(self.values).astype(dtype) + + if copy: + values = values.copy() + + return make_block(values, + ndim=self.ndim, + placement=self.mgr_locs) + + def _validate_merge(self, blocks): + """ validate that we can merge these blocks """ + + levels = self.values.levels + for b in blocks: + if not levels.equals(b.values.levels): + raise ValueError("incompatible levels in categorical block merge") + + return True + + def to_native_types(self, slicer=None, na_rep='', **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + # Categorical is always one dimension + values = values[slicer] + values = np.array(values, dtype=object) + mask = isnull(values) + values[mask] = na_rep + # Blocks.to_native_type returns list of lists, but we are always only a list + return [values.tolist()] class DatetimeBlock(Block): __slots__ = () @@ -1589,16 +1796,14 @@ def get_values(self, dtype=None): .reshape(self.values.shape) return self.values - -class SparseBlock(Block): +class SparseBlock(NonConsolidatableMixIn, Block): """ implement as a list of sparse arrays of the same dtype """ __slots__ = () is_sparse = True is_numeric = True _can_hold_na = True - _can_consolidate = False - _verify_integrity = False _ftype = 'sparse' + _holder = SparseArray def __init__(self, values, placement, ndim=None, fastpath=False,): @@ -1653,11 +1858,6 @@ def sp_values(self, v): fill_value=self.values.fill_value, copy=False) - def iget(self, col): - if col != 0: - raise IndexError("SparseBlock only contains one item") - return self.values - @property def sp_index(self): return self.values.sp_index @@ -1672,31 
+1872,6 @@ def __len__(self): except: return 0 - def should_store(self, value): - return isinstance(value, SparseArray) - - def set(self, locs, values, check=False): - assert locs.tolist() == [0] - self.values = values - - def get(self, item): - if self.ndim == 1: - loc = self.items.get_loc(item) - return self.values[loc] - else: - return self.values - - def _slice(self, slicer): - """ return a slice of my values (but densify first) """ - return self.get_values()[slicer] - - def get_values(self, dtype=None): - """ need to to_dense myself (and always return a ndim sized object) """ - values = self.values.to_dense() - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - def copy(self, deep=True): return self.make_block_same_class(values=self.values, sparse_index=self.sp_index, @@ -1797,9 +1972,6 @@ def sparse_reindex(self, new_index): return self.make_block_same_class(values, sparse_index=new_index, placement=self.mgr_locs) - def _try_cast_result(self, result, dtype=None): - return result - def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=False): @@ -1823,6 +1995,8 @@ def make_block(values, placement, klass=None, ndim=None, klass = DatetimeBlock elif issubclass(vtype, np.complexfloating): klass = ComplexBlock + elif _is_categorical(values): + klass = CategoricalBlock else: @@ -1936,7 +2110,7 @@ def make_empty(self, axes=None): # preserve dtype if possible if self.ndim == 1: - blocks = np.array([], dtype=self.dtype) + blocks = np.array([], dtype=self.array_dtype) else: blocks = [] return self.__class__(blocks, axes) @@ -2599,6 +2773,7 @@ def iget(self, i, fastpath=True): # fastpath shortcut for select a single-dim from a 2-dim BM return SingleBlockManager([ block.make_block_same_class(values, placement=slice(0, len(values)), + ndim=1, fastpath=True) ], self.axes[1]) @@ -2660,11 +2835,20 @@ def set(self, item, value, check=False): if check, then validate that we are not setting the same data 
in-place """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment + # can prob also fix the various if tests for sparse/categorical + value_is_sparse = isinstance(value, SparseArray) + value_is_cat = _is_categorical(value) + value_is_nonconsolidatable = value_is_sparse or value_is_cat if value_is_sparse: + # sparse assert self.ndim == 2 + def value_getitem(placement): + return value + elif value_is_cat: + # categorical def value_getitem(placement): return value else: @@ -2733,7 +2917,7 @@ def value_getitem(placement): unfit_count = len(unfit_mgr_locs) new_blocks = [] - if value_is_sparse: + if value_is_nonconsolidatable: # This code (ab-)uses the fact that sparse blocks contain only # one item. new_blocks.extend( @@ -2930,8 +3114,8 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): blk = self.blocks[blkno] # Otherwise, slicing along items axis is necessary. - if blk.is_sparse: - # A sparse block, it's easy, because there's only one item + if not blk._can_consolidate: + # A non-consolidatable block, it's easy, because there's only one item # and each mgr loc is a copy of that single item. 
for mgr_loc in mgr_locs: newblk = blk.copy(deep=True) @@ -3146,6 +3330,10 @@ def convert(self, **kwargs): def dtype(self): return self._values.dtype + @property + def array_dtype(self): + return self._block.array_dtype + @property def ftype(self): return self._block.ftype @@ -3166,6 +3354,10 @@ def get_ftypes(self): def values(self): return self._values.view() + def get_values(self): + """ return a dense type view """ + return np.array(self._block.to_dense(),copy=False) + @property def itemsize(self): return self._values.itemsize @@ -3250,6 +3442,7 @@ def form_blocks(arrays, names, axes): object_items = [] sparse_items = [] datetime_items = [] + cat_items = [] extra_locs = [] names_idx = Index(names) @@ -3290,6 +3483,8 @@ def form_blocks(arrays, names, axes): int_items.append((i, k, v)) elif v.dtype == np.bool_: bool_items.append((i, k, v)) + elif _is_categorical(v): + cat_items.append((i, k, v)) else: object_items.append((i, k, v)) @@ -3326,6 +3521,14 @@ def form_blocks(arrays, names, axes): sparse_blocks = _sparse_blockify(sparse_items) blocks.extend(sparse_blocks) + if len(cat_items) > 0: + cat_blocks = [ make_block(array, + klass=CategoricalBlock, + fastpath=True, + placement=[i] + ) for i, names, array in cat_items ] + blocks.extend(cat_blocks) + if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) @@ -3437,12 +3640,16 @@ def _lcd_dtype(l): have_complex = len(counts[ComplexBlock]) > 0 have_dt64 = len(counts[DatetimeBlock]) > 0 have_td64 = len(counts[TimeDeltaBlock]) > 0 + have_cat = len(counts[CategoricalBlock]) > 0 have_sparse = len(counts[SparseBlock]) > 0 have_numeric = have_float or have_complex or have_int + has_non_numeric = have_dt64 or have_td64 or have_cat + if (have_object or (have_bool and have_numeric) or - (have_numeric and (have_dt64 or have_td64))): + (have_numeric and has_non_numeric) or + have_cat): return np.dtype(object) elif have_bool: return np.dtype(bool) @@ -3731,7 +3938,9 @@ def 
get_empty_dtype_and_na(join_units): if dtype is None: continue - if issubclass(dtype.type, (np.object_, np.bool_)): + if com.is_categorical_dtype(dtype): + upcast_cls = 'category' + elif issubclass(dtype.type, (np.object_, np.bool_)): upcast_cls = 'object' elif is_datetime64_dtype(dtype): upcast_cls = 'datetime' @@ -3754,6 +3963,8 @@ def get_empty_dtype_and_na(join_units): # create the result if 'object' in upcast_classes: return np.dtype(np.object_), np.nan + elif 'category' in upcast_classes: + return com.CategoricalDtype(), np.nan elif 'float' in upcast_classes: return np.dtype(np.float64), np.nan elif 'datetime' in upcast_classes: @@ -3788,10 +3999,15 @@ def concatenate_join_units(join_units, concat_axis, copy): # FIXME: optimization potential: if len(join_units) == 1, single join unit # is densified and sparsified back. - if any(unit.is_sparse for unit in join_units): - # If one of the units was sparse, concat_values are 2d and there's only - # one item. - return SparseArray(concat_values[0]) + if any(unit.needs_block_conversion for unit in join_units): + + # need to ask the join unit block to convert to the underlying repr for us + blocks = [ unit.block for unit in join_units if unit.block is not None ] + + # may need to validate this combination + blocks[0]._validate_merge(blocks) + + return blocks[0]._holder(concat_values[0]) else: return concat_values @@ -4017,8 +4233,10 @@ def is_null(self): return True @cache_readonly - def is_sparse(self): - return self.block is not None and self.block.is_sparse + def needs_block_conversion(self): + """ we might need to convert the joined values to a suitable block repr """ + block = self.block + return block is not None and (block.is_sparse or block.is_categorical) def get_reindexed_values(self, empty_dtype, upcasted_na): if upcasted_na is None: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 780edec6ea25b..abe1974705243 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -456,10 +456,12 @@ def 
na_op(x, y): result = np.empty(x.size, dtype=dtype) mask = notnull(x) & notnull(y) result[mask] = op(x[mask], y[mask]) - else: + elif isinstance(x, pa.Array): result = pa.empty(len(x), dtype=x.dtype) mask = notnull(x) result[mask] = op(x[mask], y) + else: + raise TypeError("{typ} cannot perform the operation {op}".format(typ=type(x).__name__,op=str_rep)) result, changed = com._maybe_upcast_putmask(result, ~mask, pa.NA) @@ -562,7 +564,7 @@ def wrapper(self, other): mask = isnull(self) - values = self.values + values = self.get_values() other = _index.convert_scalar(values, other) if issubclass(values.dtype.type, np.datetime64): @@ -749,12 +751,15 @@ def na_op(x, y): yrav = yrav[mask] if np.prod(xrav.shape) and np.prod(yrav.shape): result[mask] = op(xrav, yrav) - else: + elif hasattr(x,'size'): result = np.empty(x.size, dtype=x.dtype) mask = notnull(xrav) xrav = xrav[mask] if np.prod(xrav.shape): result[mask] = op(xrav, y) + else: + raise TypeError("cannot perform operation {op} between objects " + "of type {x} and {y}".format(op=name,x=type(x),y=type(y))) result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan) result = result.reshape(x.shape) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index e9f8893355f2d..1e6ed56386f63 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -97,7 +97,7 @@ def panel_index(time, panels, names=['time', 'panel']): time_factor = Categorical.from_array(time) panel_factor = Categorical.from_array(panels) - labels = [time_factor.labels, panel_factor.labels] + labels = [time_factor.codes, panel_factor.codes] levels = [time_factor.levels, panel_factor.levels] return MultiIndex(levels, labels, sortorder=None, names=names, verify_integrity=False) @@ -1045,7 +1045,7 @@ def _apply_2d(self, func, axis): return self._construct_return_type(dict(results)) def _reduce(self, op, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): + filter_type=None, name=None, **kwds): axis_name = 
self._get_axis_name(axis) axis_number = self._get_axis_number(axis_name) f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds) diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index e1712be7b5a5f..43784e15ab163 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -1013,13 +1013,13 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False): if dummy_na: number_of_cols += 1 - dummy_mat = np.eye(number_of_cols).take(cat.labels, axis=0) + dummy_mat = np.eye(number_of_cols).take(cat.codes, axis=0) if dummy_na: levels = np.append(cat.levels, np.nan) else: # reset NaN GH4446 - dummy_mat[cat.labels == -1] = 0 + dummy_mat[cat.codes == -1] = 0 if prefix is not None: dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) @@ -1067,7 +1067,7 @@ def make_axis_dummies(frame, axis='minor', transform=None): if transform is not None: mapped_items = items.map(transform) cat = Categorical.from_array(mapped_items.take(labels)) - labels = cat.labels + labels = cat.codes items = cat.levels values = np.eye(len(items), dtype=float) diff --git a/pandas/core/series.py b/pandas/core/series.py index a484efe75e284..eff558d875c4a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -199,9 +199,10 @@ def __init__(self, data=None, index=None, dtype=None, name=None, else: data = data.reindex(index, copy=copy) elif isinstance(data, Categorical): + if dtype is not None: + raise ValueError("cannot specify a dtype with a Categorical") if name is None: name = data.name - data = np.asarray(data) elif isinstance(data, types.GeneratorType): data = list(data) elif isinstance(data, (set, frozenset)): @@ -369,7 +370,7 @@ def __array__(self, result=None): """ the array interface, return my values """ - return self.values + return self.get_values() def __array_wrap__(self, result, context=None): """ @@ -382,6 +383,14 @@ def __array_prepare__(self, result, context=None): """ Gets called prior to a ufunc """ + + # nice error message for non-ufunc types + if 
context is not None and not isinstance(self.values, np.ndarray): + obj = context[1][0] + raise TypeError("{obj} with dtype {dtype} cannot perform " + "the numpy op {op}".format(obj=type(obj).__name__, + dtype=getattr(obj,'dtype',None), + op=context[0].__name__)) return result # complex @@ -664,7 +673,10 @@ def _set_with(self, key, value): pass if not isinstance(key, (list, Series, pa.Array, Series)): - key = list(key) + try: + key = list(key) + except: + key = [ key ] if isinstance(key, Index): key_type = key.inferred_type @@ -870,6 +882,9 @@ def _tidy_repr(self, max_vals=20): def _repr_footer(self): + namestr = u("Name: %s, ") % com.pprint_thing( + self.name) if self.name is not None else "" + # time series if self.is_time_series: if self.index.freq is not None: @@ -877,13 +892,17 @@ def _repr_footer(self): else: freqstr = u('') - namestr = u("Name: %s, ") % com.pprint_thing( - self.name) if self.name is not None else "" return u('%s%sLength: %d') % (freqstr, namestr, len(self)) + # Categorical + if com.is_categorical_dtype(self.dtype): + level_info = self.cat._repr_level_info() + return u('%sLength: %d, dtype: %s\n%s') % (namestr, + len(self), + str(self.dtype.name), + level_info) + # reg series - namestr = u("Name: %s, ") % com.pprint_thing( - self.name) if self.name is not None else "" return u('%sLength: %d, dtype: %s') % (namestr, len(self), str(self.dtype.name)) @@ -994,7 +1013,7 @@ def values(self): def get_values(self): """ same as values (but handles sparseness conversions); is a view """ - return self._data.values + return self._data.get_values() def tolist(self): """ Convert Series to a nested list """ @@ -1387,8 +1406,8 @@ def dot(self, other): else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) -#------------------------------------------------------------------------------ -# Combination + #------------------------------------------------------------------------------ + # Combination def append(self, to_append, 
verify_integrity=False): """ @@ -2004,9 +2023,19 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): index=self.index).__finalize__(self) def _reduce(self, op, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): - """ perform a reduction operation """ - return op(_values_from_object(self), skipna=skipna, **kwds) + filter_type=None, name=None, **kwds): + """ + perform a reduction operation + + if we have an ndarray as a value, then simply perform the operation, + otherwise delegate to the object + + """ + delegate = self.values + if isinstance(delegate, np.ndarray): + return op(delegate, skipna=skipna, **kwds) + return delegate._reduce(op=op, axis=axis, skipna=skipna, numeric_only=numeric_only, + filter_type=filter_type, name=name, **kwds) def _reindex_indexer(self, new_index, indexer, copy): if indexer is None: @@ -2377,6 +2406,14 @@ def to_period(self, freq=None, copy=True): new_index = self.index.to_period(freq=freq) return self._constructor(new_values, index=new_index).__finalize__(self) + #------------------------------------------------------------------------------ + # Categorical methods + + @property + def cat(self): + if not com.is_categorical_dtype(self.dtype): + raise TypeError("Can only use .cat accessor with a 'category' dtype") + return self.values Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0}) @@ -2454,6 +2491,13 @@ def _try_cast(arr, take_fast_path): if copy: subarr = data.copy() + elif isinstance(data, Categorical): + subarr = data + + if copy: + subarr = data.copy() + return subarr + elif isinstance(data, list) and len(data) > 0: if dtype is not None: try: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index cee1867e73179..0e6c41a25bbe5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1773,6 +1773,9 @@ def set_atom(self, block, block_items, existing_col, min_itemsize, raise TypeError( "[unicode] is not implemented as a table column") + elif dtype == 'category': 
+ raise NotImplementedError + # this is basically a catchall; if say a datetime64 has nans then will # end up here ### elif inferred_type == 'string' or dtype == 'object': @@ -3494,7 +3497,7 @@ def read(self, where=None, columns=None, **kwargs): factors = [Categorical.from_array(a.values) for a in self.index_axes] levels = [f.levels for f in factors] N = [len(f.levels) for f in factors] - labels = [f.labels for f in factors] + labels = [f.codes for f in factors] # compute the key key = factor_indexer(N[1:], labels) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index d0d1b02577f89..6a944284035c8 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -9,7 +9,7 @@ import numpy as np import pandas -from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range, +from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range, date_range, Index, DatetimeIndex, isnull) from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf, IncompatibilityWarning, PerformanceWarning, @@ -4348,6 +4348,28 @@ def test_query_with_nested_special_character(self): result = store.select('test', 'a = "test & test"') tm.assert_frame_equal(expected, result) + def test_categorical(self): + # FIXME + + with ensure_clean_store(self.path) as store: + + s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], levels=['a','b','c','d'])) + + self.assertRaises(NotImplementedError, store.append, 's', s, format='table') + #store.append('s', s, format='table') + #result = store.select('s') + #tm.assert_series_equal(s, result) + + df = DataFrame({"s":s, "vals":[1,2,3,4,5,6]}) + self.assertRaises(NotImplementedError, store.append, 'df', df, format='table') + #store.append('df', df, format='table') + #result = store.select('df') + #tm.assert_frame_equal(df, df2) + + # Ok, this doesn't work yet + # FIXME: TypeError: cannot pass a where specification when reading from a Fixed format store. 
this store must be selected in its entirety + #result = store.select('df', where = ['index>2']) + #tm.assert_frame_equal(df[df.index>2],result) def _test_sort(obj): if isinstance(obj, DataFrame): diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index b045867b06263..1a2673342df45 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -198,6 +198,9 @@ def test_read_dta4(self): columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled', 'labeled_with_missings', 'float_labelled']) + # these are all categoricals + expected = pd.concat([ Series(pd.Categorical(value)) for col, value in expected.iteritems() ],axis=1) + tm.assert_frame_equal(parsed_113, expected) tm.assert_frame_equal(parsed_114, expected) tm.assert_frame_equal(parsed_115, expected) diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 48576266c3b5f..bb428b7e4c6bb 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -213,6 +213,10 @@ def values(self): """ return the array """ return self._data._values + def __array__(self, result=None): + """ the array interface, return my values """ + return self._data._values + def get_values(self): """ same as values """ return self._data._values.to_dense().view() @@ -299,6 +303,11 @@ def __array_finalize__(self, obj): self.name = getattr(obj, 'name', None) self.fill_value = getattr(obj, 'fill_value', None) + def _reduce(self, op, axis=0, skipna=True, numeric_only=None, + filter_type=None, name=None, **kwds): + """ perform a reduction operation """ + return op(self.get_values(), skipna=skipna, **kwds) + def __getstate__(self): # pickling return dict(_typ=self._typ, diff --git a/pandas/stats/plm.py b/pandas/stats/plm.py index 3c67119427ae0..54b687e277a38 100644 --- a/pandas/stats/plm.py +++ b/pandas/stats/plm.py @@ -759,7 +759,6 @@ def __init__(self, y, x, window_type='full_sample', window=None, def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis, nw_lags, nobs, df, 
nw_overlap): - from pandas.core.frame import group_agg xx_inv = math.inv(xx) yv = y.values @@ -782,7 +781,7 @@ def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis, x = x.swaplevel(0, 1).sortlevel(0) resid = resid.swaplevel(0, 1).sortlevel(0) - m = group_agg(x.values * resid.values, x.index._bounds, + m = _group_agg(x.values * resid.values, x.index._bounds, lambda x: np.sum(x, axis=0)) if nw_lags is None: @@ -795,6 +794,40 @@ def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis, return np.dot(xx_inv, np.dot(xox, xx_inv)) +def _group_agg(values, bounds, f): + """ + R-style aggregator + + Parameters + ---------- + values : N-length or N x K ndarray + bounds : B-length ndarray + f : ndarray aggregation function + + Returns + ------- + ndarray with same length as bounds array + """ + if values.ndim == 1: + N = len(values) + result = np.empty(len(bounds), dtype=float) + elif values.ndim == 2: + N, K = values.shape + result = np.empty((len(bounds), K), dtype=float) + + testagg = f(values[:min(1, len(values))]) + if isinstance(testagg, np.ndarray) and testagg.ndim == 2: + raise AssertionError('Function must reduce') + + for i, left_bound in enumerate(bounds): + if i == len(bounds) - 1: + right_bound = N + else: + right_bound = bounds[i + 1] + + result[i] = f(values[left_bound:right_bound]) + + return result def _xx_time_effects(x, y): """ diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py index c6caadad39abd..5a34048fd8c8c 100644 --- a/pandas/stats/tests/test_ols.py +++ b/pandas/stats/tests/test_ols.py @@ -787,6 +787,21 @@ def test_auto_rolling_window_type(self): assert_frame_equal(window_model.beta, rolling_model.beta) + def test_group_agg(self): + from pandas.stats.plm import _group_agg + + values = np.ones((10, 2)) * np.arange(10).reshape((10, 1)) + bounds = np.arange(5) * 2 + f = lambda x: x.mean(axis=0) + + agged = _group_agg(values, bounds, f) + + assert(agged[1][0] == 2.5) + assert(agged[2][0] == 4.5) + + # test a function that doesn't 
aggregate + f2 = lambda x: np.zeros((2, 2)) + self.assertRaises(Exception, _group_agg, values, bounds, f2) def _check_non_raw_results(model): _check_repr(model) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ec2c64242f146..6353ad53a88ef 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -203,6 +203,7 @@ class TestValueCounts(tm.TestCase): _multiprocess_can_split_ = True def test_value_counts(self): + np.random.seed(1234) from pandas.tools.tile import cut arr = np.random.randn(4) @@ -212,7 +213,7 @@ def test_value_counts(self): result = algos.value_counts(factor) expected = algos.value_counts(np.asarray(factor)) - tm.assert_series_equal(result, expected) + tm.assert_series_equal(result.sort_index(), expected.sort_index()) def test_value_counts_bins(self): s = [1, 2, 3, 4] diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index a195b57382b95..0aa7f2b67c7c6 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -2,22 +2,18 @@ from datetime import datetime from pandas.compat import range, lrange, u -import nose import re import numpy as np +import pandas as pd -from pandas.core.categorical import Categorical -from pandas.core.index import Index, Int64Index, MultiIndex -from pandas.core.frame import DataFrame -from pandas.tseries.period import PeriodIndex -from pandas.util.testing import assert_almost_equal -import pandas.core.common as com -from pandas.tseries.period import PeriodIndex +from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex, + Timestamp, _np_version_under1p7) +import pandas.core.common as com +import pandas.compat as compat import pandas.util.testing as tm - class TestCategorical(tm.TestCase): _multiprocess_can_split_ = True @@ -30,29 +26,122 @@ def test_getitem(self): self.assertEqual(self.factor[-1], 'c') subf = self.factor[[0, 1, 2]] - tm.assert_almost_equal(subf.labels, [0, 1, 1]) + tm.assert_almost_equal(subf._codes, 
[0, 1, 1]) subf = self.factor[np.asarray(self.factor) == 'c'] - tm.assert_almost_equal(subf.labels, [2, 2, 2]) + tm.assert_almost_equal(subf._codes, [2, 2, 2]) def test_constructor_unsortable(self): - raise nose.SkipTest('skipping for now') - - arr = np.array([1, 2, 3, datetime.now()], dtype='O') # it works! + arr = np.array([1, 2, 3, datetime.now()], dtype='O') factor = Categorical.from_array(arr) + self.assertFalse(factor.ordered) + + def test_constructor(self): + # There are multiple ways to call a constructor + + # old style: two arrays, one a pointer to the labels + # old style is now only available with compat=True + exp_arr = np.array(["a", "b", "c", "a", "b", "c"]) + with tm.assert_produces_warning(FutureWarning): + c_old = Categorical([0,1,2,0,1,2], levels=["a","b","c"], compat=True) + self.assert_numpy_array_equal(c_old.__array__(), exp_arr) + # the next one are from the old docs + with tm.assert_produces_warning(FutureWarning): + c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3], compat=True) + self.assert_numpy_array_equal(c_old2.__array__(), np.array([1, 2, 3, 1, 2, 3])) + with tm.assert_produces_warning(FutureWarning): + c_old3 = Categorical([0,1,2,0,1,2], ['a', 'b', 'c'], compat=True) + self.assert_numpy_array_equal(c_old3.__array__(), np.array(['a', 'b', 'c', 'a', 'b', 'c'])) + + with tm.assert_produces_warning(FutureWarning): + cat = pd.Categorical([1,2], levels=[1,2,3], compat=True) + self.assert_numpy_array_equal(cat.__array__(), np.array([2,3])) + + with tm.assert_produces_warning(None): + cat = pd.Categorical([1,2], levels=[1,2,3], compat=False) + self.assert_numpy_array_equal(cat.__array__(), np.array([1,2])) + + # new style + c1 = Categorical(exp_arr) + self.assert_numpy_array_equal(c1.__array__(), exp_arr) + c2 = Categorical(exp_arr, levels=["a","b","c"]) + self.assert_numpy_array_equal(c2.__array__(), exp_arr) + c2 = Categorical(exp_arr, levels=["c","b","a"]) + self.assert_numpy_array_equal(c2.__array__(), exp_arr) + + # Categorical as 
input + c1 = Categorical(["a", "b", "c", "a"]) + c2 = Categorical(c1) + self.assertTrue(c1.equals(c2)) + + c1 = Categorical(["a", "b", "c", "a"], levels=["a","b","c","d"]) + c2 = Categorical(c1) + self.assertTrue(c1.equals(c2)) + + c1 = Categorical(["a", "b", "c", "a"], levels=["a","c","b"]) + c2 = Categorical(c1) + self.assertTrue(c1.equals(c2)) + + c1 = Categorical(["a", "b", "c", "a"], levels=["a","c","b"]) + c2 = Categorical(c1, levels=["a","b","c"]) + self.assert_numpy_array_equal(c1.__array__(), c2.__array__()) + self.assert_numpy_array_equal(c2.levels, np.array(["a","b","c"])) + + # Series of dtype category + c1 = Categorical(["a", "b", "c", "a"], levels=["a","b","c","d"]) + c2 = Categorical(Series(c1)) + self.assertTrue(c1.equals(c2)) + + c1 = Categorical(["a", "b", "c", "a"], levels=["a","c","b"]) + c2 = Categorical(Series(c1)) + self.assertTrue(c1.equals(c2)) + + # Series + c1 = Categorical(["a", "b", "c", "a"]) + c2 = Categorical(Series(["a", "b", "c", "a"])) + self.assertTrue(c1.equals(c2)) - def test_factor_agg(self): - import pandas.core.frame as frame + c1 = Categorical(["a", "b", "c", "a"], levels=["a","b","c","d"]) + c2 = Categorical(Series(["a", "b", "c", "a"]), levels=["a","b","c","d"]) + self.assertTrue(c1.equals(c2)) - arr = np.arange(len(self.factor)) + # This should result in integer levels, not float! 
+ cat = pd.Categorical([1,2,3,np.nan], levels=[1,2,3]) + self.assertTrue(com.is_integer_dtype(cat.levels)) - f = np.sum - agged = frame.factor_agg(self.factor, arr, f) - labels = self.factor.labels - for i, idx in enumerate(self.factor.levels): - self.assertEqual(f(arr[labels == i]), agged[i]) + def test_from_codes(self): + + # too few levels + def f(): + Categorical.from_codes([1,2], [1,2]) + self.assertRaises(ValueError, f) + + # no int codes + def f(): + Categorical.from_codes(["a"], [1,2]) + self.assertRaises(ValueError, f) + + # no unique levels + def f(): + Categorical.from_codes([0,1,2], ["a","a","b"]) + self.assertRaises(ValueError, f) + + # too negative + def f(): + Categorical.from_codes([-2,1,2], ["a","b","c"]) + self.assertRaises(ValueError, f) + + + exp = Categorical(["a","b","c"]) + res = Categorical.from_codes([0,1,2], ["a","b","c"]) + self.assertTrue(exp.equals(res)) + + # Not available in earlier numpy versions + if hasattr(np.random, "choice"): + codes = np.random.choice([0,1], 5, p=[0.9,0.1]) + pd.Categorical.from_codes(codes, levels=["train", "test"]) def test_comparisons(self): result = self.factor[self.factor == 'a'] @@ -97,7 +186,7 @@ def test_na_flags_int_levels(self): labels = np.random.randint(0, 10, 20) labels[::5] = -1 - cat = Categorical(labels, levels) + cat = Categorical(labels, levels, fastpath=True) repr(cat) self.assert_numpy_array_equal(com.isnull(cat), labels == -1) @@ -127,96 +216,1301 @@ def test_describe(self): def test_print(self): expected = [" a", " b", " b", " a", " a", " c", " c", " c", - "Levels (3): Index([a, b, c], dtype=object)"] + "Levels (3, object): [a < b < c]"] expected = "\n".join(expected) - # hack because array_repr changed in numpy > 1.6.x actual = repr(self.factor) - pat = "Index\(\['a', 'b', 'c']" - sub = "Index([a, b, c]" - actual = re.sub(pat, sub, actual) - self.assertEqual(actual, expected) def test_big_print(self): - factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat') + factor = 
Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True) expected = [" a", " b", " c", " a", " b", " c", " a", " b", " c", " a", " b", " c", " a", "...", " c", " a", " b", " c", " a", " b", " c", " a", " b", " c", " a", " b", " c", - "Levels (3): Index([a, b, c], dtype=object)", - "Name: cat, Length: 600" ] + "Name: cat, Length: 600", + "Levels (3, object): [a, b, c]"] expected = "\n".join(expected) - # hack because array_repr changed in numpy > 1.6.x actual = repr(factor) - pat = "Index\(\['a', 'b', 'c']" - sub = "Index([a, b, c]" - actual = re.sub(pat, sub, actual) - self.assertEqual(actual, expected) + self.assertEqual(expected, actual) def test_empty_print(self): factor = Categorical([], ["a","b","c"], name="cat") - expected = ("Categorical([], Name: cat, Levels (3): " - "Index([a, b, c], dtype=object)") + expected = ("Categorical([], Name: cat, Levels (3, object): [a < b < c]") # hack because array_repr changed in numpy > 1.6.x actual = repr(factor) - pat = "Index\(\['a', 'b', 'c']" - sub = "Index([a, b, c]" - actual = re.sub(pat, sub, actual) self.assertEqual(actual, expected) factor = Categorical([], ["a","b","c"]) - expected = ("Categorical([], Levels (3): " - "Index([a, b, c], dtype=object)") - # hack because array_repr changed in numpy > 1.6.x + expected = ("Categorical([], Levels (3, object): [a < b < c]") actual = repr(factor) - pat = "Index\(\['a', 'b', 'c']" - sub = "Index([a, b, c]" - actual = re.sub(pat, sub, actual) - self.assertEqual(actual, expected) + self.assertEqual(expected, actual) factor = Categorical([], []) - expected = ("Categorical([], Levels (0): " - "Index([], dtype=object)") - self.assertEqual(repr(factor), expected) + expected = ("Categorical([], Levels (0, object): []") + self.assertEqual(expected, repr(factor)) def test_periodindex(self): idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', '2014-03', '2014-03'], freq='M') - cat1 = Categorical.from_array(idx1) - exp_arr = np.array([0, 0, 1, 1, 2, 2]) + 
cat1 = Categorical.from_array(idx1) + str(cat1) + exp_arr = np.array([0, 0, 1, 1, 2, 2],dtype='int64') exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') - - self.assert_numpy_array_equal(cat1.labels, exp_arr) + self.assert_numpy_array_equal(cat1._codes, exp_arr) self.assertTrue(cat1.levels.equals(exp_idx)) - idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', '2014-03', '2014-01'], freq='M') cat2 = Categorical.from_array(idx2) - - exp_arr = np.array([2, 2, 1, 0, 2, 0]) - - self.assert_numpy_array_equal(cat2.labels, exp_arr) - self.assertTrue(cat2.levels.equals(exp_idx)) + str(cat2) + exp_arr = np.array([2, 2, 1, 0, 2, 0],dtype='int64') + exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') + self.assert_numpy_array_equal(cat2._codes, exp_arr) + self.assertTrue(cat2.levels.equals(exp_idx2)) idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09', '2013-08', '2013-07', '2013-05'], freq='M') cat3 = Categorical.from_array(idx3) - - exp_arr = np.array([6, 5, 4, 3, 2, 1, 0]) + exp_arr = np.array([6, 5, 4, 3, 2, 1, 0],dtype='int64') exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09', '2013-10', '2013-11', '2013-12'], freq='M') - - self.assert_numpy_array_equal(cat3.labels, exp_arr) + self.assert_numpy_array_equal(cat3._codes, exp_arr) self.assertTrue(cat3.levels.equals(exp_idx)) + def test_level_assigments(self): + s = pd.Categorical(["a","b","c","a"]) + exp = np.array([1,2,3,1]) + s.levels = [1,2,3] + self.assert_numpy_array_equal(s.__array__(), exp) + self.assert_numpy_array_equal(s.levels, np.array([1,2,3])) + # lengthen + s.levels = [1,2,3,4] + # does nothing to the values but only the the levels + self.assert_numpy_array_equal(s.__array__(), exp) + self.assert_numpy_array_equal(s.levels, np.array([1,2,3,4])) + # shorten + exp2 = np.array([1,2,np.nan,1]) + s.levels = [1,2] + self.assert_numpy_array_equivalent(s.__array__(), exp2) # doesn't work with nan :-( + self.assertTrue(np.isnan(s.__array__()[2])) 
+ self.assert_numpy_array_equal(s.levels, np.array([1,2])) + + def test_reorder_levels(self): + cat = Categorical(["a","b","c","a"], ordered=True) + exp_levels = np.array(["c","b","a"]) + exp_values = np.array(["a","b","c","a"]) + cat.reorder_levels(["c","b","a"]) + self.assert_numpy_array_equal(cat.levels, exp_levels) + self.assert_numpy_array_equal(cat.__array__(), exp_values) + + # not all "old" included in "new" + def f(): + cat.reorder_levels(["a"]) + self.assertRaises(ValueError, f) + + # still not all "old" in "new" + def f(): + cat.reorder_levels(["a","b","d"]) + self.assertRaises(ValueError, f) + + # This works: all "old" included in "new" + cat.reorder_levels(["a","b","c","d"]) + exp_levels = np.array(["a","b","c","d"]) + self.assert_numpy_array_equal(cat.levels, exp_levels) + + # internals... + c = Categorical([1,2,3,4,1], levels=[1,2,3,4]) + self.assert_numpy_array_equal(c._codes, np.array([0,1,2,3,0])) + self.assert_numpy_array_equal(c.levels , np.array([1,2,3,4] )) + self.assert_numpy_array_equal(c.get_values() , np.array([1,2,3,4,1] )) + c.reorder_levels([4,3,2,1]) # all "pointers" to '4' must be changed from 3 to 0,... 
+ self.assert_numpy_array_equal(c._codes , np.array([3,2,1,0,3])) # positions are changed + self.assert_numpy_array_equal(c.levels , np.array([4,3,2,1])) # levels are now in new order + self.assert_numpy_array_equal(c.get_values() , np.array([1,2,3,4,1])) # output is the same + self.assertTrue(c.min(), 4) + self.assertTrue(c.max(), 1) + + def f(): + c.reorder_levels([4,3,2,10]) + self.assertRaises(ValueError, f) + + def test_remove_unused_levels(self): + c = Categorical(["a","b","c","d","a"], levels=["a","b","c","d","e"]) + self.assert_numpy_array_equal(c.levels , np.array(["a","b","c","d","e"])) + c.remove_unused_levels() + self.assert_numpy_array_equal(c.levels , np.array(["a","b","c","d"])) + + def test_nan_handling(self): + + # Nans are represented as -1 in codes + c = Categorical(["a","b",np.nan,"a"]) + self.assert_numpy_array_equal(c.levels , np.array(["a","b"])) + self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0])) + + # If levels have nan included, the code should point to that instead + c = Categorical(["a","b",np.nan,"a"], levels=["a","b",np.nan]) + self.assert_numpy_array_equal(c.levels , np.array(["a","b",np.nan],dtype=np.object_)) + self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0])) + + # Changing levels should also make the replaced level np.nan + c = Categorical(["a","b","c","a"]) + c.levels = ["a","b",np.nan] + self.assert_numpy_array_equal(c.levels , np.array(["a","b",np.nan],dtype=np.object_)) + self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0])) + + def test_codes_immutable(self): + + # Codes should be read only + c = Categorical(["a","b","c","a", np.nan]) + exp = np.array([0,1,2,0, -1]) + self.assert_numpy_array_equal(c.codes, exp) + + # Assignments to codes should raise + def f(): + c.codes = np.array([0,1,2,0,1]) + self.assertRaises(ValueError, f) + + # changes in the codes array should raise + # np 1.6.1 raises RuntimeError rather than ValueError + codes= c.codes + def f(): + codes[4] = 1 + if 
_np_version_under1p7: + self.assertRaises(RuntimeError, f) + else: + self.assertRaises(ValueError, f) + + # But even after getting the codes, the original array should still be writeable! + c[4] = "a" + exp = np.array([0,1,2,0, 0]) + self.assert_numpy_array_equal(c.codes, exp) + c._codes[4] = 2 + exp = np.array([0,1,2,0, 2]) + self.assert_numpy_array_equal(c.codes, exp) + + + def test_min_max(self): + + # unordered cats have no min/max + cat = Categorical(["a","b","c","d"], ordered=False) + self.assertRaises(TypeError, lambda : cat.min()) + self.assertRaises(TypeError, lambda : cat.max()) + cat = Categorical(["a","b","c","d"], ordered=True) + _min = cat.min() + _max = cat.max() + self.assertEqual(_min, "a") + self.assertEqual(_max, "d") + cat = Categorical(["a","b","c","d"], levels=['d','c','b','a'], ordered=True) + _min = cat.min() + _max = cat.max() + self.assertEqual(_min, "d") + self.assertEqual(_max, "a") + cat = Categorical([np.nan,"b","c",np.nan], levels=['d','c','b','a'], ordered=True) + _min = cat.min() + _max = cat.max() + self.assertTrue(np.isnan(_min)) + self.assertEqual(_max, "b") + + _min = cat.min(numeric_only=True) + self.assertEqual(_min, "c") + _max = cat.max(numeric_only=True) + self.assertEqual(_max, "b") + + cat = Categorical([np.nan,1,2,np.nan], levels=[5,4,3,2,1], ordered=True) + _min = cat.min() + _max = cat.max() + self.assertTrue(np.isnan(_min)) + self.assertEqual(_max, 1) + + _min = cat.min(numeric_only=True) + self.assertEqual(_min, 2) + _max = cat.max(numeric_only=True) + self.assertEqual(_max, 1) + + + def test_mode(self): + s = Categorical([1,1,2,4,5,5,5], levels=[5,4,3,2,1], ordered=True) + res = s.mode() + exp = Categorical([5], levels=[5,4,3,2,1], ordered=True) + self.assertTrue(res.equals(exp)) + s = Categorical([1,1,1,4,5,5,5], levels=[5,4,3,2,1], ordered=True) + res = s.mode() + exp = Categorical([5,1], levels=[5,4,3,2,1], ordered=True) + self.assertTrue(res.equals(exp)) + s = Categorical([1,2,3,4,5], levels=[5,4,3,2,1], 
ordered=True) + res = s.mode() + exp = Categorical([], levels=[5,4,3,2,1], ordered=True) + self.assertTrue(res.equals(exp)) + # NaN should not become the mode! + s = Categorical([np.nan,np.nan,np.nan,4,5], levels=[5,4,3,2,1], ordered=True) + res = s.mode() + exp = Categorical([], levels=[5,4,3,2,1], ordered=True) + self.assertTrue(res.equals(exp)) + s = Categorical([np.nan,np.nan,np.nan,4,5,4], levels=[5,4,3,2,1], ordered=True) + res = s.mode() + exp = Categorical([4], levels=[5,4,3,2,1], ordered=True) + self.assertTrue(res.equals(exp)) + s = Categorical([np.nan,np.nan,4,5,4], levels=[5,4,3,2,1], ordered=True) + res = s.mode() + exp = Categorical([4], levels=[5,4,3,2,1], ordered=True) + self.assertTrue(res.equals(exp)) + + + def test_sort(self): + + # unordered cats are not sortable + cat = Categorical(["a","b","b","a"], ordered=False) + self.assertRaises(TypeError, lambda : cat.sort()) + cat = Categorical(["a","c","b","d"], ordered=True) + + # order + res = cat.order() + exp = np.array(["a","b","c","d"],dtype=object) + self.assert_numpy_array_equal(res.__array__(), exp) + + cat = Categorical(["a","c","b","d"], levels=["a","b","c","d"], ordered=True) + res = cat.order() + exp = np.array(["a","b","c","d"],dtype=object) + self.assert_numpy_array_equal(res.__array__(), exp) + + res = cat.order(ascending=False) + exp = np.array(["d","c","b","a"],dtype=object) + self.assert_numpy_array_equal(res.__array__(), exp) + + # sort (inplace order) + cat1 = cat.copy() + cat1.sort() + exp = np.array(["a","b","c","d"],dtype=object) + self.assert_numpy_array_equal(cat1.__array__(), exp) + + def test_slicing_directly(self): + cat = Categorical(["a","b","c","d","a","b","c"]) + sliced = cat[3] + tm.assert_equal(sliced, "d") + sliced = cat[3:5] + expected = Categorical(["d","a"], levels=['a', 'b', 'c', 'd']) + self.assert_numpy_array_equal(sliced._codes, expected._codes) + tm.assert_index_equal(sliced.levels, expected.levels) + +class TestCategoricalAsBlock(tm.TestCase): + 
_multiprocess_can_split_ = True + + def setUp(self): + self.factor = Categorical.from_array(['a', 'b', 'b', 'a', + 'a', 'c', 'c', 'c']) + + df = DataFrame({'value': np.random.randint(0, 10000, 100)}) + labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ] + + df = df.sort(columns=['value'], ascending=True) + df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels) + self.cat = df + + def test_dtypes(self): + + dtype = com.CategoricalDtype() + hash(dtype) + self.assertTrue(com.is_categorical_dtype(dtype)) + + s = Series(self.factor,name='A') + + # dtypes + self.assertTrue(com.is_categorical_dtype(s.dtype)) + self.assertTrue(com.is_categorical_dtype(s)) + self.assertFalse(com.is_categorical_dtype(np.dtype('float64'))) + + # np.dtype doesn't know about our new dtype + def f(): + np.dtype(dtype) + self.assertRaises(TypeError, f) + + self.assertFalse(dtype == np.str_) + self.assertFalse(np.str_ == dtype) + + def test_basic(self): + + # test basic creation / coercion of categoricals + s = Series(self.factor,name='A') + self.assertEqual(s.dtype,'category') + self.assertEqual(len(s),len(self.factor)) + str(s.values) + str(s) + + # in a frame + df = DataFrame({'A' : self.factor }) + result = df['A'] + tm.assert_series_equal(result,s) + result = df.iloc[:,0] + tm.assert_series_equal(result,s) + self.assertEqual(len(df),len(self.factor)) + str(df.values) + str(df) + + df = DataFrame({'A' : s }) + result = df['A'] + tm.assert_series_equal(result,s) + self.assertEqual(len(df),len(self.factor)) + str(df.values) + str(df) + + # multiples + df = DataFrame({'A' : s, 'B' : s, 'C' : 1}) + result1 = df['A'] + result2 = df['B'] + tm.assert_series_equal(result1,s) + tm.assert_series_equal(result2,s) + self.assertEqual(len(df),len(self.factor)) + str(df.values) + str(df) + + def test_creation_astype(self): + l = ["a","b","c","a"] + s = pd.Series(l) + exp = pd.Series(Categorical(l)) + res = s.astype('category') + 
tm.assert_series_equal(res, exp) + + l = [1,2,3,1] + s = pd.Series(l) + exp = pd.Series(Categorical(l)) + res = s.astype('category') + tm.assert_series_equal(res, exp) + + df = pd.DataFrame({"cats":[1,2,3,4,5,6], "vals":[1,2,3,4,5,6]}) + cats = Categorical([1,2,3,4,5,6]) + exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]}) + df["cats"] = df["cats"].astype("category") + tm.assert_frame_equal(exp_df, df) + + + df = pd.DataFrame({"cats":['a', 'b', 'b', 'a', 'a', 'd'], "vals":[1,2,3,4,5,6]}) + cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd']) + exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]}) + df["cats"] = df["cats"].astype("category") + tm.assert_frame_equal(exp_df, df) + + def test_sideeffects_free(self): + + # Passing a categorical to a Series and then changing values in either the series or the + # categorical should not change the values in the other one! + cat = Categorical(["a","b","c","a"]) + s = pd.Series(cat, copy=True) + self.assertFalse(s.cat is cat) + s.cat.levels = [1,2,3] + exp_s = np.array([1,2,3,1]) + exp_cat = np.array(["a","b","c","a"]) + self.assert_numpy_array_equal(s.__array__(), exp_s) + self.assert_numpy_array_equal(cat.__array__(), exp_cat) + + # setting + s[0] = 2 + exp_s2 = np.array([2,2,3,1]) + self.assert_numpy_array_equal(s.__array__(), exp_s2) + self.assert_numpy_array_equal(cat.__array__(), exp_cat) + + # however, copy is False by default + # so this WILL change values + cat = Categorical(["a","b","c","a"]) + s = pd.Series(cat) + self.assertTrue(s.cat is cat) + s.cat.levels = [1,2,3] + exp_s = np.array([1,2,3,1]) + self.assert_numpy_array_equal(s.__array__(), exp_s) + self.assert_numpy_array_equal(cat.__array__(), exp_s) + + s[0] = 2 + exp_s2 = np.array([2,2,3,1]) + self.assert_numpy_array_equal(s.__array__(), exp_s2) + self.assert_numpy_array_equal(cat.__array__(), exp_s2) + + def test_nan_handling(self): + + # Nans are represented as -1 in labels + s = Series(Categorical(["a","b",np.nan,"a"])) + 
self.assert_numpy_array_equal(s.cat.levels, np.array(["a","b"])) + self.assert_numpy_array_equal(s.cat._codes, np.array([0,1,-1,0])) + + # If levels have nan included, the label should point to that instead + s2 = Series(Categorical(["a","b",np.nan,"a"], levels=["a","b",np.nan])) + self.assert_numpy_array_equal(s2.cat.levels, + np.array(["a","b",np.nan], dtype=np.object_)) + self.assert_numpy_array_equal(s2.cat._codes, np.array([0,1,2,0])) + + # Changing levels should also make the replaced level np.nan + s3 = Series(Categorical(["a","b","c","a"])) + s3.cat.levels = ["a","b",np.nan] + self.assert_numpy_array_equal(s3.cat.levels, + np.array(["a","b",np.nan], dtype=np.object_)) + self.assert_numpy_array_equal(s3.cat._codes, np.array([0,1,2,0])) + + + def test_series_delegations(self): + + # invalid accessor + self.assertRaises(TypeError, lambda : Series([1,2,3]).cat) + tm.assertRaisesRegexp(TypeError, + r"Can only use .cat accessor with a 'category' dtype", + lambda : Series([1,2,3]).cat) + self.assertRaises(TypeError, lambda : Series(['a','b','c']).cat) + self.assertRaises(TypeError, lambda : Series(np.arange(5.)).cat) + self.assertRaises(TypeError, lambda : Series([Timestamp('20130101')]).cat) + + # Series should delegate calls to '.level', '.ordered' and '.reorder()' to the categorical + s = Series(Categorical(["a","b","c","a"], ordered=True)) + exp_levels = np.array(["a","b","c"]) + self.assert_numpy_array_equal(s.cat.levels, exp_levels) + + s.cat.levels = [1,2,3] + exp_levels = np.array([1,2,3]) + self.assert_numpy_array_equal(s.cat.levels, exp_levels) + self.assertEqual(s.cat.ordered, True) + s.cat.ordered = False + self.assertEqual(s.cat.ordered, False) + + # reorder + s = Series(Categorical(["a","b","c","a"], ordered=True)) + exp_levels = np.array(["c","b","a"]) + exp_values = np.array(["a","b","c","a"]) + s.cat.reorder_levels(["c","b","a"]) + self.assert_numpy_array_equal(s.cat.levels, exp_levels) + self.assert_numpy_array_equal(s.cat.__array__(), 
exp_values) + self.assert_numpy_array_equal(s.__array__(), exp_values) + + # remove unused levels + s = Series(Categorical(["a","b","b","a"], levels=["a","b","c"])) + exp_levels = np.array(["a","b"]) + exp_values = np.array(["a","b","b","a"]) + s.cat.remove_unused_levels() + self.assert_numpy_array_equal(s.cat.levels, exp_levels) + self.assert_numpy_array_equal(s.cat.__array__(), exp_values) + self.assert_numpy_array_equal(s.__array__(), exp_values) + + # This method is likely to be confused, so test that it raises an error on wrong inputs: + def f(): + s.reorder_levels([4,3,2,1]) + self.assertRaises(Exception, f) + # right: s.cat.reorder_levels([4,3,2,1]) + + def test_series_functions_no_warnings(self): + df = pd.DataFrame({'value': np.random.randint(0, 100, 20)}) + labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)] + with tm.assert_produces_warning(False): + df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels) + + def test_assignment_to_dataframe(self): + # assignment + df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')}) + labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ] + + df = df.sort(columns=['value'], ascending=True) + d = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels) + s = Series(d) + df['D'] = d + str(df) + + result = df.dtypes + expected = Series([np.dtype('int32'), com.CategoricalDtype()],index=['value','D']) + tm.assert_series_equal(result,expected) + + df['E'] = s + str(df) + + result = df.dtypes + expected = Series([np.dtype('int32'), com.CategoricalDtype(), com.CategoricalDtype()], + index=['value','D','E']) + tm.assert_series_equal(result,expected) + + result1 = df['D'] + result2 = df['E'] + self.assertTrue(result1._data._block.values.equals(d)) + + # sorting + s.name = 'E' + self.assertTrue(result2.sort_index().equals(s)) + + # FIXME? + #### what does this compare to? 
### + result = df.sort_index() + + cat = pd.Categorical([1,2,3,10], levels=[1,2,3,4,10]) + df = pd.DataFrame(pd.Series(cat)) + + def test_describe(self): + + # Categoricals should not show up together with numerical columns + result = self.cat.describe() + self.assertEquals(len(result.columns),1) + + # empty levels show up as NA + s = Series(Categorical(["a","b","b","b"], levels=['a','b','c'], ordered=True)) + result = s.cat.describe() + + expected = DataFrame([[1,0.25],[3,0.75],[np.nan,np.nan]], + columns=['counts','freqs'], + index=Index(['a','b','c'],name='levels')) + tm.assert_frame_equal(result,expected) + + result = s.describe() + expected = Series([4,2,"b",3],index=['count','unique','top', 'freq']) + tm.assert_series_equal(result,expected) + + # NA as a level + cat = pd.Categorical(["a","c","c",np.nan], levels=["b","a","c",np.nan] ) + result = cat.describe() + + expected = DataFrame([[np.nan, np.nan],[1,0.25],[2,0.5], [1,0.25]], + columns=['counts','freqs'], + index=Index(['b','a','c',np.nan],name='levels')) + tm.assert_frame_equal(result,expected) + + + # In a frame, describe() for the cat should be the same as for string arrays (count, unique, + # top, freq) + cat = pd.Series(pd.Categorical(["a","b","c","c"])) + df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]}) + res = df3.describe() + self.assert_numpy_array_equal(res["cat"].values, res["s"].values) + + def test_repr(self): + a = pd.Series(pd.Categorical([1,2,3,4], name="a")) + exp = u("0 1\n1 2\n2 3\n3 4\n" + + "Name: a, dtype: category\nLevels (4, int64): [1 < 2 < 3 < 4]") + + self.assertEqual(exp, a.__unicode__()) + + a = pd.Series(pd.Categorical(["a","b"] *25, name="a")) + exp = u("".join(["%s a\n%s b\n"%(i,i+1) for i in range(0,10,2)]) + "...\n" + + "".join(["%s a\n%s b\n"%(i,i+1) for i in range(40,50,2)]) + + "Name: a, Length: 50, dtype: category\n" + + "Levels (2, object): [a < b]") + self.assertEqual(exp,a._tidy_repr()) + + levs = list("abcdefghijklmnopqrstuvwxyz") + a = 
pd.Series(pd.Categorical(["a","b"], name="a", levels=levs)) + exp = u("0 a\n1 b\n" + + "Name: a, dtype: category\n" + "Levels (26, object): [a < b < c < d ... w < x < y < z]") + self.assertEqual(exp,a.__unicode__()) + + + def test_groupby_sort(self): + + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby + # This should result in a properly sorted Series so that the plot + # has a sorted x axis + #self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + + res = self.cat.groupby(['value_group'])['value_group'].count() + exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] + tm.assert_series_equal(res, exp) + + def test_min_max(self): + # unordered cats have no min/max + cat = Series(Categorical(["a","b","c","d"], ordered=False)) + self.assertRaises(TypeError, lambda : cat.min()) + self.assertRaises(TypeError, lambda : cat.max()) + + cat = Series(Categorical(["a","b","c","d"], ordered=True)) + _min = cat.min() + _max = cat.max() + self.assertEqual(_min, "a") + self.assertEqual(_max, "d") + + cat = Series(Categorical(["a","b","c","d"], levels=['d','c','b','a'], ordered=True)) + _min = cat.min() + _max = cat.max() + self.assertEqual(_min, "d") + self.assertEqual(_max, "a") + + cat = Series(Categorical([np.nan,"b","c",np.nan], levels=['d','c','b','a'], ordered=True)) + _min = cat.min() + _max = cat.max() + self.assertTrue(np.isnan(_min)) + self.assertEqual(_max, "b") + + cat = Series(Categorical([np.nan,1,2,np.nan], levels=[5,4,3,2,1], ordered=True)) + _min = cat.min() + _max = cat.max() + self.assertTrue(np.isnan(_min)) + self.assertEqual(_max, 1) + + def test_mode(self): + s = Series(Categorical([1,1,2,4,5,5,5], levels=[5,4,3,2,1], ordered=True)) + res = s.mode() + exp = Series(Categorical([5], levels=[5,4,3,2,1], ordered=True)) + tm.assert_series_equal(res, exp) + s = Series(Categorical([1,1,1,4,5,5,5], levels=[5,4,3,2,1], ordered=True)) + res = s.mode() + exp = Series(Categorical([5,1], 
levels=[5,4,3,2,1], ordered=True)) + tm.assert_series_equal(res, exp) + s = Series(Categorical([1,2,3,4,5], levels=[5,4,3,2,1], ordered=True)) + res = s.mode() + exp = Series(Categorical([], levels=[5,4,3,2,1], ordered=True)) + tm.assert_series_equal(res, exp) + + def test_value_counts(self): + + s = pd.Series(pd.Categorical(["a","b","c","c","c","b"], levels=["c","a","b","d"])) + res = s.value_counts(sort=False) + exp = Series([3,1,2,0], index=["c","a","b","d"]) + tm.assert_series_equal(res, exp) + res = s.value_counts(sort=True) + exp = Series([3,2,1,0], index=["c","b","a","d"]) + tm.assert_series_equal(res, exp) + + def test_groupby(self): + + cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], levels=["a","b","c","d"]) + data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats}) + + expected = DataFrame({ 'a' : Series([1,2,4,np.nan],index=Index(['a','b','c','d'],name='b')) }) + result = data.groupby("b").mean() + tm.assert_frame_equal(result, expected) + + raw_cat1 = Categorical(["a","a","b","b"], levels=["a","b","z"]) + raw_cat2 = Categorical(["c","d","c","d"], levels=["c","d","y"]) + df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]}) + + # single grouper + gb = df.groupby("A") + expected = DataFrame({ 'values' : Series([3,7,np.nan],index=Index(['a','b','z'],name='A')) }) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers + gb = df.groupby(['A','B']) + expected = DataFrame({ 'values' : Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan], + index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B'])) }) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers with a non-cat + df = df.copy() + df['C'] = ['foo','bar']*2 + gb = df.groupby(['A','B','C']) + expected = DataFrame({ 'values' : + Series(np.nan,index=pd.MultiIndex.from_product([['a','b','z'], + ['c','d','y'], + ['foo','bar']], + names=['A','B','C'])) + }).sortlevel() + expected.iloc[[1,2,7,8],0] = 
[1,2,3,4] + result = gb.sum() + tm.assert_frame_equal(result, expected) + + def test_pivot_table(self): + + raw_cat1 = Categorical(["a","a","b","b"], levels=["a","b","z"]) + raw_cat2 = Categorical(["c","d","c","d"], levels=["c","d","y"]) + df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]}) + result = pd.pivot_table(df, values='values', index=['A', 'B']) + + expected = Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan], + index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B']), + name='values') + tm.assert_series_equal(result, expected) + + def test_count(self): + + s = Series(Categorical([np.nan,1,2,np.nan], levels=[5,4,3,2,1], ordered=True)) + result = s.count() + self.assertEqual(result, 2) + + def test_sort(self): + + # unordered cats are not sortable + cat = Series(Categorical(["a","b","b","a"], ordered=False)) + self.assertRaises(TypeError, lambda : cat.sort()) + + cat = Series(Categorical(["a","c","b","d"], ordered=True)) + + res = cat.order() + exp = np.array(["a","b","c","d"]) + self.assert_numpy_array_equal(res.__array__(), exp) + + cat = Series(Categorical(["a","c","b","d"], levels=["a","b","c","d"], ordered=True)) + res = cat.order() + exp = np.array(["a","b","c","d"]) + self.assert_numpy_array_equal(res.__array__(), exp) + + res = cat.order(ascending=False) + exp = np.array(["d","c","b","a"]) + self.assert_numpy_array_equal(res.__array__(), exp) + + raw_cat1 = Categorical(["a","b","c","d"], levels=["a","b","c","d"], ordered=False) + raw_cat2 = Categorical(["a","b","c","d"], levels=["d","c","b","a"]) + s = ["a","b","c","d"] + df = DataFrame({"unsort":raw_cat1,"sort":raw_cat2, "string":s, "values":[1,2,3,4]}) + + # Cats must be sorted in a dataframe + res = df.sort(columns=["string"], ascending=False) + exp = np.array(["d", "c", "b", "a"]) + self.assert_numpy_array_equal(res["sort"].cat.__array__(), exp) + self.assertEqual(res["sort"].dtype, "category") + + res = df.sort(columns=["sort"], ascending=False) + exp = 
df.sort(columns=["string"], ascending=True) + self.assert_numpy_array_equal(res["values"], exp["values"]) + self.assertEqual(res["sort"].dtype, "category") + self.assertEqual(res["unsort"].dtype, "category") + + def f(): + df.sort(columns=["unsort"], ascending=False) + self.assertRaises(TypeError, f) + + + def test_slicing(self): + cat = Series(Categorical([1,2,3,4])) + reversed = cat[::-1] + exp = np.array([4,3,2,1]) + self.assert_numpy_array_equal(reversed.__array__(), exp) + + df = DataFrame({'value': (np.arange(100)+1).astype('int64')}) + df['D'] = pd.cut(df.value, bins=[0,25,50,75,100]) + + expected = Series([11,'(0, 25]'],index=['value','D']) + result = df.iloc[10] + tm.assert_series_equal(result,expected) + + expected = DataFrame({'value': np.arange(11,21).astype('int64')}, + index=np.arange(10,20).astype('int64')) + expected['D'] = pd.cut(expected.value, bins=[0,25,50,75,100]) + result = df.iloc[10:20] + tm.assert_frame_equal(result,expected) + + expected = Series([9,'(0, 25]'],index=['value','D']) + result = df.loc[8] + tm.assert_series_equal(result,expected) + + def test_slicing_and_getting_ops(self): + + # systematically test the slicing operations: + # for all slicing ops: + # - returning a dataframe + # - returning a column + # - returning a row + # - returning a single value + + cats = pd.Categorical(["a","c","b","c","c","c","c"], levels=["a","b","c"]) + idx = pd.Index(["h","i","j","k","l","m","n"]) + values= [1,2,3,4,5,6,7] + df = pd.DataFrame({"cats":cats,"values":values}, index=idx) + + # the expected values + cats2 = pd.Categorical(["b","c"], levels=["a","b","c"]) + idx2 = pd.Index(["j","k"]) + values2= [3,4] + + # 2:4,: | "j":"k",: + exp_df = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2) + + # :,"cats" | :,0 + exp_col = pd.Series(cats,index=idx,name='cats') + + # "j",: | 2,: + exp_row = pd.Series(["b",3], index=["cats","values"], dtype="object", name="j") + + # "j","cats | 2,0 + exp_val = "b" + + # iloc + # frame + res_df = 
df.iloc[2:4,:] + tm.assert_frame_equal(res_df, exp_df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + # row + res_row = df.iloc[2,:] + tm.assert_series_equal(res_row, exp_row) + tm.assert_isinstance(res_row["cats"], compat.string_types) + + # col + res_col = df.iloc[:,0] + tm.assert_series_equal(res_col, exp_col) + self.assertTrue(com.is_categorical_dtype(res_col)) + + # single value + res_val = df.iloc[2,0] + self.assertEqual(res_val, exp_val) + + # loc + # frame + res_df = df.loc["j":"k",:] + tm.assert_frame_equal(res_df, exp_df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + # row + res_row = df.loc["j",:] + tm.assert_series_equal(res_row, exp_row) + tm.assert_isinstance(res_row["cats"], compat.string_types) + + # col + res_col = df.loc[:,"cats"] + tm.assert_series_equal(res_col, exp_col) + self.assertTrue(com.is_categorical_dtype(res_col)) + + # single value + res_val = df.loc["j","cats"] + self.assertEqual(res_val, exp_val) + + # ix + # frame + #res_df = df.ix["j":"k",[0,1]] # doesn't work? 
+ res_df = df.ix["j":"k",:] + tm.assert_frame_equal(res_df, exp_df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + # row + res_row = df.ix["j",:] + tm.assert_series_equal(res_row, exp_row) + tm.assert_isinstance(res_row["cats"], compat.string_types) + + # col + res_col = df.ix[:,"cats"] + tm.assert_series_equal(res_col, exp_col) + self.assertTrue(com.is_categorical_dtype(res_col)) + + # single value + res_val = df.ix["j",0] + self.assertEqual(res_val, exp_val) + + # iat + res_val = df.iat[2,0] + self.assertEqual(res_val, exp_val) + + # at + res_val = df.at["j","cats"] + self.assertEqual(res_val, exp_val) + + # fancy indexing + exp_fancy = df.iloc[[2]] + + res_fancy = df[df["cats"] == "b"] + tm.assert_frame_equal(res_fancy,exp_fancy) + res_fancy = df[df["values"] == 3] + tm.assert_frame_equal(res_fancy,exp_fancy) + + # get_value + res_val = df.get_value("j","cats") + self.assertEqual(res_val, exp_val) + + # i : int, slice, or sequence of integers + res_row = df.irow(2) + tm.assert_series_equal(res_row, exp_row) + tm.assert_isinstance(res_row["cats"], compat.string_types) + + res_df = df.irow(slice(2,4)) + tm.assert_frame_equal(res_df, exp_df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + res_df = df.irow([2,3]) + tm.assert_frame_equal(res_df, exp_df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + res_col = df.icol(0) + tm.assert_series_equal(res_col, exp_col) + self.assertTrue(com.is_categorical_dtype(res_col)) + + res_df = df.icol(slice(0,2)) + tm.assert_frame_equal(res_df, df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + res_df = df.icol([0,1]) + tm.assert_frame_equal(res_df, df) + self.assertTrue(com.is_categorical_dtype(res_df["cats"])) + + def test_assigning_ops(self): + + # systematically test the assigning operations: + # for all slicing ops: + # for value in levels and value not in levels: + # - assign a single value -> exp_single_cats_value + # - assign a complete row (mixed values) -> 
exp_single_row + # - assign multiple rows (mixed values) (-> array) -> exp_multi_row + # - assign a part of a column with dtype == categorical -> exp_parts_cats_col + # - assign a part of a column with dtype != categorical -> exp_parts_cats_col + + cats = pd.Categorical(["a","a","a","a","a","a","a"], levels=["a","b"]) + idx = pd.Index(["h","i","j","k","l","m","n"]) + values = [1,1,1,1,1,1,1] + orig = pd.DataFrame({"cats":cats,"values":values}, index=idx) + + ### the expected values + # changed single row + cats1 = pd.Categorical(["a","a","b","a","a","a","a"], levels=["a","b"]) + idx1 = pd.Index(["h","i","j","k","l","m","n"]) + values1 = [1,1,2,1,1,1,1] + exp_single_row = pd.DataFrame({"cats":cats1,"values":values1}, index=idx1) + + #changed multiple rows + cats2 = pd.Categorical(["a","a","b","b","a","a","a"], levels=["a","b"]) + idx2 = pd.Index(["h","i","j","k","l","m","n"]) + values2 = [1,1,2,2,1,1,1] + exp_multi_row = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2) + + # changed part of the cats column + cats3 = pd.Categorical(["a","a","b","b","a","a","a"], levels=["a","b"]) + idx3 = pd.Index(["h","i","j","k","l","m","n"]) + values3 = [1,1,1,1,1,1,1] + exp_parts_cats_col = pd.DataFrame({"cats":cats3,"values":values3}, index=idx3) + + # changed single value in cats col + cats4 = pd.Categorical(["a","a","b","a","a","a","a"], levels=["a","b"]) + idx4 = pd.Index(["h","i","j","k","l","m","n"]) + values4 = [1,1,1,1,1,1,1] + exp_single_cats_value = pd.DataFrame({"cats":cats4,"values":values4}, index=idx4) + + #### iloc ##### + ################ + # - assign a single value -> exp_single_cats_value + df = orig.copy() + df.iloc[2,0] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + # - assign a single value not in the current level set + def f(): + df = orig.copy() + df.iloc[2,0] = "c" + self.assertRaises(ValueError, f) + + # - assign a complete row (mixed values) -> exp_single_row + df = orig.copy() + df.iloc[2,:] = ["b",2] + tm.assert_frame_equal(df, 
exp_single_row) + + # - assign a complete row (mixed values) not in level set + def f(): + df = orig.copy() + df.iloc[2,:] = ["c",2] + self.assertRaises(ValueError, f) + + # - assign multiple rows (mixed values) -> exp_multi_row + df = orig.copy() + df.iloc[2:4,:] = [["b",2],["b",2]] + tm.assert_frame_equal(df, exp_multi_row) + + def f(): + df = orig.copy() + df.iloc[2:4,:] = [["c",2],["c",2]] + self.assertRaises(ValueError, f) + + # - assign a part of a column with dtype == categorical -> exp_parts_cats_col + df = orig.copy() + df.iloc[2:4,0] = pd.Categorical(["b","b"], levels=["a","b"]) + tm.assert_frame_equal(df, exp_parts_cats_col) + + with tm.assertRaises(ValueError): + # different levels -> not sure if this should fail or pass + df = orig.copy() + df.iloc[2:4,0] = pd.Categorical(["b","b"], levels=["a","b","c"]) + + with tm.assertRaises(ValueError): + # different values + df = orig.copy() + df.iloc[2:4,0] = pd.Categorical(["c","c"], levels=["a","b","c"]) + + # - assign a part of a column with dtype != categorical -> exp_parts_cats_col + df = orig.copy() + df.iloc[2:4,0] = ["b","b"] + tm.assert_frame_equal(df, exp_parts_cats_col) + + with tm.assertRaises(ValueError): + df.iloc[2:4,0] = ["c","c"] + + #### loc ##### + ################ + # - assign a single value -> exp_single_cats_value + df = orig.copy() + df.loc["j","cats"] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + # - assign a single value not in the current level set + def f(): + df = orig.copy() + df.loc["j","cats"] = "c" + self.assertRaises(ValueError, f) + + # - assign a complete row (mixed values) -> exp_single_row + df = orig.copy() + df.loc["j",:] = ["b",2] + tm.assert_frame_equal(df, exp_single_row) + + # - assign a complete row (mixed values) not in level set + def f(): + df = orig.copy() + df.loc["j",:] = ["c",2] + self.assertRaises(ValueError, f) + + # - assign multiple rows (mixed values) -> exp_multi_row + df = orig.copy() + df.loc["j":"k",:] = [["b",2],["b",2]] + 
tm.assert_frame_equal(df, exp_multi_row) + + def f(): + df = orig.copy() + df.loc["j":"k",:] = [["c",2],["c",2]] + self.assertRaises(ValueError, f) + + # - assign a part of a column with dtype == categorical -> exp_parts_cats_col + df = orig.copy() + df.loc["j":"k","cats"] = pd.Categorical(["b","b"], levels=["a","b"]) + tm.assert_frame_equal(df, exp_parts_cats_col) + + with tm.assertRaises(ValueError): + # different levels -> not sure if this should fail or pass + df = orig.copy() + df.loc["j":"k","cats"] = pd.Categorical(["b","b"], levels=["a","b","c"]) + + with tm.assertRaises(ValueError): + # different values + df = orig.copy() + df.loc["j":"k","cats"] = pd.Categorical(["c","c"], levels=["a","b","c"]) + + # - assign a part of a column with dtype != categorical -> exp_parts_cats_col + df = orig.copy() + df.loc["j":"k","cats"] = ["b","b"] + tm.assert_frame_equal(df, exp_parts_cats_col) + + with tm.assertRaises(ValueError): + df.loc["j":"k","cats"] = ["c","c"] + + #### ix ##### + ################ + # - assign a single value -> exp_single_cats_value + df = orig.copy() + df.ix["j",0] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + # - assign a single value not in the current level set + def f(): + df = orig.copy() + df.ix["j",0] = "c" + self.assertRaises(ValueError, f) + + # - assign a complete row (mixed values) -> exp_single_row + df = orig.copy() + df.ix["j",:] = ["b",2] + tm.assert_frame_equal(df, exp_single_row) + + # - assign a complete row (mixed values) not in level set + def f(): + df = orig.copy() + df.ix["j",:] = ["c",2] + self.assertRaises(ValueError, f) + + # - assign multiple rows (mixed values) -> exp_multi_row + df = orig.copy() + df.ix["j":"k",:] = [["b",2],["b",2]] + tm.assert_frame_equal(df, exp_multi_row) + + def f(): + df = orig.copy() + df.ix["j":"k",:] = [["c",2],["c",2]] + self.assertRaises(ValueError, f) + + # - assign a part of a column with dtype == categorical -> exp_parts_cats_col + df = orig.copy() + df.ix["j":"k",0] = 
pd.Categorical(["b","b"], levels=["a","b"]) + tm.assert_frame_equal(df, exp_parts_cats_col) + + with tm.assertRaises(ValueError): + # different levels -> not sure if this should fail or pass + df = orig.copy() + df.ix["j":"k",0] = pd.Categorical(["b","b"], levels=["a","b","c"]) + + with tm.assertRaises(ValueError): + # different values + df = orig.copy() + df.ix["j":"k",0] = pd.Categorical(["c","c"], levels=["a","b","c"]) + + # - assign a part of a column with dtype != categorical -> exp_parts_cats_col + df = orig.copy() + df.ix["j":"k",0] = ["b","b"] + tm.assert_frame_equal(df, exp_parts_cats_col) + + with tm.assertRaises(ValueError): + df.ix["j":"k",0] = ["c","c"] + + # iat + df = orig.copy() + df.iat[2,0] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + # - assign a single value not in the current level set + def f(): + df = orig.copy() + df.iat[2,0] = "c" + self.assertRaises(ValueError, f) + + # at + # - assign a single value -> exp_single_cats_value + df = orig.copy() + df.at["j","cats"] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + # - assign a single value not in the current level set + def f(): + df = orig.copy() + df.at["j","cats"] = "c" + self.assertRaises(ValueError, f) + + # fancy indexing + catsf = pd.Categorical(["a","a","c","c","a","a","a"], levels=["a","b","c"]) + idxf = pd.Index(["h","i","j","k","l","m","n"]) + valuesf = [1,1,3,3,1,1,1] + df = pd.DataFrame({"cats":catsf,"values":valuesf}, index=idxf) + + exp_fancy = exp_multi_row.copy() + exp_fancy["cats"].cat.levels = ["a","b","c"] + + df[df["cats"] == "c"] = ["b",2] + tm.assert_frame_equal(df, exp_multi_row) + + # set_value + df = orig.copy() + df.set_value("j","cats", "b") + tm.assert_frame_equal(df, exp_single_cats_value) + + def f(): + df = orig.copy() + df.set_value("j","cats", "c") + self.assertRaises(ValueError, f) + + # Assigning a Category to parts of a int/... 
column uses the values of the Catgorical + df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]}) + exp = pd.DataFrame({"a":[1,"b","b",1,1], "b":["a","a","b","b","a"]}) + df.loc[1:2,"a"] = pd.Categorical(["b","b"], levels=["a","b"]) + df.loc[2:3,"b"] = pd.Categorical(["b","b"], levels=["a","b"]) + tm.assert_frame_equal(df, exp) + + + def test_concat(self): + cat = pd.Categorical(["a","b"], levels=["a","b"]) + vals = [1,2] + df = pd.DataFrame({"cats":cat, "vals":vals}) + cat2 = pd.Categorical(["a","b","a","b"], levels=["a","b"]) + vals2 = [1,2,1,2] + exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1])) + + res = pd.concat([df,df]) + tm.assert_frame_equal(exp, res) + + # Concat should raise if the two categoricals do not have the same levels + cat3 = pd.Categorical(["a","b"], levels=["a","b","c"]) + vals3 = [1,2] + df_wrong_levels = pd.DataFrame({"cats":cat3, "vals":vals3}) + + def f(): + pd.concat([df,df_wrong_levels]) + self.assertRaises(ValueError, f) + + def test_append(self): + cat = pd.Categorical(["a","b"], levels=["a","b"]) + vals = [1,2] + df = pd.DataFrame({"cats":cat, "vals":vals}) + cat2 = pd.Categorical(["a","b","a","b"], levels=["a","b"]) + vals2 = [1,2,1,2] + exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1])) + + res = df.append(df) + tm.assert_frame_equal(exp, res) + + # Concat should raise if the two categoricals do not have the same levels + cat3 = pd.Categorical(["a","b"], levels=["a","b","c"]) + vals3 = [1,2] + df_wrong_levels = pd.DataFrame({"cats":cat3, "vals":vals3}) + + def f(): + df.append(df_wrong_levels) + self.assertRaises(ValueError, f) + + def test_na_actions(self): + + cat = pd.Categorical([1,2,3,np.nan], levels=[1,2,3]) + vals = ["a","b",np.nan,"d"] + df = pd.DataFrame({"cats":cat, "vals":vals}) + cat2 = pd.Categorical([1,2,3,3], levels=[1,2,3]) + vals2 = ["a","b","b","d"] + df_exp_fill = pd.DataFrame({"cats":cat2, "vals":vals2}) + cat3 = pd.Categorical([1,2,3], 
levels=[1,2,3]) + vals3 = ["a","b",np.nan] + df_exp_drop_cats = pd.DataFrame({"cats":cat3, "vals":vals3}) + cat4 = pd.Categorical([1,2], levels=[1,2,3]) + vals4 = ["a","b"] + df_exp_drop_all = pd.DataFrame({"cats":cat4, "vals":vals4}) + + # fillna + res = df.fillna(value={"cats":3, "vals":"b"}) + tm.assert_frame_equal(res, df_exp_fill) + + def f(): + df.fillna(value={"cats":4, "vals":"c"}) + self.assertRaises(ValueError, f) + + res = df.fillna(method='pad') + tm.assert_frame_equal(res, df_exp_fill) + + res = df.dropna(subset=["cats"]) + tm.assert_frame_equal(res, df_exp_drop_cats) + + res = df.dropna() + tm.assert_frame_equal(res, df_exp_drop_all) + + def test_astype_to_other(self): + + s = self.cat['value_group'] + expected = s + tm.assert_series_equal(s.astype('category'),expected) + tm.assert_series_equal(s.astype(com.CategoricalDtype()),expected) + self.assertRaises(ValueError, lambda : s.astype('float64')) + + cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) + exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) + tm.assert_series_equal(cat.astype('str'), exp) + s2 = Series(Categorical.from_array(['1', '2', '3', '4'])) + exp2 = Series([1,2,3,4]).astype(int) + tm.assert_series_equal(s2.astype('int') , exp2) + + # object don't sort correctly, so just compare that we have the same values + def cmp(a,b): + tm.assert_almost_equal(np.sort(np.unique(a)),np.sort(np.unique(b))) + expected = Series(np.array(s.values),name='value_group') + cmp(s.astype('object'),expected) + cmp(s.astype(np.object_),expected) + + # array conversion + tm.assert_almost_equal(np.array(s),np.array(s.values)) + + def test_numeric_like_ops(self): + + # numeric ops should not succeed + for op in ['__add__','__sub__','__mul__','__truediv__']: + self.assertRaises(TypeError, lambda : getattr(self.cat,op)(self.cat)) + + # reduction ops should not succeed (unless specifically defined, e.g. 
min/max) + s = self.cat['value_group'] + for op in ['kurt','skew','var','std','mean','sum','median']: + self.assertRaises(TypeError, lambda : getattr(s,op)(numeric_only=False)) + + # mad technically works because it takes always the numeric data + + # numpy ops + s = pd.Series(pd.Categorical([1,2,3,4])) + self.assertRaises(TypeError, lambda : np.sum(s)) + + # numeric ops on a Series + for op in ['__add__','__sub__','__mul__','__truediv__']: + self.assertRaises(TypeError, lambda : getattr(s,op)(2)) + + # invalid ufunc + self.assertRaises(TypeError, lambda : np.log(s)) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'], - exit=False) + # '--with-coverage', '--cover-package=pandas.core'] + exit=False) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 1cada8efb6c6f..2e1bbc88e36ff 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6365,6 +6365,27 @@ def test_to_csv_line_terminators(self): 'three,3,6\n') self.assertEqual(buf.getvalue(), expected) + def test_to_csv_from_csv_categorical(self): + + # CSV with categoricals should result in the same output as when one would add a "normal" + # Series/DataFrame. 
+ s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) + s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) + res = StringIO() + s.to_csv(res) + exp = StringIO() + s2.to_csv(exp) + self.assertEqual(res.getvalue(), exp.getvalue()) + + df = DataFrame({"s":s}) + df2 = DataFrame({"s":s2}) + res = StringIO() + df.to_csv(res) + exp = StringIO() + df2.to_csv(exp) + self.assertEqual(res.getvalue(), exp.getvalue()) + + def test_info(self): io = StringIO() self.frame.info(buf=io) @@ -12999,11 +13020,16 @@ def test_select_dtypes_include(self): 'b': list(range(1, 4)), 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), - 'e': [True, False, True]}) + 'e': [True, False, True], + 'f': pd.Categorical(list('abc'))}) ri = df.select_dtypes(include=[np.number]) ei = df[['b', 'c', 'd']] tm.assert_frame_equal(ri, ei) + ri = df.select_dtypes(include=[np.number,'category']) + ei = df[['b', 'c', 'd', 'f']] + tm.assert_frame_equal(ri, ei) + def test_select_dtypes_exclude(self): df = DataFrame({'a': list('abc'), 'b': list(range(1, 4)), diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 434591a86d0c4..ea4d66074e65a 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2901,9 +2901,9 @@ def test_no_dummy_key_names(self): def test_groupby_categorical(self): levels = ['foo', 'bar', 'baz', 'qux'] - labels = np.random.randint(0, 4, size=100) + codes = np.random.randint(0, 4, size=100) - cats = Categorical(labels, levels, name='myfactor') + cats = Categorical.from_codes(codes, levels, name='myfactor') data = DataFrame(np.random.randn(100, 4)) @@ -2919,7 +2919,7 @@ def test_groupby_categorical(self): grouped = data.groupby(cats) desc_result = grouped.describe() - idx = cats.labels.argsort() + idx = cats.codes.argsort() ord_labels = np.asarray(cats).take(idx) ord_data = data.take(idx) expected = ord_data.groupby(ord_labels, sort=False).describe() @@ -3049,20 +3049,29 @@ def test_cython_median(self): 
def test_groupby_categorical_no_compress(self): data = Series(np.random.randn(9)) - labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) - cats = Categorical(labels, [0, 1, 2]) + codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) + cats = Categorical.from_codes(codes, [0, 1, 2]) result = data.groupby(cats).mean() - exp = data.groupby(labels).mean() + exp = data.groupby(codes).mean() assert_series_equal(result, exp) - labels = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3]) - cats = Categorical(labels, [0, 1, 2, 3]) + codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3]) + cats = Categorical.from_codes(codes, [0, 1, 2, 3]) result = data.groupby(cats).mean() - exp = data.groupby(labels).mean().reindex(cats.levels) + exp = data.groupby(codes).mean().reindex(cats.levels) assert_series_equal(result, exp) + cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], levels=["a","b","c","d"]) + data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats}) + + result = data.groupby("b").mean() + result = result["a"].values + exp = np.array([1,2,4,np.nan]) + self.assert_numpy_array_equivalent(result, exp) + + def test_groupby_first_datetime64(self): df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)]) df[1] = df[1].view('M8[ns]') diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 6fb88eb5597a9..a8486beb57042 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1842,6 +1842,11 @@ def test_from_arrays(self): result = MultiIndex.from_arrays(arrays) self.assertEqual(list(result), list(self.index)) + # infer correctly + result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']]) + self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')]))) + self.assertTrue(result.levels[1].equals(Index(['a','b']))) + def test_from_product(self): first = ['foo', 'bar', 'buz'] second = ['a', 'b', 'c'] @@ -1907,7 +1912,7 @@ def test_get_level_values_na(self): expected = [np.nan, np.nan, np.nan] 
assert_array_equal(values.values.astype(float), expected) values = index.get_level_values(1) - expected = ['a', np.nan, 1] + expected = np.array(['a', np.nan, 1],dtype=object) assert_array_equal(values.values, expected) if not _np_version_under1p7: diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 255da1af7d11b..f8798e794d22c 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -8,7 +8,6 @@ from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex from pandas.core.datetools import bday -from pandas.core.frame import group_agg from pandas.core.panel import Panel from pandas.core.series import remove_na import pandas.core.common as com @@ -1827,19 +1826,6 @@ def test_get_attr(self): self.panel['i'] = self.panel['ItemA'] assert_frame_equal(self.panel['i'], self.panel.i) - def test_group_agg(self): - values = np.ones((10, 2)) * np.arange(10).reshape((10, 1)) - bounds = np.arange(5) * 2 - f = lambda x: x.mean(axis=0) - - agged = group_agg(values, bounds, f) - - assert(agged[1][0] == 2.5) - assert(agged[2][0] == 4.5) - - # test a function that doesn't aggregate - f2 = lambda x: np.zeros((2, 2)) - self.assertRaises(Exception, group_agg, values, bounds, f2) def test_from_frame_level1_unsorted(self): tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 7dc5d9bd411fb..e88a8c3b2874c 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -8,7 +8,6 @@ from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex from pandas.core.datetools import bday -from pandas.core.frame import group_agg from pandas.core.panel import Panel from pandas.core.panel4d import Panel4D from pandas.core.series import remove_na @@ -1027,19 +1026,6 @@ def test_rename(self): def test_get_attr(self): assert_panel_equal(self.panel4d['l1'], self.panel4d.l1) - def test_group_agg(self): - values = np.ones((10, 2)) * 
np.arange(10).reshape((10, 1)) - bounds = np.arange(5) * 2 - f = lambda x: x.mean(axis=0) - - agged = group_agg(values, bounds, f) - - assert(agged[1][0] == 2.5) - assert(agged[2][0] == 4.5) - - # test a function that doesn't aggregate - f2 = lambda x: np.zeros((2, 2)) - self.assertRaises(Exception, group_agg, values, bounds, f2) def test_from_frame_level1_unsorted(self): raise nose.SkipTest("skipping for now") diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index d08f7e1d547c8..1ae6ceb7ae2b4 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -472,14 +472,11 @@ def test_constructor_generator(self): assert_series_equal(result, exp) def test_constructor_categorical(self): - cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c']) - res = Series(cat) - exp = Series({0: 'a', 1: 'b', 2: 'c', 3: 'a', 4: 'b', 5: 'c'}) - assert_series_equal(res, exp) - + cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True) cat.name = 'foo' res = Series(cat) self.assertEqual(res.name, cat.name) + self.assertTrue(res.values.equals(cat)) def test_constructor_maskedarray(self): data = ma.masked_all((3,), dtype=float) diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index d17e2e2dcb12b..ee594ef031e82 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -1029,7 +1029,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): else: factor = Categorical.from_array(concat_index) levels.append(factor.levels) - label_list.append(factor.labels) + label_list.append(factor.codes) if len(names) == len(levels): names = list(names) diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py index 78c8201f0bcca..7390a4b11095b 100644 --- a/pandas/tools/tests/test_tile.py +++ b/pandas/tools/tests/test_tile.py @@ -27,25 +27,25 @@ def test_simple(self): def test_bins(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]) result, bins = cut(data, 3, retbins=True) - 
assert_equal(result.labels, [0, 0, 0, 1, 2, 0]) + assert_equal(result.codes, [0, 0, 0, 1, 2, 0]) assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7]) def test_right(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=True, retbins=True) - assert_equal(result.labels, [0, 0, 0, 2, 3, 0, 0]) + assert_equal(result.codes, [0, 0, 0, 2, 3, 0, 0]) assert_almost_equal(bins, [0.1905, 2.575, 4.95, 7.325, 9.7]) def test_noright(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=False, retbins=True) - assert_equal(result.labels, [0, 0, 0, 2, 3, 0, 1]) + assert_equal(result.codes, [0, 0, 0, 2, 3, 0, 1]) assert_almost_equal(bins, [0.2, 2.575, 4.95, 7.325, 9.7095]) def test_arraylike(self): data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1] result, bins = cut(data, 3, retbins=True) - assert_equal(result.labels, [0, 0, 0, 1, 2, 0]) + assert_equal(result.codes, [0, 0, 0, 1, 2, 0]) assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7]) def test_bins_not_monotonic(self): @@ -160,7 +160,7 @@ def test_cut_out_of_bounds(self): result = cut(arr, [-1, 0, 1]) - mask = result.labels == -1 + mask = result.codes == -1 ex_mask = (arr < -1) | (arr > 1) self.assert_numpy_array_equal(mask, ex_mask) diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index c2512ba2b4b38..b28f7c89606de 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -189,7 +189,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False, levels = np.asarray(levels, dtype=object) np.putmask(ids, na_mask, 0) - fac = Categorical(ids - 1, levels, name=name) + fac = Categorical(ids - 1, levels, name=name, fastpath=True) else: fac = ids - 1 if has_nas:
This PR creates a `CategoricalBlock` as a first class internal object, on par with blocks like `Datetime,Timedelta,Object,Numeric` etc. closes #5313 closes #5314 closes #3943 TODOS - [x] test with `select_dtypes` #7434, https://github.com/jreback/pandas/commit/a43c6c041c7870c63992bc15b427adc35f7ab5d8 **Code Changes** - [x] `factor_agg/group_agg` in `core/frame.py` -> look at unittests // a google search didn't turn up any questions/usage -> remove them? - [x] Add a API change note about the group_agg and factor_agg - [x] Add a API change note about Categorical.labels -> Categorical.codes - [x] Printing level information when a Series of type categorical is printed? - [x] Fix the remaining "FIXME" in tests - [x] Groupby -> include each level, even if group is empty - [x] Pivotable -> include each level, even if group is empty - [x] `Series(categorical).describe()` / `Categorical.unique()` -> should this return all levels or only used levels? - [x] Series(cat).describe() -> show information about the levels? - [x] sort by index? - [x] df.to_csv: fails due to a slicer error? - [x] concat/append -> should retain categoricals - [x] concat/append -> should raise on different levels - [x] sorting a dataframe by a categorical variable does not use the level ordering - [x] sorting by a unsortable categorical should not be possible and should raise - [x] reorder_levels: raise if set(old_levels) != set(new_levels)? - [x] min/max and numeric_only=True - [x] df.to_hdf: fails due to `categorical.T` not implemented - [x] `Category.describe()` with empty levels (will be fixed with groupby) - [x] TST: apply -> look into it if/how it works -> should probably convert first and don't try to preserve the categorical - [x] TST: def test_sort_dataframe(self): -> sort df, cats must also be sorted! - [x] TST: Add tests about some more numpy function that should fail - [x] labels vs. pointers: should the (internal) name of integer array be renamed? 
-> I would like to see a rename to something with a underscore and it would be nice to rename it to another name (`_pointers`, `_level_idx`, `_level_pointer`) **Documentation** - [x] Add release notes - [x] Add a API change note about the old constructor mode - [x] Document the Categorical methods - [x] Link classes/methods in categorical.rst -> not done, just a link to API docs and in special cases. - [x] Add Categorical API to API docs **Future** - [ ] HDF support? -> see above... #7621 - [ ] meta infos / "Contrasts": R's factor has a "contrast" property which indicates how this factor should be used in statistical models -> don't do it in the PR
https://api.github.com/repos/pandas-dev/pandas/pulls/7217
2014-05-23T13:13:36Z
2014-07-14T21:42:11Z
2014-07-14T21:42:11Z
2014-09-19T14:42:05Z
DOC/CLN for GH7213/GH7206
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index d816bc171c300..8f3d2fe8eb079 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -413,7 +413,7 @@ regularity will result in a ``DatetimeIndex`` (but frequency is lost): Time/Date Components ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -There are several time/date properties that one can access from ``Timestamp`` or a collection of timestamps like a ``DateTimeIndex``. +There are several time/date properties that one can access from ``Timestamp`` or a collection of timestamps like a ``DateTimeIndex``. .. csv-table:: :header: "Property", "Description" @@ -604,7 +604,7 @@ in the usual way. # Skip new years dt = datetime(2013, 12, 17) dt + bmth_us - + # Define date index with custom offset from pandas import DatetimeIndex DatetimeIndex(start='20100101',end='20120101',freq=bmth_us) @@ -789,8 +789,8 @@ methods to return a list of holidays and only ``rules`` need to be defined in a specific holiday calendar class. Further, ``start_date`` and ``end_date`` class attributes determine over what date range holidays are generated. These should be overwritten on the ``AbstractHolidayCalendar`` class to have the range -apply to all calendar subclasses. ``USFederalHolidayCalendar`` is the -only calendar that exists and primarily serves as an example for developing +apply to all calendar subclasses. ``USFederalHolidayCalendar`` is the +only calendar that exists and primarily serves as an example for developing other calendars. For holidays that occur on fixed dates (e.g., US Memorial Day or July 4th) an @@ -823,12 +823,12 @@ An example of how holidays and holiday calendars are defined: cal = ExampleCalendar() cal.holidays(datetime(2012, 1, 1), datetime(2012, 12, 31)) -Using this calendar, creating an index or doing offset arithmetic skips weekends +Using this calendar, creating an index or doing offset arithmetic skips weekends and holidays (i.e., Memorial Day/July 4th). .. 
ipython:: python - DatetimeIndex(start='7/1/2012', end='7/10/2012', + DatetimeIndex(start='7/1/2012', end='7/10/2012', freq=CDay(calendar=cal)).to_pydatetime() offset = CustomBusinessDay(calendar=cal) datetime(2012, 5, 25) + offset @@ -840,11 +840,11 @@ Ranges are defined by the ``start_date`` and ``end_date`` class attributes of ``AbstractHolidayCalendar``. The defaults are below. .. ipython:: python - + AbstractHolidayCalendar.start_date AbstractHolidayCalendar.end_date -These dates can be overwritten by setting the attributes as +These dates can be overwritten by setting the attributes as datetime/Timestamp/string. .. ipython:: python @@ -1120,7 +1120,7 @@ Passing string represents lower frequency than `PeriodIndex` returns partial sli dfp dfp['2013-01-01 10H'] -As the same as `DatetimeIndex`, the endpoints will be included in the result. Below example slices data starting from 10:00 to 11:59. +As the same as `DatetimeIndex`, the endpoints will be included in the result. Below example slices data starting from 10:00 to 11:59. .. 
ipython:: python diff --git a/pandas/core/base.py b/pandas/core/base.py index 68200f5ed8d31..0e7bc0fee8a48 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -201,16 +201,6 @@ def __unicode__(self): return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype) -# facilitate the properties on the wrapped ops -def _field_accessor(name, docstring=None): - op_accessor = '_{0}'.format(name) - def f(self): - return self._ops_compat(name,op_accessor) - - f.__name__ = name - f.__doc__ = docstring - return property(f) - class IndexOpsMixin(object): """ common ops mixin to support a unified inteface / docs for Series / Index """ @@ -219,24 +209,9 @@ def _is_allowed_index_op(self, name): raise TypeError("cannot perform an {name} operations on this type {typ}".format( name=name,typ=type(self._get_access_object()))) - def _is_allowed_datetime_index_op(self, name): - if not self._allow_datetime_index_ops: - raise TypeError("cannot perform an {name} operations on this type {typ}".format( - name=name,typ=type(self._get_access_object()))) - - def _is_allowed_period_index_op(self, name): - if not self._allow_period_index_ops: - raise TypeError("cannot perform an {name} operations on this type {typ}".format( - name=name,typ=type(self._get_access_object()))) - def _ops_compat(self, name, op_accessor): - from pandas.tseries.index import DatetimeIndex - from pandas.tseries.period import PeriodIndex + obj = self._get_access_object() - if isinstance(obj, DatetimeIndex): - self._is_allowed_datetime_index_op(name) - elif isinstance(obj, PeriodIndex): - self._is_allowed_period_index_op(name) try: return self._wrap_access_object(getattr(obj,op_accessor)) except AttributeError: @@ -336,6 +311,44 @@ def factorize(self, sort=False, na_sentinel=-1): from pandas.core.algorithms import factorize return factorize(self, sort=sort, na_sentinel=na_sentinel) +# facilitate the properties on the wrapped ops +def _field_accessor(name, docstring=None): + op_accessor = '_{0}'.format(name) 
+ def f(self): + return self._ops_compat(name,op_accessor) + + f.__name__ = name + f.__doc__ = docstring + return property(f) + +class DatetimeIndexOpsMixin(object): + """ common ops mixin to support a unified inteface datetimelike Index """ + + def _is_allowed_datetime_index_op(self, name): + if not self._allow_datetime_index_ops: + raise TypeError("cannot perform an {name} operations on this type {typ}".format( + name=name,typ=type(self._get_access_object()))) + + def _is_allowed_period_index_op(self, name): + if not self._allow_period_index_ops: + raise TypeError("cannot perform an {name} operations on this type {typ}".format( + name=name,typ=type(self._get_access_object()))) + + def _ops_compat(self, name, op_accessor): + + from pandas.tseries.index import DatetimeIndex + from pandas.tseries.period import PeriodIndex + obj = self._get_access_object() + if isinstance(obj, DatetimeIndex): + self._is_allowed_datetime_index_op(name) + elif isinstance(obj, PeriodIndex): + self._is_allowed_period_index_op(name) + try: + return self._wrap_access_object(getattr(obj,op_accessor)) + except AttributeError: + raise TypeError("cannot perform an {name} operations on this type {typ}".format( + name=name,typ=type(obj))) + date = _field_accessor('date','Returns numpy array of datetime.date. The date part of the Timestamps') time = _field_accessor('time','Returns numpy array of datetime.time. 
The time part of the Timestamps') year = _field_accessor('year', "The year of the datetime") diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 49430be8c98a6..39d8eb8360244 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -4,7 +4,7 @@ import pandas.compat as compat import pandas as pd from pandas.compat import u, StringIO -from pandas.core.base import FrozenList, FrozenNDArray +from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin from pandas.util.testing import assertRaisesRegexp, assert_isinstance from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex from pandas import _np_version_under1p7 @@ -181,7 +181,13 @@ def check_ops_properties(self, props, filter=None, ignore_failures=False): # we mostly care about Series hwere anyhow if not ignore_failures: for o in self.not_valid_objs: - self.assertRaises(TypeError, lambda : getattr(o,op)) + + # an object that is datetimelike will raise a TypeError, otherwise + # an AttributeError + if issubclass(type(o), DatetimeIndexOpsMixin): + self.assertRaises(TypeError, lambda : getattr(o,op)) + else: + self.assertRaises(AttributeError, lambda : getattr(o,op)) class TestIndexOps(Ops): @@ -462,6 +468,13 @@ def test_ops_properties_basic(self): for op in ['year','day','second','weekday']: self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op)) + # attribute access should still work! 
+ s = Series(dict(year=2000,month=1,day=10)) + self.assertEquals(s.year,2000) + self.assertEquals(s.month,1) + self.assertEquals(s.day,10) + self.assertRaises(AttributeError, lambda : s.weekday) + class TestPeriodIndexOps(Ops): _allowed = '_allow_period_index_ops' diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index e0ae6a30cbf1a..c3c7ae9bacbcd 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5617,12 +5617,6 @@ def test_asfreq(self): self.assertEqual(len(result), 0) self.assertIsNot(result, ts) - def test_weekday(self): - # Just run the function - def f(): - self.ts.weekday - self.assertRaises(TypeError, f) - def test_diff(self): # Just run the function self.ts.diff() diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index c1a22f547a86c..0e18509d9bc26 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -15,6 +15,7 @@ from pandas.tseries.frequencies import ( infer_freq, to_offset, get_period_alias, Resolution, get_reso_string, get_offset) +from pandas.core.base import DatetimeIndexOpsMixin from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay from pandas.tseries.tools import parse_time_string, normalize_date from pandas.util.decorators import cache_readonly @@ -102,7 +103,7 @@ def _ensure_datetime64(other): _midnight = time(0, 0) -class DatetimeIndex(Int64Index): +class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index): """ Immutable ndarray of datetime64 data, represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 1d528c9545863..5516c4634ea58 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -8,6 +8,7 @@ from pandas.tseries.frequencies import (get_freq_code as _gfc, _month_numbers, FreqGroup) from pandas.tseries.index import DatetimeIndex, Int64Index, Index +from pandas.core.base import DatetimeIndexOpsMixin from 
pandas.tseries.tools import parse_time_string import pandas.tseries.frequencies as _freq_mod @@ -500,7 +501,7 @@ def wrapper(self, other): return wrapper -class PeriodIndex(Int64Index): +class PeriodIndex(DatetimeIndexOpsMixin, Int64Index): """ Immutable ndarray holding ordinal values indicating regular periods in time such as particular years, quarters, months, etc. A value of 1 is the
DOC: doc fixups for (GH7213) CLN: cleanup related to (GH7206) don't create the disabled properties at all for Series related #7206 related #7213
https://api.github.com/repos/pandas-dev/pandas/pulls/7216
2014-05-23T12:46:10Z
2014-05-23T13:52:50Z
2014-05-23T13:52:50Z
2014-06-26T19:53:59Z
DOC: fix datetime-like Series ops in docs + some extra api docs
diff --git a/doc/source/api.rst b/doc/source/api.rst index c6e2ae74a27cb..2b12da9a7f92f 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -127,6 +127,7 @@ Data manipulations :toctree: generated/ melt + pivot pivot_table crosstab cut @@ -134,6 +135,7 @@ Data manipulations merge concat get_dummies + factorize Top-level missing data ~~~~~~~~~~~~~~~~~~~~~~ @@ -320,6 +322,7 @@ Computations / Descriptive Stats :toctree: generated/ Series.abs + Series.all Series.any Series.autocorr Series.between @@ -362,6 +365,7 @@ Reindexing / Selection / Label manipulation Series.align Series.drop + Series.equals Series.first Series.head Series.idxmax @@ -422,22 +426,6 @@ Time series-related Series.resample Series.tz_convert Series.tz_localize - Series.year - Series.month - Series.day - Series.hour - Series.minute - Series.second - Series.microsecond - Series.nanosecond - Series.date - Series.time - Series.dayofyear - Series.weekofyear - Series.week - Series.dayofweek - Series.weekday - Series.quarter String handling ~~~~~~~~~~~~~~~~~~~ @@ -500,8 +488,12 @@ Serialization / IO / Conversion Series.to_dict Series.to_frame Series.to_hdf + Series.to_sql + Series.to_gbq + Series.to_msgpack Series.to_json Series.to_sparse + Series.to_dense Series.to_string Series.to_clipboard @@ -624,6 +616,7 @@ Computations / Descriptive Stats :toctree: generated/ DataFrame.abs + DataFrame.all DataFrame.any DataFrame.clip DataFrame.clip_lower @@ -666,6 +659,7 @@ Reindexing / Selection / Label manipulation DataFrame.drop DataFrame.drop_duplicates DataFrame.duplicated + DataFrame.equals DataFrame.filter DataFrame.first DataFrame.head @@ -759,14 +753,18 @@ Serialization / IO / Conversion DataFrame.to_pickle DataFrame.to_csv DataFrame.to_hdf + DataFrame.to_sql DataFrame.to_dict DataFrame.to_excel DataFrame.to_json DataFrame.to_html DataFrame.to_latex DataFrame.to_stata + DataFrame.to_msgpack + DataFrame.to_gbq DataFrame.to_records DataFrame.to_sparse + DataFrame.to_dense DataFrame.to_string 
DataFrame.to_clipboard @@ -911,6 +909,7 @@ Reindexing / Selection / Label manipulation Panel.add_prefix Panel.add_suffix Panel.drop + Panel.equals Panel.filter Panel.first Panel.last @@ -1039,6 +1038,7 @@ Modifying and Computations Index.copy Index.delete Index.diff + Index.sym_diff Index.drop Index.equals Index.factorize @@ -1197,8 +1197,13 @@ Indexing, iteration GroupBy.groups GroupBy.indices GroupBy.get_group + +.. currentmodule:: pandas + Grouper +.. currentmodule:: pandas.core.groupby + Function application ~~~~~~~~~~~~~~~~~~~~ .. autosummary:: @@ -1219,6 +1224,20 @@ Computations / Descriptive Stats GroupBy.var GroupBy.ohlc +.. currentmodule:: pandas + +Working with options +-------------------- + +.. autosummary:: + :toctree: generated/ + + describe_option + reset_option + get_option + set_option + option_context + .. HACK - see github issue #4539. To ensure old links remain valid, include here the autosummaries with previous currentmodules as a comment and add diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 65796d95fed0a..d816bc171c300 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -579,8 +579,7 @@ calendars which account for local holidays and local weekend conventions. bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt) dt = datetime(2013, 4, 30) dt + 2 * bday_egypt - dts = date_range(dt, periods=5, freq=bday_egypt).to_series() - dts + dts = date_range(dt, periods=5, freq=bday_egypt) Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split())) As of v0.14 holiday calendars can be used to provide the list of holidays. 
See the diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index 91226fe180548..fd726af3912f0 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -380,7 +380,7 @@ Experimental Features bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt) dt = datetime(2013, 4, 30) print(dt + 2 * bday_egypt) - dts = date_range(dt, periods=5, freq=bday_egypt).to_series() + dts = date_range(dt, periods=5, freq=bday_egypt) print(Series(dts.weekday, dts).map(Series('Mon Tue Wed Thu Fri Sat Sun'.split()))) Bug Fixes
Closes #7213. \+ added some entries in api.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/7215
2014-05-23T12:21:43Z
2014-05-23T12:48:21Z
2014-05-23T12:48:21Z
2014-06-27T12:23:56Z
TST/DOC: Test for and document incompatibility with openpyxl 2.0.0 and later
diff --git a/README.md b/README.md index 6f50c1e2ea10e..79a84440d6a5c 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,8 @@ pip install pandas - [xlrd/xlwt](http://www.python-excel.org/) - Excel reading (xlrd) and writing (xlwt) - [openpyxl](http://packages.python.org/openpyxl/) - - openpyxl version 1.6.1 or higher, for writing .xlsx files + - openpyxl version 1.6.1 or higher, but lower than 2.0.0, for + writing .xlsx files - xlrd >= 0.9.0 - [XlsxWriter](https://pypi.python.org/pypi/XlsxWriter) - Alternative Excel writer. diff --git a/doc/source/install.rst b/doc/source/install.rst index 18ad031e8a880..7cce761445c51 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -97,7 +97,7 @@ Optional Dependencies * `statsmodels <http://statsmodels.sourceforge.net/>`__ * Needed for parts of :mod:`pandas.stats` * `openpyxl <http://packages.python.org/openpyxl/>`__, `xlrd/xlwt <http://www.python-excel.org/>`__ - * openpyxl version 1.6.1 or higher + * openpyxl version 1.6.1 or higher, but lower than 2.0.0 * Needed for Excel I/O * `XlsxWriter <https://pypi.python.org/pypi/XlsxWriter>`__ * Alternative Excel writer. diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.14.0.txt index 525b6f123a617..16cd23183e424 100644 --- a/doc/source/v0.14.0.txt +++ b/doc/source/v0.14.0.txt @@ -31,6 +31,8 @@ users upgrade to this version. - :ref:`Deprecations <whatsnew_0140.deprecations>` +- :ref:`Known Issues <whatsnew_0140.knownissues>` + - :ref:`Bug Fixes <release.bug_fixes-0.14.0>` .. warning:: @@ -630,6 +632,14 @@ Deprecations in a future release. You can use the future behavior now by passing ``return_type='axes'`` to boxplot. +.. _whatsnew_0140.knownissues: + +Known Issues +~~~~~~~~~~~~ + +- OpenPyXL 2.0.0 breaks backwards compatibility (:issue:`7196`) + + .. 
_whatsnew_0140.enhancements: Enhancements diff --git a/pandas/compat/openpyxl_compat.py b/pandas/compat/openpyxl_compat.py new file mode 100644 index 0000000000000..4a48cdac98dd2 --- /dev/null +++ b/pandas/compat/openpyxl_compat.py @@ -0,0 +1,26 @@ +""" +Detect incompatible version of OpenPyXL + +GH7169 +""" + +from distutils.version import LooseVersion + +start_ver = '1.6.1' +stop_ver = '2.0.0' + +def is_compat(): + """ + Detect whether the installed version of openpyxl is supported + Returns True/False if openpyxl is installed, None otherwise + """ + try: + import openpyxl + except ImportError: + return None + + ver = LooseVersion(openpyxl.__version__) + if ver < LooseVersion(start_ver) or LooseVersion(stop_ver) <= ver: + return False + + return True diff --git a/pandas/io/excel.py b/pandas/io/excel.py index e81c279c5820d..36af8b1da0e30 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -16,6 +16,7 @@ from pandas.core import config from pandas.core.common import pprint_thing import pandas.compat as compat +import pandas.compat.openpyxl_compat as openpyxl_compat import pandas.core.common as com from warnings import warn from distutils.version import LooseVersion @@ -617,7 +618,12 @@ def _convert_to_style(cls, style_dict): return xls_style -register_writer(_OpenpyxlWriter) + +if openpyxl_compat.is_compat(): + register_writer(_OpenpyxlWriter) +else: + warn('Installed openpyxl is not supported at this time. Use >={} and <{}.' 
+ .format(openpyxl_compat.start_ver, openpyxl_compat.stop_ver)) class _XlwtWriter(ExcelWriter): diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 5d4c4e4a9fdd8..ef7e57076c908 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1,6 +1,6 @@ # pylint: disable=E1101 -from pandas.compat import u, range, map +from pandas.compat import u, range, map, openpyxl_compat from datetime import datetime, date, time import os from distutils.version import LooseVersion @@ -45,6 +45,10 @@ def _skip_if_no_openpyxl(): except ImportError: raise nose.SkipTest('openpyxl not installed, skipping') + if not openpyxl_compat.is_compat(): + raise nose.SkipTest('need %s <= openpyxl < %s, skipping' % + (openpyxl_compat.start_ver, openpyxl_compat.stop_ver)) + def _skip_if_no_xlsxwriter(): try:
Let users down easy if v0.14.0 remains incompatible with openpyxl 2.0.0 and later closes #7169 .
https://api.github.com/repos/pandas-dev/pandas/pulls/7214
2014-05-23T02:12:43Z
2014-05-24T13:05:08Z
2014-05-24T13:05:08Z
2014-06-12T23:47:30Z