title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: removes cython implementation of groupby count
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cb237b93c70ba..e81aaebe77807 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4562,7 +4562,7 @@ def _count_level(self, level, axis=0, numeric_only=False): level_index = count_axis.levels[level] labels = com._ensure_int64(count_axis.labels[level]) - counts = lib.count_level_2d(mask, labels, len(level_index)) + counts = lib.count_level_2d(mask, labels, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 43110494d675b..1f5855e63dee8 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -69,7 +69,7 @@ 'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount', 'resample', 'describe', - 'rank', 'quantile', 'count', + 'rank', 'quantile', 'fillna', 'mad', 'any', 'all', @@ -149,9 +149,6 @@ def _last(x): return _last(x) -def _count_compat(x, axis=0): - return x.count() # .size != .count(); count excludes nan - class Grouper(object): """ A Grouper allows the user to specify a groupby instruction for a target object @@ -801,11 +798,6 @@ def size(self): numeric_only=False, _convert=True) last = _groupby_function('last', 'last', _last_compat, numeric_only=False, _convert=True) - _count = _groupby_function('_count', 'count', _count_compat, - numeric_only=False) - - def count(self, axis=0): - return self._count().astype('int64') def ohlc(self): """ @@ -1463,7 +1455,6 @@ def get_group_levels(self): 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) }, 'last': 'group_last', - 'count': 'group_count', } _cython_arity = { @@ -3468,6 +3459,24 @@ def _apply_to_column_groupbys(self, func): in self._iterate_column_groupbys()), keys=self._selected_obj.columns, axis=1) + def count(self): + from functools import partial + from pandas.lib import count_level_2d + from pandas.core.common import _isnull_ndarraylike as isnull + + data, _ = self._get_data_to_aggregate() + ids, _, ngroups = 
self.grouper.group_info + mask = ids != -1 + + val = ((mask & ~isnull(blk.get_values())) for blk in data.blocks) + loc = (blk.mgr_locs for blk in data.blocks) + + counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1) + blk = map(make_block, map(counter, val), loc) + + return self._wrap_agged_blocks(data.items, list(blk)) + + from pandas.tools.plotting import boxplot_frame_groupby DataFrameGroupBy.boxplot = boxplot_frame_groupby diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 7b2d849695c98..2b4974155d44c 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -1253,19 +1253,32 @@ def lookup_values(ndarray[object] values, dict mapping): return maybe_convert_objects(result) +@cython.boundscheck(False) +@cython.wraparound(False) def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask, - ndarray[int64_t] labels, Py_ssize_t max_bin): + ndarray[int64_t, ndim=1] labels, + Py_ssize_t max_bin, + int axis): cdef: Py_ssize_t i, j, k, n ndarray[int64_t, ndim=2] counts + assert(axis == 0 or axis == 1) n, k = (<object> mask).shape - counts = np.zeros((max_bin, k), dtype='i8') - for i from 0 <= i < n: - for j from 0 <= j < k: - if mask[i, j]: - counts[labels[i], j] += 1 + if axis == 0: + counts = np.zeros((max_bin, k), dtype='i8') + with nogil: + for i from 0 <= i < n: + for j from 0 <= j < k: + counts[labels[i], j] += mask[i, j] + + else: # axis == 1 + counts = np.zeros((n, max_bin), dtype='i8') + with nogil: + for i from 0 <= i < n: + for j from 0 <= j < k: + counts[i, labels[j]] += mask[i, j] return counts diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index c086919d94644..b055d75df4cf4 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -971,44 +971,6 @@ def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, """ -group_count_template = """@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(c_type)s, 
ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab, ncounts = len(counts) - Py_ssize_t N = values.shape[0], K = values.shape[1] - %(c_type)s val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - - %(nogil)s - %(tab)sfor i in range(N): - %(tab)s lab = labels[i] - %(tab)s if lab < 0: - %(tab)s continue - - %(tab)s counts[lab] += 1 - %(tab)s for j in range(K): - %(tab)s val = values[i, j] - - %(tab)s # not nan - %(tab)s nobs[lab, j] += val == val and val != iNaT - - %(tab)sfor i in range(ncounts): - %(tab)s for j in range(K): - %(tab)s out[i, j] = nobs[i, j] -""" - # add passing bin edges, instead of labels @@ -1995,8 +1957,6 @@ def generate_from_template(template, exclude=None): groupby_min_max = [group_min_template, group_max_template] -groupby_count = [group_count_template] - templates_1d = [map_indices_template, pad_template, backfill_template, @@ -2051,12 +2011,6 @@ def generate_take_cython_file(): print(generate_put_min_max_template(template, use_ints=True), file=f) - for template in groupby_count: - print(generate_put_selection_template(template, use_ints=True, - use_datelikes=True, - use_objects=True), - file=f) - for template in nobool_1d_templates: print(generate_from_template(template, exclude=['bool']), file=f) diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index c0ecd04749e58..2f2fd528999d6 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -7930,192 +7930,6 @@ def group_max_int64(ndarray[int64_t, ndim=2] out, out[i, j] = maxx[i, j] -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab, ncounts = len(counts) 
- Py_ssize_t N = values.shape[0], K = values.shape[1] - float64_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(ncounts): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab, ncounts = len(counts) - Py_ssize_t N = values.shape[0], K = values.shape[1] - float32_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(ncounts): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab, ncounts = len(counts) - Py_ssize_t N = values.shape[0], K = values.shape[1] - int64_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - 
for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(ncounts): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_object(ndarray[object, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab, ncounts = len(counts) - Py_ssize_t N = values.shape[0], K = values.shape[1] - object val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(ncounts): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_count_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, lab, ncounts = len(counts) - Py_ssize_t N = values.shape[0], K = values.shape[1] - int64_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") - - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[lab, j] += val == val and val != iNaT - - for i in range(ncounts): - for j in range(K): - out[i, j] = nobs[i, j] - - @cython.wraparound(False) @cython.boundscheck(False) def left_join_indexer_unique_float64(ndarray[float64_t] left, diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 
f5693983f1cc1..a85e68602493b 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2481,6 +2481,30 @@ def test_size(self): self.assertEqual(result[key], len(group)) def test_count(self): + from string import ascii_lowercase + n = 1 << 15 + dr = date_range('2015-08-30', periods=n // 10, freq='T') + + df = DataFrame({ + '1st':np.random.choice(list(ascii_lowercase), n), + '2nd':np.random.randint(0, 5, n), + '3rd':np.random.randn(n).round(3), + '4th':np.random.randint(-10, 10, n), + '5th':np.random.choice(dr, n), + '6th':np.random.randn(n).round(3), + '7th':np.random.randn(n).round(3), + '8th':np.random.choice(dr, n) - np.random.choice(dr, 1), + '9th':np.random.choice(list(ascii_lowercase), n)}) + + for col in df.columns.drop(['1st', '2nd', '4th']): + df.loc[np.random.choice(n, n // 10), col] = np.nan + + df['9th'] = df['9th'].astype('category') + + for key in '1st', '2nd', ['1st', '2nd']: + left = df.groupby(key).count() + right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1) + assert_frame_equal(left, right) # GH5610 # count counts non-nulls @@ -4966,7 +4990,7 @@ def test_groupby_whitelist(self): 'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount', 'resample', 'describe', - 'rank', 'quantile', 'count', + 'rank', 'quantile', 'fillna', 'mad', 'any', 'all', @@ -4987,7 +5011,7 @@ def test_groupby_whitelist(self): 'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount', 'resample', 'describe', - 'rank', 'quantile', 'count', + 'rank', 'quantile', 'fillna', 'mad', 'any', 'all', @@ -5253,7 +5277,6 @@ def test__cython_agg_general(self): ('max', np.max), ('first', lambda x: x.iloc[0]), ('last', lambda x: x.iloc[-1]), - ('count', np.size), ] df = DataFrame(np.random.randn(1000)) labels = np.random.randint(0, 50, size=1000).astype(float) @@ -5439,26 +5462,26 @@ def test_first_last_max_min_on_time_data(self): def test_groupby_preserves_sort(self): # Test to ensure that groupby always preserves sort order of original # object. 
Issue #8588 and #9651 - - df = DataFrame({'int_groups':[3,1,0,1,0,3,3,3], - 'string_groups':['z','a','z','a','a','g','g','g'], + + df = DataFrame({'int_groups':[3,1,0,1,0,3,3,3], + 'string_groups':['z','a','z','a','a','g','g','g'], 'ints':[8,7,4,5,2,9,1,1], 'floats':[2.3,5.3,6.2,-2.4,2.2,1.1,1.1,5], 'strings':['z','d','a','e','word','word2','42','47']}) # Try sorting on different types and with different group types - for sort_column in ['ints', 'floats', 'strings', ['ints','floats'], + for sort_column in ['ints', 'floats', 'strings', ['ints','floats'], ['ints','strings']]: - for group_column in ['int_groups', 'string_groups', + for group_column in ['int_groups', 'string_groups', ['int_groups','string_groups']]: df = df.sort_values(by=sort_column) g = df.groupby(group_column) - + def test_sort(x): assert_frame_equal(x, x.sort_values(by=sort_column)) - + g.apply(test_sort) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 0f55f79b8b9b9..df61387734cb3 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -829,7 +829,7 @@ def _check_counts(frame, axis=0): index = frame._get_axis(axis) for i in range(index.nlevels): result = frame.count(axis=axis, level=i) - expected = frame.groupby(axis=axis, level=i).count(axis=axis) + expected = frame.groupby(axis=axis, level=i).count() expected = expected.reindex_like(result).astype('i8') assert_frame_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/11013
2015-09-05T22:48:36Z
2015-09-07T20:30:05Z
2015-09-07T20:30:05Z
2015-09-08T01:37:28Z
DOC: Docstring Redesign to fix the problem of unexpected keyword arg (issue10888)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 38c5593e5911a..edf03db309120 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -1033,7 +1033,7 @@ Bug Fixes - Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`) - Bug in ``Index.take`` may add unnecessary ``freq`` attribute (:issue:`10791`) - Bug in ``merge`` with empty ``DataFrame`` may raise ``IndexError`` (:issue:`10824`) - +- Bug in ``to_latex`` where unexpected keyword argument for some documented arguments (:issue:`10888`) - Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`) - Bug in serialization of ``category`` types in HDF5 in presence of alternate encodings. (:issue:`10366`) diff --git a/pandas/core/format.py b/pandas/core/format.py index 47d0ef37383c4..f0608cbb654f8 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -24,11 +24,9 @@ import itertools import csv -docstring_to_string = """ - Parameters - ---------- - frame : DataFrame - object to render +common_docstring = """ + Parameters + ---------- buf : StringIO-like, optional buffer to write to columns : sequence, optional @@ -51,20 +49,27 @@ sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row, default True + index_names : bool, optional + Prints the names of the indexes, default True""" + +justify_docstring = """ justify : {'left', 'right'}, default None Left or right-justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out - of the box. - index_names : bool, optional - Prints the names of the indexes, default True + of the box.""" + +force_unicode_docstring = """ force_unicode : bool, default False Always return a unicode result. 
Deprecated in v0.10.0 as string - formatting is now rendered to unicode by default. + formatting is now rendered to unicode by default.""" + +return_docstring = """ Returns ------- formatted : string (or unicode, depending on data and options)""" +docstring_to_string = common_docstring + justify_docstring + force_unicode_docstring + return_docstring class CategoricalFormatter(object): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b4bb06fe83649..5ab75f7d2658a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1413,7 +1413,7 @@ def to_stata( write_index=write_index) writer.write_file() - @Appender(fmt.docstring_to_string, indents=1) + @Appender(fmt.common_docstring + fmt.justify_docstring + fmt.return_docstring, indents=1) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, @@ -1441,7 +1441,7 @@ def to_string(self, buf=None, columns=None, col_space=None, result = formatter.buf.getvalue() return result - @Appender(fmt.docstring_to_string, indents=1) + @Appender(fmt.common_docstring + fmt.justify_docstring + fmt.return_docstring, indents=1) def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, @@ -1491,7 +1491,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, if buf is None: return formatter.buf.getvalue() - @Appender(fmt.docstring_to_string, indents=1) + @Appender(fmt.common_docstring + fmt.return_docstring, indents=1) def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
Fixed issue #10888
https://api.github.com/repos/pandas-dev/pandas/pulls/11011
2015-09-05T21:24:50Z
2015-09-09T14:55:53Z
2015-09-09T14:55:53Z
2015-09-09T15:04:00Z
TST: corrected URL for newly added Excel test data file
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 073fc55357df7..d8ef68b78b581 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -415,11 +415,8 @@ def test_read_xlrd_Book(self): @tm.network def test_read_from_http_url(self): - # TODO: remove this when merging into master - url = ('https://raw.github.com/davidovitch/pandas/master/' + url = ('https://raw.github.com/pydata/pandas/master/' 'pandas/io/tests/data/test1' + self.ext) -# url = ('https://raw.github.com/pydata/pandas/master/' -# 'pandas/io/tests/data/test' + self.ext) url_table = read_excel(url) local_table = self.get_exceldf('test1') tm.assert_frame_equal(url_table, local_table)
There was one small thing overlooked when merging PR #10964: the url of the renamed test file was still pointing to my repo instead of pydata/pandas.
https://api.github.com/repos/pandas-dev/pandas/pulls/11009
2015-09-05T20:12:55Z
2015-09-05T20:20:06Z
null
2015-09-05T20:20:06Z
BUG: Bug in pickling of a non-regular freq DatetimeIndex #11002
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 68d3861599cbd..1a31f38b585cf 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -894,7 +894,7 @@ Bug Fixes - Bug in clearing the cache on ``DataFrame.pop`` and a subsequent inplace op (:issue:`10912`) - Bug in indexing with a mixed-integer ``Index`` causing an ``ImportError`` (:issue:`10610`) - Bug in ``Series.count`` when index has nulls (:issue:`10946`) - +- Bug in pickling of a non-regular freq ``DatetimeIndex`` (:issue:`11002`) - Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`) - Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index b1198f9758938..4ba15d319dc62 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -120,7 +120,8 @@ def _new_DatetimeIndex(cls, d): # data are already in UTC # so need to localize tz = d.pop('tz',None) - result = cls.__new__(cls, **d) + + result = cls.__new__(cls, verify_integrity=False, **d) if tz is not None: result = result.tz_localize('UTC').tz_convert(tz) return result diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 84a4c3e08e493..a021195ea6c04 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2142,8 +2142,8 @@ def test_period_resample_with_local_timezone_dateutil(self): def test_pickle(self): - #GH4606 + # GH4606 p = self.round_trip_pickle(NaT) self.assertTrue(p is NaT) @@ -2153,6 +2153,11 @@ def test_pickle(self): self.assertTrue(idx_p[1] is NaT) self.assertTrue(idx_p[2] == idx[2]) + # GH11002 + # don't infer freq + idx = date_range('1750-1-1', '2050-1-1', freq='7D') + idx_p = self.round_trip_pickle(idx) + tm.assert_index_equal(idx, idx_p) def _simple_ts(start, end, freq='D'): rng = date_range(start, end, freq=freq)
closes #11002
https://api.github.com/repos/pandas-dev/pandas/pulls/11006
2015-09-05T18:27:44Z
2015-09-05T23:22:57Z
2015-09-05T23:22:57Z
2015-09-05T23:22:57Z
DOC: consistent doc-string with function declaration, added missing param doc-string for sql.py
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c0b69e435f494..2ed0126505c41 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -143,8 +143,8 @@ def execute(sql, con, cur=None, params=None): Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - cur : deprecated, cursor is obtained from connection - params : list or tuple, optional + cur : deprecated, cursor is obtained from connection, default: None + params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns @@ -189,8 +189,9 @@ def tquery(sql, con=None, cur=None, retry=True): ---------- sql: string SQL query to be executed - con: DBAPI2 connection - cur: deprecated, cursor is obtained from connection + con: DBAPI2 connection, default: None + cur: deprecated, cursor is obtained from connection, default: None + retry: boolean value to specify whether to retry after failure, default: True Returns ------- @@ -242,9 +243,10 @@ def uquery(sql, con=None, cur=None, retry=True, params=None): ---------- sql: string SQL query to be executed - con: DBAPI2 connection - cur: deprecated, cursor is obtained from connection - params: list or tuple, optional + con: DBAPI2 connection, default: None + cur: deprecated, cursor is obtained from connection, default: None + retry: boolean value to specify whether to retry after failure, default: True + params: list or tuple, optional, default: None List of parameters to pass to execute method. Returns @@ -294,12 +296,12 @@ def read_sql_table(table_name, con, schema=None, index_col=None, schema : string, default None Name of SQL schema in database to query (if database flavor supports this). If None, use default schema (default). - index_col : string, optional + index_col : string, optional, default: None Column to set as index coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. 
Can result in loss of Precision. - parse_dates : list or dict + parse_dates : list or dict, default: None - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of @@ -308,7 +310,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None, to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite - columns : list + columns : list, default: None List of column names to select from sql table chunksize : int, default None If specified, return an iterator where `chunksize` is the number of @@ -369,18 +371,18 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - index_col : string, optional + index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets - params : list, tuple or dict, optional + params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. 
for psycopg2, uses %(name)s so use params={'name' : 'value'} - parse_dates : list or dict + parse_dates : list or dict, default: None - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of @@ -428,18 +430,18 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - index_col : string, optional + index_col : string, optional, default: None column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets - params : list, tuple or dict, optional + params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'} - parse_dates : list or dict + parse_dates : list or dict, default: None - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of @@ -448,7 +450,7 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite - columns : list + columns : list, default: None List of column names to select from sql table (only used when reading a table). 
chunksize : int, default None @@ -1061,13 +1063,13 @@ def read_table(self, table_name, index_col=None, coerce_float=True, ---------- table_name : string Name of SQL table in database - index_col : string, optional + index_col : string, optional, default: None Column to set as index coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. - parse_dates : list or dict + parse_dates : list or dict, default: None - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of @@ -1076,7 +1078,7 @@ def read_table(self, table_name, index_col=None, coerce_float=True, to the keyword arguments of :func:`pandas.to_datetime`. Especially useful with databases without native Datetime support, such as SQLite - columns : list + columns : list, default: None List of column names to select from sql table schema : string, default None Name of SQL schema in database to query (if database flavor @@ -1123,18 +1125,18 @@ def read_query(self, sql, index_col=None, coerce_float=True, ---------- sql : string SQL query to be executed - index_col : string, optional + index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets - params : list, tuple or dict, optional + params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. 
for psycopg2, uses %(name)s so use params={'name' : 'value'} - parse_dates : list or dict + parse_dates : list or dict, default: None - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of @@ -1143,6 +1145,9 @@ def read_query(self, sql, index_col=None, coerce_float=True, to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number + of rows to include in each chunk. Returns ------- @@ -1650,11 +1655,11 @@ def get_schema(frame, name, flavor='sqlite', keys=None, con=None, dtype=None): The flavor of SQL to use. Ignored when using SQLAlchemy connectable. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. - keys : string or sequence + keys : string or sequence, default: None columns to use a primary key con: an open SQL database connection object or a SQLAlchemy connectable Using SQLAlchemy makes it possible to use any DB supported by that - library. + library, default: None If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should
Added default values for params in doc-string and added missing param doc-string that are needed for new users.
https://api.github.com/repos/pandas-dev/pandas/pulls/11004
2015-09-05T18:15:57Z
2015-09-05T18:38:17Z
2015-09-05T18:38:17Z
2015-09-05T18:39:41Z
ENH: add Series.astype with the new tz dtype
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index dd13e8fabf0e9..7e96fdad29193 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1753,22 +1753,56 @@ TZ aware Dtypes .. versionadded:: 0.17.0 -``Series/DatetimeIndex`` with a timezone naive value are represented with a dtype of ``datetime64[ns]``. +``Series/DatetimeIndex`` with a timezone **naive** value are represented with a dtype of ``datetime64[ns]``. .. ipython:: python - dr = pd.date_range('20130101',periods=3) - dr - s = Series(dr) - s + dr_naive = pd.date_range('20130101',periods=3) + dr_naive + s_naive = Series(dr_naive) + s_naive -``Series/DatetimeIndex`` with a timezone aware value are represented with a dtype of ``datetime64[ns, tz]``. +``Series/DatetimeIndex`` with a timezone **aware** value are represented with a dtype of ``datetime64[ns, tz]``. .. ipython:: python - dr = pd.date_range('20130101',periods=3,tz='US/Eastern') - dr - s = Series(dr) - s + dr_aware = pd.date_range('20130101',periods=3,tz='US/Eastern') + dr_aware + s_aware = Series(dr_aware) + s_aware + +Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see :ref:`here <basics.dt_accessors>`. +See the :ref:`docs <timeseries.dtypes>` for more details. + +Further more you can ``.astype(...)`` timezone aware (and naive). + +.. ipython:: python + + # make this naive + s_aware.astype('datetime64[ns]') + + # convert + s_aware.astype('datetime64[ns, CET]') + s_naive.astype('datetime64[ns, CET]') + +.. note:: + + Using the ``.values`` accessor on a ``Series``, returns an numpy array of the data. + These values are converted to UTC, as numpy does not currently support timezones (even though it is *printing* in the local timezone!). + + .. ipython:: python + + s_naive.values + s_aware.values + + Further note that once converted to a numpy array these would lose the tz tenor. + + .. ipython:: python + + Series(s_aware.values) + + However, these can be easily converted + + .. 
ipython:: python -Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see the :ref:`docs <basics.dt_accessors>` as well. + Series(s_aware).dt.tz_localize('UTC').dt.tz_convert('US/Eastern') diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index f88e5c0a11f9f..9eb005a604b0c 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -447,9 +447,9 @@ Datetime with TZ We are adding an implementation that natively supports datetime with timezones. A ``Series`` or a ``DataFrame`` column previously *could* be assigned a datetime with timezones, and would work as an ``object`` dtype. This had performance issues with a large -number rows. (:issue:`8260`, :issue:`10763`) +number rows. See the :ref:`docs <timeseries.timezone_series>` for more details. (:issue:`8260`, :issue:`10763`). -The new implementation allows for having a single-timezone across all rows, and operating on it in a performant manner. +The new implementation allows for having a single-timezone across all rows, with operations in a performant manner. .. ipython:: python @@ -469,13 +469,15 @@ This uses a new-dtype representation as well, that is very similar in look-and-f .. ipython:: python df['B'].dtype - type(df['B']).dtype + type(df['B'].dtype) .. note:: There is a slightly different string repr for the underlying ``DatetimeIndex`` as a result of the dtype changes, but functionally these are the same. + Previous Behavior: + .. code-block:: python In [1]: pd.date_range('20130101',periods=3,tz='US/Eastern') @@ -486,12 +488,13 @@ This uses a new-dtype representation as well, that is very similar in look-and-f In [2]: pd.date_range('20130101',periods=3,tz='US/Eastern').dtype Out[2]: dtype('<M8[ns]') + New Behavior: + .. ipython:: python pd.date_range('20130101',periods=3,tz='US/Eastern') pd.date_range('20130101',periods=3,tz='US/Eastern').dtype - .. 
_whatsnew_0170.api_breaking.convert_objects: Changes to convert_objects diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 58ee36142d4fd..94eccad8e0185 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -18,6 +18,7 @@ array_equivalent, _maybe_convert_string_to_object, is_categorical, needs_i8_conversion, is_datetimelike_v_numeric, is_internal_type) +from pandas.core.dtypes import DatetimeTZDtype from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import maybe_convert_indices, length_of_indexer @@ -1868,6 +1869,26 @@ def __init__(self, values, placement, fastpath=True, placement=placement, **kwargs) + def _astype(self, dtype, **kwargs): + """ + these automatically copy, so copy=True has no effect + raise on an except if raise == True + """ + + # if we are passed a datetime64[ns, tz] + if com.is_datetime64tz_dtype(dtype): + dtype = DatetimeTZDtype(dtype) + + values = self.values + if getattr(values,'tz',None) is None: + values = DatetimeIndex(values).tz_localize('UTC') + values = values.tz_convert(dtype.tz) + return self.make_block(values) + + # delegate + return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) + + def _can_hold_element(self, element): if is_list_like(element): element = np.array(element) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 8da821a1fbb9a..0794ae5003983 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1072,6 +1072,20 @@ def test_constructor_with_datetime_tz(self): expected = Series(DatetimeIndex(s._values).asobject) assert_series_equal(result, expected) + result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz) + assert_series_equal(result, s) + + # astype - datetime64[ns, tz] + result = Series(s.values).astype('datetime64[ns, US/Eastern]') + assert_series_equal(result, s) + + result = Series(s.values).astype(s.dtype) + assert_series_equal(result, s) + + result = 
s.astype('datetime64[ns, CET]') + expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET')) + assert_series_equal(result, expected) + # short str self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
add ability to astype using the new dtype
https://api.github.com/repos/pandas-dev/pandas/pulls/11003
2015-09-05T18:15:55Z
2015-09-07T20:20:50Z
2015-09-07T20:20:50Z
2015-09-07T20:20:50Z
ENH: Reduce for loops in df.corr(method='kendall')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c5c0f9e82fa94..355d03ae43c15 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -116,14 +116,14 @@ copy : boolean, default True If False, do not copy data unnecessarily indicator : boolean or string, default False - If True, adds a column to output DataFrame called "_merge" with - information on the source of each row. - If string, column with information on source of each row will be added to - output DataFrame, and column will be named value of string. - Information column is Categorical-type and takes on a value of "left_only" - for observations whose merge key only appears in 'left' DataFrame, - "right_only" for observations whose merge key only appears in 'right' - DataFrame, and "both" if the observation's merge key is found in both. + If True, adds a column to output DataFrame called "_merge" with + information on the source of each row. + If string, column with information on source of each row will be added to + output DataFrame, and column will be named value of string. + Information column is Categorical-type and takes on a value of "left_only" + for observations whose merge key only appears in 'left' DataFrame, + "right_only" for observations whose merge key only appears in 'right' + DataFrame, and "both" if the observation's merge key is found in both. .. versionadded:: 0.17.0 @@ -4381,7 +4381,7 @@ def corr(self, method='pearson', min_periods=1): correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): - for j, bc in enumerate(mat): + for j, bc in enumerate(mat[i:], i): valid = mask[i] & mask[j] if valid.sum() < min_periods: c = NA
Reduce `for` loops in `df.corr(method='kendall')` Don't need to loop through lower triangle of the matrix.
https://api.github.com/repos/pandas-dev/pandas/pulls/11000
2015-09-05T12:44:43Z
2015-10-18T14:04:11Z
null
2017-08-25T04:52:52Z
DOC: fix NaNs in categories
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 534ab0e343398..ddd4fb81ed1f1 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -149,8 +149,8 @@ Using ``.describe()`` on categorical data will produce similar output to a `Seri .. ipython:: python - cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan] ) - df = pd.DataFrame({"cat":cat, "s":["a","c","c",np.nan]}) + cat = pd.Categorical(["a", "c", "c", np.nan], categories=["b", "a", "c"]) + df = pd.DataFrame({"cat":cat, "s":["a", "c", "c", np.nan]}) df.describe() df["cat"].describe() @@ -642,10 +642,10 @@ a code of ``-1``. .. ipython:: python - s = pd.Series(["a","b",np.nan,"a"], dtype="category") + s = pd.Series(["a", "b", np.nan, "a"], dtype="category") # only two categories s - s.codes + s.cat.codes Methods for working with missing data, e.g. :meth:`~Series.isnull`, :meth:`~Series.fillna`, @@ -653,8 +653,7 @@ Methods for working with missing data, e.g. :meth:`~Series.isnull`, :meth:`~Seri .. ipython:: python - c = pd.Series(["a","b",np.nan], dtype="category") - s = pd.Series(c) + s = pd.Series(["a", "b", np.nan], dtype="category") s pd.isnull(s) s.fillna("a") diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst index 1992288fd4d00..d1a2ba59d7fdf 100644 --- a/doc/source/remote_data.rst +++ b/doc/source/remote_data.rst @@ -62,6 +62,7 @@ Yahoo! Finance -------------- .. ipython:: python + :okwarning: import pandas.io.data as web import datetime diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 29b955a55fcc9..9795c082ddb98 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1278,17 +1278,12 @@ frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` .. 
ipython:: python p = Period('2012', freq='A-DEC') - p + 1 - p - 3 - p = Period('2012-01', freq='2M') - p + 2 - p - 1 - + @okexcept p == Period('2012-01', freq='3M') diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 4378d182b3128..2eaf143a3e0b8 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -387,6 +387,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used. np.random.seed(123456) .. ipython:: python + :okwarning: df = pd.DataFrame(np.random.rand(10,5)) plt.figure();
Fix warning + error in the categorical docs
https://api.github.com/repos/pandas-dev/pandas/pulls/10999
2015-09-05T11:43:32Z
2015-09-05T14:17:28Z
2015-09-05T14:17:28Z
2015-09-05T14:17:28Z
asv bench cleanup - groupby
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index f1ac09b8b2516..138977a29463e 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -3,7 +3,7 @@ from itertools import product -class groupby_agg_builtins1(object): +class groupby_agg_builtins(object): goal_time = 0.2 def setup(self): @@ -14,18 +14,11 @@ def setup(self): def time_groupby_agg_builtins1(self): self.df.groupby('jim').agg([sum, min, max]) - -class groupby_agg_builtins2(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(27182) - self.n = 100000 - self.df = DataFrame(np.random.randint(1, (self.n / 100), (self.n, 3)), columns=['jim', 'joe', 'jolie']) - def time_groupby_agg_builtins2(self): self.df.groupby(['jim', 'joe']).agg([sum, min, max]) +#---------------------------------------------------------------------- +# dict return values class groupby_apply_dict_return(object): goal_time = 0.2 @@ -39,33 +32,49 @@ def time_groupby_apply_dict_return(self): self.data.groupby(self.labels).apply(self.f) -class groupby_dt_size(object): +#---------------------------------------------------------------------- +# First / last functions + +class groupby_first_last(object): goal_time = 0.2 def setup(self): - self.n = 100000 - self.offsets = np.random.randint(self.n, size=self.n).astype('timedelta64[ns]') - self.dates = (np.datetime64('now') + self.offsets) - self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'value1': np.random.randn(self.n), 'value2': np.random.randn(self.n), 'value3': np.random.randn(self.n), 'dates': self.dates, }) + self.labels = np.arange(10000).repeat(10) + self.data = Series(randn(len(self.labels))) + self.data[::3] = np.nan + self.data[1::3] = np.nan + self.data2 = Series(randn(len(self.labels)), dtype='float32') + self.data2[::3] = np.nan + self.data2[1::3] = np.nan + self.labels = self.labels.take(np.random.permutation(len(self.labels))) - def 
time_groupby_dt_size(self): - self.df.groupby(['dates']).size() + def time_groupby_first_float32(self): + self.data2.groupby(self.labels).first() + def time_groupby_first_float64(self): + self.data.groupby(self.labels).first() -class groupby_dt_timegrouper_size(object): - goal_time = 0.2 + def time_groupby_last_float32(self): + self.data2.groupby(self.labels).last() - def setup(self): - self.n = 100000 - self.offsets = np.random.randint(self.n, size=self.n).astype('timedelta64[ns]') - self.dates = (np.datetime64('now') + self.offsets) - self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'value1': np.random.randn(self.n), 'value2': np.random.randn(self.n), 'value3': np.random.randn(self.n), 'dates': self.dates, }) + def time_groupby_last_float64(self): + self.data.groupby(self.labels).last() - def time_groupby_dt_timegrouper_size(self): - self.df.groupby(TimeGrouper(key='dates', freq='M')).size() + def time_groupby_nth_float32_any(self): + self.data2.groupby(self.labels).nth(0, dropna='all') + + def time_groupby_nth_float32_none(self): + self.data2.groupby(self.labels).nth(0) + + def time_groupby_nth_float64_any(self): + self.data.groupby(self.labels).nth(0, dropna='all') + + def time_groupby_nth_float64_none(self): + self.data.groupby(self.labels).nth(0) +# with datetimes (GH7555) -class groupby_first_datetimes(object): +class groupby_first_last_datetimes(object): goal_time = 0.2 def setup(self): @@ -74,50 +83,37 @@ def setup(self): def time_groupby_first_datetimes(self): self.df.groupby('b').first() + def time_groupby_last_datetimes(self): + self.df.groupby('b').last() -class groupby_first_float32(object): - goal_time = 0.2 - - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = 
np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) + def time_groupby_nth_datetimes_any(self): + self.df.groupby('b').nth(0, dropna='all') - def time_groupby_first_float32(self): - self.data2.groupby(self.labels).first() + def time_groupby_nth_datetimes_none(self): + self.df.groupby('b').nth(0) -class groupby_first_float64(object): +class groupby_first_last_object(object): goal_time = 0.2 def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) + self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000)}) - def time_groupby_first_float64(self): - self.data.groupby(self.labels).first() + def time_groupby_first_object(self): + self.df.groupby('b').first() + def time_groupby_last_object(self): + self.df.groupby('b').last() -class groupby_first_object(object): - goal_time = 0.2 + def time_groupby_nth_object_any(self): + self.df.groupby('b').nth(0, dropna='any') - def setup(self): - self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), }) + def time_groupby_nth_object_none(self): + self.df.groupby('b').nth(0) - def time_groupby_first_object(self): - self.df.groupby('b').first() +#---------------------------------------------------------------------- +# DataFrame Apply overhead class groupby_frame_apply(object): goal_time = 0.2 @@ -128,28 +124,18 @@ def setup(self): self.labels2 = np.random.randint(0, 3, size=self.N) self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), }) - def time_groupby_frame_apply(self): - self.df.groupby(['key', 'key2']).apply(self.f) - def f(self, g): return 1 - -class groupby_frame_apply_overhead(object): - goal_time 
= 0.2 - - def setup(self): - self.N = 10000 - self.labels = np.random.randint(0, 2000, size=self.N) - self.labels2 = np.random.randint(0, 3, size=self.N) - self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), }) + def time_groupby_frame_apply(self): + self.df.groupby(['key', 'key2']).apply(self.f) def time_groupby_frame_apply_overhead(self): self.df.groupby('key').apply(self.f) - def f(self, g): - return 1 +#---------------------------------------------------------------------- +# 2d grouping, aggregate many columns class groupby_frame_cython_many_columns(object): goal_time = 0.2 @@ -158,53 +144,67 @@ def setup(self): self.labels = np.random.randint(0, 100, size=1000) self.df = DataFrame(randn(1000, 1000)) - def time_groupby_frame_cython_many_columns(self): + def time_sum(self): self.df.groupby(self.labels).sum() -class groupby_frame_median(object): +#---------------------------------------------------------------------- +# single key, long, integer key + +class groupby_frame_singlekey_integer(object): goal_time = 0.2 def setup(self): - self.data = np.random.randn(100000, 2) + self.data = np.random.randn(100000, 1) self.labels = np.random.randint(0, 1000, size=100000) self.df = DataFrame(self.data) - def time_groupby_frame_median(self): - self.df.groupby(self.labels).median() + def time_sum(self): + self.df.groupby(self.labels).sum() + +#---------------------------------------------------------------------- +# median -class groupby_frame_nth_any(object): +class groupby_frame(object): goal_time = 0.2 def setup(self): - self.df = DataFrame(np.random.randint(1, 100, (10000, 2))) + self.data = np.random.randn(100000, 2) + self.labels = np.random.randint(0, 1000, size=100000) + self.df = DataFrame(self.data) + + def time_groupby_frame_median(self): + self.df.groupby(self.labels).median() + + def time_groupby_simple_compress_timing(self): + self.df.groupby(self.labels).mean() - 
def time_groupby_frame_nth_any(self): - self.df.groupby(0).nth(0, dropna='any') +#---------------------------------------------------------------------- +# DataFrame nth -class groupby_frame_nth_none(object): +class groupby_nth(object): goal_time = 0.2 def setup(self): self.df = DataFrame(np.random.randint(1, 100, (10000, 2))) + def time_groupby_frame_nth_any(self): + self.df.groupby(0).nth(0, dropna='any') + def time_groupby_frame_nth_none(self): self.df.groupby(0).nth(0) + def time_groupby_series_nth_any(self): + self.df[1].groupby(self.df[0]).nth(0, dropna='any') -class groupby_frame_singlekey_integer(object): - goal_time = 0.2 - - def setup(self): - self.data = np.random.randn(100000, 1) - self.labels = np.random.randint(0, 1000, size=100000) - self.df = DataFrame(self.data) + def time_groupby_series_nth_none(self): + self.df[1].groupby(self.df[0]).nth(0) - def time_groupby_frame_singlekey_integer(self): - self.df.groupby(self.labels).sum() +#---------------------------------------------------------------------- +# groupby_indices replacement, chop up Series class groupby_indices(object): goal_time = 0.2 @@ -240,70 +240,8 @@ def time_groupby_int64_overflow(self): self.df.groupby(list('abcde')).max() -class groupby_int_count(object): - goal_time = 0.2 - - def setup(self): - self.n = 10000 - self.df = DataFrame({'key1': randint(0, 500, size=self.n), 'key2': randint(0, 100, size=self.n), 'ints': randint(0, 1000, size=self.n), 'ints2': randint(0, 1000, size=self.n), }) - - def time_groupby_int_count(self): - self.df.groupby(['key1', 'key2']).count() - - -class groupby_last_datetimes(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame({'a': date_range('1/1/2011', periods=100000, freq='s'), 'b': range(100000), }) - - def time_groupby_last_datetimes(self): - self.df.groupby('b').last() - - -class groupby_last_float32(object): - goal_time = 0.2 - - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = 
Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) - - def time_groupby_last_float32(self): - self.data2.groupby(self.labels).last() - - -class groupby_last_float64(object): - goal_time = 0.2 - - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) - - def time_groupby_last_float64(self): - self.data.groupby(self.labels).last() - - -class groupby_last_object(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), }) - - def time_groupby_last_object(self): - self.df.groupby('b').last() - +#---------------------------------------------------------------------- +# count() speed class groupby_multi_count(object): goal_time = 0.2 @@ -318,38 +256,37 @@ def setup(self): self.value2[(np.random.rand(self.n) > 0.5)] = np.nan self.obj = tm.choice(list('ab'), size=self.n).astype(object) self.obj[(np.random.randn(self.n) > 0.5)] = np.nan - self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'dates': self.dates, 'value2': self.value2, 'value3': np.random.randn(self.n), 'ints': np.random.randint(0, 1000, size=self.n), 'obj': self.obj, 'offsets': self.offsets, }) + self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), + 'key2': np.random.randint(0, 100, size=self.n), + 'dates': self.dates, + 'value2': self.value2, + 'value3': np.random.randn(self.n), + 'ints': np.random.randint(0, 1000, size=self.n), + 'obj': self.obj, + 
'offsets': self.offsets, }) def time_groupby_multi_count(self): self.df.groupby(['key1', 'key2']).count() -class groupby_multi_cython(object): +class groupby_int_count(object): goal_time = 0.2 def setup(self): - self.N = 100000 - self.ngroups = 100 - self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - self.simple_series = Series(np.random.randn(self.N)) - self.key1 = self.df['key1'] - - def time_groupby_multi_cython(self): - self.df.groupby(['key1', 'key2']).sum() + self.n = 10000 + self.df = DataFrame({'key1': randint(0, 500, size=self.n), + 'key2': randint(0, 100, size=self.n), + 'ints': randint(0, 1000, size=self.n), + 'ints2': randint(0, 1000, size=self.n), }) - def get_test_data(self, ngroups=100, n=100000): - self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr + def time_groupby_int_count(self): + self.df.groupby(['key1', 'key2']).count() - def f(self): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) +#---------------------------------------------------------------------- +# group with different functions per column -class groupby_multi_different_functions(object): +class groupby_agg_multi(object): goal_time = 0.2 def setup(self): @@ -358,19 +295,10 @@ def setup(self): self.df = DataFrame({'key1': self.fac1.take(np.random.randint(0, 3, size=100000)), 'key2': self.fac2.take(np.random.randint(0, 2, size=100000)), 'value1': np.random.randn(100000), 'value2': np.random.randn(100000), 'value3': np.random.randn(100000), }) def time_groupby_multi_different_functions(self): - self.df.groupby(['key1', 'key2']).agg({'value1': 'mean', 'value2': 'var', 'value3': 'sum', 
}) - - -class groupby_multi_different_numpy_functions(object): - goal_time = 0.2 - - def setup(self): - self.fac1 = np.array(['A', 'B', 'C'], dtype='O') - self.fac2 = np.array(['one', 'two'], dtype='O') - self.df = DataFrame({'key1': self.fac1.take(np.random.randint(0, 3, size=100000)), 'key2': self.fac2.take(np.random.randint(0, 2, size=100000)), 'value1': np.random.randn(100000), 'value2': np.random.randn(100000), 'value3': np.random.randn(100000), }) + self.df.groupby(['key1', 'key2']).agg({'value1': 'mean', 'value2': 'var', 'value3': 'sum'}) def time_groupby_multi_different_numpy_functions(self): - self.df.groupby(['key1', 'key2']).agg({'value1': np.mean, 'value2': np.var, 'value3': np.sum, }) + self.df.groupby(['key1', 'key2']).agg({'value1': np.mean, 'value2': np.var, 'value3': np.sum}) class groupby_multi_index(object): @@ -389,7 +317,7 @@ def time_groupby_multi_index(self): self.df.groupby(list('abcd')).max() -class groupby_multi_python(object): +class groupby_multi(object): goal_time = 0.2 def setup(self): @@ -399,9 +327,6 @@ def setup(self): self.simple_series = Series(np.random.randn(self.N)) self.key1 = self.df['key1'] - def time_groupby_multi_python(self): - self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum())) - def get_test_data(self, ngroups=100, n=100000): self.unique_groups = range(self.ngroups) self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) @@ -413,33 +338,26 @@ def get_test_data(self, ngroups=100, n=100000): def f(self): self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + def time_groupby_multi_cython(self): + self.df.groupby(['key1', 'key2']).sum() -class groupby_multi_series_op(object): - goal_time = 0.2 - - def setup(self): - self.N = 100000 - self.ngroups = 100 - self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - 
self.simple_series = Series(np.random.randn(self.N)) - self.key1 = self.df['key1'] + def time_groupby_multi_python(self): + self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum())) def time_groupby_multi_series_op(self): self.df.groupby(['key1', 'key2'])['data1'].agg(np.std) - def get_test_data(self, ngroups=100, n=100000): - self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr + def time_groupby_series_simple_cython(self): + self.simple_series.groupby(self.key1).sum() - def f(self): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + def time_groupby_series_simple_rank(self): + self.df.groupby('key1').rank(pct=True) -class groupby_multi_size(object): +#---------------------------------------------------------------------- +# size() speed + +class groupby_size(object): goal_time = 0.2 def setup(self): @@ -451,22 +369,17 @@ def setup(self): def time_groupby_multi_size(self): self.df.groupby(['key1', 'key2']).size() + def time_groupby_dt_size(self): + self.df.groupby(['dates']).size() -class groupby_ngroups_10000_all(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) + def time_groupby_dt_timegrouper_size(self): + self.df.groupby(TimeGrouper(key='dates', freq='M')).size() - def time_groupby_ngroups_10000_all(self): - self.df.groupby('value')['timestamp'].all() +#---------------------------------------------------------------------- +# groupby with a variable value for ngroups -class groupby_ngroups_10000_any(object): +class 
groupby_ngroups_10000(object): goal_time = 0.2 def setup(self): @@ -476,809 +389,101 @@ def setup(self): self.rng = np.arange(self.ngroups) self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - def time_groupby_ngroups_10000_any(self): - self.df.groupby('value')['timestamp'].any() - - -class groupby_ngroups_10000_count(object): - goal_time = 0.2 + def time_all(self): + self.df.groupby('value')['timestamp'].all() - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) + def time_any(self): + self.df.groupby('value')['timestamp'].any() - def time_groupby_ngroups_10000_count(self): + def time_count(self): self.df.groupby('value')['timestamp'].count() - -class groupby_ngroups_10000_cumcount(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_cumcount(self): + def time_cumcount(self): self.df.groupby('value')['timestamp'].cumcount() - -class groupby_ngroups_10000_cummax(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_cummax(self): + def time_cummax(self): self.df.groupby('value')['timestamp'].cummax() - -class 
groupby_ngroups_10000_cummin(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_cummin(self): + def time_cummin(self): self.df.groupby('value')['timestamp'].cummin() - -class groupby_ngroups_10000_cumprod(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_cumprod(self): + def time_cumprod(self): self.df.groupby('value')['timestamp'].cumprod() - -class groupby_ngroups_10000_cumsum(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_cumsum(self): + def time_cumsum(self): self.df.groupby('value')['timestamp'].cumsum() - -class groupby_ngroups_10000_describe(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_describe(self): + def time_describe(self): self.df.groupby('value')['timestamp'].describe() - -class groupby_ngroups_10000_diff(object): - goal_time = 0.2 - 
- def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_diff(self): + def time_diff(self): self.df.groupby('value')['timestamp'].diff() - -class groupby_ngroups_10000_first(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_first(self): + def time_first(self): self.df.groupby('value')['timestamp'].first() - -class groupby_ngroups_10000_head(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_head(self): + def time_head(self): self.df.groupby('value')['timestamp'].head() - -class groupby_ngroups_10000_last(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_last(self): + def time_last(self): self.df.groupby('value')['timestamp'].last() - -class groupby_ngroups_10000_mad(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 
2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_mad(self): + def time_mad(self): self.df.groupby('value')['timestamp'].mad() - -class groupby_ngroups_10000_max(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_max(self): + def time_max(self): self.df.groupby('value')['timestamp'].max() - -class groupby_ngroups_10000_mean(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_mean(self): + def time_mean(self): self.df.groupby('value')['timestamp'].mean() - -class groupby_ngroups_10000_median(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_median(self): + def time_median(self): self.df.groupby('value')['timestamp'].median() - -class groupby_ngroups_10000_min(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = 
DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_min(self): + def time_min(self): self.df.groupby('value')['timestamp'].min() - -class groupby_ngroups_10000_nunique(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_nunique(self): + def time_nunique(self): self.df.groupby('value')['timestamp'].nunique() + def time_pct_change(self): + self.df.groupby('value')['timestamp'].pct_change() -class groupby_ngroups_10000_pct_change(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_pct_change(self): - self.df.groupby('value')['timestamp'].pct_change() - - -class groupby_ngroups_10000_prod(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_prod(self): - self.df.groupby('value')['timestamp'].prod() - - -class groupby_ngroups_10000_rank(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = 
DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_rank(self): - self.df.groupby('value')['timestamp'].rank() - - -class groupby_ngroups_10000_sem(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_sem(self): - self.df.groupby('value')['timestamp'].sem() - - -class groupby_ngroups_10000_size(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_size(self): - self.df.groupby('value')['timestamp'].size() - - -class groupby_ngroups_10000_skew(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_skew(self): - self.df.groupby('value')['timestamp'].skew() - - -class groupby_ngroups_10000_std(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def 
time_groupby_ngroups_10000_std(self): - self.df.groupby('value')['timestamp'].std() - - -class groupby_ngroups_10000_sum(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_sum(self): - self.df.groupby('value')['timestamp'].sum() - - -class groupby_ngroups_10000_tail(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_tail(self): - self.df.groupby('value')['timestamp'].tail() - - -class groupby_ngroups_10000_unique(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_unique(self): - self.df.groupby('value')['timestamp'].unique() - - -class groupby_ngroups_10000_value_counts(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_value_counts(self): - self.df.groupby('value')['timestamp'].value_counts() - - -class groupby_ngroups_10000_var(object): - goal_time = 0.2 
- - def setup(self): - np.random.seed(1234) - self.ngroups = 10000 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_10000_var(self): - self.df.groupby('value')['timestamp'].var() - - -class groupby_ngroups_100_all(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_all(self): - self.df.groupby('value')['timestamp'].all() - - -class groupby_ngroups_100_any(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_any(self): - self.df.groupby('value')['timestamp'].any() - - -class groupby_ngroups_100_count(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_count(self): - self.df.groupby('value')['timestamp'].count() - - -class groupby_ngroups_100_cumcount(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = 
DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_cumcount(self): - self.df.groupby('value')['timestamp'].cumcount() - - -class groupby_ngroups_100_cummax(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_cummax(self): - self.df.groupby('value')['timestamp'].cummax() - - -class groupby_ngroups_100_cummin(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_cummin(self): - self.df.groupby('value')['timestamp'].cummin() - - -class groupby_ngroups_100_cumprod(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_cumprod(self): - self.df.groupby('value')['timestamp'].cumprod() - - -class groupby_ngroups_100_cumsum(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def 
time_groupby_ngroups_100_cumsum(self): - self.df.groupby('value')['timestamp'].cumsum() - - -class groupby_ngroups_100_describe(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_describe(self): - self.df.groupby('value')['timestamp'].describe() - - -class groupby_ngroups_100_diff(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_diff(self): - self.df.groupby('value')['timestamp'].diff() - - -class groupby_ngroups_100_first(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_first(self): - self.df.groupby('value')['timestamp'].first() - - -class groupby_ngroups_100_head(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_head(self): - self.df.groupby('value')['timestamp'].head() - - -class groupby_ngroups_100_last(object): - goal_time = 0.2 - - def setup(self): - 
np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_last(self): - self.df.groupby('value')['timestamp'].last() - - -class groupby_ngroups_100_mad(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_mad(self): - self.df.groupby('value')['timestamp'].mad() - - -class groupby_ngroups_100_max(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_max(self): - self.df.groupby('value')['timestamp'].max() - - -class groupby_ngroups_100_mean(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_mean(self): - self.df.groupby('value')['timestamp'].mean() - - -class groupby_ngroups_100_median(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, 
size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_median(self): - self.df.groupby('value')['timestamp'].median() - - -class groupby_ngroups_100_min(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_min(self): - self.df.groupby('value')['timestamp'].min() - - -class groupby_ngroups_100_nunique(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_nunique(self): - self.df.groupby('value')['timestamp'].nunique() - - -class groupby_ngroups_100_pct_change(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_pct_change(self): - self.df.groupby('value')['timestamp'].pct_change() - - -class groupby_ngroups_100_prod(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_prod(self): - self.df.groupby('value')['timestamp'].prod() - - 
-class groupby_ngroups_100_rank(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_rank(self): - self.df.groupby('value')['timestamp'].rank() - - -class groupby_ngroups_100_sem(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_sem(self): - self.df.groupby('value')['timestamp'].sem() - - -class groupby_ngroups_100_size(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_size(self): - self.df.groupby('value')['timestamp'].size() - - -class groupby_ngroups_100_skew(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_skew(self): - self.df.groupby('value')['timestamp'].skew() - - -class groupby_ngroups_100_std(object): - goal_time = 0.2 - - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df 
= DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - - def time_groupby_ngroups_100_std(self): - self.df.groupby('value')['timestamp'].std() + def time_prod(self): + self.df.groupby('value')['timestamp'].prod() + def time_rank(self): + self.df.groupby('value')['timestamp'].rank() -class groupby_ngroups_100_sum(object): - goal_time = 0.2 + def time_sem(self): + self.df.groupby('value')['timestamp'].sem() - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) + def time_size(self): + self.df.groupby('value')['timestamp'].size() + + def time_skew(self): + self.df.groupby('value')['timestamp'].skew() + + def time_std(self): + self.df.groupby('value')['timestamp'].std() - def time_groupby_ngroups_100_sum(self): + def time_sum(self): self.df.groupby('value')['timestamp'].sum() + def time_tail(self): + self.df.groupby('value')['timestamp'].tail() -class groupby_ngroups_100_tail(object): - goal_time = 0.2 + def time_unique(self): + self.df.groupby('value')['timestamp'].unique() - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) + def time_value_counts(self): + self.df.groupby('value')['timestamp'].value_counts() - def time_groupby_ngroups_100_tail(self): - self.df.groupby('value')['timestamp'].tail() + def time_var(self): + self.df.groupby('value')['timestamp'].var() -class groupby_ngroups_100_unique(object): +class groupby_ngroups_100(object): goal_time = 0.2 def setup(self): @@ -1288,145 +493,127 @@ def 
setup(self): self.rng = np.arange(self.ngroups) self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) - def time_groupby_ngroups_100_unique(self): - self.df.groupby('value')['timestamp'].unique() - + def time_all(self): + self.df.groupby('value')['timestamp'].all() -class groupby_ngroups_100_value_counts(object): - goal_time = 0.2 + def time_any(self): + self.df.groupby('value')['timestamp'].any() - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) + def time_count(self): + self.df.groupby('value')['timestamp'].count() - def time_groupby_ngroups_100_value_counts(self): - self.df.groupby('value')['timestamp'].value_counts() + def time_cumcount(self): + self.df.groupby('value')['timestamp'].cumcount() + def time_cummax(self): + self.df.groupby('value')['timestamp'].cummax() -class groupby_ngroups_100_var(object): - goal_time = 0.2 + def time_cummin(self): + self.df.groupby('value')['timestamp'].cummin() - def setup(self): - np.random.seed(1234) - self.ngroups = 100 - self.size = (self.ngroups * 2) - self.rng = np.arange(self.ngroups) - self.df = DataFrame(dict(timestamp=self.rng.take(np.random.randint(0, self.ngroups, size=self.size)), value=np.random.randint(0, self.size, size=self.size))) + def time_cumprod(self): + self.df.groupby('value')['timestamp'].cumprod() - def time_groupby_ngroups_100_var(self): - self.df.groupby('value')['timestamp'].var() + def time_cumsum(self): + self.df.groupby('value')['timestamp'].cumsum() + def time_describe(self): + self.df.groupby('value')['timestamp'].describe() -class groupby_nth_datetimes_any(object): - goal_time = 0.2 + def time_diff(self): + self.df.groupby('value')['timestamp'].diff() 
- def setup(self): - self.df = DataFrame({'a': date_range('1/1/2011', periods=100000, freq='s'), 'b': range(100000), }) + def time_first(self): + self.df.groupby('value')['timestamp'].first() - def time_groupby_nth_datetimes_any(self): - self.df.groupby('b').nth(0, dropna='all') + def time_head(self): + self.df.groupby('value')['timestamp'].head() + def time_last(self): + self.df.groupby('value')['timestamp'].last() -class groupby_nth_datetimes_none(object): - goal_time = 0.2 + def time_mad(self): + self.df.groupby('value')['timestamp'].mad() - def setup(self): - self.df = DataFrame({'a': date_range('1/1/2011', periods=100000, freq='s'), 'b': range(100000), }) + def time_max(self): + self.df.groupby('value')['timestamp'].max() - def time_groupby_nth_datetimes_none(self): - self.df.groupby('b').nth(0) + def time_mean(self): + self.df.groupby('value')['timestamp'].mean() + def time_median(self): + self.df.groupby('value')['timestamp'].median() -class groupby_nth_float32_any(object): - goal_time = 0.2 + def time_min(self): + self.df.groupby('value')['timestamp'].min() - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) + def time_nunique(self): + self.df.groupby('value')['timestamp'].nunique() - def time_groupby_nth_float32_any(self): - self.data2.groupby(self.labels).nth(0, dropna='all') + def time_pct_change(self): + self.df.groupby('value')['timestamp'].pct_change() + def time_prod(self): + self.df.groupby('value')['timestamp'].prod() -class groupby_nth_float32_none(object): - goal_time = 0.2 + def time_rank(self): + self.df.groupby('value')['timestamp'].rank() - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) 
- self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) + def time_sem(self): + self.df.groupby('value')['timestamp'].sem() - def time_groupby_nth_float32_none(self): - self.data2.groupby(self.labels).nth(0) + def time_size(self): + self.df.groupby('value')['timestamp'].size() + def time_skew(self): + self.df.groupby('value')['timestamp'].skew() -class groupby_nth_float64_any(object): - goal_time = 0.2 + def time_std(self): + self.df.groupby('value')['timestamp'].std() - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) + def time_sum(self): + self.df.groupby('value')['timestamp'].sum() - def time_groupby_nth_float64_any(self): - self.data.groupby(self.labels).nth(0, dropna='all') + def time_tail(self): + self.df.groupby('value')['timestamp'].tail() + def time_unique(self): + self.df.groupby('value')['timestamp'].unique() -class groupby_nth_float64_none(object): - goal_time = 0.2 + def time_value_counts(self): + self.df.groupby('value')['timestamp'].value_counts() - def setup(self): - self.labels = np.arange(10000).repeat(10) - self.data = Series(randn(len(self.labels))) - self.data[::3] = np.nan - self.data[1::3] = np.nan - self.data2 = Series(randn(len(self.labels)), dtype='float32') - self.data2[::3] = np.nan - self.data2[1::3] = np.nan - self.labels = self.labels.take(np.random.permutation(len(self.labels))) + def time_var(self): + self.df.groupby('value')['timestamp'].var() - def time_groupby_nth_float64_none(self): - self.data.groupby(self.labels).nth(0) 
+#---------------------------------------------------------------------- +# Series.value_counts -class groupby_nth_object_any(object): +class series_value_counts(object): goal_time = 0.2 def setup(self): - self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), }) + self.s = Series(np.random.randint(0, 1000, size=100000)) + self.s2 = self.s.astype(float) - def time_groupby_nth_object_any(self): - self.df.groupby('b').nth(0, dropna='any') + self.K = 1000 + self.N = 100000 + self.uniques = tm.makeStringIndex(self.K).values + self.s3 = Series(np.tile(self.uniques, (self.N // self.K))) + def time_value_counts_int64(self): + self.s.value_counts() -class groupby_nth_object_none(object): - goal_time = 0.2 + def time_value_counts_float64(self): + self.s2.value_counts() - def setup(self): - self.df = DataFrame({'a': (['foo'] * 100000), 'b': range(100000), }) + def time_value_counts_strings(self): + self.s.value_counts() - def time_groupby_nth_object_none(self): - self.df.groupby('b').nth(0) +#---------------------------------------------------------------------- +# pivot_table class groupby_pivot_table(object): goal_time = 0.2 @@ -1442,62 +629,8 @@ def time_groupby_pivot_table(self): self.df.pivot_table(index='key1', columns=['key2', 'key3']) -class groupby_series_nth_any(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randint(1, 100, (10000, 2))) - - def time_groupby_series_nth_any(self): - self.df[1].groupby(self.df[0]).nth(0, dropna='any') - - -class groupby_series_nth_none(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randint(1, 100, (10000, 2))) - - def time_groupby_series_nth_none(self): - self.df[1].groupby(self.df[0]).nth(0) - - -class groupby_series_simple_cython(object): - goal_time = 0.2 - - def setup(self): - self.N = 100000 - self.ngroups = 100 - self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': 
np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - self.simple_series = Series(np.random.randn(self.N)) - self.key1 = self.df['key1'] - - def time_groupby_series_simple_cython(self): - self.df.groupby('key1').rank(pct=True) - - def get_test_data(self, ngroups=100, n=100000): - self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr - - def f(self): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) - - -class groupby_simple_compress_timing(object): - goal_time = 0.2 - - def setup(self): - self.data = np.random.randn(1000000, 2) - self.labels = np.random.randint(0, 1000, size=1000000) - self.df = DataFrame(self.data) - - def time_groupby_simple_compress_timing(self): - self.df.groupby(self.labels).mean() - +#---------------------------------------------------------------------- +# Sum booleans #2692 class groupby_sum_booleans(object): goal_time = 0.2 @@ -1510,6 +643,9 @@ def time_groupby_sum_booleans(self): self.df.groupby('ii').sum() +#---------------------------------------------------------------------- +# multi-indexed group sum #9049 + class groupby_sum_multiindex(object): goal_time = 0.2 @@ -1521,6 +657,9 @@ def time_groupby_sum_multiindex(self): self.df.groupby(level=[0, 1]).sum() +#------------------------------------------------------------------------------- +# Transform testing + class groupby_transform(object): goal_time = 0.2 @@ -1535,7 +674,9 @@ def setup(self): self.secid_max = int('F0000000', 16) self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1)) self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step)) - self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in 
range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id']) + self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], + labels=[[i for i in range(self.n_dates) for _ in range(self.n_securities)], (range(self.n_securities) * self.n_dates)], + names=['date', 'security_id']) self.n_data = len(self.data_index) self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))]) self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns) @@ -1550,8 +691,11 @@ def setup(self): def time_groupby_transform(self): self.data.groupby(level='security_id').transform(self.f_fillna) + def time_groupby_transform_ufunc(self): + self.data.groupby(level='date').transform(np.max) -class groupby_transform_multi_key1(object): + +class groupby_transform_multi_key(object): goal_time = 0.2 def setup(self): @@ -1628,66 +772,3 @@ def setup(self): def time_groupby_transform_series2(self): self.df.groupby('id')['val'].transform(np.mean) - - -class groupby_transform_ufunc(object): - goal_time = 0.2 - - def setup(self): - self.n_dates = 400 - self.n_securities = 250 - self.n_columns = 3 - self.share_na = 0.1 - self.dates = date_range('1997-12-31', periods=self.n_dates, freq='B') - self.dates = Index(map((lambda x: (((x.year * 10000) + (x.month * 100)) + x.day)), self.dates)) - self.secid_min = int('10000000', 16) - self.secid_max = int('F0000000', 16) - self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1)) - self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step)) - self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id']) - self.n_data = len(self.data_index) - self.columns = 
Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))]) - self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns) - self.step = int((self.n_data * self.share_na)) - for column_index in range(self.n_columns): - self.index = column_index - while (self.index < self.n_data): - self.data.set_value(self.data_index[self.index], self.columns[column_index], np.nan) - self.index += self.step - self.f_fillna = (lambda x: x.fillna(method='pad')) - - def time_groupby_transform_ufunc(self): - self.data.groupby(level='date').transform(np.max) - - -class series_value_counts_float64(object): - goal_time = 0.2 - - def setup(self): - self.s = Series(np.random.randint(0, 1000, size=100000)).astype(float) - - def time_series_value_counts_float64(self): - self.s.value_counts() - - -class series_value_counts_int64(object): - goal_time = 0.2 - - def setup(self): - self.s = Series(np.random.randint(0, 1000, size=100000)) - - def time_series_value_counts_int64(self): - self.s.value_counts() - - -class series_value_counts_strings(object): - goal_time = 0.2 - - def setup(self): - self.K = 1000 - self.N = 100000 - self.uniques = tm.makeStringIndex(self.K).values - self.s = Series(np.tile(self.uniques, (self.N // self.K))) - - def time_series_value_counts_strings(self): - self.s.value_counts()
Clean-up of the groupby benchmarks. This is an example of how they can be cleaned up (grouping benchmarks with the same setup in common classes, removing a lot of the setup functions in this way)
https://api.github.com/repos/pandas-dev/pandas/pulls/10998
2015-09-05T11:37:01Z
2015-09-12T23:46:46Z
2015-09-12T23:46:46Z
2015-09-13T00:29:16Z
DOC: Update pip reference links
diff --git a/README.md b/README.md index 947dfc5928249..fbac24a34bfd2 100644 --- a/README.md +++ b/README.md @@ -221,7 +221,7 @@ cloning the git repo), execute: python setup.py install ``` -or for installing in [development mode](http://www.pip-installer.org/en/latest/usage.html): +or for installing in [development mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs): ```sh python setup.py develop @@ -229,7 +229,7 @@ python setup.py develop Alternatively, you can use `pip` if you want all the dependencies pulled in automatically (the `-e` option is for installing it in [development -mode](http://www.pip-installer.org/en/latest/usage.html)): +mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs)): ```sh pip install -e .
Change to link to https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs
https://api.github.com/repos/pandas-dev/pandas/pulls/10997
2015-09-05T03:21:30Z
2015-09-05T10:09:02Z
2015-09-05T10:09:02Z
2017-08-25T04:52:33Z
DOC: Improve reindex examples and docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d3a63f9f5d851..f2a724361df4a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1775,7 +1775,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, New labels / index to conform to. Preferably an Index object to avoid duplicating data method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional - Method to use for filling holes in reindexed DataFrame: + method to use for filling holes in reindexed DataFrame. + Please note: this is only applicable to DataFrames/Series with a + monotonically increasing/decreasing index. * default: don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap @@ -1799,7 +1801,118 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, Examples -------- - >>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C']) + + Create a dataframe with some fictional data. + + >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] + >>> df = pd.DataFrame({ + ... 'http_status': [200,200,404,404,301], + ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, + ... index=index) + >>> df + http_status response_time + Firefox 200 0.04 + Chrome 200 0.02 + Safari 404 0.07 + IE10 404 0.08 + Konqueror 301 1.00 + + Create a new index and reindex the dataframe. By default + values in the new index that do not have corresponding + records in the dataframe are assigned ``NaN``. + + >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', + ... 'Chrome'] + >>> df.reindex(new_index) + http_status response_time + Safari 404 0.07 + Iceweasel NaN NaN + Comodo Dragon NaN NaN + IE10 404 0.08 + Chrome 200 0.02 + + We can fill in the missing values by passing a value to + the keyword ``fill_value``. 
Because the index is not monotonically + increasing or decreasing, we cannot use arguments to the keyword + ``method`` to fill the ``NaN`` values. + + >>> df.reindex(new_index, fill_value=0) + http_status response_time + Safari 404 0.07 + Iceweasel 0 0.00 + Comodo Dragon 0 0.00 + IE10 404 0.08 + Chrome 200 0.02 + + >>> df.reindex(new_index, fill_value='missing') + http_status response_time + Safari 404 0.07 + Iceweasel missing missing + Comodo Dragon missing missing + IE10 404 0.08 + Chrome 200 0.02 + + To further illustrate the filling functionality in + ``reindex``, we will create a dataframe with a + monotonically increasing index (for example, a sequence + of dates). + + >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') + >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, + index=date_index) + >>> df2 + prices + 2010-01-01 100 + 2010-01-02 101 + 2010-01-03 NaN + 2010-01-04 100 + 2010-01-05 89 + 2010-01-06 88 + + Suppose we decide to expand the dataframe to cover a wider + date range. + + >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') + >>> df2.reindex(date_index2) + prices + 2009-12-29 NaN + 2009-12-30 NaN + 2009-12-31 NaN + 2010-01-01 100 + 2010-01-02 101 + 2010-01-03 NaN + 2010-01-04 100 + 2010-01-05 89 + 2010-01-06 88 + 2010-01-07 NaN + + The index entries that did not have a value in the original data frame + (for example, '2009-12-29') are by default filled with ``NaN``. + If desired, we can fill in the missing values using one of several + options. + + For example, to backpropagate the last valid value to fill the ``NaN`` + values, pass ``bfill`` as an argument to the ``method`` keyword. 
+ + >>> df2.reindex(date_index2, method='bfill') + prices + 2009-12-29 100 + 2009-12-30 100 + 2009-12-31 100 + 2010-01-01 100 + 2010-01-02 101 + 2010-01-03 NaN + 2010-01-04 100 + 2010-01-05 89 + 2010-01-06 88 + 2010-01-07 NaN + + Please note that the ``NaN`` value present in the original dataframe + (at index value 2010-01-03) will not be filled by any of the + value propagation schemes. This is because filling while reindexing + does not look at dataframe values, but only compares the original and + desired indexes. If you do want to fill in the ``NaN`` values present + in the original dataframe, use the ``fillna()`` method. Returns -------
Fixes https://github.com/pydata/pandas/issues/10995 Done/waiting for feedback.
https://api.github.com/repos/pandas-dev/pandas/pulls/10996
2015-09-04T22:09:10Z
2015-10-26T12:04:00Z
2015-10-26T12:04:00Z
2015-10-26T12:04:04Z
BUG: pivot_table with margins=True fails for categorical dtype, #10989
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 89fe9463282b6..a4a175fb75716 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -159,6 +159,20 @@ def _add_margins(table, data, values, rows, cols, aggfunc): grand_margin = _compute_grand_margin(data, values, aggfunc) + # categorical index or columns will fail below when 'All' is added + # here we'll convert all categorical indices to object + def convert_categorical(ind): + _convert = lambda ind: (ind.astype('object') + if ind.dtype.name == 'category' else ind) + if isinstance(ind, MultiIndex): + return ind.set_levels([_convert(lev) for lev in ind.levels]) + else: + return _convert(ind) + + table.index = convert_categorical(table.index) + if hasattr(table, 'columns'): + table.columns = convert_categorical(table.columns) + if not values and isinstance(table, Series): # If there are no values and the table is a series, then there is only # one column in the data. Compute grand margin and return it. diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index 34789a3c52cb7..106e0fa7a259a 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -719,6 +719,20 @@ def test_crosstab_dropna(self): ('two', 'dull'), ('two', 'shiny')]) assert_equal(res.columns.values, m.values) + def test_categorical_margins(self): + # GH 10989 + data = pd.DataFrame({'x': np.arange(8), + 'y': np.arange(8) // 4, + 'z': np.arange(8) % 2}) + data.y = data.y.astype('category') + data.z = data.z.astype('category') + table = data.pivot_table('x', 'y', 'z', margins=True) + assert_equal(table.values, [[1, 2, 1.5], + [5, 6, 5.5], + [3, 4, 3.5]]) + + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
This is a fix for the issue reported in #10989. I suspect this is an example of "fixing the symptom" rather than "fixing the problem", but I think it makes clear what the source of the problem is: to compute margins, the pivot table must add a row and/or column to the result. If the index or column is categorical, a new value cannot be added. Let me know if you think there are better approaches to this.
https://api.github.com/repos/pandas-dev/pandas/pulls/10993
2015-09-04T20:00:17Z
2015-10-20T17:29:50Z
null
2015-10-20T17:29:50Z
Revising the HDF5 docs a bit, added another level to the toc
diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index fb63d0c6d66f1..f4469482ec290 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -107,7 +107,7 @@ See the package overview for more detail about what's in the library. .. toctree:: - :maxdepth: 3 + :maxdepth: 4 {% if single -%} {{ single }} diff --git a/doc/source/io.rst b/doc/source/io.rst index ded314229225c..31d0be6151ba4 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -25,15 +25,15 @@ clipdf = DataFrame({'A':[1,2,3],'B':[4,5,6],'C':['p','q','r']}, index=['x','y','z']) -******************************* +=============================== IO Tools (Text, CSV, HDF5, ...) -******************************* +=============================== The pandas I/O API is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` object. * :ref:`read_csv<io.read_csv_table>` - * :ref:`read_excel<io.excel>` + * :ref:`read_excel<io.excel_reader>` * :ref:`read_hdf<io.hdf5>` * :ref:`read_sql<io.sql>` * :ref:`read_json<io.json_reader>` @@ -48,7 +48,7 @@ object. The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()`` * :ref:`to_csv<io.store_in_csv>` - * :ref:`to_excel<io.excel>` + * :ref:`to_excel<io.excel_writer>` * :ref:`to_hdf<io.hdf5>` * :ref:`to_sql<io.sql>` * :ref:`to_json<io.json_writer>` @@ -279,7 +279,7 @@ columns will come through as object dtype as with the rest of pandas objects. .. _io.dtypes: Specifying column data types -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''''''''' Starting with v0.10, you can indicate the data type for the whole DataFrame or individual columns: @@ -300,10 +300,13 @@ individual columns: Specifying ``dtype`` with ``engine`` other than 'c' raises a ``ValueError``. +Naming and Using Columns +'''''''''''''''''''''''' + .. _io.headers: Handling column names -~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++ A file may or may not have a header row. 
pandas assumes the first row should be used as the column names: @@ -335,7 +338,7 @@ If the header is in a row other than the first, pass the row number to .. _io.usecols: Filtering columns (``usecols``) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++++++++++++ The ``usecols`` argument allows you to select any subset of the columns in a file, either using the column names or position numbers: @@ -347,10 +350,14 @@ file, either using the column names or position numbers: pd.read_csv(StringIO(data), usecols=['b', 'd']) pd.read_csv(StringIO(data), usecols=[0, 2, 3]) +Comments and Empty Lines +'''''''''''''''''''''''' + .. _io.skiplines: Ignoring line comments and empty lines -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++++++++++++++ + If the ``comment`` parameter is specified, then completely commented lines will be ignored. By default, completely blank lines will be ignored as well. Both of these are API changes introduced in version 0.15. @@ -391,10 +398,51 @@ If ``skip_blank_lines=False``, then ``read_csv`` will not ignore blank lines: print(data) pd.read_csv(StringIO(data), comment='#', skiprows=4, header=1) +.. _io.comments: + +Comments +++++++++ + +Sometimes comments or meta data may be included in a file: + +.. ipython:: python + :suppress: + + data = ("ID,level,category\n" + "Patient1,123000,x # really unpleasant\n" + "Patient2,23000,y # wouldn't take his medicine\n" + "Patient3,1234018,z # awesome") + + with open('tmp.csv', 'w') as fh: + fh.write(data) + +.. ipython:: python + + print(open('tmp.csv').read()) + +By default, the parser includes the comments in the output: + +.. ipython:: python + + df = pd.read_csv('tmp.csv') + df + +We can suppress the comments using the ``comment`` keyword: + +.. ipython:: python + + df = pd.read_csv('tmp.csv', comment='#') + df + +.. ipython:: python + :suppress: + + os.remove('tmp.csv') + .. 
_io.unicode: Dealing with Unicode Data -~~~~~~~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''''''''' The ``encoding`` argument should be used for encoded unicode data, which will result in byte strings being decoded to unicode in the result: @@ -414,7 +462,7 @@ standard encodings .. _io.index_col: Index columns and trailing delimiters -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''''''''''''''''''''' If a file has one more column of data than the number of column names, the first column will be used as the DataFrame's row names: @@ -444,8 +492,11 @@ index column inference and discard the last column, pass ``index_col=False``: .. _io.parse_dates: +Date Handling +''''''''''''' + Specifying Date Columns -~~~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++++ To better facilitate working with datetime data, :func:`~pandas.io.parsers.read_csv` and :func:`~pandas.io.parsers.read_table` @@ -545,27 +596,9 @@ data columns: specify `index_col` as a column label rather then as an index on the resulting frame. -.. _io.float_precision: - -Specifying method for floating-point conversion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The parameter ``float_precision`` can be specified in order to use -a specific floating-point converter during parsing with the C engine. -The options are the ordinary converter, the high-precision converter, and -the round-trip converter (which is guaranteed to round-trip values after -writing to a file). For example: - -.. 
ipython:: python - - val = '0.3066101993807095471566981359501369297504425048828125' - data = 'a,b,c\n1,2,{0}'.format(val) - abs(pd.read_csv(StringIO(data), engine='c', float_precision=None)['c'][0] - float(val)) - abs(pd.read_csv(StringIO(data), engine='c', float_precision='high')['c'][0] - float(val)) - abs(pd.read_csv(StringIO(data), engine='c', float_precision='round_trip')['c'][0] - float(val)) - - Date Parsing Functions -~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++ + Finally, the parser allows you to specify a custom ``date_parser`` function to take full advantage of the flexibility of the date parsing API: @@ -618,7 +651,8 @@ a single date rather than the entire array. Inferring Datetime Format -~~~~~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++++++ + If you have ``parse_dates`` enabled for some or all of your columns, and your datetime strings are all formatted the same way, you may get a large speed up by setting ``infer_datetime_format=True``. If set, pandas will attempt @@ -656,7 +690,8 @@ representing December 30th, 2011 at 00:00:00) os.remove('foo.csv') International Date Formats -~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++ + While US date formats tend to be MM/DD/YYYY, many international formats use DD/MM/YYYY instead. For convenience, a ``dayfirst`` keyword is provided: @@ -674,10 +709,31 @@ DD/MM/YYYY instead. For convenience, a ``dayfirst`` keyword is provided: pd.read_csv('tmp.csv', parse_dates=[0]) pd.read_csv('tmp.csv', dayfirst=True, parse_dates=[0]) +.. _io.float_precision: + +Specifying method for floating-point conversion +''''''''''''''''''''''''''''''''''''''''''''''' + +The parameter ``float_precision`` can be specified in order to use +a specific floating-point converter during parsing with the C engine. +The options are the ordinary converter, the high-precision converter, and +the round-trip converter (which is guaranteed to round-trip values after +writing to a file). For example: + +.. 
ipython:: python + + val = '0.3066101993807095471566981359501369297504425048828125' + data = 'a,b,c\n1,2,{0}'.format(val) + abs(pd.read_csv(StringIO(data), engine='c', float_precision=None)['c'][0] - float(val)) + abs(pd.read_csv(StringIO(data), engine='c', float_precision='high')['c'][0] - float(val)) + abs(pd.read_csv(StringIO(data), engine='c', float_precision='round_trip')['c'][0] - float(val)) + + .. _io.thousands: Thousand Separators -~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''' + For large numbers that have been written with a thousands separator, you can set the ``thousands`` keyword to a string of length 1 so that integers will be parsed correctly: @@ -721,7 +777,7 @@ The ``thousands`` keyword allows integers to be parsed correctly .. _io.na_values: NA Values -~~~~~~~~~ +''''''''' To control which values are parsed as missing values (which are signified by ``NaN``), specifiy a string in ``na_values``. If you specify a list of strings, then all values in @@ -762,54 +818,14 @@ the default values, in addition to the string ``"Nope"`` are recognized as ``NaN .. _io.infinity: Infinity -~~~~~~~~ +'''''''' ``inf`` like values will be parsed as ``np.inf`` (positive infinity), and ``-inf`` as ``-np.inf`` (negative infinity). These will ignore the case of the value, meaning ``Inf``, will also be parsed as ``np.inf``. -.. _io.comments: - -Comments -~~~~~~~~ -Sometimes comments or meta data may be included in a file: - -.. ipython:: python - :suppress: - - data = ("ID,level,category\n" - "Patient1,123000,x # really unpleasant\n" - "Patient2,23000,y # wouldn't take his medicine\n" - "Patient3,1234018,z # awesome") - - with open('tmp.csv', 'w') as fh: - fh.write(data) - -.. ipython:: python - - print(open('tmp.csv').read()) - -By default, the parse includes the comments in the output: - -.. ipython:: python - - df = pd.read_csv('tmp.csv') - df - -We can suppress the comments using the ``comment`` keyword: - -.. 
ipython:: python - - df = pd.read_csv('tmp.csv', comment='#') - df - -.. ipython:: python - :suppress: - - os.remove('tmp.csv') - Returning Series -~~~~~~~~~~~~~~~~ +'''''''''''''''' Using the ``squeeze`` keyword, the parser will return output with a single column as a ``Series``: @@ -842,7 +858,7 @@ as a ``Series``: .. _io.boolean: Boolean values -~~~~~~~~~~~~~~ +'''''''''''''' The common values ``True``, ``False``, ``TRUE``, and ``FALSE`` are all recognized as boolean. Sometime you would want to recognize some other values @@ -859,7 +875,7 @@ options: .. _io.bad_lines: Handling "bad" lines -~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''' Some files may have malformed lines with too few fields or too many. Lines with too few fields will have NA values filled in the trailing fields. Lines with @@ -894,7 +910,7 @@ You can elect to skip bad lines: .. _io.quoting: Quoting and Escape Characters -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''''''''''''' Quotes (and other escape characters) in embedded fields can be handled in any number of ways. One way is to use backslashes; to properly parse this data, you @@ -909,7 +925,8 @@ should pass the ``escapechar`` option: .. _io.fwf: Files with Fixed Width Columns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''''''''''' + While ``read_csv`` reads delimited data, the :func:`~pandas.io.parsers.read_fwf` function works with data files that have known and fixed column widths. The function parameters to ``read_fwf`` are largely the same as `read_csv` with @@ -982,8 +999,11 @@ is whitespace). os.remove('bar.csv') +Indexes +''''''' + Files with an "implicit" index column -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++++++++++++++++++ .. ipython:: python :suppress: @@ -1021,7 +1041,7 @@ to do as before: Reading an index with a ``MultiIndex`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++++++++++++++ .. _io.csv_multiindex: @@ -1044,7 +1064,7 @@ returned object: .. 
_io.multi_index_columns: Reading columns with a ``MultiIndex`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++++++++++++++++++ By specifying list of row locations for the ``header`` argument, you can read in a ``MultiIndex`` for the columns. Specifying non-consecutive @@ -1088,7 +1108,7 @@ with ``df.to_csv(..., index=False``), then any ``names`` on the columns index wi .. _io.sniff: Automatically "sniffing" the delimiter -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''''''''''''''''''' ``read_csv`` is capable of inferring delimited (not necessarily comma-separated) files, as pandas uses the :class:`python:csv.Sniffer` @@ -1109,7 +1129,7 @@ class of the csv module. For this, you have to specify ``sep=None``. .. _io.chunking: Iterating through files chunk by chunk -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''''''''''''''''''' Suppose you wish to iterate through a (potentially very large) file lazily rather than reading the entire file into memory, such as the following: @@ -1148,7 +1168,7 @@ Specifying ``iterator=True`` will also return the ``TextFileReader`` object: os.remove('tmp2.sv') Specifying the parser engine -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''''''''' Under the hood pandas uses a fast and efficient parser implemented in C as well as a python implementation which is currently more feature-complete. Where @@ -1163,10 +1183,13 @@ options include: Specifying any of the above options will produce a ``ParserWarning`` unless the python engine is selected explicitly using ``engine='python'``. +Writing out Data +'''''''''''''''' + .. _io.store_in_csv: Writing to CSV format -~~~~~~~~~~~~~~~~~~~~~ ++++++++++++++++++++++ The Series and DataFrame objects have an instance method ``to_csv`` which allows storing the contents of the object as a comma-separated-values file. The @@ -1197,7 +1220,7 @@ function takes a number of arguments. Only the first is required. 
- ``date_format``: Format string for datetime objects Writing a formatted string -~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++ .. _io.formatting: @@ -1235,7 +1258,7 @@ Read and write ``JSON`` format files and strings. .. _io.json_writer: Writing JSON -~~~~~~~~~~~~ +'''''''''''' A ``Series`` or ``DataFrame`` can be converted to a valid JSON string. Use ``to_json`` with optional parameters: @@ -1426,7 +1449,7 @@ which can be dealt with by specifying a simple ``default_handler``: .. _io.json_reader: Reading JSON -~~~~~~~~~~~~ +'''''''''''' Reading a JSON string to pandas object can take a number of parameters. The parser will try to parse a ``DataFrame`` if ``typ`` is not supplied or @@ -1488,9 +1511,9 @@ be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') Large integer values may be converted to dates if ``convert_dates=True`` and the data and / or column labels appear 'date-like'. The exact threshold depends on the ``date_unit`` specified. 'date-like' means that the column label meets one of the following criteria: - * it ends with ``'_at'`` + * it ends with ``'_at'`` * it ends with ``'_time'`` - * it begins with ``'timestamp'`` + * it begins with ``'timestamp'`` * it is ``'modified'`` * it is ``'date'`` @@ -1631,7 +1654,7 @@ The speedup is less noticeable for smaller datasets: .. _io.json_normalize: Normalization -~~~~~~~~~~~~~ +''''''''''''' .. versionadded:: 0.13.0 @@ -1665,7 +1688,7 @@ HTML .. _io.read_html: Reading HTML Content -~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''' .. warning:: @@ -1820,7 +1843,7 @@ succeeds, the function will return*. .. _io.html: Writing to HTML files -~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''' ``DataFrame`` objects have an instance method ``to_html`` which renders the contents of the ``DataFrame`` as an HTML table. The function arguments are as @@ -1961,8 +1984,10 @@ module and use the same parsing code as the above to convert tabular data into a DataFrame. 
See the :ref:`cookbook<cookbook.excel>` for some advanced strategies +.. _io.excel_reader: + Reading Excel Files -~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''' .. versionadded:: 0.16 @@ -2102,8 +2127,13 @@ missing data to recover integer dtype: cfun = lambda x: int(x) if x else -1 read_excel('path_to_file.xls', 'Sheet1', converters={'MyInts': cfun}) +.. _io.excel_writer: + Writing Excel Files -~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''' + +Writing Excel Files to Disk ++++++++++++++++++++++++++++ To write a DataFrame object to a sheet of an Excel file, you can use the ``to_excel`` instance method. The arguments are largely the same as ``to_csv`` @@ -2149,10 +2179,49 @@ one can pass an :class:`~pandas.io.excel.ExcelWriter`. 1``). You can pass ``convert_float=False`` to disable this behavior, which may give a slight performance improvement. +.. _io.excel_writing_buffer: + +Writing Excel Files to Memory ++++++++++++++++++++++++++++++ + +.. versionadded:: 0.17 + +Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or +``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`. + +.. code-block:: python + + # Safe import for either Python 2.x or 3.x + try: + from io import BytesIO + except ImportError: + from cStringIO import StringIO as BytesIO + + bio = BytesIO() + + # By setting the 'engine' in the ExcelWriter constructor. + writer = ExcelWriter(bio, engine='xlsxwriter') + df.to_excel(writer, sheet_name='Sheet1') + + # Save the workbook + writer.save() + + # Seek to the beginning and read to copy the workbook to a variable in memory + bio.seek(0) + workbook = bio.read() + +.. note:: + + ``engine`` is optional but recommended. Setting the engine determines + the version of workbook produced. Setting ``engine='xlrd'`` will produce an + Excel 2003-format workbook (xls). Using either ``'openpyxl'`` or + ``'xlsxwriter'`` will produce an Excel 2007-format workbook (xlsx). If + omitted, an Excel 2007-formatted workbook is produced. + .. 
_io.excel.writers: Excel writer engines -~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''' .. versionadded:: 0.13 @@ -2194,45 +2263,6 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are: df.to_excel('path_to_file.xlsx', sheet_name='Sheet1') -.. _io.excel_writing_buffer: - -Writing Excel Files to Memory -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. versionadded:: 0.17 - -Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` or -``BytesIO`` using :class:`~pandas.io.excel.ExcelWriter`. - -.. code-block:: python - - # Safe import for either Python 2.x or 3.x - try: - from io import BytesIO - except ImportError: - from cStringIO import StringIO as BytesIO - - bio = BytesIO() - - # By setting the 'engine' in the ExcelWriter constructor. - writer = ExcelWriter(bio, engine='xlsxwriter') - df.to_excel(writer, sheet_name='Sheet1') - - # Save the workbook - writer.save() - - # Seek to the beginning and read to copy the workbook to a variable in memory - bio.seek(0) - workbook = bio.read() - -.. note:: - - ``engine`` is optional but recommended. Setting the engine determines - the version of workbook produced. Setting ``engine='xlrd'`` will produce an - Excel 2003-format workbook (xls). Using either ``'openpyxl'`` or - ``'xlsxwriter'`` will produce an Excel 2007-format workbook (xlsx). If - omitted, an Excel 2007-formatted workbook is produced. - .. _io.clipboard: Clipboard @@ -2387,7 +2417,7 @@ pandas objects. os.remove('foo2.msg') Read/Write API -~~~~~~~~~~~~~~ +'''''''''''''' Msgpacks can also be read from and written to strings. @@ -2502,7 +2532,7 @@ Closing a Store, Context Manager Read/Write API -~~~~~~~~~~~~~~ +'''''''''''''' ``HDFStore`` supports an top-level API using ``read_hdf`` for reading and ``to_hdf`` for writing, similar to how ``read_csv`` and ``to_csv`` work. (new in 0.11.0) @@ -2581,7 +2611,7 @@ This is also true for the major axis of a ``Panel``: .. _io.hdf5-fixed: Fixed Format -~~~~~~~~~~~~ +'''''''''''' .. 
note:: @@ -2610,7 +2640,7 @@ This format is specified by default when using ``put`` or ``to_hdf`` or by ``for .. _io.hdf5-table: Table Format -~~~~~~~~~~~~ +'''''''''''' ``HDFStore`` supports another ``PyTables`` format on disk, the ``table`` format. Conceptually a ``table`` is shaped very much like a DataFrame, @@ -2654,7 +2684,7 @@ enable ``put/append/to_hdf`` to by default store in the ``table`` format. .. _io.hdf5-keys: Hierarchical Keys -~~~~~~~~~~~~~~~~~ +''''''''''''''''' Keys to a store can be specified as a string. These can be in a hierarchical path-name like format (e.g. ``foo/bar/bah``), which will @@ -2679,8 +2709,11 @@ everything in the sub-store and BELOW, so be *careful*. .. _io.hdf5-types: +Storing Types +''''''''''''' + Storing Mixed Types in a Table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++++++ Storing mixed-dtype data is supported. Strings are stored as a fixed-width using the maximum size of the appended column. Subsequent @@ -2714,7 +2747,7 @@ defaults to `nan`. store.root.df_mixed.table Storing Multi-Index DataFrames -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++++++ Storing multi-index dataframes as tables is very similar to storing/selecting from homogeneous index DataFrames. @@ -2739,8 +2772,11 @@ storing/selecting from homogeneous index DataFrames. .. _io.hdf5-query: +Querying +'''''''' + Querying a Table -~~~~~~~~~~~~~~~~ +++++++++++++++++ .. warning:: @@ -2755,20 +2791,20 @@ data. A query is specified using the ``Term`` class under the hood, as a boolean expression. 
- - ``index`` and ``columns`` are supported indexers of a DataFrame - - ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of - the Panel - - if ``data_columns`` are specified, these can be used as additional indexers +- ``index`` and ``columns`` are supported indexers of a DataFrame +- ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of + the Panel +- if ``data_columns`` are specified, these can be used as additional indexers Valid comparison operators are: - - ``=, ==, !=, >, >=, <, <=`` +``=, ==, !=, >, >=, <, <=`` Valid boolean expressions are combined with: - - ``|`` : or - - ``&`` : and - - ``(`` and ``)`` : for grouping +- ``|`` : or +- ``&`` : and +- ``(`` and ``)`` : for grouping These rules are similar to how boolean expressions are used in pandas for indexing. @@ -2781,28 +2817,28 @@ These rules are similar to how boolean expressions are used in pandas for indexi The following are valid expressions: - - ``'index>=date'`` - - ``"columns=['A', 'D']"`` - - ``"columns in ['A', 'D']"`` - - ``'columns=A'`` - - ``'columns==A'`` - - ``"~(columns=['A','B'])"`` - - ``'index>df.index[3] & string="bar"'`` - - ``'(index>df.index[3] & index<=df.index[6]) | string="bar"'`` - - ``"ts>=Timestamp('2012-02-01')"`` - - ``"major_axis>=20130101"`` +- ``'index>=date'`` +- ``"columns=['A', 'D']"`` +- ``"columns in ['A', 'D']"`` +- ``'columns=A'`` +- ``'columns==A'`` +- ``"~(columns=['A','B'])"`` +- ``'index>df.index[3] & string="bar"'`` +- ``'(index>df.index[3] & index<=df.index[6]) | string="bar"'`` +- ``"ts>=Timestamp('2012-02-01')"`` +- ``"major_axis>=20130101"`` The ``indexers`` are on the left-hand side of the sub-expression: - - ``columns``, ``major_axis``, ``ts`` +``columns``, ``major_axis``, ``ts`` The right-hand side of the sub-expression (after a comparison operator) can be: - - functions that will be evaluated, e.g. ``Timestamp('2012-02-01')`` - - strings, e.g. ``"bar"`` - - date-like, e.g. 
``20130101``, or ``"20130101"`` - - lists, e.g. ``"['A','B']"`` - - variables that are defined in the local names space, e.g. ``date`` +- functions that will be evaluated, e.g. ``Timestamp('2012-02-01')`` +- strings, e.g. ``"bar"`` +- date-like, e.g. ``20130101``, or ``"20130101"`` +- lists, e.g. ``"['A','B']"`` +- variables that are defined in the local names space, e.g. ``date`` .. note:: @@ -2893,7 +2929,8 @@ space. These are in terms of the total number of rows in a table. .. _io.hdf5-timedelta: -**Using timedelta64[ns]** +Using timedelta64[ns] ++++++++++++++++++++++ .. versionadded:: 0.13 @@ -2901,10 +2938,6 @@ Beginning in 0.13.0, you can store and query using the ``timedelta64[ns]`` type. specified in the format: ``<float>(<unit>)``, where float may be signed (and fractional), and unit can be ``D,s,ms,us,ns`` for the timedelta. Here's an example: -.. warning:: - - This requires ``numpy >= 1.7`` - .. ipython:: python from datetime import timedelta @@ -2915,7 +2948,7 @@ specified in the format: ``<float>(<unit>)``, where float may be signed (and fra store.select('dftd',"C<'-3.5D'") Indexing -~~~~~~~~ +++++++++ You can create/modify an index for a table with ``create_table_index`` after data is already in the table (after and ``append/put`` @@ -2943,7 +2976,7 @@ indexed dimension as the ``where``. See `here <http://stackoverflow.com/questions/17893370/ptrepack-sortby-needs-full-index>`__ for how to create a completely-sorted-index (CSI) on an existing store. Query via Data Columns -~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++ You can designate (and index) certain columns that you want to be able to perform queries (other than the `indexable` columns, which you can @@ -2983,7 +3016,7 @@ append/put operation (Of course you can simply read in the data and create a new table!) 
Iterator -~~~~~~~~ +++++++++ Starting in ``0.11.0``, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk`` to ``select`` and ``select_as_multiple`` to return an iterator on the results. @@ -3029,9 +3062,10 @@ chunks. print store.select('dfeq',where=c) Advanced Queries -~~~~~~~~~~~~~~~~ +++++++++++++++++ -**Select a Single Column** +Select a Single Column +^^^^^^^^^^^^^^^^^^^^^^ To retrieve a single indexable or data column, use the method ``select_column``. This will, for example, enable you to get the index @@ -3045,7 +3079,8 @@ These do not currently accept the ``where`` selector. .. _io.hdf5-selecting_coordinates: -**Selecting coordinates** +Selecting coordinates +^^^^^^^^^^^^^^^^^^^^^ Sometimes you want to get the coordinates (a.k.a the index locations) of your query. This returns an ``Int64Index`` of the resulting locations. These coordinates can also be passed to subsequent @@ -3061,7 +3096,8 @@ Sometimes you want to get the coordinates (a.k.a the index locations) of your qu .. _io.hdf5-where_mask: -**Selecting using a where mask** +Selecting using a where mask +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Sometime your query can involve creating a list of rows to select. Usually this ``mask`` would be a resulting ``index`` from an indexing operation. This example selects the months of @@ -3075,7 +3111,8 @@ a datetimeindex which are 5. where = c[DatetimeIndex(c).month==5].index store.select('df_mask',where=where) -**Storer Object** +Storer Object +^^^^^^^^^^^^^ If you want to inspect the stored object, retrieve via ``get_storer``. You could use this programmatically to say get the number @@ -3087,7 +3124,7 @@ of rows in an object. Multiple Table Queries -~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++ New in 0.10.1 are the methods ``append_to_multiple`` and ``select_as_multiple``, that can perform appending/selecting from @@ -3136,7 +3173,7 @@ results. 
Delete from a Table -~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''' You can delete from a table selectively by specifying a ``where``. In deleting rows, it is important to understand the ``PyTables`` deletes @@ -3152,15 +3189,15 @@ simple use case. You store panel-type data, with dates in the ``major_axis`` and ids in the ``minor_axis``. The data is then interleaved like this: - - date_1 - - id_1 - - id_2 - - . - - id_n - - date_2 - - id_1 - - . - - id_n +- date_1 + - id_1 + - id_2 + - . + - id_n +- date_2 + - id_1 + - . + - id_n It should be clear that a delete operation on the ``major_axis`` will be fairly quick, as one chunk is removed, then the following data moved. On @@ -3174,21 +3211,29 @@ the table using a ``where`` that selects all but the missing data. store.remove('wp', 'major_axis>20000102' ) store.select('wp') -Please note that HDF5 **DOES NOT RECLAIM SPACE** in the h5 files -automatically. Thus, repeatedly deleting (or removing nodes) and adding -again **WILL TEND TO INCREASE THE FILE SIZE**. To *clean* the file, use -``ptrepack`` (see below). +.. warning:: + + Please note that HDF5 **DOES NOT RECLAIM SPACE** in the h5 files + automatically. Thus, repeatedly deleting (or removing nodes) and adding + again **WILL TEND TO INCREASE THE FILE SIZE**. To *clean* the file, use + :ref:`ptrepack <io.hdf5-ptrepack>` + +.. _io.hdf5-notes: + +Notes & Caveats +''''''''''''''' + Compression -~~~~~~~~~~~ ++++++++++++ ``PyTables`` allows the stored data to be compressed. This applies to all kinds of stores, not just tables. - - Pass ``complevel=int`` for a compression level (1-9, with 0 being no - compression, and the default) - - Pass ``complib=lib`` where lib is any of ``zlib, bzip2, lzo, blosc`` for - whichever compression library you prefer. +- Pass ``complevel=int`` for a compression level (1-9, with 0 being no + compression, and the default) +- Pass ``complib=lib`` where lib is any of ``zlib, bzip2, lzo, blosc`` for + whichever compression library you prefer. 
``HDFStore`` will use the file based compression scheme if no overriding ``complib`` or ``complevel`` options are provided. ``blosc`` offers very @@ -3197,14 +3242,21 @@ may not be installed (by Python) by default. Compression for all objects within the file - - ``store_compressed = HDFStore('store_compressed.h5', complevel=9, complib='blosc')`` +.. code-block:: python + + store_compressed = HDFStore('store_compressed.h5', complevel=9, complib='blosc') Or on-the-fly compression (this only applies to tables). You can turn off file compression for a specific table by passing ``complevel=0`` - - ``store.append('df', df, complib='zlib', complevel=5)`` +.. code-block:: python + + store.append('df', df, complib='zlib', complevel=5) -**ptrepack** +.. _io.hdf5-ptrepack: + +ptrepack +++++++++ ``PyTables`` offers better write performance when tables are compressed after they are written, as opposed to turning on compression at the very @@ -3212,42 +3264,39 @@ beginning. You can use the supplied ``PyTables`` utility ``ptrepack``. In addition, ``ptrepack`` can change compression levels after the fact. - - ``ptrepack --chunkshape=auto --propindexes --complevel=9 --complib=blosc in.h5 out.h5`` +.. code-block:: console + + ptrepack --chunkshape=auto --propindexes --complevel=9 --complib=blosc in.h5 out.h5 Furthermore ``ptrepack in.h5 out.h5`` will *repack* the file to allow you to reuse previously deleted space. Alternatively, one can simply remove the file and write again, or use the ``copy`` method. -.. _io.hdf5-notes: +.. _io.hdf5-caveats: -Notes & Caveats -~~~~~~~~~~~~~~~ - - - Once a ``table`` is created its items (Panel) / columns (DataFrame) - are fixed; only exactly the same columns can be appended - - If a row has ``np.nan`` for **EVERY COLUMN** (having a ``nan`` - in a string, or a ``NaT`` in a datetime-like column counts as having - a value), then those rows **WILL BE DROPPED IMPLICITLY**. This limitation - *may* be addressed in the future. 
- - ``HDFStore`` is **not-threadsafe for writing**. The underlying - ``PyTables`` only supports concurrent reads (via threading or - processes). If you need reading and writing *at the same time*, you - need to serialize these operations in a single thread in a single - process. You will corrupt your data otherwise. See the (:issue:`2397`) for more information. - - If you use locks to manage write access between multiple processes, you - may want to use :py:func:`~os.fsync` before releasing write locks. For - convenience you can use ``store.flush(fsync=True)`` to do this for you. - - ``PyTables`` only supports fixed-width string columns in - ``tables``. The sizes of a string based indexing column - (e.g. *columns* or *minor_axis*) are determined as the maximum size - of the elements in that axis or by passing the parameter - - Be aware that timezones (e.g., ``pytz.timezone('US/Eastern')``) - are not necessarily equal across timezone versions. So if data is - localized to a specific timezone in the HDFStore using one version - of a timezone library and that data is updated with another version, the data - will be converted to UTC since these timezones are not considered - equal. Either use the same version of timezone library or use ``tz_convert`` with - the updated timezone definition. +Caveats ++++++++ + +.. warning:: + + ``HDFStore`` is **not-threadsafe for writing**. The underlying + ``PyTables`` only supports concurrent reads (via threading or + processes). If you need reading and writing *at the same time*, you + need to serialize these operations in a single thread in a single + process. You will corrupt your data otherwise. See the (:issue:`2397`) for more information. + +- If you use locks to manage write access between multiple processes, you + may want to use :py:func:`~os.fsync` before releasing write locks. For + convenience you can use ``store.flush(fsync=True)`` to do this for you. 
+- Once a ``table`` is created its items (Panel) / columns (DataFrame) + are fixed; only exactly the same columns can be appended +- Be aware that timezones (e.g., ``pytz.timezone('US/Eastern')``) + are not necessarily equal across timezone versions. So if data is + localized to a specific timezone in the HDFStore using one version + of a timezone library and that data is updated with another version, the data + will be converted to UTC since these timezones are not considered + equal. Either use the same version of timezone library or use ``tz_convert`` with + the updated timezone definition. .. warning:: @@ -3258,8 +3307,10 @@ Notes & Caveats Other identifiers cannot be used in a ``where`` clause and are generally a bad idea. +.. _io.hdf5-data_types: + DataTypes -~~~~~~~~~ +''''''''' ``HDFStore`` will map an object dtype to the ``PyTables`` underlying dtype. This means the following types are known to work: @@ -3281,7 +3332,7 @@ object : ``strings`` ``np.nan`` .. _io.hdf5-categorical: Categorical Data -~~~~~~~~~~~~~~~~ +++++++++++++++++ .. versionadded:: 0.15.2 @@ -3326,7 +3377,7 @@ stored in a more efficient manner. String Columns -~~~~~~~~~~~~~~ +++++++++++++++ **min_itemsize** @@ -3345,7 +3396,7 @@ Starting in 0.11.0, passing a ``min_itemsize`` dict will cause all passed column .. note:: - If you are not passing any *data_columns*, then the min_itemsize will be the maximum of the length of any string passed + If you are not passing any ``data_columns``, then the ``min_itemsize`` will be the maximum of the length of any string passed .. ipython:: python @@ -3381,7 +3432,7 @@ You could inadvertently turn an actual ``nan`` value into a missing value. .. _io.external_compatibility: External Compatibility -~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''' ``HDFStore`` writes ``table`` format objects in specific formats suitable for producing loss-less round trips to pandas objects. 
For external @@ -3470,7 +3521,7 @@ Now you can import the ``DataFrame`` into R: single HDF5 file. Backwards Compatibility -~~~~~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''''''' 0.10.1 of ``HDFStore`` can read tables created in a prior version of pandas, however query terms using the @@ -3508,7 +3559,7 @@ number of options, please see the docstring. Performance -~~~~~~~~~~~ +''''''''''' - ``tables`` format come with a writing performance penalty as compared to ``fixed`` stores. The benefit is the ability to append/delete and @@ -3531,7 +3582,7 @@ Performance for more information and some solutions. Experimental -~~~~~~~~~~~~ +'''''''''''' HDFStore supports ``Panel4D`` storage. @@ -3625,7 +3676,7 @@ If you want to manage your own connections you can pass one of those instead: data = pd.read_sql_table('data', conn) Writing DataFrames -~~~~~~~~~~~~~~~~~~ +'''''''''''''''''' Assuming the following data is in a DataFrame ``data``, we can insert it into the database using :func:`~pandas.DataFrame.to_sql`. @@ -3699,7 +3750,7 @@ default ``Text`` type for string columns: a categorical. Reading Tables -~~~~~~~~~~~~~~ +'''''''''''''' :func:`~pandas.read_sql_table` will read a database table given the table name and optionally a subset of columns to read. @@ -3739,7 +3790,7 @@ to pass to :func:`pandas.to_datetime`: You can check if a table exists using :func:`~pandas.io.sql.has_table` Schema support -~~~~~~~~~~~~~~ +'''''''''''''' .. versionadded:: 0.15.0 @@ -3754,7 +3805,7 @@ have schema's). For example: pd.read_sql_table('table', engine, schema='other_schema') Querying -~~~~~~~~ +'''''''' You can query using raw SQL in the :func:`~pandas.read_sql_query` function. In this case you must use the SQL variant appropriate for your database. @@ -3798,7 +3849,7 @@ variant appropriate for your database. 
Engine connection examples -~~~~~~~~~~~~~~~~~~~~~~~~~~ +'''''''''''''''''''''''''' To connect with SQLAlchemy you use the :func:`create_engine` function to create an engine object from database URI. You only need to create the engine once per database you are @@ -3827,7 +3878,7 @@ For more information see the examples the SQLAlchemy `documentation <http://docs Sqlite fallback -~~~~~~~~~~~~~~~ +''''''''''''''' The use of sqlite is supported without using SQLAlchemy. This mode requires a Python database adapter which respect the `Python @@ -3951,7 +4002,7 @@ Stata Format .. _io.stata_writer: Writing to Stata format -~~~~~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''''''' The method :func:`~pandas.core.frame.DataFrame.to_stata` will write a DataFrame into a .dta file. The format version of this file is always 115 (Stata 12). @@ -4001,7 +4052,7 @@ outside of this range, the variable is cast to ``int16``. .. _io.stata_reader: Reading from Stata format -~~~~~~~~~~~~~~~~~~~~~~~~~ +''''''''''''''''''''''''' The top-level function ``read_stata`` will read a dta file and return either a DataFrame or a :class:`~pandas.io.stata.StataReader` that can @@ -4068,7 +4119,7 @@ values will have ``object`` data type. .. _io.stata-categorical: Categorical Data -~~~~~~~~~~~~~~~~ +++++++++++++++++ .. versionadded:: 0.15.2 @@ -4121,7 +4172,7 @@ cleanly to its tabular data model. For reading and writing other file formats into and from pandas, we recommend these packages from the broader community. netCDF -~~~~~~ +'''''' xray_ provides data structures inspired by the pandas DataFrame for working with multi-dimensional datasets, with a focus on the netCDF file format and @@ -4131,12 +4182,14 @@ easy conversion to and from pandas. .. _io.sas: +.. _io.sas_reader: + SAS Format ---------- .. versionadded:: 0.17.0 -The top-level function :function:`read_sas` currently can read (but +The top-level function :func:`read_sas` currently can read (but not write) SAS xport (.XPT) format files. 
Pandas cannot currently handle SAS7BDAT files. diff --git a/doc/source/themes/nature_with_gtoc/static/nature.css_t b/doc/source/themes/nature_with_gtoc/static/nature.css_t index 61b0e2cce5e5a..33644101eb425 100644 --- a/doc/source/themes/nature_with_gtoc/static/nature.css_t +++ b/doc/source/themes/nature_with_gtoc/static/nature.css_t @@ -31,7 +31,7 @@ div.bodywrapper { /* ugly hack, probably not attractive with other font size for re*/ margin: 0 0 0 {{ theme_sidebarwidth|toint}}px; min-width: 540px; - max-width: 720px; + max-width: 800px; } diff --git a/doc/source/themes/nature_with_gtoc/theme.conf b/doc/source/themes/nature_with_gtoc/theme.conf index 1cc40044646bb..290a07bde8806 100644 --- a/doc/source/themes/nature_with_gtoc/theme.conf +++ b/doc/source/themes/nature_with_gtoc/theme.conf @@ -2,3 +2,6 @@ inherit = basic stylesheet = nature.css pygments_style = tango + +[options] +sidebarwidth = 270
- makes the toc 4 levels - increases the width of the side bar a bit - increases the overall width just a bit ![io tools text csv hdf5 pandas 0 16 2 557 g23b9715 documentation](https://cloud.githubusercontent.com/assets/953992/9696820/5bbb9528-5346-11e5-8e52-15759d02257a.png) ![io tools text csv hdf5 pandas 0 16 2 557 g23b9715 documentation 1](https://cloud.githubusercontent.com/assets/953992/9696821/5d955190-5346-11e5-9e3f-3025c232f166.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/10992
2015-09-04T19:16:02Z
2015-09-05T14:19:49Z
2015-09-05T14:19:49Z
2015-09-05T14:19:49Z
ENH: Added warning when excel file contains duplicate column names
diff --git a/pandas/io/excel.py b/pandas/io/excel.py index d5258cb32e6e0..8f1025ff59b1d 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -309,6 +309,9 @@ def _excel2num(x): cols.append(_excel2num(rng)) return cols + if len(parse_cols) != len(set(parse_cols)): + warn('You have duplicate column names. Consider re-name them. ') + if isinstance(parse_cols, int): return i <= parse_cols elif isinstance(parse_cols, compat.string_types):
See #10970 and #10982. Users face bugs but no informative errors are given. This should warn them and inform them what might be wrong.
https://api.github.com/repos/pandas-dev/pandas/pulls/10991
2015-09-04T17:40:16Z
2015-09-05T17:59:11Z
null
2015-09-05T18:24:05Z
COMPAT/TST: fix group_info dtype issues, xref #10981
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index f42825a11933b..7a5770d3968ec 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1793,13 +1793,13 @@ def indices(self): @cache_readonly def group_info(self): ngroups = self.ngroups - obs_group_ids = np.arange(ngroups) + obs_group_ids = np.arange(ngroups, dtype='int64') rep = np.diff(np.r_[0, self.bins]) if ngroups == len(self.bins): - comp_ids = np.repeat(np.arange(ngroups), rep) + comp_ids = np.repeat(np.arange(ngroups, dtype='int64'), rep) else: - comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) + comp_ids = np.repeat(np.r_[-1, np.arange(ngroups, dtype='int64')], rep) return comp_ids, obs_group_ids, ngroups @@ -2552,8 +2552,8 @@ def nunique(self, dropna=True): # group boundries are where group ids change # unique observations are where sorted values change - idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] - inc = np.r_[1, val[1:] != val[:-1]] + idx = com._ensure_int64(np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]) + inc = com._ensure_int64(np.r_[1, val[1:] != val[:-1]]) # 1st item of each group is a new unique observation mask = isnull(val) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 49d344631e4b9..ec03d558e45b8 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -919,7 +919,7 @@ def test_resample_timegrouper(self): def test_resample_group_info(self): # GH10914 for n, k in product((10000, 100000), (10, 100, 1000)): dr = date_range(start='2015-08-27', periods=n // 10, freq='T') - ts = Series(np.random.randint(0, n // k, n), + ts = Series(np.random.randint(0, n // k, n).astype('int64'), index=np.random.choice(dr, n)) left = ts.resample('30T', how='nunique') @@ -1585,7 +1585,7 @@ def test_aggregate_with_nat(self): # check TimeGrouper's aggregation is identical as normal groupby n = 20 - data = np.random.randn(n, 4) + data = np.random.randn(n, 4).astype('int64') 
normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) normal_df['key'] = [1, 2, np.nan, 4, 5] * 4
closes #10981
https://api.github.com/repos/pandas-dev/pandas/pulls/10988
2015-09-04T14:09:13Z
2015-09-04T15:34:30Z
2015-09-04T15:34:30Z
2015-09-13T20:00:25Z
TST: test_nanops turns off bottneck for all tests after
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index bb6cb5a444dd9..3615cc3dc8ad8 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -842,6 +842,44 @@ def no_nans(x): _consistency_data = _create_consistency_data() class TestMomentsConsistency(Base): + base_functions = [ + (lambda v: Series(v).count(), None, 'count'), + (lambda v: Series(v).max(), None, 'max'), + (lambda v: Series(v).min(), None, 'min'), + (lambda v: Series(v).sum(), None, 'sum'), + (lambda v: Series(v).mean(), None, 'mean'), + (lambda v: Series(v).std(), 1, 'std'), + (lambda v: Series(v).cov(Series(v)), None, 'cov'), + (lambda v: Series(v).corr(Series(v)), None, 'corr'), + (lambda v: Series(v).var(), 1, 'var'), + #(lambda v: Series(v).skew(), 3, 'skew'), # restore once GH 8086 is fixed + #(lambda v: Series(v).kurt(), 4, 'kurt'), # restore once GH 8086 is fixed + #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods, 'quantile'), + # lambda v: Series(v).quantile(0.3), None, 'quantile'), # restore once GH 8084 is fixed + (lambda v: Series(v).median(), None ,'median'), + (np.nanmax, 1, 'max'), + (np.nanmin, 1, 'min'), + (np.nansum, 1, 'sum'), + ] + if np.__version__ >= LooseVersion('1.8.0'): + base_functions += [ + (np.nanmean, 1, 'mean'), + (lambda v: np.nanstd(v, ddof=1), 1 ,'std'), + (lambda v: np.nanvar(v, ddof=1), 1 ,'var'), + ] + if np.__version__ >= LooseVersion('1.9.0'): + base_functions += [ + (np.nanmedian, 1, 'median'), + ] + no_nan_functions = [ + (np.max, None, 'max'), + (np.min, None, 'min'), + (np.sum, None, 'sum'), + (np.mean, None, 'mean'), + (lambda v: np.std(v, ddof=1), 1 ,'std'), + (lambda v: np.var(v, ddof=1), 1 ,'var'), + (np.median, None, 'median'), + ] def _create_data(self): super(TestMomentsConsistency, self)._create_data() @@ -877,9 +915,11 @@ def _non_null_values(x): # self.assertTrue(_non_null_values(corr_x_x).issubset(set([1.]))) # restore once rolling_cov(x, x) is 
identically equal to var(x) if is_constant: + exp = x.max() if isinstance(x, Series) else x.max().max() + # check mean of constant series expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = x.max().max() + expected[count_x >= max(min_periods, 1)] = exp assert_equal(mean_x, expected) # check correlation of constant series with itself is NaN @@ -1030,44 +1070,6 @@ def _ewma(s, com, min_periods, adjust, ignore_na): @slow def test_expanding_consistency(self): - base_functions = [ - (mom.expanding_count, lambda v: Series(v).count(), None), - (mom.expanding_max, lambda v: Series(v).max(), None), - (mom.expanding_min, lambda v: Series(v).min(), None), - (mom.expanding_sum, lambda v: Series(v).sum(), None), - (mom.expanding_mean, lambda v: Series(v).mean(), None), - (mom.expanding_std, lambda v: Series(v).std(), 1), - (mom.expanding_cov, lambda v: Series(v).cov(Series(v)), None), - (mom.expanding_corr, lambda v: Series(v).corr(Series(v)), None), - (mom.expanding_var, lambda v: Series(v).var(), 1), - #(mom.expanding_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed - #(mom.expanding_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed - #(lambda x, min_periods: mom.expanding_quantile(x, 0.3, min_periods=min_periods), - # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed - (mom.expanding_median, lambda v: Series(v).median(), None), - (mom.expanding_max, np.nanmax, 1), - (mom.expanding_min, np.nanmin, 1), - (mom.expanding_sum, np.nansum, 1), - ] - if np.__version__ >= LooseVersion('1.8.0'): - base_functions += [ - (mom.expanding_mean, np.nanmean, 1), - (mom.expanding_std, lambda v: np.nanstd(v, ddof=1), 1), - (mom.expanding_var, lambda v: np.nanvar(v, ddof=1), 1), - ] - if np.__version__ >= LooseVersion('1.9.0'): - base_functions += [ - (mom.expanding_median, np.nanmedian, 1), - ] - no_nan_functions = [ - (mom.expanding_max, np.max, None), - (mom.expanding_min, np.min, None), - (mom.expanding_sum, 
np.sum, None), - (mom.expanding_mean, np.mean, None), - (mom.expanding_std, lambda v: np.std(v, ddof=1), 1), - (mom.expanding_var, lambda v: np.var(v, ddof=1), 1), - (mom.expanding_median, np.median, None), - ] # suppress warnings about empty slices, as we are deliberately testing with empty/0-length Series/DataFrames with warnings.catch_warnings(): @@ -1095,12 +1097,14 @@ def test_expanding_consistency(self): # or (b) expanding_apply of np.nanxyz() for (x, is_constant, no_nans) in self.data: assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal - functions = base_functions + functions = self.base_functions # GH 8269 if no_nans: - functions = base_functions + no_nan_functions - for (expanding_f, f, require_min_periods) in functions: + functions = self.base_functions + self.no_nan_functions + for (f, require_min_periods, name) in functions: + expanding_f = getattr(mom,'expanding_{0}'.format(name)) + if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods): continue @@ -1113,7 +1117,9 @@ def test_expanding_consistency(self): else: expanding_f_result = expanding_f(x, min_periods=min_periods) expanding_apply_f_result = mom.expanding_apply(x, func=f, min_periods=min_periods) - assert_equal(expanding_f_result, expanding_apply_f_result) + + if not tm._incompat_bottleneck_version(name): + assert_equal(expanding_f_result, expanding_apply_f_result) if (expanding_f in [mom.expanding_cov, mom.expanding_corr]) and isinstance(x, DataFrame): # test pairwise=True @@ -1127,45 +1133,6 @@ def test_expanding_consistency(self): @slow def test_rolling_consistency(self): - base_functions = [ - (mom.rolling_count, lambda v: Series(v).count(), None), - (mom.rolling_max, lambda v: Series(v).max(), None), - (mom.rolling_min, lambda v: Series(v).min(), None), - (mom.rolling_sum, lambda v: Series(v).sum(), None), - (mom.rolling_mean, lambda v: Series(v).mean(), None), - (mom.rolling_std, lambda v: Series(v).std(), 1), - 
(mom.rolling_cov, lambda v: Series(v).cov(Series(v)), None), - (mom.rolling_corr, lambda v: Series(v).corr(Series(v)), None), - (mom.rolling_var, lambda v: Series(v).var(), 1), - #(mom.rolling_skew, lambda v: Series(v).skew(), 3), # restore once GH 8086 is fixed - #(mom.rolling_kurt, lambda v: Series(v).kurt(), 4), # restore once GH 8086 is fixed - #(lambda x, window, min_periods, center: mom.rolling_quantile(x, window, 0.3, min_periods=min_periods, center=center), - # lambda v: Series(v).quantile(0.3), None), # restore once GH 8084 is fixed - (mom.rolling_median, lambda v: Series(v).median(), None), - (mom.rolling_max, np.nanmax, 1), - (mom.rolling_min, np.nanmin, 1), - (mom.rolling_sum, np.nansum, 1), - ] - if np.__version__ >= LooseVersion('1.8.0'): - base_functions += [ - (mom.rolling_mean, np.nanmean, 1), - (mom.rolling_std, lambda v: np.nanstd(v, ddof=1), 1), - (mom.rolling_var, lambda v: np.nanvar(v, ddof=1), 1), - ] - if np.__version__ >= LooseVersion('1.9.0'): - base_functions += [ - (mom.rolling_median, np.nanmedian, 1), - ] - no_nan_functions = [ - (mom.rolling_max, np.max, None), - (mom.rolling_min, np.min, None), - (mom.rolling_sum, np.sum, None), - (mom.rolling_mean, np.mean, None), - (mom.rolling_std, lambda v: np.std(v, ddof=1), 1), - (mom.rolling_var, lambda v: np.var(v, ddof=1), 1), - (mom.rolling_median, np.median, None), - ] - for window in [1, 2, 3, 10, 20]: for min_periods in set([0, 1, 2, 3, 4, window]): if min_periods and (min_periods > window): @@ -1195,11 +1162,14 @@ def test_rolling_consistency(self): for (x, is_constant, no_nans) in self.data: assert_equal = assert_series_equal if isinstance(x, Series) else assert_frame_equal - functions = base_functions + functions = self.base_functions + # GH 8269 if no_nans: - functions = base_functions + no_nan_functions - for (rolling_f, f, require_min_periods) in functions: + functions = self.base_functions + self.no_nan_functions + for (f, require_min_periods, name) in functions: + rolling_f = 
getattr(mom,'rolling_{0}'.format(name)) + if require_min_periods and (min_periods is not None) and (min_periods < require_min_periods): continue @@ -1214,7 +1184,8 @@ def test_rolling_consistency(self): rolling_f_result = rolling_f(x, window=window, min_periods=min_periods, center=center) rolling_apply_f_result = mom.rolling_apply(x, window=window, func=f, min_periods=min_periods, center=center) - assert_equal(rolling_f_result, rolling_apply_f_result) + if not tm._incompat_bottleneck_version(name): + assert_equal(rolling_f_result, rolling_apply_f_result) if (rolling_f in [mom.rolling_cov, mom.rolling_corr]) and isinstance(x, DataFrame): # test pairwise=True diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index aea165b907c05..e07d6cc3d9b90 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -12471,7 +12471,9 @@ def test_stat_operators_attempt_obj_array(self): self.assertEqual(df.values.dtype, np.object_) result = getattr(df, meth)(1) expected = getattr(df.astype('f8'), meth)(1) - assert_series_equal(result, expected) + + if not tm._incompat_bottleneck_version(meth): + assert_series_equal(result, expected) def test_mean(self): self._check_stat_op('mean', np.mean, check_dates=True) @@ -12696,9 +12698,10 @@ def wrapper(x): assert_series_equal(result0, frame.apply(skipna_wrapper), check_dtype=check_dtype, check_less_precise=check_less_precise) - assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1), - check_dtype=False, - check_less_precise=check_less_precise) + if not tm._incompat_bottleneck_version(name): + assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1), + check_dtype=False, + check_less_precise=check_less_precise) # check dtypes if check_dtype: @@ -12727,8 +12730,9 @@ def wrapper(x): all_na = self.frame * np.NaN r0 = getattr(all_na, name)(axis=0) r1 = getattr(all_na, name)(axis=1) - self.assertTrue(np.isnan(r0).all()) - self.assertTrue(np.isnan(r1).all()) + if not 
tm._incompat_bottleneck_version(name): + self.assertTrue(np.isnan(r0).all()) + self.assertTrue(np.isnan(r1).all()) def test_mode(self): df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11], diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index ec6ab4e0d2ab1..f7b6f947d8924 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2014,7 +2014,10 @@ def test_cythonized_aggers(self): df = DataFrame(data) df.loc[2:10:2,'C'] = nan - def _testit(op): + def _testit(name): + + op = lambda x: getattr(x,name)() + # single column grouped = df.drop(['B'], axis=1).groupby('A') exp = {} @@ -2035,18 +2038,19 @@ def _testit(op): exp.name = 'C' result = op(grouped)['C'] - assert_series_equal(result, exp) - - _testit(lambda x: x.count()) - _testit(lambda x: x.sum()) - _testit(lambda x: x.std()) - _testit(lambda x: x.var()) - _testit(lambda x: x.sem()) - _testit(lambda x: x.mean()) - _testit(lambda x: x.median()) - _testit(lambda x: x.prod()) - _testit(lambda x: x.min()) - _testit(lambda x: x.max()) + if not tm._incompat_bottleneck_version(name): + assert_series_equal(result, exp) + + _testit('count') + _testit('sum') + _testit('std') + _testit('var') + _testit('sem') + _testit('mean') + _testit('median') + _testit('prod') + _testit('min') + _testit('max') def test_max_min_non_numeric(self): # #2700 diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index a903b76b3ac7f..fe56d5d1da6bd 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -9,12 +9,13 @@ import pandas.core.nanops as nanops import pandas.util.testing as tm -nanops._USE_BOTTLENECK = False - +use_bn = nanops._USE_BOTTLENECK class TestnanopsDataFrame(tm.TestCase): + def setUp(self): np.random.seed(11235) + nanops._USE_BOTTLENECK = False self.arr_shape = (11, 7, 5) @@ -116,6 +117,9 @@ def setUp(self): self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0] self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0] + def tearDown(self): + 
nanops._USE_BOTTLENECK = use_bn + def check_results(self, targ, res, axis): res = getattr(res, 'asm8', res) res = getattr(res, 'values', res) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 9cdc769dd7d74..64edf29915206 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -168,7 +168,8 @@ def wrapper(x): for i in range(obj.ndim): result = f(axis=i) - assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i)) + if not tm._incompat_bottleneck_version(name): + assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i)) self.assertRaises(Exception, f, axis=obj.ndim) diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 289f7f134aa27..3772d4b9c272b 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -144,7 +144,8 @@ def wrapper(x): for i in range(obj.ndim): result = f(axis=i) - assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i)) + if not tm._incompat_bottleneck_version(name): + assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i)) self.assertRaises(Exception, f, axis=obj.ndim) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index a195455c116fb..878bfdf3ac9fd 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -246,6 +246,38 @@ def _skip_if_python26(): import nose raise nose.SkipTest("skipping on python2.6") +def _incompat_bottleneck_version(method): + """ skip if we have bottleneck installed + and its >= 1.0 + as we don't match the nansum/nanprod behavior for all-nan + ops, see GH9422 + """ + if method not in ['sum','prod']: + return False + try: + import bottleneck as bn + return bn.__version__ >= LooseVersion('1.0') + except ImportError: + return False + +def skip_if_no_ne(engine='numexpr'): + import nose + _USE_NUMEXPR = pd.computation.expressions._USE_NUMEXPR + + if engine == 'numexpr': + try: + import numexpr as ne + except ImportError: + raise nose.SkipTest("numexpr not installed") + + if not _USE_NUMEXPR: + 
raise nose.SkipTest("numexpr disabled") + + if ne.__version__ < LooseVersion('2.0'): + raise nose.SkipTest("numexpr version too low: " + "%s" % ne.__version__) + + #------------------------------------------------------------------------------ # locale utilities @@ -1986,24 +2018,6 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always", % extra_warnings) -def skip_if_no_ne(engine='numexpr'): - import nose - _USE_NUMEXPR = pd.computation.expressions._USE_NUMEXPR - - if engine == 'numexpr': - try: - import numexpr as ne - except ImportError: - raise nose.SkipTest("numexpr not installed") - - if not _USE_NUMEXPR: - raise nose.SkipTest("numexpr disabled") - - if ne.__version__ < LooseVersion('2.0'): - raise nose.SkipTest("numexpr version too low: " - "%s" % ne.__version__) - - def disabled(t): t.disabled = True return t
xref #9422
https://api.github.com/repos/pandas-dev/pandas/pulls/10986
2015-09-04T03:26:51Z
2015-09-04T12:57:48Z
2015-09-04T12:57:48Z
2015-09-04T12:57:48Z
ENH: Added skipcols option for CParser
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9ad992c434984..103ab2e06b115 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -157,6 +157,8 @@ class ParserWarning(Warning): usecols : array-like Return a subset of the columns. Results in much faster parsing time and lower memory usage. +skipcols : array-like + Returns a subset of the columns excluding the columns specified in this parameter. mangle_dupe_cols : boolean, default True Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X' tupleize_cols : boolean, default False @@ -294,6 +296,7 @@ def _read(filepath_or_buffer, kwds): 'date_parser': None, 'usecols': None, + 'skipcols': None, # 'nrows': None, # 'iterator': False, @@ -364,6 +367,7 @@ def parser_f(filepath_or_buffer, converters=None, dtype=None, usecols=None, + skipcols=None, engine=None, delim_whitespace=False, @@ -453,6 +457,7 @@ def parser_f(filepath_or_buffer, converters=converters, dtype=dtype, usecols=usecols, + skipcols=skipcols, verbose=verbose, encoding=encoding, squeeze=squeeze, @@ -786,6 +791,9 @@ def __init__(self, kwds): if kwds.get('usecols'): raise ValueError("cannot specify usecols when " "specifying a multi-index header") + if kwds.get('skipcols'): + raise ValueError("cannot specify skipcols when " + "specifying a multi-index header") if kwds.get('names'): raise ValueError("cannot specify names when " "specifying a multi-index header") @@ -1077,6 +1085,7 @@ def __init__(self, src, **kwds): # XXX self.usecols = self._reader.usecols + self.skipcols = self._reader.skipcols passed_names = self.names is None @@ -1101,7 +1110,18 @@ def __init__(self, src, **kwds): else: self.names = lrange(self._reader.table_width) - # If the names were inferred (not passed by user) and usedcols is + # Convert skipcols into indices if skipcols is specified + if self.skipcols: + skip_indices = [] + for u in self.skipcols: + if isinstance(u, string_types): + skip_indices.append(self.names.index(u)) + else: + 
skip_indices.append(u) + else: + skip_indices = [] + + # If the names were inferred (not passed by user) and usecols is # defined, then ensure names refers to the used columns, not the # document's columns. if self.usecols and passed_names: @@ -1111,11 +1131,24 @@ def __init__(self, src, **kwds): col_indices.append(self.names.index(u)) else: col_indices.append(u) + if len(col_indices) < len(skip_indices): + raise ValueError("Usecols should be a larger set than Skipcols") + # Update col_indices with a difference between skipcols and usecols + col_indices = list(set(col_indices)^set(skip_indices)) self.names = [n for i, n in enumerate(self.names) if i in col_indices] if len(self.names) < len(self.usecols): raise ValueError("Usecols do not match names.") + # If only skipcols is specified, excluding column indices in skip_indices + if self.usecols is None and self.skipcols: + self.names = [n for i, n in enumerate(self.names) + if i not in skip_indices] + if len(self.names) < len(skipcols): + raise ValueError("Skipcols do not match names.") + # Update usecols by self.names (to ensure there won't be changes needed in other functions) + self.usecols = self.names + self._set_noconvert_columns() self.orig_names = self.names
See #10882 But I am having trouble testing this change. When I run `nosetests pandas/io/tests/test_parsers.py`, it says `Segmentation fault: 11` and this pops up `Python quit unexpectedly while using the parser.so plug-in.` Any ideas of how to deal with this?
https://api.github.com/repos/pandas-dev/pandas/pulls/10985
2015-09-04T03:01:12Z
2015-10-18T14:03:47Z
null
2015-10-18T14:03:47Z
TST/DOC #10846 Test and document use of SQLAlchemy expressions in read_sql()
diff --git a/doc/source/io.rst b/doc/source/io.rst index 5ad9af310225d..c05b2555dfeda 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3918,6 +3918,42 @@ connecting to. For more information see the examples the SQLAlchemy `documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html>`__ +Advanced SQLAlchemy queries +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use SQLAlchemy constructs to describe your query. + +Use :func:`sqlalchemy.text` to specify query parameters in a backend-neutral way + +.. ipython:: python + + import sqlalchemy as sa + pd.read_sql(sa.text('SELECT * FROM data where Col_1=:col1'), engine, params={'col1': 'X'}) + +If you have an SQLAlchemy description of your database you can express where conditions using SQLAlchemy expressions + +.. ipython:: python + + metadata = sa.MetaData() + data_table = sa.Table('data', metadata, + sa.Column('index', sa.Integer), + sa.Column('Date', sa.DateTime), + sa.Column('Col_1', sa.String), + sa.Column('Col_2', sa.Float), + sa.Column('Col_3', sa.Boolean), + ) + + pd.read_sql(sa.select([data_table]).where(data_table.c.Col_3 == True), engine) + +You can combine SQLAlchemy expressions with parameters passed to :func:`read_sql` using :func:`sqlalchemy.bindparam` + +.. ipython:: python + + import datetime as dt + expr = sa.select([data_table]).where(data_table.c.Date > sa.bindparam('date')) + pd.read_sql(expr, engine, params={'date': dt.datetime(2010, 10, 18)}) + + Sqlite fallback ''''''''''''''' diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 2ed0126505c41..34f28e2fbfacb 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -364,9 +364,9 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, Parameters ---------- - sql : string - SQL query to be executed - con : SQLAlchemy connectable(engine/connection) or database string URI + sql : string SQL query or SQLAlchemy Selectable (select or text object) + to be executed. 
+ con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. @@ -423,8 +423,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, Parameters ---------- - sql : string - SQL query to be executed or database table name. + sql : string SQL query or SQLAlchemy Selectable (select or text object) + to be executed, or database table name. con : SQLAlchemy connectable(engine/connection) or database string URI or DBAPI2 connection (fallback mode) Using SQLAlchemy makes it possible to use any DB supported by that diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index d61c5f0740a91..15e241dae895e 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -951,6 +951,35 @@ def test_to_sql_read_sql_with_database_uri(self): tm.assert_frame_equal(test_frame1, test_frame3) tm.assert_frame_equal(test_frame1, test_frame4) + def _make_iris_table_metadata(self): + sa = sqlalchemy + metadata = sa.MetaData() + iris = sa.Table('iris', metadata, + sa.Column('SepalLength', sa.REAL), + sa.Column('SepalWidth', sa.REAL), + sa.Column('PetalLength', sa.REAL), + sa.Column('PetalWidth', sa.REAL), + sa.Column('Name', sa.TEXT) + ) + + return iris + + def test_query_by_text_obj(self): + # WIP : GH10846 + name_text = sqlalchemy.text('select * from iris where name=:name') + iris_df = sql.read_sql(name_text, self.conn, params={'name': 'Iris-versicolor'}) + all_names = set(iris_df['Name']) + self.assertEqual(all_names, set(['Iris-versicolor'])) + + def test_query_by_select_obj(self): + # WIP : GH10846 + iris = self._make_iris_table_metadata() + + name_select = sqlalchemy.select([iris]).where(iris.c.Name == sqlalchemy.bindparam('name')) + iris_df = sql.read_sql(name_select, self.conn, params={'name': 'Iris-setosa'}) + all_names = set(iris_df['Name']) + self.assertEqual(all_names, set(['Iris-setosa'])) + class 
_EngineToConnMixin(object): """
This provides 2 tests using SQLALchemy expressions to read SQL data into a dataframe and accompanying documentation. It attempts to address #10846 by explaining how this feature is already supported by pandas. Closes #10846
https://api.github.com/repos/pandas-dev/pandas/pulls/10983
2015-09-03T16:18:35Z
2015-09-22T12:45:07Z
2015-09-22T12:45:07Z
2015-09-22T12:45:07Z
DOC: Examples for Series.apply docstring
diff --git a/pandas/core/series.py b/pandas/core/series.py index 2890730956c75..116ae9f31b5a4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2064,13 +2064,84 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): Positional arguments to pass to function in addition to the value Additional keyword arguments will be passed as keywords to the function + Returns + ------- + y : Series or DataFrame if func returns a Series + See also -------- Series.map: For element-wise operations - Returns - ------- - y : Series or DataFrame if func returns a Series + Examples + -------- + + Create a series with typical summer temperatures for each city. + + >>> import pandas as pd + >>> import numpy as np + >>> series = pd.Series([20, 21, 12], index=['London', + ... 'New York','Helsinki']) + London 20 + New York 21 + Helsinki 12 + dtype: int64 + + Square the values by defining a function and passing it as an + argument to ``apply()``. + + >>> def square(x): + ... return x**2 + >>> series.apply(square) + London 400 + New York 441 + Helsinki 144 + dtype: int64 + + Square the values by passing an anonymous function as an + argument to ``apply()``. + + >>> series.apply(lambda x: x**2) + London 400 + New York 441 + Helsinki 144 + dtype: int64 + + Define a custom function that needs additional positional + arguments and pass these additional arguments using the + ``args`` keyword. + + >>> def subtract_custom_value(x, custom_value): + ... return x-custom_value + + >>> series.apply(subtract_custom_value, args=(5,)) + London 15 + New York 16 + Helsinki 7 + dtype: int64 + + Define a custom function that takes keyword arguments + and pass these arguments to ``apply``. + + >>> def add_custom_values(x, **kwargs): + ... for month in kwargs: + ... x+=kwargs[month] + ... return x + + >>> series.apply(add_custom_values, june=30, july=20, august=25) + London 95 + New York 96 + Helsinki 87 + dtype: int64 + + Use a function from the Numpy library. 
+ + >>> series.apply(np.log) + London 2.995732 + New York 3.044522 + Helsinki 2.484907 + dtype: float64 + + """ if len(self) == 0: return self._constructor(dtype=self.dtype,
I thought it might be nice to have some examples showing how to pass custom functions and functions with additional keywords.
https://api.github.com/repos/pandas-dev/pandas/pulls/10977
2015-09-03T00:07:23Z
2015-09-04T22:18:42Z
2015-09-04T22:18:42Z
2015-09-04T22:18:51Z
CLN: removes BinGrouper kind of cython methods
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 534117b8e9249..caa5d83da6b87 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1793,17 +1793,25 @@ def indices(self): def group_info(self): ngroups = self.ngroups obs_group_ids = np.arange(ngroups) - comp_ids = np.repeat(np.arange(ngroups), np.diff(np.r_[0, self.bins])) + rep = np.diff(np.r_[0, self.bins]) + + if ngroups == len(self.bins): + comp_ids = np.repeat(np.arange(ngroups), rep) + else: + comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) + return comp_ids, obs_group_ids, ngroups @cache_readonly def ngroups(self): - return len(self.binlabels) + return len(self.result_index) @cache_readonly def result_index(self): - mask = self.binlabels.asi8 == tslib.iNaT - return self.binlabels[~mask] + if len(self.binlabels) != 0 and isnull(self.binlabels[0]): + return self.binlabels[1:] + + return self.binlabels @property def levels(self): @@ -1839,40 +1847,14 @@ def size(self): #---------------------------------------------------------------------- # cython aggregation - _cython_functions = { - 'add': 'group_add_bin', - 'prod': 'group_prod_bin', - 'mean': 'group_mean_bin', - 'min': 'group_min_bin', - 'max': 'group_max_bin', - 'var': 'group_var_bin', - 'ohlc': 'group_ohlc', - 'first': { - 'name': 'group_nth_bin', - 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) - }, - 'last': 'group_last_bin', - 'count': 'group_count_bin', - } + _cython_functions = {'ohlc': 'group_ohlc'} + _cython_functions.update(BaseGrouper._cython_functions) + _cython_functions.pop('median') _name_functions = { 'ohlc': lambda *args: ['open', 'high', 'low', 'close'] } - def _aggregate(self, result, counts, values, agg_func, is_numeric=True): - - if values.ndim > 3: - # punting for now - raise NotImplementedError("number of dimensions is currently " - "limited to 3") - elif values.ndim > 2: - for i, chunk in enumerate(values.transpose(2, 0, 1)): - agg_func(result[:, :, i], counts, chunk, self.bins) - else: - 
agg_func(result, counts, values, self.bins) - - return result - def agg_series(self, obj, func): dummy = obj[:0] grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy) diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index 29a991a9acfd3..c086919d94644 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -751,105 +751,6 @@ def group_last_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, out[i, j] = resx[i, j] """ -group_last_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_last_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(c_type)s, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = %(nan_val)s - else: - out[i, j] = resx[i, j] -""" - -group_nth_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_nth_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(c_type)s, ndim=2] values, - ndarray[int64_t] bins, int64_t rank): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bin) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = 
len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if nobs[b, j] == rank: - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = %(nan_val)s - else: - out[i, j] = resx[i, j] -""" - group_nth_template = """@cython.wraparound(False) @cython.boundscheck(False) def group_nth_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, @@ -961,69 +862,6 @@ def group_add_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, out[i, j] = sumx[i, j] """ -group_add_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_add_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b, nbins - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape - - with nogil: - - b = 0 - if K > 1: - - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] -""" - group_prod_template = """@cython.wraparound(False) @cython.boundscheck(False) def 
group_prod_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, @@ -1083,68 +921,6 @@ def group_prod_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, out[i, j] = prodx[i, j] """ -group_prod_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_prod_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] prodx, nobs - - nobs = np.zeros_like(out) - prodx = np.ones_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape - - with nogil: - - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - prodx[b, j] *= val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - prodx[b, 0] *= val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = prodx[i, j] -""" - group_var_template = """@cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) @@ -1195,72 +971,6 @@ def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, """ -group_var_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_var_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): - - cdef: - Py_ssize_t i, j, N, K, ngroups, b - %(dest_type2)s val, ct - ndarray[%(dest_type2)s, ndim=2] nobs, sumx, sumxx - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) - - if 
len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - sumxx[b, j] += val * val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - sumxx[b, 0] += val * val - - for i in range(ngroups): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = NAN - else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) -""" - group_count_template = """@cython.boundscheck(False) @cython.wraparound(False) def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, @@ -1299,115 +1009,12 @@ def group_count_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, %(tab)s out[i, j] = nobs[i, j] """ -group_count_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_count_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(c_type)s, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - %(c_type)s val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(bins) == 0: - return - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - %(nogil)s - %(tab)sfor i in range(N): - %(tab)s while b < ngroups - 1 and i >= bins[b]: - %(tab)s b += 1 - - %(tab)s counts[b] += 1 - %(tab)s for j in range(K): - %(tab)s val = values[i, j] - - %(tab)s # not nan - %(tab)s nobs[b, j] += val == val and val != iNaT - - %(tab)sfor i in range(ngroups): - %(tab)s for j in range(K): 
- %(tab)s out[i, j] = nobs[i, j] -""" - # add passing bin edges, instead of labels #---------------------------------------------------------------------- # group_min, group_max -group_min_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] minx, nobs - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(%(inf_val)s) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if val < minx[b, j]: - minx[b, j] = val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - if val < minx[b, 0]: - minx[b, 0] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = %(nan_val)s - else: - out[i, j] = minx[i, j] -""" - group_max_template = """@cython.wraparound(False) @cython.boundscheck(False) def group_max_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, @@ -1471,72 +1078,6 @@ def group_max_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, out[i, j] = maxx[i, j] """ -group_max_bin_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def group_max_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, 
ngroups, b - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] maxx, nobs - - nobs = np.zeros_like(out) - maxx = np.empty_like(out) - maxx.fill(-%(inf_val)s) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if val > maxx[b, j]: - maxx[b, j] = val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - if val > maxx[b, 0]: - maxx[b, 0] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = %(nan_val)s - else: - out[i, j] = maxx[i, j] -""" - - group_min_template = """@cython.wraparound(False) @cython.boundscheck(False) def group_min_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, @@ -1656,141 +1197,50 @@ def group_mean_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, out[i, j] = sumx[i, j] / count """ -group_mean_bin_template = """ -@cython.boundscheck(False) -def group_mean_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, - ndarray[int64_t] counts, - ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): - cdef: - Py_ssize_t i, j, N, K, ngroups, b - %(dest_type2)s val, count - ndarray[%(dest_type2)s, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object> values).shape - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - 
else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - count = nobs[i, j] - if count == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] / count -""" - group_ohlc_template = """@cython.wraparound(False) @cython.boundscheck(False) def group_ohlc_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, ndarray[int64_t] counts, ndarray[%(dest_type2)s, ndim=2] values, - ndarray[int64_t] bins): + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, lab %(dest_type2)s val, count - %(dest_type2)s vopen, vhigh, vlow, vclose - bint got_first = 0 + Py_ssize_t ngroups = len(counts) - if len(bins) == 0: + if len(labels) == 0: return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 N, K = (<object> values).shape if out.shape[1] != 4: raise ValueError('Output array must have 4 columns') - b = 0 if K > 1: raise NotImplementedError("Argument 'values' must have only " "one dimension") - else: + out.fill(np.nan) - with nogil: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - if not got_first: - out[b, 0] = NAN - out[b, 1] = NAN - out[b, 2] = NAN - out[b, 3] = NAN - else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose - b += 1 - got_first = 0 - - counts[b] += 1 - val = values[i, 0] + with nogil: + for i in range(N): + lab = labels[i] + if lab == -1: + continue - # not nan - if val == val: - if not got_first: - got_first = 1 - vopen = val - vlow = val - vhigh = val - else: - if val < vlow: - vlow = val - if val > vhigh: - vhigh = val - vclose = val - - if not got_first: - out[b, 0] = NAN - out[b, 1] = NAN - out[b, 2] = NAN - out[b, 3] = NAN + counts[lab] += 1 + val = values[i, 0] + if val != val: + continue + + if out[lab, 0] != out[lab, 0]: + 
out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose + out[lab, 1] = max(out[lab, 1], val) + out[lab, 2] = min(out[lab, 2], val) + out[lab, 3] = val """ arrmap_template = """@cython.wraparound(False) @@ -2534,26 +1984,18 @@ def generate_from_template(template, exclude=None): put_2d = [diff_2d_template] groupbys = [group_add_template, - group_add_bin_template, group_prod_template, - group_prod_bin_template, group_var_template, - group_var_bin_template, group_mean_template, - group_mean_bin_template, group_ohlc_template] groupby_selection = [group_last_template, - group_last_bin_template, - group_nth_template, - group_nth_bin_template] + group_nth_template] groupby_min_max = [group_min_template, - group_min_bin_template, - group_max_template, - group_max_bin_template] + group_max_template] -groupby_count = [group_count_template, group_count_bin_template] +groupby_count = [group_count_template] templates_1d = [map_indices_template, pad_template, diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index d4cf7824c8911..c0ecd04749e58 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -6865,131 +6865,6 @@ def group_add_float32(ndarray[float32_t, ndim=2] out, out[i, j] = sumx[i, j] -@cython.wraparound(False) -@cython.boundscheck(False) -def group_add_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b, nbins - float64_t val, count - ndarray[float64_t, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape - - with nogil: - - b = 0 - if K > 1: - - for i in range(N): - while b < ngroups - 1 and i >= 
bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_add_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b, nbins - float32_t val, count - ndarray[float32_t, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape - - with nogil: - - b = 0 - if K > 1: - - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] - - @cython.wraparound(False) @cython.boundscheck(False) def group_prod_float64(ndarray[float64_t, ndim=2] out, @@ -7107,129 +6982,6 @@ def group_prod_float32(ndarray[float32_t, ndim=2] out, out[i, j] = prodx[i, j] -@cython.wraparound(False) -@cython.boundscheck(False) -def group_prod_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - 
ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - ndarray[float64_t, ndim=2] prodx, nobs - - nobs = np.zeros_like(out) - prodx = np.ones_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape - - with nogil: - - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - prodx[b, j] *= val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - prodx[b, 0] *= val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = prodx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_prod_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] prodx, nobs - - nobs = np.zeros_like(out) - prodx = np.ones_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape - - with nogil: - - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - prodx[b, j] *= val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - prodx[b, 0] *= val - - for i in range(ngroups): - for j in 
range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = prodx[i, j] - - @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) @@ -7329,137 +7081,6 @@ def group_var_float32(ndarray[float32_t, ndim=2] out, -@cython.wraparound(False) -@cython.boundscheck(False) -def group_var_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, ct - ndarray[float64_t, ndim=2] nobs, sumx, sumxx - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - sumxx[b, j] += val * val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - sumxx[b, 0] += val * val - - for i in range(ngroups): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = NAN - else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_var_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, ct - ndarray[float32_t, ndim=2] nobs, sumx, sumxx - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups 
= len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - sumxx[b, j] += val * val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - sumxx[b, 0] += val * val - - for i in range(ngroups): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = NAN - else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) - - @cython.wraparound(False) @cython.boundscheck(False) def group_mean_float64(ndarray[float64_t, ndim=2] out, @@ -7569,276 +7190,95 @@ def group_mean_float32(ndarray[float32_t, ndim=2] out, out[i, j] = sumx[i, j] / count - -@cython.boundscheck(False) -def group_mean_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - ndarray[float64_t, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object> values).shape - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - count = nobs[i, j] - if count == 0: - out[i, j] = NAN - else: - 
out[i, j] = sumx[i, j] / count - - -@cython.boundscheck(False) -def group_mean_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object> values).shape - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - count = nobs[i, j] - if count == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] / count - - -@cython.wraparound(False) +@cython.wraparound(False) @cython.boundscheck(False) def group_ohlc_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, lab float64_t val, count - float64_t vopen, vhigh, vlow, vclose - bint got_first = 0 + Py_ssize_t ngroups = len(counts) - if len(bins) == 0: + if len(labels) == 0: return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 N, K = (<object> values).shape if out.shape[1] != 4: raise ValueError('Output array must have 4 columns') - b = 0 if K > 1: raise NotImplementedError("Argument 'values' must have only " "one dimension") - else: + out.fill(np.nan) - with nogil: - for i in 
range(N): - while b < ngroups - 1 and i >= bins[b]: - if not got_first: - out[b, 0] = NAN - out[b, 1] = NAN - out[b, 2] = NAN - out[b, 3] = NAN - else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose - b += 1 - got_first = 0 - - counts[b] += 1 - val = values[i, 0] + with nogil: + for i in range(N): + lab = labels[i] + if lab == -1: + continue - # not nan - if val == val: - if not got_first: - got_first = 1 - vopen = val - vlow = val - vhigh = val - else: - if val < vlow: - vlow = val - if val > vhigh: - vhigh = val - vclose = val - - if not got_first: - out[b, 0] = NAN - out[b, 1] = NAN - out[b, 2] = NAN - out[b, 3] = NAN + counts[lab] += 1 + val = values[i, 0] + if val != val: + continue + + if out[lab, 0] != out[lab, 0]: + out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose + out[lab, 1] = max(out[lab, 1], val) + out[lab, 2] = min(out[lab, 2], val) + out[lab, 3] = val @cython.wraparound(False) @cython.boundscheck(False) def group_ohlc_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, lab float32_t val, count - float32_t vopen, vhigh, vlow, vclose - bint got_first = 0 + Py_ssize_t ngroups = len(counts) - if len(bins) == 0: + if len(labels) == 0: return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 N, K = (<object> values).shape if out.shape[1] != 4: raise ValueError('Output array must have 4 columns') - b = 0 if K > 1: raise NotImplementedError("Argument 'values' must have only " "one dimension") - else: + out.fill(np.nan) - with nogil: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - if not got_first: - out[b, 0] = NAN - out[b, 1] = NAN - out[b, 2] = NAN - out[b, 3] = NAN 
- else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose - b += 1 - got_first = 0 - - counts[b] += 1 - val = values[i, 0] + with nogil: + for i in range(N): + lab = labels[i] + if lab == -1: + continue - # not nan - if val == val: - if not got_first: - got_first = 1 - vopen = val - vlow = val - vhigh = val - else: - if val < vlow: - vlow = val - if val > vhigh: - vhigh = val - vclose = val - - if not got_first: - out[b, 0] = NAN - out[b, 1] = NAN - out[b, 2] = NAN - out[b, 3] = NAN + counts[lab] += 1 + val = values[i, 0] + if val != val: + continue + + if out[lab, 0] != out[lab, 0]: + out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose + out[lab, 1] = max(out[lab, 1], val) + out[lab, 2] = min(out[lab, 2], val) + out[lab, 3] = val @cython.wraparound(False) @@ -7977,151 +7417,6 @@ def group_last_int64(ndarray[int64_t, ndim=2] out, out[i, j] = resx[i, j] -@cython.wraparound(False) -@cython.boundscheck(False) -def group_last_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - ndarray[float64_t, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = resx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def 
group_last_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = resx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_last_bin_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - int64_t val, count - ndarray[int64_t, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = iNaT - else: - out[i, j] = resx[i, j] - - @cython.wraparound(False) @cython.boundscheck(False) def group_nth_float64(ndarray[float64_t, ndim=2] out, @@ -8263,538 +7558,7 @@ def group_nth_int64(ndarray[int64_t, ndim=2] out, @cython.wraparound(False) 
@cython.boundscheck(False) -def group_nth_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins, int64_t rank): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - ndarray[float64_t, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bin) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if nobs[b, j] == rank: - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = resx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_nth_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins, int64_t rank): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bin) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if nobs[b, j] == rank: - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = resx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_nth_bin_int64(ndarray[int64_t, ndim=2] 
out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] bins, int64_t rank): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - int64_t val, count - ndarray[int64_t, ndim=2] resx, nobs - - nobs = np.zeros_like(out) - resx = np.empty_like(out) - - if len(bin) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if nobs[b, j] == rank: - resx[b, j] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = iNaT - else: - out[i, j] = resx[i, j] - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - float64_t val, count - ndarray[float64_t, ndim=2] minx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(np.inf) - - N, K = (<object> values).shape - - with nogil: - if K > 1: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val - else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[lab, 0] += 1 - if val < minx[lab, 0]: - minx[lab, 0] = val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = 
minx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - float32_t val, count - ndarray[float32_t, ndim=2] minx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(np.inf) - - N, K = (<object> values).shape - - with nogil: - if K > 1: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val - else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[lab, 0] += 1 - if val < minx[lab, 0]: - minx[lab, 0] = val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = minx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - int64_t val, count - ndarray[int64_t, ndim=2] minx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(9223372036854775807) - - N, K = (<object> values).shape - - with nogil: - if K > 1: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val - 
else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[lab, 0] += 1 - if val < minx[lab, 0]: - minx[lab, 0] = val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = iNaT - else: - out[i, j] = minx[i, j] - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - ndarray[float64_t, ndim=2] minx, nobs - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(np.inf) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if val < minx[b, j]: - minx[b, j] = val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - if val < minx[b, 0]: - minx[b, 0] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = minx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] minx, nobs - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(np.inf) - - if len(bins) == 0: - return - if 
bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if val < minx[b, j]: - minx[b, j] = val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - if val < minx[b, 0]: - minx[b, 0] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = minx[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_min_bin_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - int64_t val, count - ndarray[int64_t, ndim=2] minx, nobs - - nobs = np.zeros_like(out) - - minx = np.empty_like(out) - minx.fill(9223372036854775807) - - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - with nogil: - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - if val < minx[b, j]: - minx[b, j] = val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - if val < minx[b, 0]: - minx[b, 0] = val - - for i in range(ngroups): - for j in range(K): - if nobs[i, j] == 0: - out[i, j] = iNaT - else: - out[i, j] = minx[i, j] - - -@cython.wraparound(False) -@cython.boundscheck(False) -def 
group_max_float64(ndarray[float64_t, ndim=2] out, +def group_min_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, ndarray[int64_t] labels): @@ -8804,15 +7568,15 @@ def group_max_float64(ndarray[float64_t, ndim=2] out, cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) float64_t val, count - ndarray[float64_t, ndim=2] maxx, nobs + ndarray[float64_t, ndim=2] minx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - maxx = np.empty_like(out) - maxx.fill(-np.inf) + minx = np.empty_like(out) + minx.fill(np.inf) N, K = (<object> values).shape @@ -8830,8 +7594,8 @@ def group_max_float64(ndarray[float64_t, ndim=2] out, # not nan if val == val: nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val + if val < minx[lab, j]: + minx[lab, j] = val else: for i in range(N): lab = labels[i] @@ -8844,19 +7608,19 @@ def group_max_float64(ndarray[float64_t, ndim=2] out, # not nan if val == val: nobs[lab, 0] += 1 - if val > maxx[lab, 0]: - maxx[lab, 0] = val + if val < minx[lab, 0]: + minx[lab, 0] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: out[i, j] = NAN else: - out[i, j] = maxx[i, j] + out[i, j] = minx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def group_max_float32(ndarray[float32_t, ndim=2] out, +def group_min_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, ndarray[int64_t] labels): @@ -8866,15 +7630,15 @@ def group_max_float32(ndarray[float32_t, ndim=2] out, cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) float32_t val, count - ndarray[float32_t, ndim=2] maxx, nobs + ndarray[float32_t, ndim=2] minx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - maxx = np.empty_like(out) - maxx.fill(-np.inf) + minx = np.empty_like(out) + minx.fill(np.inf) N, K = (<object> values).shape @@ -8892,8 
+7656,8 @@ def group_max_float32(ndarray[float32_t, ndim=2] out, # not nan if val == val: nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val + if val < minx[lab, j]: + minx[lab, j] = val else: for i in range(N): lab = labels[i] @@ -8906,19 +7670,19 @@ def group_max_float32(ndarray[float32_t, ndim=2] out, # not nan if val == val: nobs[lab, 0] += 1 - if val > maxx[lab, 0]: - maxx[lab, 0] = val + if val < minx[lab, 0]: + minx[lab, 0] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: out[i, j] = NAN else: - out[i, j] = maxx[i, j] + out[i, j] = minx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def group_max_int64(ndarray[int64_t, ndim=2] out, +def group_min_int64(ndarray[int64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[int64_t, ndim=2] values, ndarray[int64_t] labels): @@ -8928,15 +7692,15 @@ def group_max_int64(ndarray[int64_t, ndim=2] out, cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) int64_t val, count - ndarray[int64_t, ndim=2] maxx, nobs + ndarray[int64_t, ndim=2] minx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - maxx = np.empty_like(out) - maxx.fill(-9223372036854775807) + minx = np.empty_like(out) + minx.fill(9223372036854775807) N, K = (<object> values).shape @@ -8954,8 +7718,8 @@ def group_max_int64(ndarray[int64_t, ndim=2] out, # not nan if val == val: nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val + if val < minx[lab, j]: + minx[lab, j] = val else: for i in range(N): lab = labels[i] @@ -8968,75 +7732,73 @@ def group_max_int64(ndarray[int64_t, ndim=2] out, # not nan if val == val: nobs[lab, 0] += 1 - if val > maxx[lab, 0]: - maxx[lab, 0] = val + if val < minx[lab, 0]: + minx[lab, 0] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: out[i, j] = iNaT else: - out[i, j] = maxx[i, j] + out[i, j] = minx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def 
group_max_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): +def group_max_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) float64_t val, count ndarray[float64_t, ndim=2] maxx, nobs + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + nobs = np.zeros_like(out) + maxx = np.empty_like(out) maxx.fill(-np.inf) - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape with nogil: - b = 0 if K > 1: for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + lab = labels[i] + if lab < 0: + continue - counts[b] += 1 + counts[lab] += 1 for j in range(K): val = values[i, j] # not nan if val == val: - nobs[b, j] += 1 - if val > maxx[b, j]: - maxx[b, j] = val + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val else: for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + lab = labels[i] + if lab < 0: + continue - counts[b] += 1 + counts[lab] += 1 val = values[i, 0] # not nan if val == val: - nobs[b, 0] += 1 - if val > maxx[b, 0]: - maxx[b, 0] = val + nobs[lab, 0] += 1 + if val > maxx[lab, 0]: + maxx[lab, 0] = val - for i in range(ngroups): + for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: out[i, j] = NAN @@ -9045,62 +7807,60 @@ def group_max_bin_float64(ndarray[float64_t, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_max_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): +def group_max_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] 
values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) float32_t val, count ndarray[float32_t, ndim=2] maxx, nobs + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + nobs = np.zeros_like(out) + maxx = np.empty_like(out) maxx.fill(-np.inf) - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape with nogil: - b = 0 if K > 1: for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + lab = labels[i] + if lab < 0: + continue - counts[b] += 1 + counts[lab] += 1 for j in range(K): val = values[i, j] # not nan if val == val: - nobs[b, j] += 1 - if val > maxx[b, j]: - maxx[b, j] = val + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val else: for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + lab = labels[i] + if lab < 0: + continue - counts[b] += 1 + counts[lab] += 1 val = values[i, 0] # not nan if val == val: - nobs[b, 0] += 1 - if val > maxx[b, 0]: - maxx[b, 0] = val + nobs[lab, 0] += 1 + if val > maxx[lab, 0]: + maxx[lab, 0] = val - for i in range(ngroups): + for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: out[i, j] = NAN @@ -9109,62 +7869,60 @@ def group_max_bin_float32(ndarray[float32_t, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_max_bin_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] bins): +def group_max_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) int64_t val, count ndarray[int64_t, ndim=2] maxx, nobs + if not len(values) == len(labels): + raise 
AssertionError("len(index) != len(labels)") + nobs = np.zeros_like(out) + maxx = np.empty_like(out) maxx.fill(-9223372036854775807) - if len(bins) == 0: - return - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - N, K = (<object> values).shape with nogil: - b = 0 if K > 1: for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + lab = labels[i] + if lab < 0: + continue - counts[b] += 1 + counts[lab] += 1 for j in range(K): val = values[i, j] # not nan if val == val: - nobs[b, j] += 1 - if val > maxx[b, j]: - maxx[b, j] = val + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val else: for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + lab = labels[i] + if lab < 0: + continue - counts[b] += 1 + counts[lab] += 1 val = values[i, 0] # not nan if val == val: - nobs[b, 0] += 1 - if val > maxx[b, 0]: - maxx[b, 0] = val + nobs[lab, 0] += 1 + if val > maxx[lab, 0]: + maxx[lab, 0] = val - for i in range(ngroups): + for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: out[i, j] = iNaT @@ -9358,187 +8116,6 @@ def group_count_int64(ndarray[int64_t, ndim=2] out, out[i, j] = nobs[i, j] -@cython.wraparound(False) -@cython.boundscheck(False) -def group_count_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - float64_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(bins) == 0: - return - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - with nogil: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = 
nobs[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_count_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - float32_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(bins) == 0: - return - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - with nogil: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_count_bin_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - int64_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(bins) == 0: - return - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - with nogil: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_count_bin_object(ndarray[object, ndim=2] out, - ndarray[int64_t] counts, - ndarray[object, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - object val - 
ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(bins) == 0: - return - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_count_bin_int64(ndarray[int64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[int64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, ngroups - Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 - int64_t val - ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), - dtype=np.int64) - - if len(bins) == 0: - return - ngroups = len(bins) + (bins[len(bins) - 1] != N) - - with nogil: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - nobs[b, j] += val == val and val != iNaT - - for i in range(ngroups): - for j in range(K): - out[i, j] = nobs[i, j] - - @cython.wraparound(False) @cython.boundscheck(False) def left_join_indexer_unique_float64(ndarray[float64_t] left, diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index 6dd43539eeabf..566fd54f3b024 100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -474,78 +474,19 @@ def test_generate_bins(self): self.assertRaises(ValueError, generate_bins_generic, values, [-3, -1], 'right') - def test_group_bin_functions(self): - - dtypes = ['float32','float64'] - funcs = ['add', 'mean', 'prod', 'min', 'max', 'var'] - - np_funcs = { - 'add': np.sum, - 'mean': np.mean, - 'prod': np.prod, - 'min': np.min, - 'max': np.max, - 'var': lambda x: x.var(ddof=1) if len(x) >= 2 else np.nan - } - - for fname in 
funcs: - for d in dtypes: - check_less_precise = False - if d == 'float32': - check_less_precise = True - args = [getattr(algos, 'group_%s_%s' % (fname,d)), - getattr(algos, 'group_%s_bin_%s' % (fname,d)), - np_funcs[fname], - d, - check_less_precise] - self._check_versions(*args) - - def _check_versions(self, irr_func, bin_func, np_func, dtype, check_less_precise): - obj = self.obj.astype(dtype) - - cts = np.zeros(3, dtype=np.int64) - exp = np.zeros((3, 1), dtype) - irr_func(exp, cts, obj, self.labels) - - # bin-based version - bins = np.array([3, 6], dtype=np.int64) - out = np.zeros((3, 1), dtype) - counts = np.zeros(len(out), dtype=np.int64) - bin_func(out, counts, obj, bins) - - assert_almost_equal(out, exp, check_less_precise=check_less_precise) - - bins = np.array([3, 9, 10], dtype=np.int64) - out = np.zeros((3, 1), dtype) - counts = np.zeros(len(out), dtype=np.int64) - bin_func(out, counts, obj, bins) - exp = np.array([np_func(obj[:3]), np_func(obj[3:9]), - np_func(obj[9:])], - dtype=dtype) - assert_almost_equal(out.squeeze(), exp, check_less_precise=check_less_precise) - - # duplicate bins - bins = np.array([3, 6, 10, 10], dtype=np.int64) - out = np.zeros((4, 1), dtype) - counts = np.zeros(len(out), dtype=np.int64) - bin_func(out, counts, obj, bins) - exp = np.array([np_func(obj[:3]), np_func(obj[3:6]), - np_func(obj[6:10]), np.nan], - dtype=dtype) - assert_almost_equal(out.squeeze(), exp, check_less_precise=check_less_precise) - def test_group_ohlc(): def _check(dtype): obj = np.array(np.random.randn(20),dtype=dtype) - bins = np.array([6, 12], dtype=np.int64) + bins = np.array([6, 12, 20], dtype=np.int64) out = np.zeros((3, 4), dtype) counts = np.zeros(len(out), dtype=np.int64) + labels = np.repeat(np.arange(3), np.diff(np.r_[0, bins])) func = getattr(algos,'group_ohlc_%s' % dtype) - func(out, counts, obj[:, None], bins) + func(out, counts, obj[:, None], labels) def _ohlc(group): if isnull(group).all(): @@ -559,7 +500,7 @@ def _ohlc(group): 
assert_almost_equal(counts, [6, 6, 8]) obj[:6] = nan - func(out, counts, obj[:, None], bins) + func(out, counts, obj[:, None], labels) expected[0] = nan assert_almost_equal(out, expected) diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 0bee6f514cad0..49d344631e4b9 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -1596,7 +1596,7 @@ def test_aggregate_with_nat(self): normal_grouped = normal_df.groupby('key') dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - for func in ['min', 'max', 'prod']: + for func in ['min', 'max', 'sum', 'prod']: normal_result = getattr(normal_grouped, func)() dt_result = getattr(dt_grouped, func)() pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], @@ -1606,7 +1606,7 @@ def test_aggregate_with_nat(self): expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') assert_frame_equal(expected, dt_result) - for func in ['count', 'sum']: + for func in ['count']: normal_result = getattr(normal_grouped, func)() pad = DataFrame([[0, 0, 0, 0]], index=[3], columns=['A', 'B', 'C', 'D']) expected = normal_result.append(pad)
https://api.github.com/repos/pandas-dev/pandas/pulls/10976
2015-09-02T23:55:08Z
2015-09-03T13:02:09Z
2015-09-03T13:02:09Z
2015-09-04T18:40:03Z
BUG: DataFrame constructor should not promote complex64 dtypes (GH10952)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 31b6bb0d5575d..270d365e19570 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -866,3 +866,4 @@ Bug Fixes - Bug in plotting functions may raise ``IndexError`` when plotted on ``GridSpec`` (:issue:`10819`) - Bug in plot result may show unnecessary minor ticklabels (:issue:`10657`) - Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`) +- Bug when constructing ``DataFrame`` with an array of complex64 dtype that meant the corresponding column was automatically promoted to the complex128 dtype (:issue:`10952`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f110e28e80d9f..5366c5a6b8f80 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -3657,8 +3657,7 @@ def form_blocks(arrays, names, axes): blocks.extend(float_blocks) if len(complex_items): - complex_blocks = _simple_blockify( - complex_items, np.complex128) + complex_blocks = _multi_blockify(complex_items) blocks.extend(complex_blocks) if len(int_items): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 9bdb7f08fe7cf..659a5925be6f1 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2668,6 +2668,15 @@ def _check_mixed_dtypes(df, dtypes = None): df = _make_mixed_dtypes_df('int') _check_mixed_dtypes(df) + def test_constructor_complex_dtypes(self): + # GH10952 + a = np.random.rand(10).astype(np.complex64) + b = np.random.rand(10).astype(np.complex128) + + df = DataFrame({'a': a, 'b': b}) + self.assertEqual(a.dtype, df.a.dtype) + self.assertEqual(b.dtype, df.b.dtype) + def test_constructor_rec(self): rec = self.frame.to_records(index=False)
Addresses [GH10952](https://github.com/pydata/pandas/issues/10952). When passing an array with complex64 dtype to the DataFrame constructor, that array will no longer be automatically promoted to the complex128 dtype (different complex dtypes are now allowed).
https://api.github.com/repos/pandas-dev/pandas/pulls/10975
2015-09-02T22:47:14Z
2015-09-03T13:25:23Z
null
2015-09-03T18:16:19Z
BUG: repr of Periods in a Series is broken
diff --git a/pandas/core/format.py b/pandas/core/format.py index d463c02dd41a2..818391d6eec23 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -2106,7 +2106,7 @@ def _format_strings(self): class PeriodArrayFormatter(IntArrayFormatter): def _format_strings(self): - values = np.array(self.values.to_native_types(), dtype=object) + values = PeriodIndex(self.values).to_native_types() formatter = self.formatter or (lambda x: '%s' % x) fmt_values = [formatter(x) for x in values] return fmt_values diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py index 5741e9cf9c093..37fa72db77cb9 100644 --- a/pandas/tseries/tests/test_base.py +++ b/pandas/tseries/tests/test_base.py @@ -139,6 +139,51 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_representation_to_series(self): + idx1 = DatetimeIndex([], freq='D') + idx2 = DatetimeIndex(['2011-01-01'], freq='D') + idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], + freq='H', tz='Asia/Tokyo') + idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], + tz='US/Eastern') + idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15']) + + exp1 = """Series([], dtype: datetime64[ns])""" + + exp2 = """0 2011-01-01 +dtype: datetime64[ns]""" + + exp3 = """0 2011-01-01 +1 2011-01-02 +dtype: datetime64[ns]""" + + exp4 = """0 2011-01-01 +1 2011-01-02 +2 2011-01-03 +dtype: datetime64[ns]""" + + exp5 = """0 2011-01-01 09:00:00+09:00 +1 2011-01-01 10:00:00+09:00 +2 2011-01-01 11:00:00+09:00 +dtype: object""" + + exp6 = """0 2011-01-01 09:00:00-05:00 +1 2011-01-01 10:00:00-05:00 +2 NaN +dtype: object""" + + exp7 = """0 2011-01-01 09:00:00 +1 2011-01-02 10:15:00 +dtype: datetime64[ns]""" + + with pd.option_context('display.width', 300): + for idx, expected in 
zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7], + [exp1, exp2, exp3, exp4, exp5, exp6, exp7]): + result = repr(Series(idx)) + self.assertEqual(result, expected) + def test_summary(self): # GH9116 idx1 = DatetimeIndex([], freq='D') @@ -536,6 +581,38 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_representation_to_series(self): + idx1 = TimedeltaIndex([], freq='D') + idx2 = TimedeltaIndex(['1 days'], freq='D') + idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') + idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') + idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) + + exp1 = """Series([], dtype: timedelta64[ns])""" + + exp2 = """0 1 days +dtype: timedelta64[ns]""" + + exp3 = """0 1 days +1 2 days +dtype: timedelta64[ns]""" + + exp4 = """0 1 days +1 2 days +2 3 days +dtype: timedelta64[ns]""" + + exp5 = """0 1 days 00:00:01 +1 2 days 00:00:00 +2 3 days 00:00:00 +dtype: timedelta64[ns]""" + + with pd.option_context('display.width',300): + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], + [exp1, exp2, exp3, exp4, exp5]): + result = repr(pd.Series(idx)) + self.assertEqual(result, expected) + def test_summary(self): # GH9116 idx1 = TimedeltaIndex([], freq='D') @@ -1145,6 +1222,60 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_representation_to_series(self): + # GH 10971 + idx1 = PeriodIndex([], freq='D') + idx2 = PeriodIndex(['2011-01-01'], freq='D') + idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') + idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') + + idx7 = pd.period_range('2013Q1', periods=1, freq="Q") + idx8 = pd.period_range('2013Q1', periods=2, freq="Q") + idx9 = pd.period_range('2013Q1', periods=3, freq="Q") + + exp1 = """Series([], 
dtype: object)""" + + exp2 = """0 2011-01-01 +dtype: object""" + + exp3 = """0 2011-01-01 +1 2011-01-02 +dtype: object""" + + exp4 = """0 2011-01-01 +1 2011-01-02 +2 2011-01-03 +dtype: object""" + + exp5 = """0 2011 +1 2012 +2 2013 +dtype: object""" + + exp6 = """0 2011-01-01 09:00 +1 2012-02-01 10:00 +2 NaT +dtype: object""" + + exp7 = """0 2013Q1 +dtype: object""" + + exp8 = """0 2013Q1 +1 2013Q2 +dtype: object""" + + exp9 = """0 2013Q1 +1 2013Q2 +2 2013Q3 +dtype: object""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], + [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): + result = repr(pd.Series(idx)) + self.assertEqual(result, expected) + def test_summary(self): # GH9116 idx1 = PeriodIndex([], freq='D')
Closes #10971. It is caused by #10718.
https://api.github.com/repos/pandas-dev/pandas/pulls/10974
2015-09-02T21:57:36Z
2015-09-03T15:18:09Z
2015-09-03T15:18:09Z
2015-09-03T20:39:24Z
COMPAT: remove SettingWithCopy warning, and use copy-on-write, #10954
diff --git a/pandas/core/common.py b/pandas/core/common.py index 77536fb391f93..492025426f59a 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -26,15 +26,12 @@ class PandasError(Exception): pass - -class SettingWithCopyError(ValueError): +class SettingImmutableError(ValueError): pass - -class SettingWithCopyWarning(Warning): +class SettingWithCopyError(ValueError): pass - class AmbiguousIndexError(PandasError, KeyError): pass diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 03eaa45582bef..889c41f64055a 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -321,13 +321,12 @@ def use_inf_as_null_cb(key): # user warnings chained_assignment = """ : string - Raise an exception, warn, or no action if trying to use chained assignment, - The default is warn + this option has been deprecated and has no effect """ -with cf.config_prefix('mode'): - cf.register_option('chained_assignment', 'warn', chained_assignment, - validator=is_one_of_factory([None, 'warn', 'raise'])) +cf.register_option('mode.chained_assignment', 'warn', chained_assignment, + validator=is_one_of_factory([None, 'warn', 'raise'])) +cf.deprecate_option('mode.chained_assignment', chained_assignment) # Set up the io.excel specific configuration. 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index acf5e69bf05e3..8ea0321feae1b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1825,7 +1825,7 @@ def _ixs(self, i, axis=0): copy = isinstance(new_values,np.ndarray) and new_values.base is None result = Series(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) - result._set_is_copy(self, copy=copy) + result._set_parent(self, copy=copy) return result # icol @@ -1957,7 +1957,7 @@ def _getitem_multilevel(self, key): if isinstance(result, Series): result = self._constructor_sliced(result, index=self.index, name=key) - result._set_is_copy(self) + result._set_parent(self) return result else: return self._get_item_cache(key) @@ -2229,7 +2229,7 @@ def __setitem__(self, key, value): self._set_item(key, value) def _setitem_slice(self, key, value): - self._check_setitem_copy() + self._check_copy_on_write() self.ix._setitem_with_indexer(key, value) def _setitem_array(self, key, value): @@ -2240,7 +2240,7 @@ def _setitem_array(self, key, value): (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] - self._check_setitem_copy() + self._check_copy_on_write() self.ix._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): @@ -2250,7 +2250,7 @@ def _setitem_array(self, key, value): self[k1] = value[k2] else: indexer = self.ix._convert_to_indexer(key, axis=1) - self._check_setitem_copy() + self._check_copy_on_write() self.ix._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): @@ -2260,7 +2260,7 @@ def _setitem_frame(self, key, value): raise TypeError('Must pass DataFrame with boolean values only') self._check_inplace_setting(value) - self._check_setitem_copy() + self._check_copy_on_write() self.where(-key, value, inplace=True) def _ensure_valid_index(self, value): @@ -2293,14 +2293,20 @@ def _set_item(self, key, value): """ self._ensure_valid_index(value) - value = 
self._sanitize_column(key, value) - NDFrame._set_item(self, key, value) - - # check if we are modifying a copy - # try to set first as we want an invalid - # value exeption to occur first - if len(self): - self._check_setitem_copy() + svalue = self._sanitize_column(key, value) + try: + NDFrame._set_item(self, key, svalue) + except com.SettingWithCopyError: + + # if we have a multi-index (which potentially has dropped levels) + # need to raise + for p in self._parent: + if isinstance(getattr(p(),'columns',None), MultiIndex): + raise + + # we have a chained assignment + # assign back to the original + self._parent[0]().loc[self.index,key] = value def insert(self, loc, column, value, allow_duplicates=False): """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9c170286006f2..37d07e5d6611b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3,6 +3,7 @@ import operator import weakref import gc +import inspect import numpy as np import pandas.lib as lib @@ -21,8 +22,7 @@ from pandas.core.common import (isnull, notnull, is_list_like, _values_from_object, _maybe_promote, _maybe_box_datetimelike, ABCSeries, - SettingWithCopyError, SettingWithCopyWarning, - AbstractMethodError) + AbstractMethodError, SettingWithCopyError) import pandas.core.nanops as nanops from pandas.util.decorators import Appender, Substitution, deprecate_kwarg from pandas.core import config @@ -78,14 +78,15 @@ class NDFrame(PandasObject): axes : list copy : boolean, default False """ - _internal_names = ['_data', '_cacher', '_item_cache', '_cache', - 'is_copy', '_subtyp', '_index', + _internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_parent', + '_subtyp', '_index', '_parent_copy_on_write', '_default_kind', '_default_fill_value', '_metadata', '__array_struct__', '__array_interface__'] _internal_names_set = set(_internal_names) _accessors = frozenset([]) _metadata = [] - is_copy = None + _parent_copy_on_write = True + _parent = [] def __init__(self, data, 
axes=None, copy=False, dtype=None, fastpath=False): @@ -100,10 +101,22 @@ def __init__(self, data, axes=None, copy=False, dtype=None, for i, ax in enumerate(axes): data = data.reindex_axis(ax, axis=i) - object.__setattr__(self, 'is_copy', None) + object.__setattr__(self, '_parent', []) object.__setattr__(self, '_data', data) object.__setattr__(self, '_item_cache', {}) + def _get_is_copy(self): + warnings.warn("is_copy is deprecated will be removed in a future release", + FutureWarning) + return None + + def _set_is_copy(self, v): + warnings.warn("is_copy is deprecated will be removed in a future release", + FutureWarning) + pass + + is_copy = property(fget=_get_is_copy, fset=_set_is_copy) + def _validate_dtype(self, dtype): """ validate the passed dtype """ @@ -1091,7 +1104,7 @@ def _get_item_cache(self, item): res._set_as_cached(item, self) # for a chain - res.is_copy = self.is_copy + res._set_parent(self) return res def _set_as_cached(self, item, cacher): @@ -1143,7 +1156,7 @@ def _is_view(self): """ boolean : return if I am a view of another array """ return self._data.is_view - def _maybe_update_cacher(self, clear=False, verify_is_copy=True): + def _maybe_update_cacher(self, clear=False, verify_parent=True): """ see if we need to update our parent cacher @@ -1153,8 +1166,8 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True): ---------- clear : boolean, default False clear the item cache - verify_is_copy : boolean, default True - provide is_copy checks + verify_parent : boolean, default True + provide parent checks """ @@ -1172,8 +1185,8 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True): except: pass - if verify_is_copy: - self._check_setitem_copy(stacklevel=5, t='referant') + if verify_parent: + self._check_copy_on_write() if clear: self._clear_item_cache() @@ -1192,125 +1205,94 @@ def _slice(self, slobj, axis=0, kind=None): """ axis = self._get_block_manager_axis(axis) - result = self._constructor(self._data.get_slice(slobj, 
axis=axis)) - result = result.__finalize__(self) - - # this could be a view - # but only in a single-dtyped view slicable case - is_copy = axis!=0 or result._is_view - result._set_is_copy(self, copy=is_copy) - return result + data = self._data.get_slice(slobj, axis=axis) + return self._constructor(data)._set_parent(self).__finalize__(self) def _set_item(self, key, value): + + self._check_copy_on_write() self._data.set(key, value) self._clear_item_cache() - def _set_is_copy(self, ref=None, copy=True): - if not copy: - self.is_copy = None - else: - if ref is not None: - self.is_copy = weakref.ref(ref) - else: - self.is_copy = None - - def _check_is_chained_assignment_possible(self): - """ - check if we are a view, have a cacher, and are of mixed type - if so, then force a setitem_copy check - - should be called just near setting a value + def _set_parent(self, ref=None, copy=True): + if ref is not None: + self._parent.extend(ref._parent) + self._parent.append(weakref.ref(ref)) + return self - will return a boolean if it we are a view and are cached, but a single-dtype - meaning that the cacher should be updated following setting - """ - if self._is_view and self._is_cached: - ref = self._get_cacher() - if ref is not None and ref._is_mixed_type: - self._check_setitem_copy(stacklevel=4, t='referant', force=True) - return True - elif self.is_copy: - self._check_setitem_copy(stacklevel=4, t='referant') - return False + def _check_copy_on_write(self): - def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): - """ + # we could have a copy-on-write scenario + if self._parent and self._parent_copy_on_write: - Parameters - ---------- - stacklevel : integer, default 4 - the level to show of the stack when the error is output - t : string, the type of setting error - force : boolean, default False - if True, then force showing an error + # we have an exception + if isinstance(self._parent, Exception): + raise self._parent - validate if we are doing a settitem on 
a chained copy. + def get_names_for_obj(__really_unused_name__342424__): + """Returns all named references for self""" - If you call this function, be sure to set the stacklevel such that the - user will see the error *at the level of setting* + removals = set(["__really_unused_name__342424__", "__really_unused_name__xxxxx__", "self"]) + refs = gc.get_referrers(__really_unused_name__342424__) - It is technically possible to figure out that we are setting on - a copy even WITH a multi-dtyped pandas object. In other words, some blocks - may be views while other are not. Currently _is_view will ALWAYS return False - for multi-blocks to avoid having to handle this case. + names = [] + for ref in refs: + if inspect.isframe(ref): + for name, __really_unused_name__xxxxx__ in compat.iteritems(ref.f_locals): + if __really_unused_name__xxxxx__ is __really_unused_name__342424__: + names.append(name) + elif isinstance(ref, dict): + for name, __really_unused_name__xxxxx__ in compat.iteritems(ref): + if __really_unused_name__xxxxx__ is __really_unused_name__342424__: + names.append(name) - df = DataFrame(np.arange(0,9), columns=['count']) - df['group'] = 'b' + for name, __really_unused_name__xxxxx__ in compat.iteritems(globals()): + if __really_unused_name__xxxxx__ is __really_unused_name__342424__: + names.append(name) - # this technically need not raise SettingWithCopy if both are view (which is not - # generally guaranteed but is usually True - # however, this is in general not a good practice and we recommend using .loc - df.iloc[0:5]['group'] = 'a' + return set(names) - removals - """ + # collect garbage + # if we don't have references, then we have a reassignment case + # e.g. 
df = df.ix[....]; since the reference is gone + # we can just copy and be done - if force or self.is_copy: + # otherwise we have chained indexing, raise and error + def error(): + raise SettingWithCopyError("chained indexing detected, you can fix this ......") - value = config.get_option('mode.chained_assignment') - if value is None: - return + gc.collect(2) + if len(self._parent) > 1: + error() - # see if the copy is not actually refererd; if so, then disolve - # the copy weakref - try: - gc.collect(2) - if not gc.get_referents(self.is_copy()): - self.is_copy = None - return - except: - pass + p = self._parent[0]() + if p is not None: + names = get_names_for_obj(self) + if not len(names): + error() - # we might be a false positive - try: - if self.is_copy().shape == self.shape: - self.is_copy = None - return - except: - pass + # provide copy-on-write + self._data = self._data.copy() + self._parent = [] - # a custom message - if isinstance(self.is_copy, string_types): - t = self.is_copy + def _check_is_chained_assignment_possible(self): + """ + check if we are a view, have a cacher, and are of mixed type + if so, then force a copy_on_write check - elif t == 'referant': - t = ("\n" - "A value is trying to be set on a copy of a slice from a " - "DataFrame\n\n" - "See the caveats in the documentation: " - "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") + should be called just near setting a value - else: - t = ("\n" - "A value is trying to be set on a copy of a slice from a " - "DataFrame.\n" - "Try using .loc[row_indexer,col_indexer] = value instead\n\n" - "See the caveats in the documentation: " - "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") - - if value == 'raise': - raise SettingWithCopyError(t) - elif value == 'warn': - warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel) + will return a boolean if it we are a view and are cached, but a single-dtype + meaning that the 
cacher should be updated following setting + """ + if self._is_cached: + ref = self._get_cacher() + if ref is not None: + self._check_copy_on_write() + return True + elif self._parent: + self._check_copy_on_write() + return False def __delitem__(self, key): """ @@ -1371,7 +1353,7 @@ def take(self, indices, axis=0, convert=True, is_copy=True): # maybe set copy if we didn't actually change the index if is_copy: if not result._get_axis(axis).equals(self._get_axis(axis)): - result._set_is_copy(self) + result._set_parent(self) return result @@ -1514,9 +1496,7 @@ def xs(self, key, axis=0, level=None, copy=None, drop_level=True): result = self.iloc[loc] result.index = new_index - # this could be a view - # but only in a single-dtyped view slicable case - result._set_is_copy(self, copy=not result._is_view) + result._set_parent(self) return result _xs = xs @@ -1639,14 +1619,14 @@ def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'): else: return result - def _update_inplace(self, result, verify_is_copy=True): + def _update_inplace(self, result, verify_parent=True): """ replace self internals with result. Parameters ---------- - verify_is_copy : boolean, default True - provide is_copy checks + verify_parent : boolean, default True + provide parent checks """ # NOTE: This does *not* call __finalize__ and that's an explicit @@ -1655,7 +1635,7 @@ def _update_inplace(self, result, verify_is_copy=True): self._reset_cache() self._clear_item_cache() self._data = getattr(result,'_data',result) - self._maybe_update_cacher(verify_is_copy=verify_is_copy) + self._maybe_update_cacher(verify_parent=verify_parent) def add_prefix(self, prefix): """ @@ -3376,11 +3356,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None, For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. 
Defaults to 0 - + Examples -------- - + Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') @@ -3409,11 +3389,11 @@ def resample(self, rule, how=None, axis=0, fill_method=None, Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, - which it labels. For example, in the original series the + which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed - value in the resampled bucket with the label``2000-01-01 00:03:00`` + value in the resampled bucket with the label``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). - To include this value close the right side of the bin interval as + To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', how='sum', label='right') @@ -3424,7 +3404,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None, Downsample the series into 3 minute bins as above, but close the right side of the bin interval. - + >>> series.resample('3T', how='sum', label='right', closed='right') 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 @@ -3453,7 +3433,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None, 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 - Upsample the series into 30 second bins and fill the + Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S', fill_method='bfill')[0:5] @@ -3468,7 +3448,7 @@ def resample(self, rule, how=None, axis=0, fill_method=None, >>> def custom_resampler(array_like): ... 
return np.sum(array_like)+5 - + >>> series.resample('3T', how=custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 534117b8e9249..61c2c9b4e1474 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -688,13 +688,14 @@ def apply(self, func, *args, **kwargs): def f(g): return func(g, *args, **kwargs) - # ignore SettingWithCopy here in case the user mutates - with option_context('mode.chained_assignment',None): - return self._python_apply_general(f) + return self._python_apply_general(f) + def _python_apply_general(self, f): + keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis) + self._selected_obj._parent_copy_on_write = True return self._wrap_applied_output(keys, values, not_indexed_same=mutated) @@ -3591,6 +3592,15 @@ def sort_idx(self): # Counting sort indexer return _get_group_index_sorter(self.labels, self.ngroups) + + def _set_cow(self, data): + # we may mutate, so don't allow cow + try: + data._parent_copy_on_write=False + except AttributeError: + pass + return data + def __iter__(self): sdata = self._get_sorted_data() @@ -3612,7 +3622,7 @@ def _get_sorted_data(self): return self.data.take(self.sort_idx, axis=self.axis, convert=False) def _chop(self, sdata, slice_obj): - return sdata.iloc[slice_obj] + return self._set_cow(sdata.iloc[slice_obj]) def apply(self, f): raise AbstractMethodError(self) @@ -3625,7 +3635,7 @@ class ArraySplitter(DataSplitter): class SeriesSplitter(DataSplitter): def _chop(self, sdata, slice_obj): - return sdata._get_values(slice_obj).to_dense() + return self._set_cow(sdata._get_values(slice_obj).to_dense()) class FrameSplitter(DataSplitter): @@ -3648,9 +3658,9 @@ def fast_apply(self, f, names): def _chop(self, sdata, slice_obj): if self.axis == 0: - return sdata.iloc[slice_obj] + return self._set_cow(sdata.iloc[slice_obj]) else: - return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj] + return 
self._set_cow(sdata._slice(slice_obj, axis=1)) # ix[:, slice_obj] class NDFrameSplitter(DataSplitter): @@ -3671,7 +3681,7 @@ def _get_sorted_data(self): return sorted_data def _chop(self, sdata, slice_obj): - return self.factory(sdata.get_slice(slice_obj, axis=self.axis)) + return self._set_cow(self.factory(sdata.get_slice(slice_obj, axis=self.axis))) def get_splitter(data, *args, **kwargs): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b8ee831cdc12c..a641f5126454b 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -273,7 +273,7 @@ def _setitem_with_indexer(self, indexer, value): labels = index.insert(len(index),key) self.obj._data = self.obj.reindex_axis(labels, i)._data self.obj._maybe_update_cacher(clear=True) - self.obj.is_copy=None + self.obj._parent=[] nindexer.append(labels.get_loc(key)) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 8e3dd3836855c..aeb218b56e3e6 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -191,7 +191,7 @@ def f(self, other): # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace(result.reindex_like(self,copy=False)._data, - verify_is_copy=False) + verify_parent=False) return self return f diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1293b4034b84e..b676aad7599c5 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -812,8 +812,7 @@ def xs(self, key, axis=1, copy=None): axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) - copy = new_data.is_mixed_type - result._set_is_copy(self, copy=copy) + result._set_parent(self) return result _xs = xs diff --git a/pandas/core/series.py b/pandas/core/series.py index 2890730956c75..030b0c117efc4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -21,7 +21,8 @@ _possibly_convert_platform, _try_sort, is_int64_dtype, 
ABCSparseArray, _maybe_match_name, - _coerce_to_dtype, SettingWithCopyError, + _coerce_to_dtype, + SettingImmutableError, SettingWithCopyError, _maybe_box_datetimelike, ABCDataFrame, _dict_compat) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, @@ -527,7 +528,7 @@ def __getitem__(self, key): if not self.index.is_unique: result = self._constructor(result, index=[key]*len(result) - ,dtype=self.dtype).__finalize__(self) + ,dtype=self.dtype)._set_parent(self).__finalize__(self) return result except InvalidIndexError: @@ -620,12 +621,12 @@ def _get_values_tuple(self, key): # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) return self._constructor(self.values[indexer], - index=new_index).__finalize__(self) + index=new_index)._set_parent(self).__finalize__(self) def _get_values(self, indexer): try: return self._constructor(self._data.get_slice(indexer), - fastpath=True).__finalize__(self) + fastpath=True)._set_parent(self).__finalize__(self) except Exception: return self.values[indexer] @@ -635,7 +636,7 @@ def setitem(key, value): try: self._set_with_engine(key, value) return - except (SettingWithCopyError): + except (SettingImmutableError): raise except (KeyError, ValueError): values = self.values @@ -683,7 +684,19 @@ def setitem(key, value): self._set_with(key, value) # do the setitem - cacher_needs_updating = self._check_is_chained_assignment_possible() + try: + cacher_needs_updating = self._check_is_chained_assignment_possible() + except (SettingWithCopyError): + + # we have a chained assignment + # assign back to the original + obj = self._parent[0]() + if isinstance(obj, Series): + obj.loc[key] = value + else: + obj.loc[self.name,key] = value + return + setitem(key, value) if cacher_needs_updating: self._maybe_update_cacher() diff --git a/pandas/src/reduce.pyx b/pandas/src/reduce.pyx index eb736e4569009..5e09c87a629fb 100644 --- a/pandas/src/reduce.pyx +++ b/pandas/src/reduce.pyx @@ -488,6 
+488,7 @@ def apply_frame_axis0(object frame, object f, object names, # Need to infer if our low-level mucking is going to cause a segfault if n > 0: chunk = frame.iloc[starts[0]:ends[0]] + chunk._parent_copy_on_write = False shape_before = chunk.shape try: result = f(chunk) @@ -508,6 +509,7 @@ def apply_frame_axis0(object frame, object f, object names, item_cache.clear() # ugh object.__setattr__(slider.dummy, 'name', names[i]) + object.__setattr__(slider.dummy, '_parent_copy_on_write', False) piece = f(slider.dummy) # I'm paying the price for index-sharing, ugh diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 9bdb7f08fe7cf..eb2d53f0db6d3 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -552,12 +552,10 @@ def test_setitem(self): self.frame['col8'] = 'foo' assert((self.frame['col8'] == 'foo').all()) - # this is partially a view (e.g. some blocks are view) - # so raise/warn + # this is copy-on-write smaller = self.frame[:2] - def f(): - smaller['col10'] = ['1', '2'] - self.assertRaises(com.SettingWithCopyError, f) + smaller['col10'] = ['1', '2'] + self.assertEqual(smaller['col10'].dtype, np.object_) self.assertTrue((smaller['col10'] == ['1', '2']).all()) @@ -999,13 +997,11 @@ def test_fancy_getitem_slice_mixed(self): sliced = self.mixed_frame.ix[:, -3:] self.assertEqual(sliced['D'].dtype, np.float64) - # get view with single block - # setting it triggers setting with copy + # this is copy-on-write sliced = self.frame.ix[:, -3:] - def f(): - sliced['C'] = 4. - self.assertRaises(com.SettingWithCopyError, f) - self.assertTrue((self.frame['C'] == 4).all()) + sliced['C'] = 4. 
+ self.assertFalse((self.frame['C'] == 4).all()) + self.assertTrue((sliced['C'] == 4).all()) def test_fancy_setitem_int_labels(self): # integer index defers to label-based indexing @@ -1798,14 +1794,10 @@ def test_irow(self): expected = df.ix[8:14] assert_frame_equal(result, expected) - # verify slice is view - # setting it makes it raise/warn - def f(): - result[2] = 0. - self.assertRaises(com.SettingWithCopyError, f) - exp_col = df[2].copy() - exp_col[4:8] = 0. - assert_series_equal(df[2], exp_col) + # copy-on-write for a slice + result[2] = 0. + self.assertFalse((df[2] == 0).all()) + self.assertTrue((result[2] == 0).all()) # list of integers result = df.iloc[[1, 2, 4, 6]] @@ -1833,12 +1825,10 @@ def test_icol(self): expected = df.ix[:, 8:14] assert_frame_equal(result, expected) - # verify slice is view - # and that we are setting a copy - def f(): - result[8] = 0. - self.assertRaises(com.SettingWithCopyError, f) - self.assertTrue((df[8] == 0).all()) + # we have a slice, but copy-on-write + result[8] = 0. 
+ self.assertFalse((df[8] == 0).all()) + self.assertTrue((result[8] == 0).all()) # list of integers result = df.iloc[:, [1, 2, 4, 6]] @@ -14489,16 +14479,15 @@ def test_idxmax(self): def test_stale_cached_series_bug_473(self): # this is chained, but ok - with option_context('chained_assignment',None): - Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'), - columns=('e', 'f', 'g', 'h')) - repr(Y) - Y['e'] = Y['e'].astype('object') - Y['g']['c'] = np.NaN - repr(Y) - result = Y.sum() - exp = Y['g'].sum() - self.assertTrue(isnull(Y['g']['c'])) + Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'), + columns=('e', 'f', 'g', 'h')) + repr(Y) + Y['e'] = Y['e'].astype('object') + Y['g']['c'] = np.NaN + repr(Y) + result = Y.sum() + exp = Y['g'].sum() + self.assertTrue(isnull(Y['g']['c'])) def test_index_namedtuple(self): from collections import namedtuple diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 41703b3b5a3b7..cd770bf449ec7 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2744,10 +2744,9 @@ def f(group): self.assertEqual(result['d'].dtype, np.float64) # this is by definition a mutating operation! 
- with option_context('mode.chained_assignment',None): - for key, group in grouped: - res = f(group) - assert_frame_equal(res, result.ix[key]) + for key, group in grouped: + res = f(group) + assert_frame_equal(res, result.ix[key]) def test_groupby_wrong_multi_labels(self): from pandas import read_csv diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 30a5716831087..62caa45d6bd82 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -3534,10 +3534,10 @@ def test_set_value_keeps_names(self): columns=['one', 'two', 'three', 'four'], index=idx) df = df.sortlevel() - self.assertIsNone(df.is_copy) + self.assertIsNone(df._parent) self.assertEqual(df.index.names, ('Name', 'Number')) df = df.set_value(('grethe', '4'), 'one', 99.34) - self.assertIsNone(df.is_copy) + self.assertIsNone(df._parent) self.assertEqual(df.index.names, ('Name', 'Number')) def test_names(self): diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 6a9d4096ad4b3..cecb137b23df4 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -2851,12 +2851,10 @@ def test_ix_assign_column_mixed(self): assert_frame_equal(df,expected) # ok, but chained assignments are dangerous - # if we turn off chained assignement it will work - with option_context('chained_assignment',None): - df = pd.DataFrame({'a': lrange(4) }) - df['b'] = np.nan - df['b'].ix[[1,3]] = [100,-100] - assert_frame_equal(df,expected) + df = pd.DataFrame({'a': lrange(4) }) + df['b'] = np.nan + df['b'].ix[[1,3]] = [100,-100] + assert_frame_equal(df,expected) def test_ix_get_set_consistency(self): @@ -3672,25 +3670,22 @@ def test_cache_updating(self): def test_slice_consolidate_invalidate_item_cache(self): - # this is chained assignment, but will 'work' - with option_context('chained_assignment',None): - - # #3970 - df = DataFrame({ "aa":lrange(5), "bb":[2.2]*5}) + # #3970 + df = DataFrame({ "aa":lrange(5), "bb":[2.2]*5}) - # Creates a second float 
block - df["cc"] = 0.0 + # Creates a second float block + df["cc"] = 0.0 - # caches a reference to the 'bb' series - df["bb"] + # caches a reference to the 'bb' series + df["bb"] - # repr machinery triggers consolidation - repr(df) + # repr machinery triggers consolidation + repr(df) - # Assignment to wrong series - df['bb'].iloc[0] = 0.17 - df._clear_item_cache() - self.assertAlmostEqual(df['bb'][0], 0.17) + # Assignment to wrong series + df['bb'].iloc[0] = 0.17 + df._clear_item_cache() + self.assertAlmostEqual(df['bb'][0], 0.17) def test_setitem_cache_updating(self): # GH 5424 @@ -3776,65 +3771,108 @@ def test_setitem_chained_setfault(self): result = df.head() assert_frame_equal(result, expected) - def test_detect_chained_assignment(self): + def test_chain_assignment_yields_copy_on_write(self): + + # 10954 + df = DataFrame({'col1':[1,2], 'col2':[3,4]}) + intermediate = df.loc[1:1,] + + # refrence created + self.assertFalse(len(df._parent)) + self.assertTrue(len(intermediate._parent) == 1) + intermediate['col1'] = -99 + + # reference is broken + self.assertFalse(len(df._parent)) + self.assertFalse(len(intermediate._parent)) + + # local assignment + expected = DataFrame([[-99,4]],index=[1],columns=['col1','col2']) + assert_frame_equal(intermediate, expected) + + # unchanged + expected = DataFrame({'col1':[1,2], 'col2':[3,4]}) + assert_frame_equal(df, expected) + + # chained assignment + # but one that we can deal with + df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]}) + df.loc[1:1,]['col1'] = -99 + expected = DataFrame({'col1':[1,-99], 'col2':[3,4]}) + assert_frame_equal(df, expected) + + df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]}) + df.loc[1]['col1'] = -99 + assert_frame_equal(df, expected) + + # changed via a scalar accessor + df = DataFrame({'col1':[1,2], 'col2':[3,4]}) + expected = df.copy() + + s2 = df.loc[0] + s2.iloc[0] = -99 + assert_frame_equal(df, expected) + expected = Series([-99,3],index=['col1','col2'],name=0) + assert_series_equal(s2, expected) + 
+ # change dtype + df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]}) + expected = DataFrame({'col1':[1, 2], 'col2':[3,'foo']}) + df.loc[1]['col2'] = 'foo' + assert_frame_equal(df, expected) - pd.set_option('chained_assignment','raise') + def test_detect_chained_assignment(self): # work with the chain expected = DataFrame([[-5,1],[-6,3]],columns=list('AB')) df = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'),dtype='int64') - self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) df['A'][0] = -5 df['A'][1] = -6 assert_frame_equal(df, expected) # test with the chaining df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)}) - self.assertIsNone(df.is_copy) - def f(): - df['A'][0] = -5 - self.assertRaises(com.SettingWithCopyError, f) - def f(): - df['A'][1] = np.nan - self.assertRaises(com.SettingWithCopyError, f) - self.assertIsNone(df['A'].is_copy) + self.assertFalse(len(df._parent)) + df['A'][0] = -5 + df['A'][1] = np.nan + self.assertFalse(len(df['A']._parent)) # using a copy (the chain), fails df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)}) - def f(): - df.loc[0]['A'] = -5 - self.assertRaises(com.SettingWithCopyError, f) + df.loc[0]['A'] = -5 + self.assertEqual(df.loc[0,'A'], -5) # doc example df = DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'], 'c' : Series(range(7),dtype='int64') }) - self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) expected = DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'], 'c' : [42,42,2,3,4,42,6]}) - def f(): - indexer = df.a.str.startswith('o') - df[indexer]['c'] = 42 - self.assertRaises(com.SettingWithCopyError, f) + indexer = df.a.str.startswith('o') + df[indexer]['c'] = 42 + assert_frame_equal(df, expected) expected = DataFrame({'A':[111,'bbb','ccc'],'B':[1,2,3]}) df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]}) - def f(): - df['A'][0] = 111 - 
self.assertRaises(com.SettingWithCopyError, f) - def f(): - df.loc[0]['A'] = 111 - self.assertRaises(com.SettingWithCopyError, f) + df['A'][0] = 111 + assert_frame_equal(df, expected) + df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]}) + df.loc[0]['A'] = 111 + assert_frame_equal(df,expected) + + df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]}) df.loc[0,'A'] = 111 assert_frame_equal(df,expected) # make sure that is_copy is picked up reconstruction # GH5475 df = DataFrame({"A": [1,2]}) - self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) with tm.ensure_clean('__tmp__pickle') as path: df.to_pickle(path) df2 = pd.read_pickle(path) @@ -3858,34 +3896,34 @@ def random_text(nobs=100): # always a copy x = df.iloc[[0,1,2]] - self.assertIsNotNone(x.is_copy) + self.assertIsNotNone(x._parent) x = df.iloc[[0,1,2,4]] - self.assertIsNotNone(x.is_copy) + self.assertIsNotNone(x._parent) # explicity copy indexer = df.letters.apply(lambda x : len(x) > 10) df = df.ix[indexer].copy() - self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) df['letters'] = df['letters'].apply(str.lower) # implicity take df = random_text(100000) indexer = df.letters.apply(lambda x : len(x) > 10) df = df.ix[indexer] - self.assertIsNotNone(df.is_copy) + self.assertIsNotNone(df._parent) df['letters'] = df['letters'].apply(str.lower) # implicity take 2 df = random_text(100000) indexer = df.letters.apply(lambda x : len(x) > 10) df = df.ix[indexer] - self.assertIsNotNone(df.is_copy) + self.assertIsNotNone(df._parent) df.loc[:,'letters'] = df['letters'].apply(str.lower) # should be ok even though it's a copy! 
- self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) df['letters'] = df['letters'].apply(str.lower) - self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) df = random_text(100000) indexer = df.letters.apply(lambda x : len(x) > 10) @@ -3893,7 +3931,7 @@ def random_text(nobs=100): # an identical take, so no copy df = DataFrame({'a' : [1]}).dropna() - self.assertIsNone(df.is_copy) + self.assertFalse(len(df._parent)) df['a'] += 1 # inplace ops @@ -3928,23 +3966,22 @@ def f(): # from SO: http://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' - def f(): - df.iloc[0:5]['group'] = 'a' - self.assertRaises(com.SettingWithCopyError, f) + df.iloc[0:5]['group'] = 'a' # mixed type setting # same dtype & changing dtype df = DataFrame(dict(A=date_range('20130101',periods=5),B=np.random.randn(5),C=np.arange(5,dtype='int64'),D=list('abcde'))) + df.ix[2]['D'] = 'foo' - def f(): - df.ix[2]['D'] = 'foo' - self.assertRaises(com.SettingWithCopyError, f) - def f(): - df.ix[2]['C'] = 'foo' - self.assertRaises(com.SettingWithCopyError, f) - def f(): - df['C'][2] = 'foo' - self.assertRaises(com.SettingWithCopyError, f) + df = DataFrame(dict(A=date_range('20130101',periods=5),B=np.random.randn(5),C=np.arange(5,dtype='int64'),D=list('abcde'))) + df.ix[2]['C'] = 'foo' + + df = DataFrame(dict(A=date_range('20130101',periods=5),B=np.random.randn(5),C=np.arange(5,dtype='int64'),D=list('abcde'))) + df['C'][2] = 'foo' + + df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]}) + df.loc[0]['A'] = 111 + self.assertEqual(df.loc[0,'A'],111) def test_setting_with_copy_bug(self): @@ -3964,14 +4001,6 @@ def f(): # this should not raise df2['y'] = ['g', 'h', 'i'] - def test_detect_chained_assignment_warnings(self): - - # warnings - with option_context('chained_assignment','warn'): - df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]}) - with 
tm.assert_produces_warning(expected_warning=com.SettingWithCopyWarning): - df.loc[0]['A'] = 111 - def test_float64index_slicing_bug(self): # GH 5557, related to slicing a float index ser = {256: 2321.0, 1: 78.0, 2: 2716.0, 3: 0.0, 4: 369.0, 5: 0.0, 6: 269.0, 7: 0.0, 8: 0.0, 9: 0.0, 10: 3536.0, 11: 0.0, 12: 24.0, 13: 0.0, 14: 931.0, 15: 0.0, 16: 101.0, 17: 78.0, 18: 9643.0, 19: 0.0, 20: 0.0, 21: 0.0, 22: 63761.0, 23: 0.0, 24: 446.0, 25: 0.0, 26: 34773.0, 27: 0.0, 28: 729.0, 29: 78.0, 30: 0.0, 31: 0.0, 32: 3374.0, 33: 0.0, 34: 1391.0, 35: 0.0, 36: 361.0, 37: 0.0, 38: 61808.0, 39: 0.0, 40: 0.0, 41: 0.0, 42: 6677.0, 43: 0.0, 44: 802.0, 45: 0.0, 46: 2691.0, 47: 0.0, 48: 3582.0, 49: 0.0, 50: 734.0, 51: 0.0, 52: 627.0, 53: 70.0, 54: 2584.0, 55: 0.0, 56: 324.0, 57: 0.0, 58: 605.0, 59: 0.0, 60: 0.0, 61: 0.0, 62: 3989.0, 63: 10.0, 64: 42.0, 65: 0.0, 66: 904.0, 67: 0.0, 68: 88.0, 69: 70.0, 70: 8172.0, 71: 0.0, 72: 0.0, 73: 0.0, 74: 64902.0, 75: 0.0, 76: 347.0, 77: 0.0, 78: 36605.0, 79: 0.0, 80: 379.0, 81: 70.0, 82: 0.0, 83: 0.0, 84: 3001.0, 85: 0.0, 86: 1630.0, 87: 7.0, 88: 364.0, 89: 0.0, 90: 67404.0, 91: 9.0, 92: 0.0, 93: 0.0, 94: 7685.0, 95: 0.0, 96: 1017.0, 97: 0.0, 98: 2831.0, 99: 0.0, 100: 2963.0, 101: 0.0, 102: 854.0, 103: 0.0, 104: 0.0, 105: 0.0, 106: 0.0, 107: 0.0, 108: 0.0, 109: 0.0, 110: 0.0, 111: 0.0, 112: 0.0, 113: 0.0, 114: 0.0, 115: 0.0, 116: 0.0, 117: 0.0, 118: 0.0, 119: 0.0, 120: 0.0, 121: 0.0, 122: 0.0, 123: 0.0, 124: 0.0, 125: 0.0, 126: 67744.0, 127: 22.0, 128: 264.0, 129: 0.0, 260: 197.0, 268: 0.0, 265: 0.0, 269: 0.0, 261: 0.0, 266: 1198.0, 267: 0.0, 262: 2629.0, 258: 775.0, 257: 0.0, 263: 0.0, 259: 0.0, 264: 163.0, 250: 10326.0, 251: 0.0, 252: 1228.0, 253: 0.0, 254: 2769.0, 255: 0.0} diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 1bce047f3bf96..0f9f7c94f4ff1 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -539,11 +539,9 @@ def test_xs_level(self): # this is a copy in 0.14 result = 
self.frame.xs('two', level='second') - # setting this will give a SettingWithCopyError - # as we are trying to write a view - def f(x): - x[:] = 10 - self.assertRaises(com.SettingWithCopyError, f, result) + # this is copy-on-write + result[:] = 10 + self.assertTrue((result.values == 10).all()) def test_xs_level_multiple(self): from pandas import read_table @@ -562,11 +560,9 @@ def test_xs_level_multiple(self): # this is a copy in 0.14 result = df.xs(('a', 4), level=['one', 'four']) - # setting this will give a SettingWithCopyError - # as we are trying to write a view - def f(x): - x[:] = 10 - self.assertRaises(com.SettingWithCopyError, f, result) + # copy-on-write + result[:] = 10 + self.assertTrue((result.values == 10).all()) # GH2107 dates = lrange(20111201, 20111205) @@ -1412,7 +1408,7 @@ def test_frame_getitem_view(self): df['foo', 'four'] = 'foo' df = df.sortlevel(0, axis=1) - # this will work, but will raise/warn as its chained assignment + # chained assignment def f(): df['foo']['one'] = 2 return df diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 9cdc769dd7d74..72954f8dcb3ba 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -556,7 +556,7 @@ def test_xs(self): # mixed-type yields a copy self.panel['strings'] = 'foo' result = self.panel.xs('D', axis=2) - self.assertIsNotNone(result.is_copy) + self.assertIsNotNone(result._parent) def test_getitem_fancy_labels(self): p = self.panel diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 289f7f134aa27..88e1277edd89b 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -514,7 +514,7 @@ def test_xs(self): # mixed-type self.panel4d['strings'] = 'foo' result = self.panel4d.xs('D', axis=3) - self.assertIsNotNone(result.is_copy) + self.assertIsNotNone(result._parent) def test_getitem_fancy_labels(self): panel4d = self.panel4d diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 
86eafdf7ca2c8..61fbb72d967cc 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -157,7 +157,7 @@ def compare(s, name): result = s.dt.to_pytimedelta() self.assertIsInstance(result,np.ndarray) self.assertTrue(result.dtype == object) - + result = s.dt.total_seconds() self.assertIsInstance(result,pd.Series) self.assertTrue(result.dtype == 'float64') @@ -204,11 +204,10 @@ def get_dir(s): with tm.assertRaisesRegexp(ValueError, "modifications"): s.dt.hour = 5 - # trying to set a copy - with pd.option_context('chained_assignment','raise'): - def f(): - s.dt.hour[0] = 5 - self.assertRaises(com.SettingWithCopyError, f) + # trying to set an immutable + def f(): + s.dt.hour[0] = 5 + self.assertRaises(com.SettingImmutableError, f) def test_strftime(self): # GH 10086 @@ -1236,9 +1235,9 @@ def test_iget(self): expected = s.ix[2:4] assert_series_equal(result, expected) - # test slice is a view + # this is copy-on-write result[:] = 0 - self.assertTrue((s[1:3] == 0).all()) + self.assertTrue((s[1:3] != 0).all()) # list of integers result = s.iloc[[0, 2, 3, 4, 5]] @@ -1474,10 +1473,10 @@ def test_slice(self): self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[-10:])) - # test return view + # copy-on-write sl = self.series[10:20] sl[:] = 0 - self.assertTrue((self.series[10:20] == 0).all()) + self.assertTrue((self.series[10:20] != 0).all()) def test_slice_can_reorder_not_uniquely_indexed(self): s = Series(1, index=['a', 'a', 'b', 'b', 'c']) @@ -4447,7 +4446,6 @@ def test_underlying_data_conversion(self): # GH 3970 # these are chained assignments as well - pd.set_option('chained_assignment',None) df = DataFrame({ "aa":range(5), "bb":[2.2]*5}) df["cc"] = 0.0 ck = [True]*len(df) @@ -4455,7 +4453,6 @@ def test_underlying_data_conversion(self): df_tmp = df.iloc[ck] df["bb"].iloc[0] = .15 self.assertEqual(df['bb'].iloc[0], 0.15) - pd.set_option('chained_assignment','raise') # GH 3217 df = DataFrame(dict(a = [1,3], b = [np.nan, 2])) diff --git 
a/pandas/tseries/common.py b/pandas/tseries/common.py index 9a282bec2e9e4..2692e38389106 100644 --- a/pandas/tseries/common.py +++ b/pandas/tseries/common.py @@ -9,7 +9,7 @@ from pandas import tslib from pandas.core.common import (_NS_DTYPE, _TD_DTYPE, is_period_arraylike, is_datetime_arraylike, is_integer_dtype, is_list_like, - get_dtype_kinds) + get_dtype_kinds, SettingImmutableError) def is_datetimelike(data): """ return a boolean if we can be successfully converted to a datetimelike """ @@ -76,15 +76,16 @@ def _delegate_property_get(self, name): # return the result as a Series, which is by definition a copy result = Series(result, index=self.index) - # setting this object will show a SettingWithCopyWarning/Error - result.is_copy = ("modifications to a property of a datetimelike object are not " - "supported and are discarded. Change values on the original.") + # setting this object will show a ValueError if accessed + result._parent = SettingImmutableError("modifications to a property of a datetimelike object are not " + "supported and are discarded. Change values on the original.") + return result def _delegate_property_set(self, name, value, *args, **kwargs): - raise ValueError("modifications to a property of a datetimelike object are not " - "supported. Change values on the original.") + raise SettingImmutableError("modifications to a property of a datetimelike object are not " + "supported. Change values on the original.") def _delegate_method(self, name, *args, **kwargs): from pandas import Series @@ -97,9 +98,9 @@ def _delegate_method(self, name, *args, **kwargs): result = Series(result, index=self.index) - # setting this object will show a SettingWithCopyWarning/Error - result.is_copy = ("modifications to a method of a datetimelike object are not " - "supported and are discarded. 
Change values on the original.") + # setting this object will show a SettingImmutableError + result._parent = SettingImmutableError("modifications to a method of a datetimelike object are not " + "supported and are discarded. Change values on the original.") return result diff --git a/pandas/util/testing.py b/pandas/util/testing.py index aaa83da036c2f..f1fe9d74af1f3 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -64,7 +64,7 @@ class TestCase(unittest.TestCase): @classmethod def setUpClass(cls): - pd.set_option('chained_assignment', 'raise') + pass @classmethod def tearDownClass(cls):
closes #10954 - deprecates the option `mode.chained_assignment` as its now unused (it shows the deprecation if you are explicity using it, more friendly this way). - deprecate `NDFrame.is_copy` property - remove `SettingWithCopyWarning` entirely TODO: - [ ] some systematic testing of the chaining might help (e.g. iterate thru the `iloc/loc/[]`, chain, and set various values including dtype changes - [ ] huge note explaining the rationale & change structure semantics: Intermediate assignment trigger copy-on-write and do not propogate. ``` In [1]: df = DataFrame({'col1':[1,2], 'col2':[3,4]}) In [2]: intermediate = df.loc[1:1,] In [3]: intermediate['col1'] = -99 In [4]: intermediate Out[4]: col1 col2 1 -99 4 In [5]: df Out[5]: col1 col2 0 1 3 1 2 4 ``` Chained assignments _always_ work! ``` In [6]: df = DataFrame({'col1':[1,2], 'col2':[3,4]}) In [7]: df.loc[1:1,]['col1'] = -99 In [8]: df Out[8]: col1 col2 0 1 3 1 -99 4 ``` Even true with cross-sections that change dtype ``` In [1]: df = DataFrame({'col1':[1,2], 'col2':[3,4]}) In [2]: df.loc[1]['col2'] = 'foo' In [3]: df Out[3]: col1 col2 0 1 3 1 2 foo ``` except for a really egregious case, which will raise a `SettingWithCopyError` (maybe I could even fix this....) ``` In [10]: df.loc[1:1]['col2'].replace(10,5,inplace=True) SettingWithCopyError: chained indexing detected, you can fix this ...... ``` This is an invalid assignment. I suppose we _could_ make it work, but we currently havent' allowed the `.dt` to be used as a setitem accessor ``` In [9]: s = Series(pd.date_range('20130101',periods=3)) In [12]: s.dt.hour[0] = 5 SettingImmutableError: modifications to a property of a datetimelike object are not supported and are discarded. Change values on the original. ``` cc @nickeubank cc @JanSchulz cc @ellisonbg cc @CarstVaartjes @shoyer special thanks to @JanSchulz for some reference checking code :)
https://api.github.com/repos/pandas-dev/pandas/pulls/10973
2015-09-02T20:56:46Z
2015-11-02T12:05:08Z
null
2020-09-06T18:46:29Z
DOC: Consistent variable names (sheetname vs sheet_name, issue 10559)
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 359ec76533520..1057cfc4745e6 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -774,7 +774,7 @@ Writing to an excel file .. ipython:: python - df.to_excel('foo.xlsx', sheet_name='Sheet1') + df.to_excel('foo.xlsx', sheetname='Sheet1') Reading from an excel file diff --git a/doc/source/io.rst b/doc/source/io.rst index 70e7154493ccf..c86d3f214827b 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2012,9 +2012,9 @@ Specifying Sheets .. _io.specifying_sheets: -.. note :: The second argument is ``sheetname``, not to be confused with ``ExcelFile.sheet_names`` +.. note :: The second argument is ``sheetname``, not to be confused with ``ExcelFile.sheetnames`` -.. note :: An ExcelFile's attribute ``sheet_names`` provides access to a list of sheets. +.. note :: An ExcelFile's attribute ``sheetnames`` provides access to a list of sheets. - The arguments ``sheetname`` allows specifying the sheet or sheets to read. - The default value for ``sheetname`` is 0, indicating to read the first sheet @@ -2111,7 +2111,7 @@ written. For example: .. code-block:: python - df.to_excel('path_to_file.xlsx', sheet_name='Sheet1') + df.to_excel('path_to_file.xlsx', sheetname='Sheet1') Files with a ``.xls`` extension will be written using ``xlwt`` and those with a ``.xlsx`` extension will be written using ``xlsxwriter`` (if available) or @@ -2135,8 +2135,8 @@ one can pass an :class:`~pandas.io.excel.ExcelWriter`. .. code-block:: python with ExcelWriter('path_to_file.xlsx') as writer: - df1.to_excel(writer, sheet_name='Sheet1') - df2.to_excel(writer, sheet_name='Sheet2') + df1.to_excel(writer, sheetname='Sheet1') + df2.to_excel(writer, sheetname='Sheet2') .. note:: @@ -2181,7 +2181,7 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are: .. code-block:: python # By setting the 'engine' in the DataFrame and Panel 'to_excel()' methods. 
- df.to_excel('path_to_file.xlsx', sheet_name='Sheet1', engine='xlsxwriter') + df.to_excel('path_to_file.xlsx', sheetname='Sheet1', engine='xlsxwriter') # By setting the 'engine' in the ExcelWriter constructor. writer = ExcelWriter('path_to_file.xlsx', engine='xlsxwriter') @@ -2190,7 +2190,7 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are: from pandas import options options.io.excel.xlsx.writer = 'xlsxwriter' - df.to_excel('path_to_file.xlsx', sheet_name='Sheet1') + df.to_excel('path_to_file.xlsx', sheetname='Sheet1') .. _io.excel_writing_buffer: @@ -2214,7 +2214,7 @@ Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` # By setting the 'engine' in the ExcelWriter constructor. writer = ExcelWriter(bio, engine='xlsxwriter') - df.to_excel(writer, sheet_name='Sheet1') + df.to_excel(writer, sheetname='Sheet1') # Save the workbook writer.save() diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 997dfeb728ade..4c8934e224126 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1269,7 +1269,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, if path_or_buf is None: return formatter.path_or_buf.getvalue() - def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', + def to_excel(self, excel_writer, sheetname='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf', @@ -1281,7 +1281,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', ---------- excel_writer : string or ExcelWriter object File path or existing ExcelWriter - sheet_name : string, default 'Sheet1' + sheetname : string, default 'Sheet1' Name of sheet which will contain DataFrame na_rep : string, default '' Missing data representation @@ -1351,7 +1351,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', merge_cells=merge_cells, inf_rep=inf_rep, 
verbose=verbose) formatted_cells = formatter.get_formatted_cells() - excel_writer.write_cells(formatted_cells, sheet_name, + excel_writer.write_cells(formatted_cells, sheetname, startrow=startrow, startcol=startcol) if need_save: excel_writer.save() diff --git a/pandas/io/excel.py b/pandas/io/excel.py index d5258cb32e6e0..40e4cec8ff668 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -382,7 +382,7 @@ def _parse_cell(cell_contents,cell_typ): sheets = sheetname ret_dict = True elif sheetname is None: - sheets = self.sheet_names + sheets = self.sheetnames ret_dict = True else: sheets = [sheetname] @@ -441,8 +441,8 @@ def _parse_cell(cell_contents,cell_typ): @property - def sheet_names(self): - return self.book.sheet_names() + def sheetnames(self): + return self.book.sheetnames() def close(self): """close io if necessary""" @@ -508,7 +508,7 @@ class ExcelWriter(object): # Defining an ExcelWriter implementation (see abstract methods for more...) # - Mandatory - # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)`` + # - ``write_cells(self, cells, sheetname=None, startrow=0, startcol=0)`` # --> called to write additional DataFrames to disk # - ``supported_extensions`` (tuple of supported extensions), used to # check that engine supports the given extension. 
@@ -560,7 +560,7 @@ def engine(self): pass @abc.abstractmethod - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): + def write_cells(self, cells, sheetname=None, startrow=0, startcol=0): """ Write given formated cells into Excel an excel sheet @@ -568,7 +568,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): ---------- cells : generator cell of formated data to save to Excel sheet - sheet_name : string, default None + sheetname : string, default None Name of Excel sheet, if None, then use self.cur_sheet startrow: upper left cell row to dump data frame startcol: upper left cell column to dump data frame @@ -605,13 +605,13 @@ def __init__(self, path, engine=None, else: self.datetime_format = datetime_format - def _get_sheet_name(self, sheet_name): - if sheet_name is None: - sheet_name = self.cur_sheet - if sheet_name is None: # pragma: no cover - raise ValueError('Must pass explicit sheet_name or set ' + def _get_sheetname(self, sheetname): + if sheetname is None: + sheetname = self.cur_sheet + if sheetname is None: # pragma: no cover + raise ValueError('Must pass explicit sheetname or set ' 'cur_sheet property') - return sheet_name + return sheetname @classmethod def check_extension(cls, ext): @@ -665,18 +665,18 @@ def save(self): """ return self.book.save(self.path) - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): + def write_cells(self, cells, sheetname=None, startrow=0, startcol=0): # Write the frame cells using openpyxl. 
from openpyxl.cell import get_column_letter - sheet_name = self._get_sheet_name(sheet_name) + sheetname = self._get_sheetname(sheetname) - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] + if sheetname in self.sheets: + wks = self.sheets[sheetname] else: wks = self.book.create_sheet() - wks.title = sheet_name - self.sheets[sheet_name] = wks + wks.title = sheetname + self.sheets[sheetname] = wks for cell in cells: colletter = get_column_letter(startcol + cell.col + 1) @@ -759,18 +759,18 @@ class _Openpyxl2Writer(_Openpyxl1Writer): engine = 'openpyxl2' openpyxl_majorver = 2 - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): + def write_cells(self, cells, sheetname=None, startrow=0, startcol=0): # Write the frame cells using openpyxl. from openpyxl.cell import get_column_letter - sheet_name = self._get_sheet_name(sheet_name) + sheetname = self._get_sheetname(sheetname) - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] + if sheetname in self.sheets: + wks = self.sheets[sheetname] else: wks = self.book.create_sheet() - wks.title = sheet_name - self.sheets[sheet_name] = wks + wks.title = sheetname + self.sheets[sheetname] = wks for cell in cells: colletter = get_column_letter(startcol + cell.col + 1) @@ -1189,16 +1189,16 @@ def save(self): """ return self.book.save(self.path) - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): + def write_cells(self, cells, sheetname=None, startrow=0, startcol=0): # Write the frame cells using xlwt. 
- sheet_name = self._get_sheet_name(sheet_name) + sheetname = self._get_sheetname(sheetname) - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] + if sheetname in self.sheets: + wks = self.sheets[sheetname] else: - wks = self.book.add_sheet(sheet_name) - self.sheets[sheet_name] = wks + wks = self.book.add_sheet(sheetname) + self.sheets[sheetname] = wks style_dict = {} @@ -1312,16 +1312,16 @@ def save(self): """ return self.book.close() - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): + def write_cells(self, cells, sheetname=None, startrow=0, startcol=0): # Write the frame cells using xlsxwriter. - sheet_name = self._get_sheet_name(sheet_name) + sheetname = self._get_sheetname(sheetname) - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] + if sheetname in self.sheets: + wks = self.sheets[sheetname] else: - wks = self.book.add_worksheet(sheet_name) - self.sheets[sheet_name] = wks + wks = self.book.add_worksheet(sheetname) + self.sheets[sheetname] = wks style_dict = {} diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 83db59f9d9029..68c34d0ec1120 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -743,9 +743,9 @@ def test_sheets(self): tm.assert_frame_equal(self.frame, recons) recons = reader.parse('test2', index_col=0) tm.assert_frame_equal(self.tsframe, recons) - np.testing.assert_equal(2, len(reader.sheet_names)) - np.testing.assert_equal('test1', reader.sheet_names[0]) - np.testing.assert_equal('test2', reader.sheet_names[1]) + np.testing.assert_equal(2, len(reader.sheetnames)) + np.testing.assert_equal('test1', reader.sheetnames[0]) + np.testing.assert_equal('test2', reader.sheetnames[1]) def test_colaliases(self): _skip_if_no_xlrd() @@ -842,7 +842,7 @@ def test_excel_roundtrip_indexname(self): df.to_excel(path, merge_cells=self.merge_cells) xf = ExcelFile(path) - result = xf.parse(xf.sheet_names[0], + result = xf.parse(xf.sheetnames[0], index_col=0, 
has_index_names=self.merge_cells) @@ -1006,7 +1006,7 @@ def test_to_excel_output_encoding(self): index=[u('A\u0192'), 'B'], columns=[u('X\u0193'), 'Y', 'Z']) with ensure_clean(filename) as filename: - df.to_excel(filename, sheet_name='TestSheet', encoding='utf8') + df.to_excel(filename, sheetname='TestSheet', encoding='utf8') result = read_excel(filename, 'TestSheet', encoding='utf8') tm.assert_frame_equal(result, df) @@ -1065,7 +1065,7 @@ def test_to_excel_unicode_filename(self): # wbk = xlrd.open_workbook(filename, # formatting_info=True) - # self.assertEqual(["test1"], wbk.sheet_names()) + # self.assertEqual(["test1"], wbk.sheetnames()) # ws = wbk.sheet_by_name('test1') # self.assertEqual([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)], # ws.merged_cells) @@ -1111,7 +1111,7 @@ def test_to_excel_unicode_filename(self): # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx' # pdf.to_excel(filename, 'test1') # wbk = openpyxl.load_workbook(filename) - # self.assertEqual(["test1"], wbk.get_sheet_names()) + # self.assertEqual(["test1"], wbk.get_sheetnames()) # ws = wbk.get_sheet_by_name('test1') # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))] # xlsaddrs += ["A%s" % i for i in range(1, 6)] @@ -1149,7 +1149,7 @@ def roundtrip(df, header=True, parser_hdr=0, index=True): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index) xf = pd.ExcelFile(path) - res = xf.parse(xf.sheet_names[0], header=parser_hdr) + res = xf.parse(xf.sheetnames[0], header=parser_hdr) return res nrows = 5 @@ -1203,7 +1203,7 @@ def roundtrip2(df, header=True, parser_hdr=0, index=True): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index) xf = pd.ExcelFile(path) - res = xf.parse(xf.sheet_names[0], header=parser_hdr) + res = xf.parse(xf.sheetnames[0], header=parser_hdr) return res nrows = 5; ncols = 3 @@ -1430,7 +1430,7 @@ def test_write_cells_merge_styled(self): from 
pandas.core.format import ExcelCell from openpyxl import styles - sheet_name='merge_styled' + sheetname='merge_styled' sty_b1 = {'font': {'color': '00FF0000'}} sty_a2 = {'font': {'color': '0000FF00'}} @@ -1450,10 +1450,10 @@ def test_write_cells_merge_styled(self): with ensure_clean('.xlsx') as path: writer = _Openpyxl2Writer(path) - writer.write_cells(initial_cells, sheet_name=sheet_name) - writer.write_cells(merge_cells, sheet_name=sheet_name) + writer.write_cells(initial_cells, sheetname=sheetname) + writer.write_cells(merge_cells, sheetname=sheetname) - wks = writer.sheets[sheet_name] + wks = writer.sheets[sheetname] xcell_b1 = wks.cell('B1') xcell_a2 = wks.cell('A2') self.assertEqual(xcell_b1.style, openpyxl_sty_merged)
See issue #10559. Done testing after the changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/10969
2015-09-02T03:25:15Z
2015-09-04T03:07:52Z
null
2015-09-04T03:07:57Z
DOC: Added default values in parsers.py doc-string
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9ad992c434984..6801e8935e079 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -54,7 +54,7 @@ class ParserWarning(Warning): Skip spaces after delimiter escapechar : string (length 1), default None One-character string used to escape delimiter when quoting is QUOTE_NONE. -dtype : Type name or dict of column -> type +dtype : Type name or dict of column -> type, default None Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} (Unsupported with engine='python') compression : {'gzip', 'bz2', 'infer', None}, default 'infer' @@ -65,7 +65,7 @@ class ParserWarning(Warning): dialect : string or csv.Dialect instance, default None If None defaults to Excel dialect. Ignored if sep longer than 1 char See csv.Dialect documentation for more details -header : int, list of ints +header : int, list of ints, default 'infer' Row number(s) to use as the column names, and the start of the data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be @@ -74,7 +74,7 @@ class ParserWarning(Warning): skipped (e.g. 2 in this example are skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so header=0 denotes the first line of data rather than the first line of the file. -skiprows : list-like or integer +skiprows : list-like or integer, default None Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file index_col : int or sequence or False, default None @@ -82,7 +82,7 @@ class ParserWarning(Warning): MultiIndex is used. If you have a malformed file with delimiters at the end of each line, you might consider index_col=False to force pandas to _not_ use the first column as the index (row names) -names : array-like +names : array-like, default None List of column names to use. 
If file contains no header row, then you should explicitly pass header=None prefix : string, default None @@ -90,14 +90,14 @@ class ParserWarning(Warning): na_values : str, list-like or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values -true_values : list +true_values : list, default None Values to consider as True -false_values : list +false_values : list, default None Values to consider as False keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to -parse_dates : boolean, list of ints or names, list of lists, or dict +parse_dates : boolean, list of ints or names, list of lists, or dict, default False If True -> try parsing the index. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. @@ -106,7 +106,7 @@ class ParserWarning(Warning): keep_date_col : boolean, default False If True and parse_dates specifies combining multiple columns then keep the original columns. -date_parser : function +date_parser : function, default None Function to use for converting a sequence of string columns to an array of datetime instances. The default uses dateutil.parser.parser to do the conversion. Pandas will try to call date_parser in three different @@ -154,7 +154,7 @@ class ParserWarning(Warning): Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file -usecols : array-like +usecols : array-like, default None Return a subset of the columns. Results in much faster parsing time and lower memory usage. mangle_dupe_cols : boolean, default True
Function declaration contains default values for many of the parameters but the default values are not specified in the doc.
https://api.github.com/repos/pandas-dev/pandas/pulls/10968
2015-09-02T01:19:44Z
2015-09-02T08:51:21Z
2015-09-02T08:51:21Z
2015-09-02T13:09:57Z
ENH: read_excel MultiIndex #4679
diff --git a/doc/source/io.rst b/doc/source/io.rst index 31d0be6151ba4..f3d14b78bbf54 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1989,6 +1989,46 @@ advanced strategies Reading Excel Files ''''''''''''''''''' +.. versionadded:: 0.17 + +``read_excel`` can read a ``MultiIndex`` index, by passing a list of columns to ``index_col`` +and a ``MultiIndex`` column by passing a list of rows to ``header``. If either the ``index`` +or ``columns`` have serialized level names those will be read in as well by specifying +the rows/columns that make up the levels. + +.. ipython:: python + + # MultiIndex index - no names + df = pd.DataFrame({'a':[1,2,3,4], 'b':[5,6,7,8]}, + index=pd.MultiIndex.from_product([['a','b'],['c','d']])) + df.to_excel('path_to_file.xlsx') + df = pd.read_excel('path_to_file.xlsx', index_col=[0,1]) + df + + # MultiIndex index - with names + df.index = df.index.set_names(['lvl1', 'lvl2']) + df.to_excel('path_to_file.xlsx') + df = pd.read_excel('path_to_file.xlsx', index_col=[0,1]) + df + + # MultiIndex index and column - with names + df.columns = pd.MultiIndex.from_product([['a'],['b', 'd']], names=['c1', 'c2']) + df.to_excel('path_to_file.xlsx') + df = pd.read_excel('path_to_file.xlsx', + index_col=[0,1], header=[0,1]) + df + +.. ipython:: python + :suppress: + + import os + os.remove('path_to_file.xlsx') + +.. warning:: + + Excel files saved in version 0.16.2 or prior that had index names will still able to be read in, + but the ``has_index_names`` argument must specified to ``True``. + .. versionadded:: 0.16 ``read_excel`` can read more than one sheet, by setting ``sheetname`` to either diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index f88e5c0a11f9f..5a0e33b193b66 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -205,6 +205,53 @@ The support math functions are `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`, These functions map to the intrinsics for the NumExpr engine. 
For Python engine, they are mapped to NumPy calls. +Changes to Excel with ``MultiIndex`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +In version 0.16.2 a ``DataFrame`` with ``MultiIndex`` columns could not be written to Excel via ``to_excel``. +That functionality has been added (:issue:`10564`), along with updating ``read_excel`` so that the data can +be read back with no loss of information by specifying which columns/rows make up the ``MultiIndex`` +in the ``header`` and ``index_col`` parameters (:issue:`4679`) + +See the :ref:`documentation <io.excel>` for more details. + +.. ipython:: python + + df = pd.DataFrame([[1,2,3,4], [5,6,7,8]], + columns = pd.MultiIndex.from_product([['foo','bar'],['a','b']], + names = ['col1', 'col2']), + index = pd.MultiIndex.from_product([['j'], ['l', 'k']], + names = ['i1', 'i2'])) + + df + df.to_excel('test.xlsx') + + df = pd.read_excel('test.xlsx', header=[0,1], index_col=[0,1]) + df + +.. ipython:: python + :suppress: + + import os + os.remove('test.xlsx') + +Previously, it was necessary to specify the ``has_index_names`` argument in ``read_excel`` +if the serialized data had index names. For version 0.17 the ouptput format of ``to_excel`` +has been changed to make this keyword unnecessary - the change is shown below. + +**Old** + +.. image:: _static/old-excel-index.png + +**New** + +.. image:: _static/new-excel-index.png + +.. warning:: + + Excel files saved in version 0.16.2 or prior that had index names will still able to be read in, + but the ``has_index_names`` argument must specified to ``True``. + + .. _whatsnew_0170.enhancements.other: Other enhancements @@ -761,7 +808,6 @@ Changes to ``Categorical.unique`` cat cat.unique() - .. 
_whatsnew_0170.api_breaking.other: Other API Changes @@ -771,7 +817,6 @@ Other API Changes - Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a Series with a ``CategoricalIndex`` (:issue:`10704`) - Allow passing `kwargs` to the interpolation methods (:issue:`10378`). - The metadata properties of subclasses of pandas objects will now be serialized (:issue:`10553`). -- Allow ``DataFrame`` with ``MultiIndex`` columns to be written to Excel (:issue:`10564`). This was changed in 0.16.2 as the read-back method could not always guarantee perfect fidelity (:issue:`9794`). - ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`) - Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`) - When constructing ``DataFrame`` with an array of ``complex64`` dtype that meant the corresponding column was automatically promoted to the ``complex128`` dtype. Pandas will now preserve the itemsize of the input for complex data (:issue:`10952`) diff --git a/pandas/core/format.py b/pandas/core/format.py index 29f1e1efe9f5d..47d0ef37383c4 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -4,7 +4,6 @@ # pylint: disable=W0141 import sys -import warnings from pandas.core.base import PandasObject from pandas.core.common import adjoin, notnull @@ -1641,14 +1640,11 @@ class ExcelFormatter(object): inf_rep : string, default `'inf'` representation for np.inf values (which aren't representable in Excel) A `'-'` sign will be added in front of -inf. - verbose: boolean, default True - If True, warn user that the resulting output file may not be - re-read or parsed directly by pandas. 
""" def __init__(self, df, na_rep='', float_format=None, cols=None, header=True, index=True, index_label=None, merge_cells=False, - inf_rep='inf', verbose=True): + inf_rep='inf'): self.df = df self.rowcounter = 0 self.na_rep = na_rep @@ -1661,7 +1657,6 @@ def __init__(self, df, na_rep='', float_format=None, cols=None, self.header = header self.merge_cells = merge_cells self.inf_rep = inf_rep - self.verbose = verbose def _format_value(self, val): if lib.checknull(val): @@ -1682,10 +1677,6 @@ def _format_header_mi(self): raise NotImplementedError("Writing to Excel with MultiIndex" " columns and no index ('index'=False) " "is not yet implemented.") - elif self.index and self.verbose: - warnings.warn("Writing to Excel with MultiIndex columns is a" - " one way serializable operation. You will not" - " be able to re-read or parse the output file.") has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index)) if not(has_aliases or self.header): @@ -1796,18 +1787,14 @@ def _format_regular_rows(self): else: index_label = self.df.index.names[0] + if isinstance(self.columns, MultiIndex): + self.rowcounter += 1 + if index_label and self.header is not False: - if self.merge_cells: - yield ExcelCell(self.rowcounter, - 0, - index_label, - header_style) - self.rowcounter += 1 - else: - yield ExcelCell(self.rowcounter - 1, - 0, - index_label, - header_style) + yield ExcelCell(self.rowcounter - 1, + 0, + index_label, + header_style) # write index_values index_values = self.df.index @@ -1841,19 +1828,21 @@ def _format_hierarchical_rows(self): (list, tuple, np.ndarray, Index)): index_labels = self.index_label + # MultiIndex columns require an extra row + # with index names (blank if None) for + # unambigous round-trip + if isinstance(self.columns, MultiIndex): + self.rowcounter += 1 + # if index labels are not empty go ahead and dump if (any(x is not None for x in index_labels) and self.header is not False): - if not self.merge_cells: - self.rowcounter -= 1 - for cidx, name 
in enumerate(index_labels): - yield ExcelCell(self.rowcounter, + yield ExcelCell(self.rowcounter - 1, cidx, name, header_style) - self.rowcounter += 1 if self.merge_cells: # Format hierarchical rows as merged cells. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cb237b93c70ba..0e8bdbccb53fb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1336,9 +1336,6 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', inf_rep : string, default 'inf' Representation for infinity (there is no native representation for infinity in Excel) - verbose: boolean, default True - If True, warn user that the resulting output file may not be - re-read or parsed directly by pandas. Notes ----- @@ -1371,7 +1368,7 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', index=index, index_label=index_label, merge_cells=merge_cells, - inf_rep=inf_rep, verbose=verbose) + inf_rep=inf_rep) formatted_cells = formatter.get_formatted_cells() excel_writer.write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index d5258cb32e6e0..b113cbf057f39 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -18,6 +18,7 @@ BytesIO, string_types) from pandas.core import config from pandas.core.common import pprint_thing +from pandas.util.decorators import Appender import pandas.compat as compat import pandas.compat.openpyxl_compat as openpyxl_compat import pandas.core.common as com @@ -68,15 +69,11 @@ def get_writer(engine_name): raise ValueError("No Excel writer '%s'" % engine_name) -def read_excel(io, sheetname=0, **kwds): - """Read an Excel table into a pandas DataFrame +excel_doc_common = """ + Read an Excel table into a pandas DataFrame Parameters - ---------- - io : string, file-like object, or xlrd workbook. - The string could be a URL. Valid URL schemes include http, ftp, s3, - and file. For file URLs, a host is expected. 
For instance, a local - file could be file://localhost/path/to/workbook.xlsx + ----------%(io)s sheetname : string, int, mixed list of strings/ints, or None, default 0 Strings are used for sheet names, Integers are used in zero-indexed sheet @@ -97,20 +94,23 @@ def read_excel(io, sheetname=0, **kwds): * [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames * None -> All sheets as a dictionary of DataFrames - header : int, default 0 - Row to use for the column labels of the parsed DataFrame + header : int, list of ints, default 0 + Row (0-indexed) to use for the column labels of the parsed + DataFrame. If a list of integers is passed those row positions will + be combined into a ``MultiIndex`` skiprows : list-like Rows to skip at the beginning (0-indexed) skip_footer : int, default 0 Rows at the end to skip (0-indexed) + index_col : int, list of ints, default None + Column (0-indexed) to use as the row labels of the DataFrame. + Pass None if there is no such column. If a list is passed, + those columns will be combined into a ``MultiIndex`` converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the Excel cell content, and return the transformed content. - index_col : int, default None - Column to use as the row labels of the DataFrame. Pass None if - there is no such column parse_cols : int or list, default None * If None then parse all columns, * If int then indicates last column to be parsed @@ -119,22 +119,21 @@ def read_excel(io, sheetname=0, **kwds): column ranges (e.g. 
"A:E" or "A,C,E:F") na_values : list-like, default None List of additional strings to recognize as NA/NaN + thousands : str, default None + Thousands separator keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to verbose : boolean, default False - Indicate number of NA values placed in non-numeric columns - engine: string, default None - If io is not a buffer or path, this must be set to identify io. - Acceptable values are None or xlrd + Indicate number of NA values placed in non-numeric columns%(eng)s convert_float : boolean, default True convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric data will be read in as floats: Excel stores all numbers as floats internally - has_index_names : boolean, default False - True if the cols defined in index_col have an index name and are - not in the header. Index name will be placed on a separate line below - the header. + has_index_names : boolean, default None + DEPCRECATED: for version 0.17+ index names will be automatically inferred + based on index_col. To read Excel output from 0.16.2 and prior that + had saved index names, use True. Returns ------- @@ -143,6 +142,19 @@ def read_excel(io, sheetname=0, **kwds): for more information on when a Dict of Dataframes is returned. """ +read_excel_kwargs = dict() +read_excel_kwargs['io'] = """ + io : string, file-like object, or xlrd workbook. + The string could be a URL. Valid URL schemes include http, ftp, s3, + and file. For file URLs, a host is expected. For instance, a local + file could be file://localhost/path/to/workbook.xlsx""" +read_excel_kwargs['eng'] = """ + engine: string, default None + If io is not a buffer or path, this must be set to identify io. 
+ Acceptable values are None or xlrd""" + +@Appender(excel_doc_common % read_excel_kwargs) +def read_excel(io, sheetname=0, **kwds): engine = kwds.pop('engine', None) return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds) @@ -193,83 +205,23 @@ def __init__(self, io, **kwds): raise ValueError('Must explicitly set engine if not passing in' ' buffer or path for io.') + @Appender(excel_doc_common % dict(io='', eng='')) def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0, index_col=None, parse_cols=None, parse_dates=False, date_parser=None, na_values=None, thousands=None, chunksize=None, - convert_float=True, has_index_names=False, converters=None, **kwds): - """Read an Excel table into DataFrame + convert_float=True, has_index_names=None, converters=None, **kwds): - Parameters - ---------- - sheetname : string, int, mixed list of strings/ints, or None, default 0 - - Strings are used for sheet names, Integers are used in zero-indexed sheet - positions. - - Lists of strings/integers are used to request multiple sheets. - - Specify None to get all sheets. - - str|int -> DataFrame is returned. - list|None -> Dict of DataFrames is returned, with keys representing sheets. - - Available Cases - - * Defaults to 0 -> 1st sheet as a DataFrame - * 1 -> 2nd sheet as a DataFrame - * "Sheet1" -> 1st sheet as a DataFrame - * [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames - * None -> All sheets as a dictionary of DataFrames - header : int, default 0 - Row to use for the column labels of the parsed DataFrame - skiprows : list-like - Rows to skip at the beginning (0-indexed) - skip_footer : int, default 0 - Rows at the end to skip (0-indexed) - converters : dict, default None - Dict of functions for converting values in certain columns. Keys can - either be integers or column labels - index_col : int, default None - Column to use as the row labels of the DataFrame. 
Pass None if - there is no such column - parse_cols : int or list, default None - * If None then parse all columns - * If int then indicates last column to be parsed - * If list of ints then indicates list of column numbers to be - parsed - * If string then indicates comma separated list of column names and - column ranges (e.g. "A:E" or "A,C,E:F") - parse_dates : boolean, default False - Parse date Excel values, - date_parser : function default None - Date parsing function - na_values : list-like, default None - List of additional strings to recognize as NA/NaN - thousands : str, default None - Thousands separator - chunksize : int, default None - Size of file chunk to read for lazy evaluation. - convert_float : boolean, default True - convert integral floats to int (i.e., 1.0 --> 1). If False, all - numeric data will be read in as floats: Excel stores all numbers as - floats internally. - has_index_names : boolean, default False - True if the cols defined in index_col have an index name and are - not in the header - verbose : boolean, default False - Set to True to print a single statement when reading each - excel sheet. - - Returns - ------- - parsed : DataFrame or Dict of DataFrames - DataFrame from the passed in Excel file. See notes in sheetname argument - for more information on when a Dict of Dataframes is returned. 
- """ skipfooter = kwds.pop('skipfooter', None) if skipfooter is not None: skip_footer = skipfooter + if has_index_names is not None: + warn("\nThe has_index_names argument is deprecated; index names " + "will be automatically inferred based on index_col.\n" + "This argmument is still necessary if reading Excel output " + "from 0.16.2 or prior with index names.", FutureWarning, + stacklevel=3) + return self._parse_excel(sheetname=sheetname, header=header, skiprows=skiprows, index_col=index_col, @@ -418,8 +370,40 @@ def _parse_cell(cell_contents,cell_typ): if sheet.nrows == 0: return DataFrame() + if com.is_list_like(header) and len(header) == 1: + header = header[0] + + # forward fill and pull out names for MultiIndex column + header_names = None if header is not None: - data[header] = _trim_excel_header(data[header]) + if com.is_list_like(header): + header_names = [] + for row in header: + if com.is_integer(skiprows): + row += skiprows + data[row] = _fill_mi_header(data[row]) + header_name, data[row] = _pop_header_name(data[row], index_col) + header_names.append(header_name) + else: + data[header] = _trim_excel_header(data[header]) + + if com.is_list_like(index_col): + # forward fill values for MultiIndex index + if not com.is_list_like(header): + offset = 1 + header + else: + offset = 1 + max(header) + + for col in index_col: + last = data[offset][col] + for row in range(offset + 1, len(data)): + if data[row][col] == '' or data[row][col] is None: + data[row][col] = last + else: + last = data[row][col] + + if com.is_list_like(header) and len(header) > 1: + has_index_names = True parser = TextParser(data, header=header, index_col=index_col, has_index_names=has_index_names, @@ -433,6 +417,7 @@ def _parse_cell(cell_contents,cell_typ): **kwds) output[asheetname] = parser.read() + output[asheetname].columns = output[asheetname].columns.set_names(header_names) if ret_dict: return output @@ -463,6 +448,29 @@ def _trim_excel_header(row): row = row[1:] return row +def 
_fill_mi_header(row): + # forward fill blanks entries + # from headers if parsing as MultiIndex + last = row[0] + for i in range(1, len(row)): + if row[i] == '' or row[i] is None: + row[i] = last + else: + last = row[i] + return row + +# fill blank if index_col not None +def _pop_header_name(row, index_col): + """ (header, new_data) for header rows in MultiIndex parsing""" + none_fill = lambda x: None if x == '' else x + + if index_col is None: + # no index col specified, trim data for inference path + return none_fill(row[0]), row[1:] + else: + # pop out header name and fill w/ blank + i = index_col if not com.is_list_like(index_col) else max(index_col) + return none_fill(row[i]), row[:i] + [''] + row[i+1:] def _conv_value(val): # Convert numpy types to Python types for the Excel writers. diff --git a/pandas/io/tests/data/test_index_name_pre17.xls b/pandas/io/tests/data/test_index_name_pre17.xls new file mode 100644 index 0000000000000..2ab13105e7925 Binary files /dev/null and b/pandas/io/tests/data/test_index_name_pre17.xls differ diff --git a/pandas/io/tests/data/test_index_name_pre17.xlsm b/pandas/io/tests/data/test_index_name_pre17.xlsm new file mode 100644 index 0000000000000..33c0d7949531c Binary files /dev/null and b/pandas/io/tests/data/test_index_name_pre17.xlsm differ diff --git a/pandas/io/tests/data/test_index_name_pre17.xlsx b/pandas/io/tests/data/test_index_name_pre17.xlsx new file mode 100644 index 0000000000000..ce66c40cda141 Binary files /dev/null and b/pandas/io/tests/data/test_index_name_pre17.xlsx differ diff --git a/pandas/io/tests/data/testmultiindex.xls b/pandas/io/tests/data/testmultiindex.xls new file mode 100644 index 0000000000000..3664c5c8dedcc Binary files /dev/null and b/pandas/io/tests/data/testmultiindex.xls differ diff --git a/pandas/io/tests/data/testmultiindex.xlsm b/pandas/io/tests/data/testmultiindex.xlsm new file mode 100644 index 0000000000000..8f359782b57bb Binary files /dev/null and 
b/pandas/io/tests/data/testmultiindex.xlsm differ diff --git a/pandas/io/tests/data/testmultiindex.xlsx b/pandas/io/tests/data/testmultiindex.xlsx new file mode 100644 index 0000000000000..a70110caf1ec7 Binary files /dev/null and b/pandas/io/tests/data/testmultiindex.xlsx differ diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 073fc55357df7..0aee2af6ad166 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -14,6 +14,7 @@ import numpy as np from numpy.testing.decorators import slow +import pandas as pd from pandas import DataFrame, Index, MultiIndex from pandas.io.parsers import read_csv from pandas.io.excel import ( @@ -21,7 +22,7 @@ _Openpyxl2Writer, register_writer, _XlsxWriter ) from pandas.io.common import URLError -from pandas.util.testing import ensure_clean +from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf from pandas.core.config import set_option, get_option import pandas.util.testing as tm @@ -415,11 +416,8 @@ def test_read_xlrd_Book(self): @tm.network def test_read_from_http_url(self): - # TODO: remove this when merging into master - url = ('https://raw.github.com/davidovitch/pandas/master/' + url = ('https://raw.github.com/pydata/pandas/master/' 'pandas/io/tests/data/test1' + self.ext) -# url = ('https://raw.github.com/pydata/pandas/master/' -# 'pandas/io/tests/data/test' + self.ext) url_table = read_excel(url) local_table = self.get_exceldf('test1') tm.assert_frame_equal(url_table, local_table) @@ -518,6 +516,132 @@ def test_reader_seconds(self): actual = self.get_exceldf('times_1904', 'Sheet1') tm.assert_frame_equal(actual, expected) + def test_read_excel_multiindex(self): + #GH 4679 + mi = MultiIndex.from_product([['foo','bar'],['a','b']]) + mi_file = os.path.join(self.dirpath, 'testmultiindex' + self.ext) + + expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], + [2, 3.5, pd.Timestamp('2015-01-02'), False], + [3, 4.5, pd.Timestamp('2015-01-03'), 
False], + [4, 5.5, pd.Timestamp('2015-01-04'), True]], + columns = mi) + + actual = read_excel(mi_file, 'mi_column', header=[0,1]) + tm.assert_frame_equal(actual, expected) + actual = read_excel(mi_file, 'mi_column', header=[0,1], index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.columns = ['a', 'b', 'c', 'd'] + expected.index = mi + actual = read_excel(mi_file, 'mi_index', index_col=[0,1]) + tm.assert_frame_equal(actual, expected, check_names=False) + + expected.columns = mi + actual = read_excel(mi_file, 'both', index_col=[0,1], header=[0,1]) + tm.assert_frame_equal(actual, expected, check_names=False) + + expected.index = mi.set_names(['ilvl1', 'ilvl2']) + expected.columns = ['a', 'b', 'c', 'd'] + actual = read_excel(mi_file, 'mi_index_name', index_col=[0,1]) + tm.assert_frame_equal(actual, expected) + + expected.index = list(range(4)) + expected.columns = mi.set_names(['c1', 'c2']) + actual = read_excel(mi_file, 'mi_column_name', header=[0,1], index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi.set_names(['ilvl1', 'ilvl2']) + actual = read_excel(mi_file, 'both_name', index_col=[0,1], header=[0,1]) + tm.assert_frame_equal(actual, expected) + + actual = read_excel(mi_file, 'both_name', index_col=[0,1], header=[0,1]) + tm.assert_frame_equal(actual, expected) + + actual = read_excel(mi_file, 'both_name_skiprows', index_col=[0,1], + header=[0,1], skiprows=2) + tm.assert_frame_equal(actual, expected) + + + def test_excel_multindex_roundtrip(self): + #GH 4679 + _skip_if_no_xlsxwriter() + with ensure_clean('.xlsx') as pth: + for c_idx_names in [True, False]: + for r_idx_names in [True, False]: + for c_idx_levels in [1, 3]: + for r_idx_levels in [1, 3]: + # column index name can't be serialized unless MultiIndex + if (c_idx_levels == 1 and c_idx_names): + continue + + # empty name case current read in as unamed levels, not Nones + check_names = True + if not r_idx_names and r_idx_levels > 1: + check_names = False + + df = 
mkdf(5, 5, c_idx_names, + r_idx_names, c_idx_levels, + r_idx_levels) + df.to_excel(pth) + act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels))) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[0, :] = np.nan + df.to_excel(pth) + act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels))) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[-1, :] = np.nan + df.to_excel(pth) + act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels))) + tm.assert_frame_equal(df, act, check_names=check_names) + + def test_excel_oldindex_format(self): + #GH 4679 + data = np.array([['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'], + ['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'], + ['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'], + ['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'], + ['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']]) + columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4'] + mi = MultiIndex(levels=[['R_l0_g0', 'R_l0_g1', 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'], + ['R_l1_g0', 'R_l1_g1', 'R_l1_g2', 'R_l1_g3', 'R_l1_g4']], + labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], + names=['R0', 'R1']) + si = Index(['R_l0_g0', 'R_l0_g1', 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'], name='R0') + + in_file = os.path.join(self.dirpath, 'test_index_name_pre17' + self.ext) + + expected = pd.DataFrame(data, index=si, columns=columns) + with tm.assert_produces_warning(FutureWarning): + actual = pd.read_excel(in_file, 'single_names', has_index_names=True) + tm.assert_frame_equal(actual, expected) + + expected.index.name = None + actual = pd.read_excel(in_file, 'single_no_names') + tm.assert_frame_equal(actual, expected) + with tm.assert_produces_warning(FutureWarning): + actual = pd.read_excel(in_file, 'single_no_names', has_index_names=False) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + with tm.assert_produces_warning(FutureWarning): + actual = 
pd.read_excel(in_file, 'multi_names', has_index_names=True) + tm.assert_frame_equal(actual, expected) + + expected.index.names = [None, None] + actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0,1]) + tm.assert_frame_equal(actual, expected, check_names=False) + with tm.assert_produces_warning(FutureWarning): + actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0,1], + has_index_names=False) + tm.assert_frame_equal(actual, expected, check_names=False) + + class XlsReaderTests(XlrdTests, tm.TestCase): ext = '.xls' @@ -537,6 +661,8 @@ class XlsmReaderTests(XlrdTests, tm.TestCase): check_skip = staticmethod(_skip_if_no_xlrd) + + class ExcelWriterBase(SharedItems): # Base class for test cases to run with different Excel writers. # To add a writer test, define the following: @@ -781,7 +907,6 @@ def test_roundtrip_indexlabels(self): reader = ExcelFile(path) recons = reader.parse('test1', index_col=0, - has_index_names=self.merge_cells ).astype(np.int64) frame.index.names = ['test'] self.assertEqual(frame.index.names, recons.index.names) @@ -794,7 +919,6 @@ def test_roundtrip_indexlabels(self): reader = ExcelFile(path) recons = reader.parse('test1', index_col=0, - has_index_names=self.merge_cells ).astype(np.int64) frame.index.names = ['test'] self.assertEqual(frame.index.names, recons.index.names) @@ -807,7 +931,6 @@ def test_roundtrip_indexlabels(self): reader = ExcelFile(path) recons = reader.parse('test1', index_col=0, - has_index_names=self.merge_cells ).astype(np.int64) frame.index.names = ['test'] tm.assert_frame_equal(frame, recons.astype(bool)) @@ -837,8 +960,7 @@ def test_excel_roundtrip_indexname(self): xf = ExcelFile(path) result = xf.parse(xf.sheet_names[0], - index_col=0, - has_index_names=self.merge_cells) + index_col=0) tm.assert_frame_equal(result, df) self.assertEqual(result.index.name, 'foo') @@ -925,8 +1047,7 @@ def test_to_excel_multiindex(self): frame.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) 
df = reader.parse('test1', index_col=[0, 1], - parse_dates=False, - has_index_names=self.merge_cells) + parse_dates=False) tm.assert_frame_equal(frame, df) self.assertEqual(frame.index.names, df.index.names) @@ -943,8 +1064,7 @@ def test_to_excel_multiindex_dates(self): tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = reader.parse('test1', - index_col=[0, 1], - has_index_names=self.merge_cells) + index_col=[0, 1]) tm.assert_frame_equal(tsframe, recons) self.assertEqual(recons.index.names, ('time', 'foo')) @@ -1475,15 +1595,14 @@ def test_excel_raise_error_on_multiindex_columns_and_no_index(self): with ensure_clean(self.ext) as path: df.to_excel(path, index=False) - def test_excel_warns_verbosely_on_multiindex_columns_and_index_true(self): + def test_excel_multiindex_columns_and_index_true(self): _skip_if_no_xlwt() cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) - df = DataFrame(np.random.randn(10, 3), columns=cols) - with tm.assert_produces_warning(UserWarning): - with ensure_clean(self.ext) as path: - df.to_excel(path, index=True) + df = pd.DataFrame(np.random.randn(10, 3), columns=cols) + with ensure_clean(self.ext) as path: + df.to_excel(path, index=True) def test_excel_multiindex_index(self): _skip_if_no_xlwt()
closes #4679 xref #10564 Output of `to_excel` should now be fully round-trippable with `read_excel` with the right combination of `index_col` and `header`. To make the semantics match `read_csv`, an index column name (`has_index_names=True`) is always assumed if something is passed to `index_col` - this should be non-breaking; if there are no names, it will be just filled to `None` as before. ``` In [7]: df = pd.DataFrame([[1,2,3,4], [5,6,7,8]], ...: columns = pd.MultiIndex.from_product([['foo','bar'],['a','b']], ...: names = ['col1', 'col2']), ...: index = pd.MultiIndex.from_product([['j'], ['l', 'k']], ...: names = ['i1', 'i2'])) In [8]: df Out[8]: col1 foo bar col2 a b a b i1 i2 j l 1 2 3 4 k 5 6 7 8 In [9]: df.to_excel('test.xlsx') In [10]: df = pd.read_excel('test.xlsx', header=[0,1], index_col=[0,1]) In [11]: df Out[11]: col1 foo bar col2 a b a b i1 i2 j l 1 2 3 4 k 5 6 7 8 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10967
2015-09-02T00:25:28Z
2015-09-09T12:06:09Z
2015-09-09T12:06:09Z
2016-09-15T13:39:47Z
DOC: Add note regarding 0-len string to default NA values in IO docs
diff --git a/doc/source/io.rst b/doc/source/io.rst index 70e7154493ccf..ded314229225c 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -731,7 +731,9 @@ the corresponding equivalent values will also imply a missing value (in this cas To completely override the default values that are recognized as missing, specify ``keep_default_na=False``. The default ``NaN`` recognized values are ``['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A','N/A', 'NA', -'#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan']``. +'#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan']``. Although a 0-length string +``''`` is not included in the default ``NaN`` values list, it is still treated +as a missing value. .. code-block:: python
Fixes https://github.com/pydata/pandas/issues/10700
https://api.github.com/repos/pandas-dev/pandas/pulls/10965
2015-09-01T20:49:25Z
2015-09-02T08:52:40Z
2015-09-02T08:52:40Z
2015-09-02T08:52:40Z
TST: refactored Excel reader tests
diff --git a/pandas/io/tests/data/blank.xlsm b/pandas/io/tests/data/blank.xlsm new file mode 100755 index 0000000000000..c3c2074276d12 Binary files /dev/null and b/pandas/io/tests/data/blank.xlsm differ diff --git a/pandas/io/tests/data/blank_with_header.xlsm b/pandas/io/tests/data/blank_with_header.xlsm new file mode 100755 index 0000000000000..5b2ed0055b243 Binary files /dev/null and b/pandas/io/tests/data/blank_with_header.xlsm differ diff --git a/pandas/io/tests/data/test.xls b/pandas/io/tests/data/test1.xls similarity index 100% rename from pandas/io/tests/data/test.xls rename to pandas/io/tests/data/test1.xls diff --git a/pandas/io/tests/data/test.xlsm b/pandas/io/tests/data/test1.xlsm similarity index 100% rename from pandas/io/tests/data/test.xlsm rename to pandas/io/tests/data/test1.xlsm diff --git a/pandas/io/tests/data/test.xlsx b/pandas/io/tests/data/test1.xlsx similarity index 100% rename from pandas/io/tests/data/test.xlsx rename to pandas/io/tests/data/test1.xlsx diff --git a/pandas/io/tests/data/test2.xlsm b/pandas/io/tests/data/test2.xlsm new file mode 100644 index 0000000000000..31cfba7ede082 Binary files /dev/null and b/pandas/io/tests/data/test2.xlsm differ diff --git a/pandas/io/tests/data/test2.xlsx b/pandas/io/tests/data/test2.xlsx index 441db5e55e666..94dd951e0bb84 100644 Binary files a/pandas/io/tests/data/test2.xlsx and b/pandas/io/tests/data/test2.xlsx differ diff --git a/pandas/io/tests/data/test3.xlsm b/pandas/io/tests/data/test3.xlsm new file mode 100644 index 0000000000000..54b7ef456a9ea Binary files /dev/null and b/pandas/io/tests/data/test3.xlsm differ diff --git a/pandas/io/tests/data/test3.xlsx b/pandas/io/tests/data/test3.xlsx new file mode 100644 index 0000000000000..c16755c25fabd Binary files /dev/null and b/pandas/io/tests/data/test3.xlsx differ diff --git a/pandas/io/tests/data/test4.xls b/pandas/io/tests/data/test4.xls new file mode 100644 index 0000000000000..0e6f4331e2547 Binary files /dev/null and 
b/pandas/io/tests/data/test4.xls differ diff --git a/pandas/io/tests/data/test4.xlsm b/pandas/io/tests/data/test4.xlsm new file mode 100644 index 0000000000000..52328c7b28be9 Binary files /dev/null and b/pandas/io/tests/data/test4.xlsm differ diff --git a/pandas/io/tests/data/test4.xlsx b/pandas/io/tests/data/test4.xlsx new file mode 100644 index 0000000000000..441db5e55e666 Binary files /dev/null and b/pandas/io/tests/data/test4.xlsx differ diff --git a/pandas/io/tests/data/test_converters.xlsm b/pandas/io/tests/data/test_converters.xlsm new file mode 100644 index 0000000000000..eaf0b1d0219c5 Binary files /dev/null and b/pandas/io/tests/data/test_converters.xlsm differ diff --git a/pandas/io/tests/data/test_multisheet.xls b/pandas/io/tests/data/test_multisheet.xls new file mode 100644 index 0000000000000..fa37723fcdefb Binary files /dev/null and b/pandas/io/tests/data/test_multisheet.xls differ diff --git a/pandas/io/tests/data/test_multisheet.xlsm b/pandas/io/tests/data/test_multisheet.xlsm new file mode 100644 index 0000000000000..694f8e07d5e29 Binary files /dev/null and b/pandas/io/tests/data/test_multisheet.xlsm differ diff --git a/pandas/io/tests/data/test_types.xlsm b/pandas/io/tests/data/test_types.xlsm new file mode 100644 index 0000000000000..c66fdc82dfb67 Binary files /dev/null and b/pandas/io/tests/data/test_types.xlsm differ diff --git a/pandas/io/tests/data/times_1900.xlsm b/pandas/io/tests/data/times_1900.xlsm new file mode 100644 index 0000000000000..1ffdbe223453b Binary files /dev/null and b/pandas/io/tests/data/times_1900.xlsm differ diff --git a/pandas/io/tests/data/times_1900.xlsx b/pandas/io/tests/data/times_1900.xlsx new file mode 100644 index 0000000000000..3702289b256fd Binary files /dev/null and b/pandas/io/tests/data/times_1900.xlsx differ diff --git a/pandas/io/tests/data/times_1904.xlsm b/pandas/io/tests/data/times_1904.xlsm new file mode 100644 index 0000000000000..e884eca1e7c74 Binary files /dev/null and 
b/pandas/io/tests/data/times_1904.xlsm differ diff --git a/pandas/io/tests/data/times_1904.xlsx b/pandas/io/tests/data/times_1904.xlsx new file mode 100644 index 0000000000000..1a13468e59d1c Binary files /dev/null and b/pandas/io/tests/data/times_1904.xlsx differ diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 83db59f9d9029..073fc55357df7 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -24,7 +24,6 @@ from pandas.util.testing import ensure_clean from pandas.core.config import set_option, get_option import pandas.util.testing as tm -import pandas as pd def _skip_if_no_xlrd(): @@ -76,281 +75,215 @@ def _skip_if_no_excelsuite(): class SharedItems(object): def setUp(self): self.dirpath = tm.get_data_path() - self.csv1 = os.path.join(self.dirpath, 'test1.csv') - self.csv2 = os.path.join(self.dirpath, 'test2.csv') - self.xls1 = os.path.join(self.dirpath, 'test.xls') - self.xlsx1 = os.path.join(self.dirpath, 'test.xlsx') - self.multisheet = os.path.join(self.dirpath, 'test_multisheet.xlsx') self.frame = _frame.copy() self.frame2 = _frame2.copy() self.tsframe = _tsframe.copy() self.mixed_frame = _mixed_frame.copy() - def read_csv(self, *args, **kwds): - kwds = kwds.copy() - kwds['engine'] = 'python' - return read_csv(*args, **kwds) + def get_csv_refdf(self, basename): + """ + Obtain the reference data from read_csv with the Python engine. + Test data path is defined by pandas.util.testing.get_data_path() + Parameters + ---------- -class ExcelReaderTests(SharedItems, tm.TestCase): - def test_parse_cols_int(self): - _skip_if_no_openpyxl() - _skip_if_no_xlrd() + basename : str + File base name, excluding file extension. 
- suffix = ['xls', 'xlsx', 'xlsm'] - - for s in suffix: - pth = os.path.join(self.dirpath, 'test.%s' % s) - xls = ExcelFile(pth) - df = xls.parse('Sheet1', index_col=0, parse_dates=True, - parse_cols=3) - df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) - df2 = df2.reindex(columns=['A', 'B', 'C']) - df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, - parse_dates=True, parse_cols=3) - # TODO add index to xls file) - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, check_names=False) + Returns + ------- - def test_parse_cols_list(self): - _skip_if_no_openpyxl() - _skip_if_no_xlrd() + dfref : DataFrame + """ + pref = os.path.join(self.dirpath, basename + '.csv') + dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python') + return dfref - suffix = ['xls', 'xlsx', 'xlsm'] - - for s in suffix: - pth = os.path.join(self.dirpath, 'test.%s' % s) - xls = ExcelFile(pth) - df = xls.parse('Sheet1', index_col=0, parse_dates=True, - parse_cols=[0, 2, 3]) - df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) - df2 = df2.reindex(columns=['B', 'C']) - df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, - parse_dates=True, - parse_cols=[0, 2, 3]) - # TODO add index to xls file) - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, check_names=False) + def get_excelfile(self, basename): + """ + Return test data ExcelFile instance. 
Test data path is defined by + pandas.util.testing.get_data_path() - def test_parse_cols_str(self): - _skip_if_no_openpyxl() - _skip_if_no_xlrd() + Parameters + ---------- - suffix = ['xls', 'xlsx', 'xlsm'] - - for s in suffix: - - pth = os.path.join(self.dirpath, 'test.%s' % s) - xls = ExcelFile(pth) - - df = xls.parse('Sheet1', index_col=0, parse_dates=True, - parse_cols='A:D') - df2 = read_csv(self.csv1, index_col=0, parse_dates=True) - df2 = df2.reindex(columns=['A', 'B', 'C']) - df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, - parse_dates=True, parse_cols='A:D') - # TODO add index to xls, read xls ignores index name ? - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, check_names=False) - del df, df2, df3 - - df = xls.parse('Sheet1', index_col=0, parse_dates=True, - parse_cols='A,C,D') - df2 = read_csv(self.csv1, index_col=0, parse_dates=True) - df2 = df2.reindex(columns=['B', 'C']) - df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, - parse_dates=True, - parse_cols='A,C,D') - # TODO add index to xls file - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, check_names=False) - del df, df2, df3 - - df = xls.parse('Sheet1', index_col=0, parse_dates=True, - parse_cols='A,C:D') - df2 = read_csv(self.csv1, index_col=0, parse_dates=True) - df2 = df2.reindex(columns=['B', 'C']) - df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, - parse_dates=True, - parse_cols='A,C:D') - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, check_names=False) + basename : str + File base name, excluding file extension. 
- def test_excel_stop_iterator(self): - _skip_if_no_xlrd() + Returns + ------- - excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls')) - parsed = excel_data.parse('Sheet1') - expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) - tm.assert_frame_equal(parsed, expected) + excel : io.excel.ExcelFile + """ + return ExcelFile(os.path.join(self.dirpath, basename + self.ext)) - def test_excel_cell_error_na(self): - _skip_if_no_xlrd() + def get_exceldf(self, basename, *args, **kwds): + """ + Return test data DataFrame. Test data path is defined by + pandas.util.testing.get_data_path() - excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls')) - parsed = excel_data.parse('Sheet1') - expected = DataFrame([[np.nan]], columns=['Test']) - tm.assert_frame_equal(parsed, expected) + Parameters + ---------- - def test_excel_passes_na(self): - _skip_if_no_xlrd() + basename : str + File base name, excluding file extension. - excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xlsx')) - parsed = excel_data.parse('Sheet1', keep_default_na=False, - na_values=['apple']) - expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], - columns=['Test']) - tm.assert_frame_equal(parsed, expected) + Returns + ------- - parsed = excel_data.parse('Sheet1', keep_default_na=True, - na_values=['apple']) - expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], - columns=['Test']) - tm.assert_frame_equal(parsed, expected) + df : DataFrame + """ + pth = os.path.join(self.dirpath, basename + self.ext) + return read_excel(pth, *args, **kwds) - def check_excel_table_sheet_by_index(self, filename, csvfile): - import xlrd - pth = os.path.join(self.dirpath, filename) - xls = ExcelFile(pth) - df = xls.parse(0, index_col=0, parse_dates=True) - df2 = self.read_csv(csvfile, index_col=0, parse_dates=True) - df3 = xls.parse(1, skiprows=[1], index_col=0, parse_dates=True) - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, 
df2, check_names=False) +class ReadingTestsBase(SharedItems): + # This is based on ExcelWriterBase + # + # Base class for test cases to run with different Excel readers. + # To add a reader test, define the following: + # 1. A check_skip function that skips your tests if your reader isn't + # installed. + # 2. Add a property ext, which is the file extension that your reader + # reades from. (needs to start with '.' so it's a valid path) + # 3. Add a property engine_name, which is the name of the reader class. + # For the reader this is not used for anything at the moment. - df4 = xls.parse(0, index_col=0, parse_dates=True, skipfooter=1) - df5 = xls.parse(0, index_col=0, parse_dates=True, skip_footer=1) - tm.assert_frame_equal(df4, df.ix[:-1]) - tm.assert_frame_equal(df4, df5) + def setUp(self): + self.check_skip() + super(ReadingTestsBase, self).setUp() - self.assertRaises(xlrd.XLRDError, xls.parse, 'asdf') + def test_parse_cols_int(self): - def test_excel_table_sheet_by_index(self): - _skip_if_no_xlrd() - for filename, csvfile in [(self.xls1, self.csv1), - (self.xlsx1, self.csv1)]: - self.check_excel_table_sheet_by_index(filename, csvfile) + dfref = self.get_csv_refdf('test1') + excel = self.get_excelfile('test1') + dfref = dfref.reindex(columns=['A', 'B', 'C']) + df1 = excel.parse('Sheet1', index_col=0, parse_dates=True, + parse_cols=3) + df2 = excel.parse('Sheet2', skiprows=[1], index_col=0, + parse_dates=True, parse_cols=3) + # TODO add index to xls file) + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) - def test_excel_table(self): - _skip_if_no_xlrd() - - pth = os.path.join(self.dirpath, 'test.xls') - xls = ExcelFile(pth) - df = xls.parse('Sheet1', index_col=0, parse_dates=True) - df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) - df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True) - tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, 
check_names=False) - - df4 = xls.parse('Sheet1', index_col=0, parse_dates=True, - skipfooter=1) - df5 = xls.parse('Sheet1', index_col=0, parse_dates=True, - skip_footer=1) - tm.assert_frame_equal(df4, df.ix[:-1]) - tm.assert_frame_equal(df4, df5) + def test_parse_cols_list(self): - def test_excel_read_buffer(self): - _skip_if_no_xlrd() - _skip_if_no_openpyxl() + excel = self.get_excelfile('test1') + dfref = self.get_csv_refdf('test1') + dfref = dfref.reindex(columns=['B', 'C']) + df1 = excel.parse('Sheet1', index_col=0, parse_dates=True, + parse_cols=[0, 2, 3]) + df2 = excel.parse('Sheet2', skiprows=[1], index_col=0, + parse_dates=True, + parse_cols=[0, 2, 3]) + # TODO add index to xls file) + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) - pth = os.path.join(self.dirpath, 'test.xls') - f = open(pth, 'rb') - xls = ExcelFile(f) - # it works - xls.parse('Sheet1', index_col=0, parse_dates=True) + def test_parse_cols_str(self): - pth = os.path.join(self.dirpath, 'test.xlsx') - f = open(pth, 'rb') - xl = ExcelFile(f) - xl.parse('Sheet1', index_col=0, parse_dates=True) + excel = self.get_excelfile('test1') + dfref = self.get_csv_refdf('test1') + + df1 = dfref.reindex(columns=['A', 'B', 'C']) + df2 = excel.parse('Sheet1', index_col=0, parse_dates=True, + parse_cols='A:D') + df3 = excel.parse('Sheet2', skiprows=[1], index_col=0, + parse_dates=True, parse_cols='A:D') + # TODO add index to xls, read xls ignores index name ? 
+ tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + df1 = dfref.reindex(columns=['B', 'C']) + df2 = excel.parse('Sheet1', index_col=0, parse_dates=True, + parse_cols='A,C,D') + df3 = excel.parse('Sheet2', skiprows=[1], index_col=0, + parse_dates=True, + parse_cols='A,C,D') + # TODO add index to xls file + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + df1 = dfref.reindex(columns=['B', 'C']) + df2 = excel.parse('Sheet1', index_col=0, parse_dates=True, + parse_cols='A,C:D') + df3 = excel.parse('Sheet2', skiprows=[1], index_col=0, + parse_dates=True, + parse_cols='A,C:D') + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) - def test_read_xlrd_Book(self): - _skip_if_no_xlrd() - _skip_if_no_xlwt() + def test_excel_stop_iterator(self): - import xlrd + excel = self.get_excelfile('test2') - df = self.frame + parsed = excel.parse('Sheet1') + expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) + tm.assert_frame_equal(parsed, expected) - with ensure_clean('.xls') as pth: - df.to_excel(pth, "SheetA") - book = xlrd.open_workbook(pth) + def test_excel_cell_error_na(self): - with ExcelFile(book, engine="xlrd") as xl: - result = xl.parse("SheetA") - tm.assert_frame_equal(df, result) + excel = self.get_excelfile('test3') - result = read_excel(book, sheetname="SheetA", engine="xlrd") - tm.assert_frame_equal(df, result) + parsed = excel.parse('Sheet1') + expected = DataFrame([[np.nan]], columns=['Test']) + tm.assert_frame_equal(parsed, expected) - @tm.network - def test_read_from_http_url(self): - _skip_if_no_xlrd() + def test_excel_passes_na(self): - url = ('https://raw.github.com/pydata/pandas/master/' - 'pandas/io/tests/data/test.xlsx') - url_table = read_excel(url) - dirpath = tm.get_data_path() - localtable = os.path.join(dirpath, 'test.xlsx') - local_table = read_excel(localtable) - 
tm.assert_frame_equal(url_table, local_table) + excel = self.get_excelfile('test4') - @slow - def test_read_from_file_url(self): - _skip_if_no_xlrd() + parsed = excel.parse('Sheet1', keep_default_na=False, + na_values=['apple']) + expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) - # FILE - if sys.version_info[:2] < (2, 6): - raise nose.SkipTest("file:// not supported with Python < 2.6") - dirpath = tm.get_data_path() - localtable = os.path.join(dirpath, 'test.xlsx') - local_table = read_excel(localtable) + parsed = excel.parse('Sheet1', keep_default_na=True, + na_values=['apple']) + expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) - try: - url_table = read_excel('file://localhost/' + localtable) - except URLError: - # fails on some systems - raise nose.SkipTest("failing on %s" % - ' '.join(platform.uname()).strip()) + def test_excel_table_sheet_by_index(self): - tm.assert_frame_equal(url_table, local_table) + excel = self.get_excelfile('test1') + dfref = self.get_csv_refdf('test1') - def test_xlsx_table(self): - _skip_if_no_xlrd() - _skip_if_no_openpyxl() + df1 = excel.parse(0, index_col=0, parse_dates=True) + df2 = excel.parse(1, skiprows=[1], index_col=0, parse_dates=True) + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) - pth = os.path.join(self.dirpath, 'test.xlsx') - xlsx = ExcelFile(pth) - df = xlsx.parse('Sheet1', index_col=0, parse_dates=True) - df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) - df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True) + df3 = excel.parse(0, index_col=0, parse_dates=True, skipfooter=1) + df4 = excel.parse(0, index_col=0, parse_dates=True, skip_footer=1) + tm.assert_frame_equal(df3, df1.ix[:-1]) + tm.assert_frame_equal(df3, df4) - # TODO add index to xlsx file - 
tm.assert_frame_equal(df, df2, check_names=False) - tm.assert_frame_equal(df3, df2, check_names=False) + import xlrd + self.assertRaises(xlrd.XLRDError, excel.parse, 'asdf') - df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True, - skipfooter=1) - df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True, - skip_footer=1) - tm.assert_frame_equal(df4, df.ix[:-1]) - tm.assert_frame_equal(df4, df5) + def test_excel_table(self): - def test_reader_closes_file(self): - _skip_if_no_xlrd() - _skip_if_no_openpyxl() + excel = self.get_excelfile('test1') + dfref = self.get_csv_refdf('test1') - pth = os.path.join(self.dirpath, 'test.xlsx') - f = open(pth, 'rb') - with ExcelFile(f) as xlsx: - # parses okay - xlsx.parse('Sheet1', index_col=0) + df1 = excel.parse('Sheet1', index_col=0, parse_dates=True) + df2 = excel.parse('Sheet2', skiprows=[1], index_col=0, + parse_dates=True) + # TODO add index to file + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) - self.assertTrue(f.closed) + df3 = excel.parse('Sheet1', index_col=0, parse_dates=True, + skipfooter=1) + df4 = excel.parse('Sheet1', index_col=0, parse_dates=True, + skip_footer=1) + tm.assert_frame_equal(df3, df1.ix[:-1]) + tm.assert_frame_equal(df3, df4) def test_reader_special_dtypes(self): - _skip_if_no_xlrd() expected = DataFrame.from_items([ ("IntCol", [1, 2, -3, 4, 0]), @@ -364,44 +297,40 @@ def test_reader_special_dtypes(self): datetime(2015, 3, 14)]) ]) - xlsx_path = os.path.join(self.dirpath, 'test_types.xlsx') - xls_path = os.path.join(self.dirpath, 'test_types.xls') + basename = 'test_types' # should read in correctly and infer types - for path in (xls_path, xlsx_path): - actual = read_excel(path, 'Sheet1') - tm.assert_frame_equal(actual, expected) + actual = self.get_exceldf(basename, 'Sheet1') + tm.assert_frame_equal(actual, expected) # if not coercing number, then int comes in as float float_expected = expected.copy() float_expected["IntCol"] = 
float_expected["IntCol"].astype(float) float_expected.loc[1, "Str2Col"] = 3.0 - for path in (xls_path, xlsx_path): - actual = read_excel(path, 'Sheet1', convert_float=False) - tm.assert_frame_equal(actual, float_expected) + actual = self.get_exceldf(basename, 'Sheet1', convert_float=False) + tm.assert_frame_equal(actual, float_expected) # check setting Index (assuming xls and xlsx are the same here) for icol, name in enumerate(expected.columns): - actual = read_excel(xlsx_path, 'Sheet1', index_col=icol) - actual2 = read_excel(xlsx_path, 'Sheet1', index_col=name) + actual = self.get_exceldf(basename, 'Sheet1', index_col=icol) exp = expected.set_index(name) tm.assert_frame_equal(actual, exp) - tm.assert_frame_equal(actual2, exp) # convert_float and converters should be different but both accepted expected["StrCol"] = expected["StrCol"].apply(str) - actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str}) + actual = self.get_exceldf(basename, 'Sheet1', converters={"StrCol": str}) tm.assert_frame_equal(actual, expected) no_convert_float = float_expected.copy() no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) - actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str}, - convert_float=False) + actual = self.get_exceldf(basename, 'Sheet1', convert_float=False, + converters={"StrCol": str}) tm.assert_frame_equal(actual, no_convert_float) # GH8212 - support for converters and missing values def test_reader_converters(self): - _skip_if_no_xlrd() + + basename = 'test_converters' expected = DataFrame.from_items([ ("IntCol", [1, 2, -3, -1000, 0]), @@ -416,48 +345,122 @@ def test_reader_converters(self): 3: lambda x: str(x) if x else '', } - xlsx_path = os.path.join(self.dirpath, 'test_converters.xlsx') - xls_path = os.path.join(self.dirpath, 'test_converters.xls') - # should read in correctly and set types of single cells (not array dtypes) - for path in (xls_path, xlsx_path): - actual = read_excel(path, 'Sheet1', converters=converters) 
- tm.assert_frame_equal(actual, expected) + actual = self.get_exceldf(basename, 'Sheet1', converters=converters) + tm.assert_frame_equal(actual, expected) def test_reading_all_sheets(self): # Test reading all sheetnames by setting sheetname to None, # Ensure a dict is returned. # See PR #9450 - - _skip_if_no_xlrd() - - dfs = read_excel(self.multisheet,sheetname=None) - expected_keys = ['Alpha','Beta','Charlie'] - tm.assert_contains_all(expected_keys,dfs.keys()) + basename = 'test_multisheet' + dfs = self.get_exceldf(basename, sheetname=None) + expected_keys = ['Alpha', 'Beta', 'Charlie'] + tm.assert_contains_all(expected_keys, dfs.keys()) def test_reading_multiple_specific_sheets(self): # Test reading specific sheetnames by specifying a mixed list # of integers and strings, and confirm that duplicated sheet # references (positions/names) are removed properly. - # Ensure a dict is returned # See PR #9450 - _skip_if_no_xlrd() - - #Explicitly request duplicates. Only the set should be returned. - expected_keys = [2,'Charlie','Charlie'] - dfs = read_excel(self.multisheet,sheetname=expected_keys) + basename = 'test_multisheet' + # Explicitly request duplicates. Only the set should be returned. 
+ expected_keys = [2, 'Charlie', 'Charlie'] + dfs = self.get_exceldf(basename, sheetname=expected_keys) expected_keys = list(set(expected_keys)) - tm.assert_contains_all(expected_keys,dfs.keys()) + tm.assert_contains_all(expected_keys, dfs.keys()) assert len(expected_keys) == len(dfs.keys()) + # GH6403 + def test_read_excel_blank(self): + actual = self.get_exceldf('blank', 'Sheet1') + tm.assert_frame_equal(actual, DataFrame()) + + def test_read_excel_blank_with_header(self): + expected = DataFrame(columns=['col_1', 'col_2']) + actual = self.get_exceldf('blank_with_header', 'Sheet1') + tm.assert_frame_equal(actual, expected) + + +class XlrdTests(ReadingTestsBase): + """ + This is the base class for the xlrd tests, and 3 different file formats + are supported: xls, xlsx, xlsm + """ + + def test_excel_read_buffer(self): + + pth = os.path.join(self.dirpath, 'test1' + self.ext) + f = open(pth, 'rb') + xls = ExcelFile(f) + # it works + xls.parse('Sheet1', index_col=0, parse_dates=True) + + def test_read_xlrd_Book(self): + _skip_if_no_xlwt() + + import xlrd + df = self.frame + with ensure_clean('.xls') as pth: + df.to_excel(pth, "SheetA") + book = xlrd.open_workbook(pth) + + with ExcelFile(book, engine="xlrd") as xl: + result = xl.parse("SheetA") + tm.assert_frame_equal(df, result) + + result = read_excel(book, sheetname="SheetA", engine="xlrd") + tm.assert_frame_equal(df, result) + + @tm.network + def test_read_from_http_url(self): + # TODO: remove this when merging into master + url = ('https://raw.github.com/davidovitch/pandas/master/' + 'pandas/io/tests/data/test1' + self.ext) +# url = ('https://raw.github.com/pydata/pandas/master/' +# 'pandas/io/tests/data/test' + self.ext) + url_table = read_excel(url) + local_table = self.get_exceldf('test1') + tm.assert_frame_equal(url_table, local_table) + + @slow + def test_read_from_file_url(self): + + # FILE + if sys.version_info[:2] < (2, 6): + raise nose.SkipTest("file:// not supported with Python < 2.6") + + localtable = 
os.path.join(self.dirpath, 'test1' + self.ext) + local_table = read_excel(localtable) + + try: + url_table = read_excel('file://localhost/' + localtable) + except URLError: + # fails on some systems + import platform + raise nose.SkipTest("failing on %s" % + ' '.join(platform.uname()).strip()) + + tm.assert_frame_equal(url_table, local_table) + + def test_reader_closes_file(self): + + pth = os.path.join(self.dirpath, 'test1' + self.ext) + f = open(pth, 'rb') + with ExcelFile(f) as xlsx: + # parses okay + xlsx.parse('Sheet1', index_col=0) + + self.assertTrue(f.closed) + def test_creating_and_reading_multiple_sheets(self): # Test reading multiple sheets, from a runtime created excel file # with multiple sheets. # See PR #9450 - _skip_if_no_xlrd() _skip_if_no_xlwt() + _skip_if_no_openpyxl() def tdf(sheetname): d, i = [11,22,33], [1,2,3] @@ -468,17 +471,16 @@ def tdf(sheetname): dfs = [tdf(s) for s in sheets] dfs = dict(zip(sheets,dfs)) - with ensure_clean('.xlsx') as pth: + with ensure_clean(self.ext) as pth: with ExcelWriter(pth) as ew: for sheetname, df in iteritems(dfs): df.to_excel(ew,sheetname) - dfs_returned = pd.read_excel(pth,sheetname=sheets) + dfs_returned = read_excel(pth,sheetname=sheets) for s in sheets: tm.assert_frame_equal(dfs[s],dfs_returned[s]) def test_reader_seconds(self): # Test reading times with and without milliseconds. GH5945. 
- _skip_if_no_xlrd() import xlrd if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"): @@ -510,38 +512,30 @@ def test_reader_seconds(self): time(16, 37, 1), time(18, 20, 54)])]) - epoch_1900 = os.path.join(self.dirpath, 'times_1900.xls') - epoch_1904 = os.path.join(self.dirpath, 'times_1904.xls') - - actual = read_excel(epoch_1900, 'Sheet1') + actual = self.get_exceldf('times_1900', 'Sheet1') tm.assert_frame_equal(actual, expected) - actual = read_excel(epoch_1904, 'Sheet1') + actual = self.get_exceldf('times_1904', 'Sheet1') tm.assert_frame_equal(actual, expected) - # GH6403 - def test_read_excel_blank(self): - _skip_if_no_xlrd() - blank = os.path.join(self.dirpath, 'blank.xls') - actual = read_excel(blank, 'Sheet1') - tm.assert_frame_equal(actual, DataFrame()) +class XlsReaderTests(XlrdTests, tm.TestCase): + ext = '.xls' + engine_name = 'xlrd' + check_skip = staticmethod(_skip_if_no_xlrd) - blank = os.path.join(self.dirpath, 'blank.xlsx') - actual = read_excel(blank, 'Sheet1') - tm.assert_frame_equal(actual, DataFrame()) - def test_read_excel_blank_with_header(self): - _skip_if_no_xlrd() +class XlsxReaderTests(XlrdTests, tm.TestCase): + ext = '.xlsx' + engine_name = 'xlrd' + check_skip = staticmethod(_skip_if_no_xlrd) - expected = DataFrame(columns=['col_1', 'col_2']) - blank = os.path.join(self.dirpath, 'blank_with_header.xls') - actual = read_excel(blank, 'Sheet1') - tm.assert_frame_equal(actual, expected) - blank = os.path.join(self.dirpath, 'blank_with_header.xlsx') - actual = read_excel(blank, 'Sheet1') - tm.assert_frame_equal(actual, expected) +class XlsmReaderTests(XlrdTests, tm.TestCase): + ext = '.xlsm' + engine_name = 'xlrd' + check_skip = staticmethod(_skip_if_no_xlrd) + class ExcelWriterBase(SharedItems): # Base class for test cases to run with different Excel writers. @@ -961,11 +955,11 @@ def test_to_excel_multiindex_no_write_index(self): # Test writing and re-reading a MI witout the index. GH 5616. # Initial non-MI frame. 
- frame1 = pd.DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]}) + frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]}) # Add a MI. frame2 = frame1.copy() - multi_index = pd.MultiIndex.from_tuples([(70, 80), (90, 100)]) + multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)]) frame2.index = multi_index with ensure_clean(self.ext) as path: @@ -1148,7 +1142,7 @@ def roundtrip(df, header=True, parser_hdr=0, index=True): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index) - xf = pd.ExcelFile(path) + xf = ExcelFile(path) res = xf.parse(xf.sheet_names[0], header=parser_hdr) return res @@ -1202,7 +1196,7 @@ def roundtrip2(df, header=True, parser_hdr=0, index=True): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells, index=index) - xf = pd.ExcelFile(path) + xf = ExcelFile(path) res = xf.parse(xf.sheet_names[0], header=parser_hdr) return res @@ -1269,20 +1263,24 @@ def test_datetimes(self): # GH7074 def test_bytes_io(self): + _skip_if_no_xlrd() + bio = BytesIO() df = DataFrame(np.random.randn(10, 2)) writer = ExcelWriter(bio) df.to_excel(writer) writer.save() bio.seek(0) - reread_df = pd.read_excel(bio) + reread_df = read_excel(bio) tm.assert_frame_equal(df, reread_df) # GH8188 def test_write_lists_dict(self): - df = pd.DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}], - 'numeric': [1, 2, 3.0], - 'str': ['apple', 'banana', 'cherry']}) + _skip_if_no_xlrd() + + df = DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}], + 'numeric': [1, 2, 3.0], + 'str': ['apple', 'banana', 'cherry']}) expected = df.copy() expected.mixed = expected.mixed.apply(str) expected.numeric = expected.numeric.astype('int64') @@ -1291,6 +1289,7 @@ def test_write_lists_dict(self): read = read_excel(path, 'Sheet1', header=0) tm.assert_frame_equal(read, expected) + def raise_wrapper(major_ver): def versioned_raise_wrapper(orig_method): 
@functools.wraps(orig_method) @@ -1468,20 +1467,20 @@ class XlwtTests(ExcelWriterBase, tm.TestCase): def test_excel_raise_error_on_multiindex_columns_and_no_index(self): _skip_if_no_xlwt() # MultiIndex as columns is not yet implemented 9794 - cols = pd.MultiIndex.from_tuples([('site', ''), + cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) - df = pd.DataFrame(np.random.randn(10, 3), columns=cols) + df = DataFrame(np.random.randn(10, 3), columns=cols) with tm.assertRaises(NotImplementedError): with ensure_clean(self.ext) as path: df.to_excel(path, index=False) def test_excel_warns_verbosely_on_multiindex_columns_and_index_true(self): _skip_if_no_xlwt() - cols = pd.MultiIndex.from_tuples([('site', ''), + cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) - df = pd.DataFrame(np.random.randn(10, 3), columns=cols) + df = DataFrame(np.random.randn(10, 3), columns=cols) with tm.assert_produces_warning(UserWarning): with ensure_clean(self.ext) as path: df.to_excel(path, index=True) @@ -1489,10 +1488,10 @@ def test_excel_warns_verbosely_on_multiindex_columns_and_index_true(self): def test_excel_multiindex_index(self): _skip_if_no_xlwt() # MultiIndex as index works so assert no error #9794 - cols = pd.MultiIndex.from_tuples([('site', ''), + cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) - df = pd.DataFrame(np.random.randn(3, 10), index=cols) + df = DataFrame(np.random.randn(3, 10), index=cols) with ensure_clean(self.ext) as path: df.to_excel(path, index=False)
This PR implements more flexible Excel (spreadsheet) reader tests. The work was originally part of the larger but unsuccessful PR #9070. This PR includes: - Excel reader base class tests to facilitate adding the similar tests for other readers - not all Excel file formats (xls, xlsm, xlsx) had a corresponding test file. This PR adds the missing ones. Consequently, all Excel data files are now tested against all 3 Excel file formats. - rename `test` to `test1` (for xls, xlsm, xlsx extensions) for consistency with `test2..4` test data files. This also means that before merging (but after Travis has ran the tests) the URL of Excel test file `test1.*` has to be changed from `davidovitch/pandas` to `pydata/pandas` in `pandas/io/tests/test_excel.py` at line [378](https://github.com/davidovitch/pandas/blob/master/pandas/io/tests/test_excel.py#L378).
https://api.github.com/repos/pandas-dev/pandas/pulls/10964
2015-09-01T20:32:15Z
2015-09-05T17:22:03Z
null
2015-09-05T20:24:26Z
BUG: DataFrame subplot with duplicated columns output incorrect result
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3eed3f7ddada2..86c8aa488a7df 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -922,3 +922,4 @@ Bug Fixes - Bug in ``groupby`` incorrect computation for aggregation on ``DataFrame`` with ``NaT`` (E.g ``first``, ``last``, ``min``). (:issue:`10590`) - Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`) - Bug in ``.var()`` causing roundoff errors for highly similar values (:issue:`10242`) +- Bug in ``DataFrame.plot(subplots=True)`` with duplicated columns outputs incorrect result (:issue:`10962`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 71fd85bde1235..d1f1f2196558a 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1646,6 +1646,28 @@ def test_subplots_sharex_axes_existing_axes(self): for ax in axes.ravel(): self._check_visible(ax.get_yticklabels(), visible=True) + @slow + def test_subplots_dup_columns(self): + # GH 10962 + df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa')) + axes = df.plot(subplots=True) + for ax in axes: + self._check_legend_labels(ax, labels=['a']) + self.assertEqual(len(ax.lines), 1) + tm.close() + + axes = df.plot(subplots=True, secondary_y='a') + for ax in axes: + # (right) is only attached when subplots=False + self._check_legend_labels(ax, labels=['a']) + self.assertEqual(len(ax.lines), 1) + tm.close() + + ax = df.plot(secondary_y='a') + self._check_legend_labels(ax, labels=['a (right)'] * 5) + self.assertEqual(len(ax.lines), 0) + self.assertEqual(len(ax.right_ax.lines), 5) + def test_negative_log(self): df = - DataFrame(rand(6, 4), index=list(string.ascii_letters[:6]), diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index e0d13287fcf3b..9eab385a7a2a5 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -920,11 
+920,11 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): else: columns = data.columns - for col in columns: + for col, values in data.iteritems(): if keep_index is True: - yield col, data[col] + yield col, values else: - yield col, data[col].values + yield col, values.values @property def nseries(self):
When `DataFrame` has duplicated column name, each subplot will contain all the lines with same name. ``` import pandas as pd import numpy as np df = pd.DataFrame(np.random.rand(5, 5), columns=list('aaaaa')) df.plot(subplots=True) ``` ![figure_1](https://cloud.githubusercontent.com/assets/1696302/9609047/7acba4d0-510c-11e5-872d-1fcc1fcf8ce6.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/10962
2015-09-01T15:52:12Z
2015-09-05T02:14:54Z
2015-09-05T02:14:54Z
2015-09-05T02:14:56Z
BUG: Fixed bug that Timedelta raises error when slicing from 0s (issue #10583)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index eae33bc80be32..fc5777ddea3f1 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -821,6 +821,7 @@ Bug Fixes - Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`) - Bug in ``pd.DataFrame.diff`` when DataFrame is not consolidated (:issue:`10907`) - Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue:`9431`) +- Bug in ``Timedelta`` raising error when slicing from 0s (:issue:`10583`) - Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`) - Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`) - Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`) diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 4870fbd55f33e..97e7f883542cc 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -404,6 +404,14 @@ def test_timedelta_range(self): result = timedelta_range('0 days',freq='30T',periods=50) tm.assert_index_equal(result, expected) + # issue10583 + df = pd.DataFrame(np.random.normal(size=(10,4))) + df.index = pd.timedelta_range(start='0s', periods=10, freq='s') + expected = df.loc[pd.Timedelta('0s'):,:] + result = df.loc['0s':,:] + assert_frame_equal(expected, result) + + def test_numeric_conversions(self): self.assertEqual(ct(0), np.timedelta64(0,'ns')) self.assertEqual(ct(10), np.timedelta64(10,'ns')) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 369993b4c54d1..77ac362181a2b 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -2265,9 +2265,8 @@ class Timedelta(_Timedelta): return "m" elif self._h: return "h" - elif self._d: + else: return "D" - raise ValueError("invalid 
resolution") def round(self, reso): """
closes #10583
https://api.github.com/repos/pandas-dev/pandas/pulls/10960
2015-09-01T12:46:37Z
2015-09-01T19:38:53Z
2015-09-01T19:38:53Z
2015-09-01T19:39:57Z
TST: Changed pythonxs link to alternative link and mofidifed test_html (issue: 10906)
diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py index ebdfbe5af299d..fb7ffbc6dd621 100644 --- a/pandas/io/tests/test_html.py +++ b/pandas/io/tests/test_html.py @@ -358,20 +358,16 @@ def test_negative_skiprows(self): @network def test_multiple_matches(self): - raise nose.SkipTest("pythonxy link seems to have changed") - - url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins' - dfs = self.read_html(url, match='Python', attrs={'class': 'wikitable'}) + url = 'https://docs.python.org/2/' + dfs = self.read_html(url, match='Python') self.assertTrue(len(dfs) > 1) @network - def test_pythonxy_plugins_table(self): - raise nose.SkipTest("pythonxy link seems to have changed") - - url = 'http://code.google.com/p/pythonxy/wiki/StandardPlugins' - dfs = self.read_html(url, match='Python', attrs={'class': 'wikitable'}) - zz = [df.iloc[0, 0] for df in dfs] - self.assertEqual(sorted(zz), sorted(['Python', 'SciTE'])) + def test_python_docs_table(self): + url = 'https://docs.python.org/2/' + dfs = self.read_html(url, match='Python') + zz = [df.iloc[0, 0][0:4] for df in dfs] + self.assertEqual(sorted(zz), sorted(['Repo', 'What'])) @slow def test_thousands_macau_stats(self):
See issue #10906
https://api.github.com/repos/pandas-dev/pandas/pulls/10958
2015-09-01T02:53:10Z
2015-09-01T11:03:07Z
2015-09-01T11:03:07Z
2015-09-01T13:23:33Z
BUG: Fixed bug that Timedelta raises error when slicing from 0s (issue #10583)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 88f491ecc0bb0..65d079703e243 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -130,11 +130,10 @@ Other enhancements ^^^^^^^^^^^^^^^^^^ - `read_sql` and `to_sql` can accept database URI as con parameter (:issue:`10214`) - - Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`) - +- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`) +- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`) - Added functionality to use the ``base`` argument when resampling a ``TimeDeltaIndex`` (:issue:`10530`) - - ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`) - The string parsing of ``to_datetime``, ``Timestamp`` and ``DatetimeIndex`` has been made consistent. (:issue:`7599`) @@ -235,7 +234,7 @@ Changes to sorting API The sorting API has had some longtime inconsistencies. (:issue:`9816`, :issue:`8239`). -Here is a summary of the **PRIOR** to 0.17.0: +Here is a summary of the API **PRIOR** to 0.17.0: - ``Series.sort`` is **INPLACE** while ``DataFrame.sort`` returns a new object. - ``Series.order`` returns a new object @@ -256,19 +255,19 @@ will show a ``FutureWarning``. 
To sort by the **values**: -================================= ==================================== +================================== ==================================== Previous Replacement -================================= ==================================== -\* ``Series.order()`` ``Series.sort_values()`` -\* ``Series.sort()`` ``Series.sort_values(inplace=True)`` -\* ``DataFrame.sort(columns=...)`` ``DataFrame.sort_values(by=...)`` -================================= ==================================== +================================== ==================================== +\* ``Series.order()`` ``Series.sort_values()`` +\* ``Series.sort()`` ``Series.sort_values(inplace=True)`` +\* ``DataFrame.sort(columns=...)`` ``DataFrame.sort_values(by=...)`` +================================== ==================================== To sort by the **index**: -================================= ==================================== -Previous Equivalent -================================= ==================================== +================================== ==================================== +Previous Replacement +================================== ==================================== ``Series.sort_index()`` ``Series.sort_index()`` ``Series.sortlevel(level=...)`` ``Series.sort_index(level=...``) ``DataFrame.sort_index()`` ``DataFrame.sort_index()`` @@ -281,8 +280,8 @@ We have also deprecated and changed similar methods in two Series-like classes, ================================== ==================================== Previous Replacement ================================== ==================================== -\* ``Index.order()`` ``Index.sort_values()`` -\* ``Categorical.order()`` ``Categorical.sort_values`` +\* ``Index.order()`` ``Index.sort_values()`` +\* ``Categorical.order()`` ``Categorical.sort_values`` ================================== ==================================== .. 
_whatsnew_0170.api_breaking.to_datetime: @@ -351,7 +350,7 @@ keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coe 's': ['apple','banana']}) df -The old usage of ``DataFrame.convert_objects`` used `'coerce'` along with the +The old usage of ``DataFrame.convert_objects`` used ``'coerce'`` along with the type. .. code-block:: python @@ -366,8 +365,7 @@ Now the ``coerce`` keyword must be explicitly used. In earlier versions of pandas, ``DataFrame.convert_objects`` would not coerce numeric types when there were no values convertible to a numeric type. This returns -the original DataFrame with no conversion. This change alters -this behavior so that converts all non-number-like strings to ``NaN``. +the original DataFrame with no conversion. .. code-block:: python @@ -378,6 +376,9 @@ this behavior so that converts all non-number-like strings to ``NaN``. 0 a 1 b +THe new behavior will convert all non-number-like strings to ``NaN``, +when ``coerce=True`` is passed explicity. + .. ipython:: python pd.DataFrame({'s': ['a','b']}) @@ -517,7 +518,10 @@ New behavior: .. ipython:: python - df_with_missing.to_hdf('file.h5', 'df_with_missing', format = 'table', mode='w') + df_with_missing.to_hdf('file.h5', + 'df_with_missing', + format='table', + mode='w') pd.read_hdf('file.h5', 'df_with_missing') @@ -571,10 +575,10 @@ from ``7``. Changes to ``Categorical.unique`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -``Categorical.unique`` now returns new ``Categorical`` which ``categories`` and ``codes`` that are unique, rather than returning ``np.array`` (:issue:`10508`) +``Categorical.unique`` now returns new ``Categoricals`` with ``categories`` and ``codes`` that are unique, rather than returning ``np.array`` (:issue:`10508`) - unordered category: values and categories are sorted by appearance order. -- ordered category: values are sorted by appearance order, categories keeps existing order. 
+- ordered category: values are sorted by appearance order, categories keep existing order. .. ipython :: python @@ -597,25 +601,23 @@ Other API Changes - Line and kde plot with ``subplots=True`` now uses default colors, not all black. Specify ``color='k'`` to draw all lines in black (:issue:`9894`) - Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a Series with a ``CategoricalIndex`` (:issue:`10704`) -- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`) -- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`) - Allow passing `kwargs` to the interpolation methods (:issue:`10378`). -- Serialize metadata properties of subclasses of pandas objects (:issue:`10553`). +- The metadata properties of subclasses of pandas objects will now be serialized (:issue:`10553`). - Allow ``DataFrame`` with ``MultiIndex`` columns to be written to Excel (:issue:`10564`). This was changed in 0.16.2 as the read-back method could not always guarantee perfect fidelity (:issue:`9794`). 
- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`) +- Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`) + - ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`) =============================== =============================================================== Behavior Methods =============================== =============================================================== - ``return np.nan`` ``weekday``, ``isoweekday`` - ``return NaT`` ``date``, ``now``, ``replace``, ``to_datetime``, ``today`` - ``return np.datetime64('NaT')`` ``to_datetime64`` (unchanged) - ``raise ValueError`` All other public methods (names not beginning with underscores) + return ``np.nan`` ``weekday``, ``isoweekday`` + return ``NaT`` ``date``, ``now``, ``replace``, ``to_datetime``, ``today`` + return ``np.datetime64('NaT')`` ``to_datetime64`` (unchanged) + raise ``ValueError`` All other public methods (names not beginning with underscores) =============================== =============================================================== -- Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`) - .. 
_whatsnew_0170.deprecations: Deprecations @@ -703,6 +705,8 @@ Removal of prior version deprecations/changes Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ + +- Development support for benchmarking with the `Air Speed Velocity library <https://github.com/spacetelescope/asv/>`_ (:issue:`8316`) - Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`) - Performance improvements in ``Categorical.value_counts`` (:issue:`10804`) - Performance improvements in ``SeriesGroupBy.nunique`` and ``SeriesGroupBy.value_counts`` (:issue:`10820`) @@ -720,6 +724,8 @@ Performance Improvements Bug Fixes ~~~~~~~~~ + +- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) @@ -814,4 +820,6 @@ Bug Fixes - Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`) - Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`) - Bug in ``TimedeltaIndex`` formatter causing error while trying to save ``DataFrame`` with ``TimedeltaIndex`` using ``to_csv`` (:issue:`10833`) +- Bug in ``Timedelta`` raising error when slicing from 0s (:issue:`10583`) - Bug in ``DataFrame.where`` when handling Series slicing (:issue:`10218`, :issue:`9558`) +- Bug where ``pd.read_gbq`` throws ``ValueError`` when Bigquery returns zero rows (:issue:`10273`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 245535e47abd8..72ea6d14456b0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -63,6 +63,7 @@ def __str__(self): _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max 
+_int64_max = np.iinfo(np.int64).max # define abstract base classes to enable isinstance type checking on our # objects diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index c70fb6339517d..447a273a1e171 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -21,7 +21,8 @@ is_bool_dtype, is_object_dtype, is_datetime64_dtype, is_timedelta64_dtype, is_datetime_or_timedelta_dtype, _get_dtype, - is_int_or_datetime_dtype, is_any_int_dtype) + is_int_or_datetime_dtype, is_any_int_dtype, + _int64_max) class disallow(object): @@ -145,7 +146,7 @@ def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): else: if fill_value_typ == '+inf': # need the max int here - return np.iinfo(np.int64).max + return _int64_max else: return tslib.iNaT @@ -223,7 +224,12 @@ def _wrap_results(result, dtype): result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): - result = lib.Timedelta(result) + + # raise if we have a timedelta64[ns] which is too large + if np.fabs(result) > _int64_max: + raise ValueError("overflow in timedelta operation") + + result = lib.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) @@ -247,6 +253,8 @@ def nansum(values, axis=None, skipna=True): dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype + elif is_timedelta64_dtype(dtype): + dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask) @@ -260,7 +268,7 @@ def nanmean(values, axis=None, skipna=True): dtype_sum = dtype_max dtype_count = np.float64 - if is_integer_dtype(dtype): + if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype): dtype_sum = np.float64 elif is_float_dtype(dtype): dtype_sum = dtype diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 06ad8827a5642..1dff195e4b54f 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -121,7 +121,7 @@ def get_service(self, credentials): try: from apiclient.discovery import build - + except 
ImportError: raise ImportError('Could not import Google API Client.') @@ -279,7 +279,7 @@ def _parse_data(schema, rows): field_type) page_array[row_num][col_num] = field_value - return DataFrame(page_array) + return DataFrame(page_array, columns=col_names) def _parse_entry(field_value, field_type): if field_value is None or field_value == 'null': @@ -338,7 +338,10 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals page = pages.pop() dataframe_list.append(_parse_data(schema, page)) - final_df = concat(dataframe_list, ignore_index = True) + if len(dataframe_list) > 0: + final_df = concat(dataframe_list, ignore_index=True) + else: + final_df = _parse_data(schema, []) # Reindex the DataFrame on the provided column if index_col is not None: diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 5417842d3f863..f04eeb03f790e 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -296,6 +296,13 @@ def test_download_dataset_larger_than_200k_rows(self): df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] GROUP EACH BY id ORDER BY id ASC LIMIT 200005", project_id=PROJECT_ID) self.assertEqual(len(df.drop_duplicates()), 200005) + def test_zero_rows(self): + # Bug fix for https://github.com/pydata/pandas/issues/10273 + df = gbq.read_gbq("SELECT title, language FROM [publicdata:samples.wikipedia] where timestamp=-9999999", project_id=PROJECT_ID) + expected_result = DataFrame(columns=['title', 'language']) + self.assert_frame_equal(df, expected_result) + + class TestToGBQIntegration(tm.TestCase): # This class requires bq.py to be installed for setup/teardown. 
# It will also need to be preconfigured with a default dataset, diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 753e76fd1faea..e84dc6c692737 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -404,6 +404,13 @@ def test_timedelta_range(self): result = timedelta_range('0 days',freq='30T',periods=50) tm.assert_index_equal(result, expected) + # issue10583 + df = pd.DataFrame(np.random.normal(size=(10,4))) + df.index = pd.timedelta_range(start='0s', periods=10, freq='s') + expected = df.loc[pd.Timedelta('0s'):,:] + result = df.loc['0s':,:] + assert_frame_equal(expected, result) + def test_numeric_conversions(self): self.assertEqual(ct(0), np.timedelta64(0,'ns')) self.assertEqual(ct(10), np.timedelta64(10,'ns')) @@ -686,6 +693,25 @@ def test_timedelta_ops(self): s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), Timestamp('2015-02-15')]) self.assertEqual(s.diff().median(), timedelta(days=6)) + def test_overflow(self): + # GH 9442 + s = Series(pd.date_range('20130101',periods=100000,freq='H')) + s[0] += pd.Timedelta('1s 1ms') + + # mean + result = (s-s.min()).mean() + expected = pd.Timedelta((pd.DatetimeIndex((s-s.min())).asi8/len(s)).sum()) + + # the computation is converted to float so might be some loss of precision + self.assertTrue(np.allclose(result.value/1000, expected.value/1000)) + + # sum + self.assertRaises(ValueError, lambda : (s-s.min()).sum()) + s1 = s[0:10000] + self.assertRaises(ValueError, lambda : (s1-s1.min()).sum()) + s2 = s[0:1000] + result = (s2-s2.min()).sum() + def test_timedelta_ops_scalar(self): # GH 6808 base = pd.to_datetime('20130101 09:01:12.123456') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 369993b4c54d1..77ac362181a2b 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -2265,9 +2265,8 @@ class Timedelta(_Timedelta): return "m" elif self._h: return "h" - elif self._d: + else: return "D" - raise 
ValueError("invalid resolution") def round(self, reso): """
closes #10583
https://api.github.com/repos/pandas-dev/pandas/pulls/10957
2015-09-01T01:52:22Z
2015-09-01T12:47:11Z
null
2015-09-01T13:22:24Z
Add support for math functions in eval()
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 749512890d86b..d52c1d55a6c66 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -38,6 +38,7 @@ Highlights include: - Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`) - Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>` - Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>` +- Support for math functions in .eval(), see :ref:`here <whatsnew_0170.matheval>` Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating. @@ -123,6 +124,25 @@ incrementally. See the :ref:`docs <io.sas>` for more details. +.. _whatsnew_0170.matheval: + +Support for Math Functions in .eval() +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`~pandas.eval` now supports calling math functions. + +.. code-block:: python + + df = pd.DataFrame({'a': np.random.randn(10)}) + df.eval("b = sin(a)") + +The support math functions are `sin`, `cos`, `exp`, `log`, `expm1`, `log1p`, +`sqrt`, `sinh`, `cosh`, `tanh`, `arcsin`, `arccos`, `arctan`, `arccosh`, +`arcsinh`, `arctanh`, `abs` and `arctan2`. + +These functions map to the intrinsics for the NumExpr engine. For Python +engine, they are mapped to NumPy calls. + .. 
_whatsnew_0170.enhancements.other: Other enhancements diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py index b6a1fcbec8339..123051d802d7d 100644 --- a/pandas/computation/expr.py +++ b/pandas/computation/expr.py @@ -20,7 +20,7 @@ _arith_ops_syms, _unary_ops_syms, is_term) from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div -from pandas.computation.ops import UndefinedVariableError +from pandas.computation.ops import UndefinedVariableError, FuncNode from pandas.computation.scope import Scope, _ensure_scope @@ -524,27 +524,48 @@ def visit_Call(self, node, side=None, **kwargs): elif not isinstance(node.func, ast.Name): raise TypeError("Only named functions are supported") else: - res = self.visit(node.func) + try: + res = self.visit(node.func) + except UndefinedVariableError: + # Check if this is a supported function name + try: + res = FuncNode(node.func.id) + except ValueError: + # Raise original error + raise if res is None: raise ValueError("Invalid function call {0}".format(node.func.id)) if hasattr(res, 'value'): res = res.value - args = [self.visit(targ).value for targ in node.args] - if node.starargs is not None: - args += self.visit(node.starargs).value + if isinstance(res, FuncNode): + args = [self.visit(targ) for targ in node.args] + + if node.starargs is not None: + args += self.visit(node.starargs) + + if node.keywords or node.kwargs: + raise TypeError("Function \"{0}\" does not support keyword " + "arguments".format(res.name)) + + return res(*args, **kwargs) + + else: + args = [self.visit(targ).value for targ in node.args] + if node.starargs is not None: + args += self.visit(node.starargs).value - keywords = {} - for key in node.keywords: - if not isinstance(key, ast.keyword): - raise ValueError("keyword error in function call " - "'{0}'".format(node.func.id)) - keywords[key.arg] = self.visit(key.value).value - if node.kwargs is not None: - 
keywords.update(self.visit(node.kwargs).value) + keywords = {} + for key in node.keywords: + if not isinstance(key, ast.keyword): + raise ValueError("keyword error in function call " + "'{0}'".format(node.func.id)) + keywords[key.arg] = self.visit(key.value).value + if node.kwargs is not None: + keywords.update(self.visit(node.kwargs).value) - return self.const_type(res(*args, **keywords), self.env) + return self.const_type(res(*args, **keywords), self.env) def translate_In(self, op): return op @@ -587,7 +608,7 @@ def visitor(x, y): return reduce(visitor, operands) -_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn']) +_python_not_supported = frozenset(['Dict', 'BoolOp', 'In', 'NotIn']) _numexpr_supported_calls = frozenset(_reductions + _mathops) diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py index 9df9975b4b61c..f6d5f171036ea 100644 --- a/pandas/computation/ops.py +++ b/pandas/computation/ops.py @@ -16,9 +16,12 @@ _reductions = 'sum', 'prod' -_mathops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p', 'pow', 'div', 'sqrt', - 'inv', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', 'arctan', - 'arccosh', 'arcsinh', 'arctanh', 'arctan2', 'abs') + +_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p', + 'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', + 'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs') +_binary_math_ops = ('arctan2',) +_mathops = _unary_math_ops + _binary_math_ops _LOCAL_TAG = '__pd_eval_local_' @@ -498,3 +501,28 @@ def return_type(self): (operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)): return np.dtype('bool') return np.dtype('int') + + +class MathCall(Op): + def __init__(self, func, args): + super(MathCall, self).__init__(func.name, args) + self.func = func + + def __call__(self, env): + operands = [op(env) for op in self.operands] + return self.func.func(*operands) + + def __unicode__(self): + operands = map(str, self.operands) + return com.pprint_thing('{0}({1})'.format(self.op, 
','.join(operands))) + + +class FuncNode(object): + def __init__(self, name): + if name not in _mathops: + raise ValueError("\"{0}\" is not a supported function".format(name)) + self.name = name + self.func = getattr(np, name) + + def __call__(self, *args): + return MathCall(self, args) diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py index 4f998319d922d..8db0b82f1aa2e 100644 --- a/pandas/computation/tests/test_eval.py +++ b/pandas/computation/tests/test_eval.py @@ -23,7 +23,8 @@ from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor from pandas.computation.ops import (_binary_ops_dict, _special_case_arith_ops_syms, - _arith_ops_syms, _bool_ops_syms) + _arith_ops_syms, _bool_ops_syms, + _unary_math_ops, _binary_math_ops) import pandas.computation.expr as expr import pandas.util.testing as tm @@ -1439,6 +1440,129 @@ def setUpClass(cls): cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms +class TestMathPythonPython(tm.TestCase): + @classmethod + def setUpClass(cls): + super(TestMathPythonPython, cls).setUpClass() + tm.skip_if_no_ne() + cls.engine = 'python' + cls.parser = 'pandas' + cls.unary_fns = _unary_math_ops + cls.binary_fns = _binary_math_ops + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + def eval(self, *args, **kwargs): + kwargs['engine'] = self.engine + kwargs['parser'] = self.parser + kwargs['level'] = kwargs.pop('level', 0) + 1 + return pd.eval(*args, **kwargs) + + def test_unary_functions(self): + df = DataFrame({'a': np.random.randn(10)}) + a = df.a + for fn in self.unary_fns: + expr = "{0}(a)".format(fn) + got = self.eval(expr) + expect = getattr(np, fn)(a) + pd.util.testing.assert_almost_equal(got, expect) + + def test_binary_functions(self): + df = DataFrame({'a': np.random.randn(10), + 'b': np.random.randn(10)}) + a = df.a + b = df.b + for fn in self.binary_fns: + expr = "{0}(a, b)".format(fn) + got = self.eval(expr) + expect = getattr(np, fn)(a, b) + 
np.testing.assert_allclose(got, expect) + + def test_df_use_case(self): + df = DataFrame({'a': np.random.randn(10), + 'b': np.random.randn(10)}) + df.eval("e = arctan2(sin(a), b)", + engine=self.engine, + parser=self.parser) + got = df.e + expect = np.arctan2(np.sin(df.a), df.b) + pd.util.testing.assert_almost_equal(got, expect) + + def test_df_arithmetic_subexpression(self): + df = DataFrame({'a': np.random.randn(10), + 'b': np.random.randn(10)}) + df.eval("e = sin(a + b)", + engine=self.engine, + parser=self.parser) + got = df.e + expect = np.sin(df.a + df.b) + pd.util.testing.assert_almost_equal(got, expect) + + def check_result_type(self, dtype, expect_dtype): + df = DataFrame({'a': np.random.randn(10).astype(dtype)}) + self.assertEqual(df.a.dtype, dtype) + df.eval("b = sin(a)", + engine=self.engine, + parser=self.parser) + got = df.b + expect = np.sin(df.a) + self.assertEqual(expect.dtype, got.dtype) + self.assertEqual(expect_dtype, got.dtype) + pd.util.testing.assert_almost_equal(got, expect) + + def test_result_types(self): + self.check_result_type(np.int32, np.float64) + self.check_result_type(np.int64, np.float64) + self.check_result_type(np.float32, np.float32) + self.check_result_type(np.float64, np.float64) + # Did not test complex64 because DataFrame is converting it to + # complex128. 
Due to https://github.com/pydata/pandas/issues/10952 + self.check_result_type(np.complex128, np.complex128) + + def test_undefined_func(self): + df = DataFrame({'a': np.random.randn(10)}) + with tm.assertRaisesRegexp(ValueError, + "\"mysin\" is not a supported function"): + df.eval("mysin(a)", + engine=self.engine, + parser=self.parser) + + def test_keyword_arg(self): + df = DataFrame({'a': np.random.randn(10)}) + with tm.assertRaisesRegexp(TypeError, + "Function \"sin\" does not support " + "keyword arguments"): + df.eval("sin(x=a)", + engine=self.engine, + parser=self.parser) + + +class TestMathPythonPandas(TestMathPythonPython): + @classmethod + def setUpClass(cls): + super(TestMathPythonPandas, cls).setUpClass() + cls.engine = 'python' + cls.parser = 'pandas' + + +class TestMathNumExprPandas(TestMathPythonPython): + @classmethod + def setUpClass(cls): + super(TestMathNumExprPandas, cls).setUpClass() + cls.engine = 'numexpr' + cls.parser = 'pandas' + + +class TestMathNumExprPython(TestMathPythonPython): + @classmethod + def setUpClass(cls): + super(TestMathNumExprPython, cls).setUpClass() + cls.engine = 'numexpr' + cls.parser = 'python' + + _var_s = randn(10)
closes #4893 Extends the eval parser to accept calling math functions.
https://api.github.com/repos/pandas-dev/pandas/pulls/10953
2015-08-31T17:20:37Z
2015-09-05T22:26:10Z
2015-09-05T22:26:10Z
2015-09-05T22:26:15Z
DEPR: Deprecate legacy offsets
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 6f30ff3f51ad5..4394981abb8c3 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -656,7 +656,7 @@ apply the offset to each element. rng + DateOffset(months=2) s + DateOffset(months=2) s - DateOffset(months=2) - + If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``, ``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be used exactly like a ``Timedelta`` - see the @@ -670,7 +670,7 @@ used exactly like a ``Timedelta`` - see the td + Minute(15) Note that some offsets (such as ``BQuarterEnd``) do not have a -vectorized implementation. They can still be used but may +vectorized implementation. They can still be used but may calculate signficantly slower and will raise a ``PerformanceWarning`` .. ipython:: python @@ -882,10 +882,10 @@ frequencies. We will refer to these aliases as *offset aliases* "BAS", "business year start frequency" "BH", "business hour frequency" "H", "hourly frequency" - "T", "minutely frequency" + "T, min", "minutely frequency" "S", "secondly frequency" - "L", "milliseonds" - "U", "microseconds" + "L, ms", "milliseonds" + "U, us", "microseconds" "N", "nanoseconds" Combining Aliases @@ -953,11 +953,12 @@ These can be used as arguments to ``date_range``, ``bdate_range``, constructors for ``DatetimeIndex``, as well as various other timeseries-related functions in pandas. +.. _timeseries.legacyaliases: + Legacy Aliases ~~~~~~~~~~~~~~ -Note that prior to v0.8.0, time rules had a slightly different look. pandas -will continue to support the legacy time rules for the time being but it is -strongly recommended that you switch to using the new offset aliases. +Note that prior to v0.8.0, time rules had a slightly different look. These are +deprecated in v0.17.0, and removed in future version. .. 
csv-table:: :header: "Legacy Time Rule", "Offset Alias" @@ -987,9 +988,7 @@ strongly recommended that you switch to using the new offset aliases. "A\@OCT", "BA\-OCT" "A\@NOV", "BA\-NOV" "A\@DEC", "BA\-DEC" - "min", "T" - "ms", "L" - "us", "U" + As you can see, legacy quarterly and annual frequencies are business quarters and business year ends. Please also note the legacy time rule for milliseconds diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 9f2ec43cb2ae3..1d16136dd6b4d 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -653,6 +653,7 @@ Deprecations ``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)`` (:issue:`10735`). - ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`) +- Legacy offsets (like ``'A@JAN'``) listed in :ref:`here <timeseries.legacyaliases>` are deprecated (note that this has been alias since 0.8.0), (:issue:`10878`) - ``WidePanel`` deprecated in favor of ``Panel``, ``LongPanel`` in favor of ``DataFrame`` (note these have been aliases since < 0.11.0), (:issue:`10892`) .. 
_whatsnew_0170.prior_deprecations: diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 85de5e083d6d9..7e5c3af43c861 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -2,6 +2,7 @@ from pandas.compat import range, long, zip from pandas import compat import re +import warnings import numpy as np @@ -335,10 +336,8 @@ def get_period_alias(offset_str): _rule_aliases = { # Legacy rules that will continue to map to their original values # essentially for the rest of time - 'WEEKDAY': 'B', 'EOM': 'BM', - 'W@MON': 'W-MON', 'W@TUE': 'W-TUE', 'W@WED': 'W-WED', @@ -346,18 +345,9 @@ def get_period_alias(offset_str): 'W@FRI': 'W-FRI', 'W@SAT': 'W-SAT', 'W@SUN': 'W-SUN', - 'W': 'W-SUN', - 'Q@JAN': 'BQ-JAN', 'Q@FEB': 'BQ-FEB', 'Q@MAR': 'BQ-MAR', - 'Q': 'Q-DEC', - - 'A': 'A-DEC', # YearEnd(month=12), - 'AS': 'AS-JAN', # YearBegin(month=1), - 'BA': 'BA-DEC', # BYearEnd(month=12), - 'BAS': 'BAS-JAN', # BYearBegin(month=1), - 'A@JAN': 'BA-JAN', 'A@FEB': 'BA-FEB', 'A@MAR': 'BA-MAR', @@ -370,8 +360,17 @@ def get_period_alias(offset_str): 'A@OCT': 'BA-OCT', 'A@NOV': 'BA-NOV', 'A@DEC': 'BA-DEC', +} + +_lite_rule_alias = { + 'W': 'W-SUN', + 'Q': 'Q-DEC', + + 'A': 'A-DEC', # YearEnd(month=12), + 'AS': 'AS-JAN', # YearBegin(month=1), + 'BA': 'BA-DEC', # BYearEnd(month=12), + 'BAS': 'BAS-JAN', # BYearBegin(month=1), - # lite aliases 'Min': 'T', 'min': 'T', 'ms': 'L', @@ -386,6 +385,7 @@ def get_period_alias(offset_str): # Note that _rule_aliases is not 1:1 (d[BA]==d[A@DEC]), and so traversal # order matters when constructing an inverse. we pick one. #2331 +# Used in get_legacy_offset_name _legacy_reverse_map = dict((v, k) for k, v in reversed(sorted(compat.iteritems(_rule_aliases)))) @@ -501,6 +501,9 @@ def get_base_alias(freqstr): _dont_uppercase = set(('MS', 'ms')) +_LEGACY_FREQ_WARNING = 'Freq "{0}" is deprecated, use "{1}" as alternative.' 
+ + def get_offset(name): """ Return DateOffset object associated with rule name @@ -513,12 +516,26 @@ def get_offset(name): name = name.upper() if name in _rule_aliases: - name = _rule_aliases[name] + new = _rule_aliases[name] + warnings.warn(_LEGACY_FREQ_WARNING.format(name, new), + FutureWarning) + name = new elif name.lower() in _rule_aliases: - name = _rule_aliases[name.lower()] + new = _rule_aliases[name.lower()] + warnings.warn(_LEGACY_FREQ_WARNING.format(name, new), + FutureWarning) + name = new + + name = _lite_rule_alias.get(name, name) + name = _lite_rule_alias.get(name.lower(), name) + else: if name in _rule_aliases: - name = _rule_aliases[name] + new = _rule_aliases[name] + warnings.warn(_LEGACY_FREQ_WARNING.format(name, new), + FutureWarning) + name = new + name = _lite_rule_alias.get(name, name) if name not in _offset_map: try: @@ -561,6 +578,9 @@ def get_legacy_offset_name(offset): """ Return the pre pandas 0.8.0 name for the date offset """ + + # This only used in test_timeseries_legacy.py + name = offset.name return _legacy_reverse_map.get(name, name) @@ -754,10 +774,21 @@ def _period_alias_dictionary(): def _period_str_to_code(freqstr): # hack - freqstr = _rule_aliases.get(freqstr, freqstr) + if freqstr in _rule_aliases: + new = _rule_aliases[freqstr] + warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, new), + FutureWarning) + freqstr = new + freqstr = _lite_rule_alias.get(freqstr, freqstr) if freqstr not in _dont_uppercase: - freqstr = _rule_aliases.get(freqstr.lower(), freqstr) + lower = freqstr.lower() + if lower in _rule_aliases: + new = _rule_aliases[lower] + warnings.warn(_LEGACY_FREQ_WARNING.format(lower, new), + FutureWarning) + freqstr = new + freqstr = _lite_rule_alias.get(lower, freqstr) try: if freqstr not in _dont_uppercase: @@ -766,6 +797,8 @@ def _period_str_to_code(freqstr): except KeyError: try: alias = _period_alias_dict[freqstr] + warnings.warn(_LEGACY_FREQ_WARNING.format(freqstr, alias), + FutureWarning) except KeyError: 
raise ValueError("Unknown freqstr: %s" % freqstr) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 33faac153cce0..ec416efe1079f 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -2650,14 +2650,13 @@ def generate_range(start=None, end=None, periods=None, prefix_mapping['N'] = Nano - def _make_offset(key): """Gets offset based on key. KeyError if prefix is bad, ValueError if suffix is bad. All handled by `get_offset` in tseries/frequencies. Not public.""" if key is None: return None - split = key.replace('@', '-').split('-') + split = key.split('-') klass = prefix_mapping[split[0]] # handles case where there's no suffix (and will TypeError if too many '-') obj = klass._from_name(*split[1:]) diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index 68b65697918f4..070363460f791 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -529,9 +529,13 @@ def test_series(self): self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar']))) # cannot infer on PeriodIndex - for freq in [None, 'L', 'Y']: + for freq in [None, 'L']: s = Series(period_range('2013',periods=10,freq=freq)) self.assertRaises(TypeError, lambda : frequencies.infer_freq(s)) + for freq in ['Y']: + with tm.assert_produces_warning(FutureWarning): + s = Series(period_range('2013',periods=10,freq=freq)) + self.assertRaises(TypeError, lambda : frequencies.infer_freq(s)) # DateTimeIndex for freq in ['M', 'L', 'S']: @@ -543,6 +547,19 @@ def test_series(self): inferred = frequencies.infer_freq(s) self.assertEqual(inferred,'D') + def test_legacy_offset_warnings(self): + for k, v in compat.iteritems(frequencies._rule_aliases): + with tm.assert_produces_warning(FutureWarning): + result = frequencies.get_offset(k) + exp = frequencies.get_offset(v) + self.assertEqual(result, exp) + + with tm.assert_produces_warning(FutureWarning): + idx = 
date_range('2011-01-01', periods=5, freq=k) + exp = date_range('2011-01-01', periods=5, freq=v) + self.assert_index_equal(idx, exp) + + MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index d364206017c7e..b3ec88f4d0988 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -3615,7 +3615,6 @@ def test_get_offset(): ('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)), ('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)), ('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)), - ('w@Sat', Week(weekday=5)), ("RE-N-DEC-MON", makeFY5253NearestEndMonth(weekday=0, startingMonth=12)), ("RE-L-DEC-TUE", makeFY5253LastOfMonth(weekday=1, startingMonth=12)), ("REQ-L-MAR-TUE-4", makeFY5253LastOfMonthQuarter(weekday=1, startingMonth=3, qtr_with_extra_week=4)), @@ -3628,6 +3627,13 @@ def test_get_offset(): assert offset == expected, ("Expected %r to yield %r (actual: %r)" % (name, expected, offset)) +def test_get_offset_legacy(): + pairs = [('w@Sat', Week(weekday=5))] + for name, expected in pairs: + with tm.assert_produces_warning(FutureWarning): + offset = get_offset(name) + assert offset == expected, ("Expected %r to yield %r (actual: %r)" % + (name, expected, offset)) class TestParseTimeString(tm.TestCase): @@ -3663,11 +3669,18 @@ def test_get_standard_freq(): assert fstr == get_standard_freq('w') assert fstr == get_standard_freq('1w') assert fstr == get_standard_freq(('W', 1)) - assert fstr == get_standard_freq('WeEk') + + with tm.assert_produces_warning(FutureWarning): + result = get_standard_freq('WeEk') + assert fstr == result fstr = get_standard_freq('5Q') assert fstr == get_standard_freq('5q') - assert fstr == get_standard_freq('5QuarTer') + + with tm.assert_produces_warning(FutureWarning): + result = get_standard_freq('5QuarTer') + assert fstr == result + assert fstr == get_standard_freq(('q', 5)) 
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index dca37d9ce164c..cdd9d036fcadc 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -437,7 +437,7 @@ def test_properties_monthly(self): def test_properties_weekly(self): # Test properties on Periods with daily frequency. - w_date = Period(freq='WK', year=2007, month=1, day=7) + w_date = Period(freq='W', year=2007, month=1, day=7) # assert_equal(w_date.year, 2007) assert_equal(w_date.quarter, 1) @@ -445,7 +445,22 @@ def test_properties_weekly(self): assert_equal(w_date.week, 1) assert_equal((w_date - 1).week, 52) assert_equal(w_date.days_in_month, 31) - assert_equal(Period(freq='WK', year=2012, month=2, day=1).days_in_month, 29) + assert_equal(Period(freq='W', year=2012, month=2, day=1).days_in_month, 29) + + def test_properties_weekly_legacy(self): + # Test properties on Periods with daily frequency. + with tm.assert_produces_warning(FutureWarning): + w_date = Period(freq='WK', year=2007, month=1, day=7) + # + assert_equal(w_date.year, 2007) + assert_equal(w_date.quarter, 1) + assert_equal(w_date.month, 1) + assert_equal(w_date.week, 1) + assert_equal((w_date - 1).week, 52) + assert_equal(w_date.days_in_month, 31) + with tm.assert_produces_warning(FutureWarning): + exp = Period(freq='WK', year=2012, month=2, day=1) + assert_equal(exp.days_in_month, 29) def test_properties_daily(self): # Test properties on Periods with daily frequency. 
@@ -613,8 +628,8 @@ def test_conv_annual(self): ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4) ival_A_to_M_start = Period(freq='M', year=2007, month=1) ival_A_to_M_end = Period(freq='M', year=2007, month=12) - ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1) - ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31) + ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1) + ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31) ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1) ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31) ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1) @@ -643,8 +658,8 @@ def test_conv_annual(self): assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end) assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start) assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end) - assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start) - assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end) + assert_equal(ival_A.asfreq('W', 'S'), ival_A_to_W_start) + assert_equal(ival_A.asfreq('W', 'E'), ival_A_to_W_end) assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start) assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end) assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start) @@ -681,8 +696,8 @@ def test_conv_quarterly(self): ival_Q_to_A = Period(freq='A', year=2007) ival_Q_to_M_start = Period(freq='M', year=2007, month=1) ival_Q_to_M_end = Period(freq='M', year=2007, month=3) - ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1) - ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31) + ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1) + ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31) ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1) ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30) ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1) @@ -711,8 +726,8 @@ def 
test_conv_quarterly(self): assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start) assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end) - assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start) - assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end) + assert_equal(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start) + assert_equal(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end) assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start) assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end) assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start) @@ -739,8 +754,8 @@ def test_conv_monthly(self): ival_M_end_of_quarter = Period(freq='M', year=2007, month=3) ival_M_to_A = Period(freq='A', year=2007) ival_M_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1) - ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31) + ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1) + ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31) ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1) ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31) ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1) @@ -763,8 +778,8 @@ def test_conv_monthly(self): assert_equal(ival_M.asfreq('Q'), ival_M_to_Q) assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q) - assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start) - assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end) + assert_equal(ival_M.asfreq('W', 'S'), ival_M_to_W_start) + assert_equal(ival_M.asfreq('W', 'E'), ival_M_to_W_end) assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start) assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end) assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start) @@ -781,15 +796,15 @@ def test_conv_monthly(self): def test_conv_weekly(self): # frequency conversion tests: from Weekly Frequency - ival_W = Period(freq='WK', year=2007, month=1, day=1) + ival_W = Period(freq='W', year=2007, month=1, 
day=1) - ival_WSUN = Period(freq='WK', year=2007, month=1, day=7) - ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6) - ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5) - ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4) - ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3) - ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2) - ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1) + ival_WSUN = Period(freq='W', year=2007, month=1, day=7) + ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6) + ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5) + ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4) + ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3) + ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2) + ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1) ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1) ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7) @@ -806,9 +821,9 @@ def test_conv_weekly(self): ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26) ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1) - ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31) - ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31) - ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31) + ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31) + ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31) + ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31) ival_W_to_A = Period(freq='A', year=2007) ival_W_to_Q = Period(freq='Q', year=2007, quarter=1) ival_W_to_M = Period(freq='M', year=2007, month=1) @@ -885,7 +900,128 @@ def test_conv_weekly(self): assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start) assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end) - assert_equal(ival_W.asfreq('WK'), ival_W) + assert_equal(ival_W.asfreq('W'), 
ival_W) + + def test_conv_weekly_legacy(self): + # frequency conversion tests: from Weekly Frequency + + with tm.assert_produces_warning(FutureWarning): + ival_W = Period(freq='WK', year=2007, month=1, day=1) + + with tm.assert_produces_warning(FutureWarning): + ival_WSUN = Period(freq='WK', year=2007, month=1, day=7) + with tm.assert_produces_warning(FutureWarning): + ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6) + with tm.assert_produces_warning(FutureWarning): + ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5) + with tm.assert_produces_warning(FutureWarning): + ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4) + with tm.assert_produces_warning(FutureWarning): + ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3) + with tm.assert_produces_warning(FutureWarning): + ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2) + with tm.assert_produces_warning(FutureWarning): + ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1) + + ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1) + ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7) + ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31) + ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6) + ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30) + ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5) + ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29) + ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4) + ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28) + ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3) + ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27) + ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2) + ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26) + ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1) + + with 
tm.assert_produces_warning(FutureWarning): + ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31) + with tm.assert_produces_warning(FutureWarning): + ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31) + with tm.assert_produces_warning(FutureWarning): + ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31) + ival_W_to_A = Period(freq='A', year=2007) + ival_W_to_Q = Period(freq='Q', year=2007, quarter=1) + ival_W_to_M = Period(freq='M', year=2007, month=1) + + if Period(freq='D', year=2007, month=12, day=31).weekday == 6: + ival_W_to_A_end_of_year = Period(freq='A', year=2007) + else: + ival_W_to_A_end_of_year = Period(freq='A', year=2008) + + if Period(freq='D', year=2007, month=3, day=31).weekday == 6: + ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, + quarter=1) + else: + ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, + quarter=2) + + if Period(freq='D', year=2007, month=1, day=31).weekday == 6: + ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1) + else: + ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2) + + ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1) + ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5) + ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1) + ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7) + ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, + hour=0) + ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, + hour=23) + ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1, + hour=0, minute=0) + ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7, + hour=23, minute=59) + ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, + hour=0, minute=0, second=0) + ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, + hour=23, minute=59, second=59) + + assert_equal(ival_W.asfreq('A'), ival_W_to_A) + assert_equal(ival_W_end_of_year.asfreq('A'), + 
ival_W_to_A_end_of_year) + assert_equal(ival_W.asfreq('Q'), ival_W_to_Q) + assert_equal(ival_W_end_of_quarter.asfreq('Q'), + ival_W_to_Q_end_of_quarter) + assert_equal(ival_W.asfreq('M'), ival_W_to_M) + assert_equal(ival_W_end_of_month.asfreq('M'), + ival_W_to_M_end_of_month) + + assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start) + assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end) + + assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start) + assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end) + + assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start) + assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end) + assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start) + assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end) + assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start) + assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end) + assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start) + assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end) + assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start) + assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end) + assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start) + assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end) + assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start) + assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end) + + assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start) + assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end) + assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start) + assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end) + assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start) + assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end) + + with tm.assert_produces_warning(FutureWarning): + assert_equal(ival_W.asfreq('WK'), ival_W) def test_conv_business(self): # frequency conversion tests: from Business Frequency" @@ -899,7 +1035,7 @@ def test_conv_business(self): 
ival_B_to_A = Period(freq='A', year=2007) ival_B_to_Q = Period(freq='Q', year=2007, quarter=1) ival_B_to_M = Period(freq='M', year=2007, month=1) - ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7) + ival_B_to_W = Period(freq='W', year=2007, month=1, day=7) ival_B_to_D = Period(freq='D', year=2007, month=1, day=1) ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0) @@ -920,8 +1056,8 @@ def test_conv_business(self): assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q) assert_equal(ival_B.asfreq('M'), ival_B_to_M) assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M) - assert_equal(ival_B.asfreq('WK'), ival_B_to_W) - assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W) + assert_equal(ival_B.asfreq('W'), ival_B_to_W) + assert_equal(ival_B_end_of_week.asfreq('W'), ival_B_to_W) assert_equal(ival_B.asfreq('D'), ival_B_to_D) @@ -962,7 +1098,7 @@ def test_conv_daily(self): ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1) ival_D_to_M = Period(freq='M', year=2007, month=1) - ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7) + ival_D_to_W = Period(freq='W', year=2007, month=1, day=7) ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0) @@ -993,8 +1129,8 @@ def test_conv_daily(self): assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC) assert_equal(ival_D.asfreq('M'), ival_D_to_M) assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M) - assert_equal(ival_D.asfreq('WK'), ival_D_to_W) - assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W) + assert_equal(ival_D.asfreq('W'), ival_D_to_W) + assert_equal(ival_D_end_of_week.asfreq('W'), ival_D_to_W) assert_equal(ival_D_friday.asfreq('B'), ival_B_friday) assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday) @@ -1031,7 +1167,7 @@ def test_conv_hourly(self): ival_H_to_A = Period(freq='A', year=2007) ival_H_to_Q = Period(freq='Q', year=2007, quarter=1) ival_H_to_M = Period(freq='M', year=2007, month=1) - ival_H_to_W = 
Period(freq='WK', year=2007, month=1, day=7) + ival_H_to_W = Period(freq='W', year=2007, month=1, day=7) ival_H_to_D = Period(freq='D', year=2007, month=1, day=1) ival_H_to_B = Period(freq='B', year=2007, month=1, day=1) @@ -1050,8 +1186,8 @@ def test_conv_hourly(self): assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q) assert_equal(ival_H.asfreq('M'), ival_H_to_M) assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M) - assert_equal(ival_H.asfreq('WK'), ival_H_to_W) - assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W) + assert_equal(ival_H.asfreq('W'), ival_H_to_W) + assert_equal(ival_H_end_of_week.asfreq('W'), ival_H_to_W) assert_equal(ival_H.asfreq('D'), ival_H_to_D) assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D) assert_equal(ival_H.asfreq('B'), ival_H_to_B) @@ -1087,7 +1223,7 @@ def test_conv_minutely(self): ival_T_to_A = Period(freq='A', year=2007) ival_T_to_Q = Period(freq='Q', year=2007, quarter=1) ival_T_to_M = Period(freq='M', year=2007, month=1) - ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7) + ival_T_to_W = Period(freq='W', year=2007, month=1, day=7) ival_T_to_D = Period(freq='D', year=2007, month=1, day=1) ival_T_to_B = Period(freq='B', year=2007, month=1, day=1) ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0) @@ -1103,8 +1239,8 @@ def test_conv_minutely(self): assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q) assert_equal(ival_T.asfreq('M'), ival_T_to_M) assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M) - assert_equal(ival_T.asfreq('WK'), ival_T_to_W) - assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W) + assert_equal(ival_T.asfreq('W'), ival_T_to_W) + assert_equal(ival_T_end_of_week.asfreq('W'), ival_T_to_W) assert_equal(ival_T.asfreq('D'), ival_T_to_D) assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D) assert_equal(ival_T.asfreq('B'), ival_T_to_B) @@ -1142,7 +1278,7 @@ def test_conv_secondly(self): ival_S_to_A = Period(freq='A', year=2007) ival_S_to_Q = 
Period(freq='Q', year=2007, quarter=1) ival_S_to_M = Period(freq='M', year=2007, month=1) - ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7) + ival_S_to_W = Period(freq='W', year=2007, month=1, day=7) ival_S_to_D = Period(freq='D', year=2007, month=1, day=1) ival_S_to_B = Period(freq='B', year=2007, month=1, day=1) ival_S_to_H = Period(freq='H', year=2007, month=1, day=1, @@ -1156,8 +1292,8 @@ def test_conv_secondly(self): assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q) assert_equal(ival_S.asfreq('M'), ival_S_to_M) assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M) - assert_equal(ival_S.asfreq('WK'), ival_S_to_W) - assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W) + assert_equal(ival_S.asfreq('W'), ival_S_to_W) + assert_equal(ival_S_end_of_week.asfreq('W'), ival_S_to_W) assert_equal(ival_S.asfreq('D'), ival_S_to_D) assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D) assert_equal(ival_S.asfreq('B'), ival_S_to_B) @@ -2171,12 +2307,17 @@ def test_to_period_annualish(self): self.assertEqual(prng.freq, 'A-DEC') def test_to_period_monthish(self): - offsets = ['MS', 'EOM', 'BM'] + offsets = ['MS', 'BM'] for off in offsets: rng = date_range('01-Jan-2012', periods=8, freq=off) prng = rng.to_period() self.assertEqual(prng.freq, 'M') + with tm.assert_produces_warning(FutureWarning): + rng = date_range('01-Jan-2012', periods=8, freq='EOM') + prng = rng.to_period() + self.assertEqual(prng.freq, 'M') + def test_no_multiples(self): self.assertRaises(ValueError, period_range, '1989Q3', periods=10, freq='2Q') diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index 39736eef79295..08a4056c1fce2 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -22,7 +22,7 @@ @tm.mplskip class TestTSPlot(tm.TestCase): def setUp(self): - freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'Y'] + freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A'] idx = [period_range('12/31/1999', 
freq=x, periods=100) for x in freq] self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx] self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
Closes #10878.
https://api.github.com/repos/pandas-dev/pandas/pulls/10951
2015-08-31T14:29:06Z
2015-09-01T11:05:56Z
2015-09-01T11:05:56Z
2015-09-01T12:22:24Z
PERF: improves performance in SeriesGroupBy.count
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 42752112a64f7..7f56b27f0eab3 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -814,6 +814,8 @@ Bug Fixes - Bug in ``BinGrouper.group_info`` where returned values are not compatible with base class (:issue:`10914`) - Bug in clearing the cache on ``DataFrame.pop`` and a subsequent inplace op (:issue:`10912`) - Bug in indexing with a mixed-integer ``Index`` causing an ``ImportError`` (:issue:`10610`) +- Bug in ``Series.count`` when index has nulls (:issue:`10946`) + - Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`) - Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index f42825a11933b..354c9a6c5579c 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2684,6 +2684,15 @@ def value_counts(self, normalize=False, sort=True, ascending=False, return Series(out, index=mi) + def count(self): + ids, _, ngroups = self.grouper.group_info + val = self.obj.get_values() + + mask = (ids != -1) & ~isnull(val) + out = np.bincount(ids[mask], minlength=ngroups) if ngroups != 0 else [] + + return Series(out, index=self.grouper.result_index, name=self.name) + def _apply_to_column_groupbys(self, func): """ return a pass thru """ return func(self) diff --git a/pandas/core/series.py b/pandas/core/series.py index 2890730956c75..48fe5b6bf2894 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1117,27 +1117,24 @@ def count(self, level=None): ------- nobs : int or Series (if level specified) """ - if level is not None: - mask = notnull(self.values) + from pandas.core.index import _get_na_value - if isinstance(level, compat.string_types): - level = self.index._get_level_number(level) + if level is None: + return notnull(_values_from_object(self)).sum() - level_index = 
self.index.levels[level] + if isinstance(level, compat.string_types): + level = self.index._get_level_number(level) - if len(self) == 0: - return self._constructor(0, index=level_index)\ - .__finalize__(self) + lev = self.index.levels[level] + lab = np.array(self.index.labels[level], subok=False, copy=True) - # call cython function - max_bin = len(level_index) - labels = com._ensure_int64(self.index.labels[level]) - counts = lib.count_level_1d(mask.view(np.uint8), - labels, max_bin) - return self._constructor(counts, - index=level_index).__finalize__(self) + mask = lab == -1 + if mask.any(): + lab[mask] = cnt = len(lev) + lev = lev.insert(cnt, _get_na_value(lev.dtype.type)) - return notnull(_values_from_object(self)).sum() + out = np.bincount(lab[notnull(self.values)], minlength=len(lev)) + return self._constructor(out, index=lev).__finalize__(self) def mode(self): """Returns the mode(s) of the dataset. diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 07f0c89535a77..720862df97b78 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -1253,23 +1253,6 @@ def lookup_values(ndarray[object] values, dict mapping): return maybe_convert_objects(result) -def count_level_1d(ndarray[uint8_t, cast=True] mask, - ndarray[int64_t] labels, Py_ssize_t max_bin): - cdef: - Py_ssize_t i, n - ndarray[int64_t] counts - - counts = np.zeros(max_bin, dtype='i8') - - n = len(mask) - - for i from 0 <= i < n: - if mask[i]: - counts[labels[i]] += 1 - - return counts - - def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask, ndarray[int64_t] labels, Py_ssize_t max_bin): cdef: diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 86eafdf7ca2c8..a4392f3045fbb 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4740,6 +4740,16 @@ def test_count(self): self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum()) + mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]]) + ts = Series(np.arange(len(mi)), index=mi) + + left = 
ts.count(level=1) + right = Series([2, 3, 1], index=[1, 2, nan]) + assert_series_equal(left, right) + + ts.iloc[[0, 3, 5]] = nan + assert_series_equal(ts.count(level=1), right - 1) + def test_dtype(self): self.assertEqual(self.ts.dtype, np.dtype('float64'))
BUG: closes bug in Series.count when index has nulls ``` python In [4]: ts Out[4]: a 1 0 2 1 b 2 2 NaN 3 c 1 4 2 5 dtype: int64 In [5]: ts.count(level=1) Out[5]: 1 2 2 4 # <<< BUG! dtype: int64 In [6]: from string import ascii_lowercase In [7]: np.random.seed(2718281) In [8]: n = 1 << 21 In [9]: df = DataFrame({ ...: '1st':np.random.choice(list(ascii_lowercase), n), ...: '2nd':np.random.randint(0, n // 100, n), ...: '3rd':np.random.randn(n).round(3)}) In [10]: df.loc[np.random.choice(n, n // 10), '3rd'] = np.nan In [11]: In [11]: gr = df.groupby(['1st', '2nd'])['3rd'] In [12]: %timeit gr.count() The slowest run took 6.67 times longer than the fastest. This could mean that an intermediate result is being cached 1 loops, best of 3: 86.4 ms per loop In [13]: %timeit gr.count() 10 loops, best of 3: 87 ms per loop ``` on branch: ``` python In [5]: ts.count(level=1) Out[5]: 1 2 2 3 NaN 1 dtype: int64 ... In [12]: %timeit gr.count() The slowest run took 12.29 times longer than the fastest. This could mean that an intermediate result is being cached 1 loops, best of 3: 43.1 ms per loop In [13]: %timeit gr.count() 10 loops, best of 3: 43.5 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10946
2015-08-31T02:11:16Z
2015-09-05T16:36:53Z
null
2015-09-05T16:46:12Z
BUG: Index name lost in conv #10875
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3e81a923a114c..834514b603a80 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -770,7 +770,7 @@ Bug Fixes - Bug in ``filter`` (regression from 0.16.0) and ``transform`` when grouping on multiple keys, one of which is datetime-like (:issue:`10114`) - +- Bug in ``to_datetime`` and ``to_timedelta`` causing ``Index`` name to be lost (:issue:`10875`) - Bug that caused segfault when resampling an empty Series (:issue:`10228`) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index b54e129c7a4e1..17aa6c30cd185 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1732,6 +1732,11 @@ def test_equals_op_multiindex(self): df.index == index_a tm.assert_numpy_array_equal(index_a == mi3, np.array([False, False, False])) + def test_conversion_preserves_name(self): + #GH 10875 + i = pd.Index(['01:02:03', '01:02:04'], name='label') + self.assertEqual(i.name, pd.to_datetime(i).name) + self.assertEqual(i.name, pd.to_timedelta(i).name) class TestCategoricalIndex(Base, tm.TestCase): _holder = CategoricalIndex diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index 886d6ff42ced6..282e1d603ed84 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -8,7 +8,7 @@ from pandas import compat from pandas.core.common import (ABCSeries, is_integer_dtype, is_timedelta64_dtype, is_list_like, - isnull, _ensure_object) + isnull, _ensure_object, ABCIndexClass) from pandas.util.decorators import deprecate_kwarg @deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors', @@ -35,7 +35,7 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise', coerce=None): """ unit = _validate_timedelta_unit(unit) - def _convert_listlike(arg, box, unit): + def _convert_listlike(arg, box, unit, name=None): if isinstance(arg, (list,tuple)) or ((hasattr(arg,'__iter__') and not hasattr(arg,'dtype'))): arg = 
np.array(list(arg), dtype='O') @@ -51,7 +51,7 @@ def _convert_listlike(arg, box, unit): if box: from pandas import TimedeltaIndex - value = TimedeltaIndex(value,unit='ns') + value = TimedeltaIndex(value,unit='ns', name=name) return value if arg is None: @@ -60,6 +60,8 @@ def _convert_listlike(arg, box, unit): from pandas import Series values = _convert_listlike(arg.values, box=False, unit=unit) return Series(values, index=arg.index, name=arg.name, dtype='m8[ns]') + elif isinstance(arg, ABCIndexClass): + return _convert_listlike(arg, box=box, unit=unit, name=arg.name) elif is_list_like(arg): return _convert_listlike(arg, box=box, unit=unit) diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 6f08448b47b1e..efd1ff9ba34fd 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -8,6 +8,7 @@ import pandas.tslib as tslib import pandas.core.common as com from pandas.compat import StringIO, callable +from pandas.core.common import ABCIndexClass import pandas.compat as compat from pandas.util.decorators import deprecate_kwarg @@ -277,7 +278,7 @@ def _to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, from pandas.core.series import Series from pandas.tseries.index import DatetimeIndex - def _convert_listlike(arg, box, format): + def _convert_listlike(arg, box, format, name=None): if isinstance(arg, (list,tuple)): arg = np.array(arg, dtype='O') @@ -286,7 +287,7 @@ def _convert_listlike(arg, box, format): if com.is_datetime64_ns_dtype(arg): if box and not isinstance(arg, DatetimeIndex): try: - return DatetimeIndex(arg, tz='utc' if utc else None) + return DatetimeIndex(arg, tz='utc' if utc else None, name=name) except ValueError: pass @@ -294,7 +295,7 @@ def _convert_listlike(arg, box, format): elif format is None and com.is_integer_dtype(arg) and unit=='ns': result = arg.astype('datetime64[ns]') if box: - return DatetimeIndex(result, tz='utc' if utc else None) + return DatetimeIndex(result, tz='utc' if utc else None, name=name) 
return result @@ -355,13 +356,13 @@ def _convert_listlike(arg, box, format): require_iso8601=require_iso8601) if com.is_datetime64_dtype(result) and box: - result = DatetimeIndex(result, tz='utc' if utc else None) + result = DatetimeIndex(result, tz='utc' if utc else None, name=name) return result except ValueError as e: try: values, tz = tslib.datetime_to_datetime64(arg) - return DatetimeIndex._simple_new(values, None, tz=tz) + return DatetimeIndex._simple_new(values, name=name, tz=tz) except (ValueError, TypeError): raise e @@ -372,6 +373,8 @@ def _convert_listlike(arg, box, format): elif isinstance(arg, Series): values = _convert_listlike(arg.values, False, format) return Series(values, index=arg.index, name=arg.name) + elif isinstance(arg, ABCIndexClass): + return _convert_listlike(arg, box, format, name=arg.name) elif com.is_list_like(arg): return _convert_listlike(arg, box, format)
Addresses #10875, when `Index` is converted via `to_datetime`, `to_timedelta` part of #9862 master issue
https://api.github.com/repos/pandas-dev/pandas/pulls/10945
2015-08-30T22:43:26Z
2015-08-31T12:18:36Z
2015-08-31T12:18:36Z
2015-08-31T23:37:24Z
Fixing column_format argument passing, #9402
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3e81a923a114c..f6c06e5e0233f 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -727,6 +727,7 @@ Bug Fixes - Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) +- Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`) - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) - Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`) diff --git a/pandas/core/format.py b/pandas/core/format.py index 5c74b5a5655e9..d463c02dd41a2 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -584,7 +584,6 @@ def to_latex(self, column_format=None, longtable=False): """ self.escape = self.kwds.get('escape', True) - # TODO: column_format is not settable in df.to_latex def get_col_type(dtype): if issubclass(dtype.type, np.number): return 'r' diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 607ac43e6ce27..9d676991c2c56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1477,7 +1477,8 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None, def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, - bold_rows=True, longtable=False, escape=True): + bold_rows=True, column_format=None, + longtable=False, escape=True): """ Render a DataFrame to a tabular environment table. You can splice this into a LaTeX document. Requires \\usepackage{booktabs}. 
@@ -1486,6 +1487,9 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None, bold_rows : boolean, default True Make the row labels bold in the output + column_format : str, default None + The columns format as specified in LaTeX (e.g 'rcl' for a 3 columns + table), see https://en.wikibooks.org/wiki/LaTeX/Tables longtable : boolean, default False Use a longtable environment instead of tabular. Requires adding a \\usepackage{longtable} to your LaTeX preamble. diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 388df526e05f5..de6d172408916 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -2380,6 +2380,24 @@ def test_to_latex(self): """ self.assertEqual(withoutindex_result, withoutindex_expected) + def test_to_latex_format(self): + # GH Bug #9402 + self.frame.to_latex(column_format='ccc') + + df = DataFrame({'a': [1, 2], + 'b': ['b1', 'b2']}) + withindex_result = df.to_latex(column_format='ccc') + withindex_expected = r"""\begin{tabular}{ccc} +\toprule +{} & a & b \\ +\midrule +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\bottomrule +\end{tabular} +""" + self.assertEqual(withindex_result, withindex_expected) + def test_to_latex_multiindex(self): df = DataFrame({('x', 'y'): ['a']}) result = df.to_latex()
closes #9402
https://api.github.com/repos/pandas-dev/pandas/pulls/10944
2015-08-30T14:48:18Z
2015-08-31T11:07:48Z
null
2015-09-01T09:17:48Z
BUG: passing columns and dict with scalar values should raise error
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index e9d39e0441055..70d70a2b76a81 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -836,3 +836,4 @@ Bug Fixes - Bug in ``to_json`` which was causing segmentation fault when serializing 0-rank ndarray (:issue:`9576`) - Bug in plotting functions may raise ``IndexError`` when plotted on ``GridSpec`` (:issue:`10819`) - Bug in plot result may show unnecessary minor ticklabels (:issue:`10657`) +- Bug when constructing ``DataFrame`` where passing a dictionary with only scalar values and specifying columns did not raise an error (:issue:`10856`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 997dfeb728ade..acf5e69bf05e3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -298,13 +298,18 @@ def _init_dict(self, data, index, columns, dtype=None): if columns is not None: columns = _ensure_index(columns) - # prefilter if columns passed + # GH10856 + # raise ValueError if only scalars in dict + if index is None: + extract_index(list(data.values())) + # prefilter if columns passed data = dict((k, v) for k, v in compat.iteritems(data) if k in columns) if index is None: index = extract_index(list(data.values())) + else: index = _ensure_index(index) @@ -330,6 +335,7 @@ def _init_dict(self, data, index, columns, dtype=None): v = data[k] data_names.append(k) arrays.append(v) + else: keys = list(data.keys()) if not isinstance(data, OrderedDict): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index d7b5a9811bc5b..9bdb7f08fe7cf 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2762,6 +2762,17 @@ def test_constructor_dict(self): frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B']) self.assertTrue(frame.index.equals(Index([]))) + # GH10856 + # dict with scalar values should raise error, even if columns passed + with tm.assertRaises(ValueError): + DataFrame({'a': 0.7}) + + with 
tm.assertRaises(ValueError): + DataFrame({'a': 0.7}, columns=['a']) + + with tm.assertRaises(ValueError): + DataFrame({'a': 0.7}, columns=['b']) + def test_constructor_multi_index(self): # GH 4078 # construction error with mi and all-nan frame
Fixes [GH10856](https://github.com/pydata/pandas/issues/10856). ``` Python >>> pd.DataFrame({'a':0.1}, columns=['b']) ValueError: If using all scalar values, you must pass an index ``` Trying to raise this error was slightly trickier than I anticipated - this was the only way that didn't break existing tests. If no index is passed to the constructor, `extract_index` is called to check whether the dictionary contacts only scalar values (and raises the ValueError if so). This check now happens _prior_ to preselecting any columns. If people are happy with this approach I can write tests for the PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/10943
2015-08-30T14:41:37Z
2015-09-02T11:51:18Z
2015-09-02T11:51:18Z
2015-09-02T20:14:50Z
BUG: fixing bug in groupby_indices benchmark
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index a84a5373651bb..f1ac09b8b2516 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -212,7 +212,7 @@ class groupby_indices(object): def setup(self): try: self.rng = date_range('1/1/2000', '12/31/2005', freq='H') - (year, month, day) = (self.rng.year, self.rng.month, self.rng.day) + (self.year, self.month, self.day) = (self.rng.year, self.rng.month, self.rng.day) except: self.rng = date_range('1/1/2000', '12/31/2000', offset=datetools.Hour()) self.year = self.rng.map((lambda x: x.year)) @@ -1690,4 +1690,4 @@ def setup(self): self.s = Series(np.tile(self.uniques, (self.N // self.K))) def time_series_value_counts_strings(self): - self.s.value_counts() \ No newline at end of file + self.s.value_counts()
https://api.github.com/repos/pandas-dev/pandas/pulls/10942
2015-08-30T14:15:09Z
2015-08-31T00:56:45Z
2015-08-31T00:56:45Z
2015-08-31T00:56:49Z
Edit DOC: consistent imports (GH9886) part V
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index d51c2f62b8a0c..c62647010a131 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -6,9 +6,8 @@ import numpy as np np.random.seed(123456) - from numpy import nan - from pandas import * - options.display.max_rows=15 + import pandas as pd + pd.options.display.max_rows=15 randn = np.random.randn np.set_printoptions(precision=4, suppress=True) @@ -43,26 +42,26 @@ a simple example: .. ipython:: python - df1 = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}, - index=[0, 1, 2, 3]) + df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=[0, 1, 2, 3]) - df2 = DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], - 'B': ['B4', 'B5', 'B6', 'B7'], - 'C': ['C4', 'C5', 'C6', 'C7'], - 'D': ['D4', 'D5', 'D6', 'D7']}, - index=[4, 5, 6, 7]) + df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], + 'B': ['B4', 'B5', 'B6', 'B7'], + 'C': ['C4', 'C5', 'C6', 'C7'], + 'D': ['D4', 'D5', 'D6', 'D7']}, + index=[4, 5, 6, 7]) - df3 = DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], - 'B': ['B8', 'B9', 'B10', 'B11'], - 'C': ['C8', 'C9', 'C10', 'C11'], - 'D': ['D8', 'D9', 'D10', 'D11']}, - index=[8, 9, 10, 11]) + df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], + 'B': ['B8', 'B9', 'B10', 'B11'], + 'C': ['C8', 'C9', 'C10', 'C11'], + 'D': ['D8', 'D9', 'D10', 'D11']}, + index=[8, 9, 10, 11]) frames = [df1, df2, df3] - result = concat(frames) + result = pd.concat(frames) .. 
ipython:: python :suppress: @@ -78,7 +77,7 @@ some configurable handling of "what to do with the other axes": :: - concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, + pd.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False) - ``objs``: list or dict of Series, DataFrame, or Panel objects. If a dict is @@ -112,7 +111,7 @@ this using the ``keys`` argument: .. ipython:: python - result = concat(frames, keys=['x', 'y', 'z']) + result = pd.concat(frames, keys=['x', 'y', 'z']) .. ipython:: python :suppress: @@ -163,11 +162,11 @@ behavior: .. ipython:: python - df4 = DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], + df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], 'D': ['D2', 'D3', 'D6', 'D7'], 'F': ['F2', 'F3', 'F6', 'F7']}, index=[2, 3, 6, 7]) - result = concat([df1, df4], axis=1) + result = pd.concat([df1, df4], axis=1) .. ipython:: python @@ -183,7 +182,7 @@ with ``join='inner'``: .. ipython:: python - result = concat([df1, df4], axis=1, join='inner') + result = pd.concat([df1, df4], axis=1, join='inner') .. ipython:: python :suppress: @@ -198,7 +197,7 @@ DataFrame: .. ipython:: python - result = concat([df1, df4], axis=1, join_axes=[df1.index]) + result = pd.concat([df1, df4], axis=1, join_axes=[df1.index]) .. ipython:: python :suppress: @@ -275,7 +274,7 @@ To do this, use the ``ignore_index`` argument: .. ipython:: python - result = concat([df1, df4], ignore_index=True) + result = pd.concat([df1, df4], ignore_index=True) .. ipython:: python :suppress: @@ -310,8 +309,8 @@ the name of the Series. .. ipython:: python - s1 = Series(['X0', 'X1', 'X2', 'X3'], name='X') - result = concat([df1, s1], axis=1) + s1 = pd.Series(['X0', 'X1', 'X2', 'X3'], name='X') + result = pd.concat([df1, s1], axis=1) .. ipython:: python :suppress: @@ -325,8 +324,8 @@ If unnamed Series are passed they will be numbered consecutively. .. 
ipython:: python - s2 = Series(['_0', '_1', '_2', '_3']) - result = concat([df1, s2, s2, s2], axis=1) + s2 = pd.Series(['_0', '_1', '_2', '_3']) + result = pd.concat([df1, s2, s2, s2], axis=1) .. ipython:: python :suppress: @@ -340,7 +339,7 @@ Passing ``ignore_index=True`` will drop all name references. .. ipython:: python - result = concat([df1, s1], axis=1, ignore_index=True) + result = pd.concat([df1, s1], axis=1, ignore_index=True) .. ipython:: python :suppress: @@ -357,7 +356,7 @@ Let's consider a variation on the first example presented: .. ipython:: python - result = concat(frames, keys=['x', 'y', 'z']) + result = pd.concat(frames, keys=['x', 'y', 'z']) .. ipython:: python :suppress: @@ -373,7 +372,7 @@ for the ``keys`` argument (unless other keys are specified): .. ipython:: python pieces = {'x': df1, 'y': df2, 'z': df3} - result = concat(pieces) + result = pd.concat(pieces) .. ipython:: python :suppress: @@ -385,7 +384,7 @@ for the ``keys`` argument (unless other keys are specified): .. ipython:: python - result = concat(pieces, keys=['z', 'y']) + result = pd.concat(pieces, keys=['z', 'y']) .. ipython:: python :suppress: @@ -407,7 +406,7 @@ do so using the ``levels`` argument: .. ipython:: python - result = concat(pieces, keys=['x', 'y', 'z'], + result = pd.concat(pieces, keys=['x', 'y', 'z'], levels=[['z', 'y', 'x', 'w']], names=['group_key']) @@ -437,7 +436,7 @@ which returns a new DataFrame as above. .. ipython:: python - s2 = Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) + s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) result = df1.append(s2, ignore_index=True) .. 
ipython:: python @@ -464,7 +463,7 @@ You can also pass a list of dicts or Series: :suppress: @savefig merging_append_dits.png - p.plot([df1, DataFrame(dicts)], result, + p.plot([df1, pd.DataFrame(dicts)], result, labels=['df1', 'dicts'], vertical=True); plt.close('all'); @@ -490,9 +489,9 @@ standard database join operations between DataFrame objects: :: - merge(left, right, how='inner', on=None, left_on=None, right_on=None, - left_index=False, right_index=False, sort=True, - suffixes=('_x', '_y'), copy=True) + pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None, + left_index=False, right_index=False, sort=True, + suffixes=('_x', '_y'), copy=True) Here's a description of what each argument is for: @@ -566,14 +565,14 @@ key combination: .. ipython:: python - left = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], - 'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3']}) + left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) - right = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}) - result = merge(left, right, on='key') + right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) + result = pd.merge(left, right, on='key') .. ipython:: python :suppress: @@ -587,17 +586,17 @@ Here is a more complicated example with multiple join keys: .. 
ipython:: python - left = DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], - 'key2': ['K0', 'K1', 'K0', 'K1'], - 'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3']}) + left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) - right = DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], - 'key2': ['K0', 'K0', 'K0', 'K0'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}) + right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], + 'key2': ['K0', 'K0', 'K0', 'K0'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) - result = merge(left, right, on=['key1', 'key2']) + result = pd.merge(left, right, on=['key1', 'key2']) .. ipython:: python :suppress: @@ -623,7 +622,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='left', on=['key1', 'key2']) + result = pd.merge(left, right, how='left', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -635,7 +634,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='right', on=['key1', 'key2']) + result = pd.merge(left, right, how='right', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -646,7 +645,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='outer', on=['key1', 'key2']) + result = pd.merge(left, right, how='outer', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -658,7 +657,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='inner', on=['key1', 'key2']) + result = pd.merge(left, right, how='inner', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -679,13 +678,13 @@ is a very basic example: .. 
ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=['K0', 'K1', 'K2']) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=['K0', 'K1', 'K2']) - right = DataFrame({'C': ['C0', 'C2', 'C3'], - 'D': ['D0', 'D2', 'D3']}, - index=['K0', 'K2', 'K3']) + right = pd.DataFrame({'C': ['C0', 'C2', 'C3'], + 'D': ['D0', 'D2', 'D3']}, + index=['K0', 'K2', 'K3']) result = left.join(right) @@ -727,7 +726,7 @@ indexes: .. ipython:: python - result = merge(left, right, left_index=True, right_index=True, how='outer') + result = pd.merge(left, right, left_index=True, right_index=True, how='outer') .. ipython:: python :suppress: @@ -739,7 +738,7 @@ indexes: .. ipython:: python - result = merge(left, right, left_index=True, right_index=True, how='inner'); + result = pd.merge(left, right, left_index=True, right_index=True, how='inner'); .. ipython:: python :suppress: @@ -760,7 +759,7 @@ equivalent: :: left.join(right, on=key_or_keys) - merge(left, right, left_on=key_or_keys, right_index=True, + pd.merge(left, right, left_on=key_or_keys, right_index=True, how='left', sort=False) Obviously you can choose whichever form you find more convenient. For @@ -769,13 +768,13 @@ key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'key': ['K0', 'K1', 'K0', 'K1']}) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key': ['K0', 'K1', 'K0', 'K1']}) - right = DataFrame({'C': ['C0', 'C1'], - 'D': ['D0', 'D1']}, - index=['K0', 'K1']) + right = pd.DataFrame({'C': ['C0', 'C1'], + 'D': ['D0', 'D1']}, + index=['K0', 'K1']) result = left.join(right, on='key') @@ -789,8 +788,8 @@ key), using ``join`` may be more convenient. Here is a simple example: .. 
ipython:: python - result = merge(left, right, left_on='key', right_index=True, - how='left', sort=False); + result = pd.merge(left, right, left_on='key', right_index=True, + how='left', sort=False); .. ipython:: python :suppress: @@ -806,14 +805,14 @@ To join on multiple keys, the passed DataFrame must have a ``MultiIndex``: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'key1': ['K0', 'K0', 'K1', 'K2'], - 'key2': ['K0', 'K1', 'K0', 'K1']}) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1']}) - index = MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), - ('K2', 'K0'), ('K2', 'K1')]) - right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + index = pd.MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), + ('K2', 'K0'), ('K2', 'K1')]) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index) @@ -865,16 +864,16 @@ a level name of the multi-indexed frame. .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=Index(['K0', 'K1', 'K2'], name='key')) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=Index(['K0', 'K1', 'K2'], name='key')) - index = MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), - ('K2', 'Y2'), ('K2', 'Y3')], - names=['key', 'Y']) - right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}, - index=index) + index = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), + ('K2', 'Y2'), ('K2', 'Y3')], + names=['key', 'Y']) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=index) result = left.join(right, how='inner') @@ -890,7 +889,7 @@ This is equivalent but less verbose and more memory efficient / faster than this .. 
ipython:: python - result = merge(left.reset_index(), right.reset_index(), + result = pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner').set_index(['key','Y']) .. ipython:: python @@ -908,15 +907,15 @@ This is not Implemented via ``join`` at-the-moment, however it can be done using .. ipython:: python - index = MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), - ('K1', 'X2')], - names=['key', 'X']) - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=index) + index = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), + ('K1', 'X2')], + names=['key', 'X']) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=index) - result = merge(left.reset_index(), right.reset_index(), - on=['key'], how='inner').set_index(['key','X','Y']) + result = pd.merge(left.reset_index(), right.reset_index(), + on=['key'], how='inner').set_index(['key','X','Y']) .. ipython:: python :suppress: @@ -935,10 +934,10 @@ columns: .. ipython:: python - left = DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) - right = DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) + left = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) + right = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) - result = merge(left, right, on='k') + result = pd.merge(left, right, on='k') .. ipython:: python :suppress: @@ -950,7 +949,7 @@ columns: .. ipython:: python - result = merge(left, right, on='k', suffixes=['_l', '_r']) + result = pd.merge(left, right, on='k', suffixes=['_l', '_r']) .. ipython:: python :suppress: @@ -987,7 +986,7 @@ them together on their indexes. The same is true for ``Panel.join``. .. ipython:: python - right2 = DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) + right2 = pd.DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) result = left.join([right, right2]) .. ipython:: python @@ -1037,10 +1036,10 @@ object from values for matching indices in the other. Here is an example: .. 
ipython:: python - df1 = DataFrame([[nan, 3., 5.], [-4.6, np.nan, nan], - [nan, 7., nan]]) - df2 = DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], - index=[1, 2]) + df1 = pd.DataFrame([[np.nan, 3., 5.], [-4.6, np.nan, np.nan], + [np.nan, 7., np.nan]]) + df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], + index=[1, 2]) For this, use the ``combine_first`` method: @@ -1075,4 +1074,4 @@ values inplace: @savefig merging_update.png p.plot([df1_copy, df2], df1, labels=['df1', 'df2'], vertical=False); - plt.close('all'); + plt.close('all'); \ No newline at end of file
Updated - but no idea why this is such a messed up branch.
https://api.github.com/repos/pandas-dev/pandas/pulls/10941
2015-08-30T14:07:24Z
2015-09-01T09:17:33Z
2015-09-01T09:17:33Z
2015-09-01T09:18:15Z
DOC: consistent imports (GH9886) part V Fix of I#10934
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index d51c2f62b8a0c..c62647010a131 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -6,9 +6,8 @@ import numpy as np np.random.seed(123456) - from numpy import nan - from pandas import * - options.display.max_rows=15 + import pandas as pd + pd.options.display.max_rows=15 randn = np.random.randn np.set_printoptions(precision=4, suppress=True) @@ -43,26 +42,26 @@ a simple example: .. ipython:: python - df1 = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}, - index=[0, 1, 2, 3]) + df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=[0, 1, 2, 3]) - df2 = DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], - 'B': ['B4', 'B5', 'B6', 'B7'], - 'C': ['C4', 'C5', 'C6', 'C7'], - 'D': ['D4', 'D5', 'D6', 'D7']}, - index=[4, 5, 6, 7]) + df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], + 'B': ['B4', 'B5', 'B6', 'B7'], + 'C': ['C4', 'C5', 'C6', 'C7'], + 'D': ['D4', 'D5', 'D6', 'D7']}, + index=[4, 5, 6, 7]) - df3 = DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], - 'B': ['B8', 'B9', 'B10', 'B11'], - 'C': ['C8', 'C9', 'C10', 'C11'], - 'D': ['D8', 'D9', 'D10', 'D11']}, - index=[8, 9, 10, 11]) + df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], + 'B': ['B8', 'B9', 'B10', 'B11'], + 'C': ['C8', 'C9', 'C10', 'C11'], + 'D': ['D8', 'D9', 'D10', 'D11']}, + index=[8, 9, 10, 11]) frames = [df1, df2, df3] - result = concat(frames) + result = pd.concat(frames) .. 
ipython:: python :suppress: @@ -78,7 +77,7 @@ some configurable handling of "what to do with the other axes": :: - concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, + pd.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False) - ``objs``: list or dict of Series, DataFrame, or Panel objects. If a dict is @@ -112,7 +111,7 @@ this using the ``keys`` argument: .. ipython:: python - result = concat(frames, keys=['x', 'y', 'z']) + result = pd.concat(frames, keys=['x', 'y', 'z']) .. ipython:: python :suppress: @@ -163,11 +162,11 @@ behavior: .. ipython:: python - df4 = DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], + df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], 'D': ['D2', 'D3', 'D6', 'D7'], 'F': ['F2', 'F3', 'F6', 'F7']}, index=[2, 3, 6, 7]) - result = concat([df1, df4], axis=1) + result = pd.concat([df1, df4], axis=1) .. ipython:: python @@ -183,7 +182,7 @@ with ``join='inner'``: .. ipython:: python - result = concat([df1, df4], axis=1, join='inner') + result = pd.concat([df1, df4], axis=1, join='inner') .. ipython:: python :suppress: @@ -198,7 +197,7 @@ DataFrame: .. ipython:: python - result = concat([df1, df4], axis=1, join_axes=[df1.index]) + result = pd.concat([df1, df4], axis=1, join_axes=[df1.index]) .. ipython:: python :suppress: @@ -275,7 +274,7 @@ To do this, use the ``ignore_index`` argument: .. ipython:: python - result = concat([df1, df4], ignore_index=True) + result = pd.concat([df1, df4], ignore_index=True) .. ipython:: python :suppress: @@ -310,8 +309,8 @@ the name of the Series. .. ipython:: python - s1 = Series(['X0', 'X1', 'X2', 'X3'], name='X') - result = concat([df1, s1], axis=1) + s1 = pd.Series(['X0', 'X1', 'X2', 'X3'], name='X') + result = pd.concat([df1, s1], axis=1) .. ipython:: python :suppress: @@ -325,8 +324,8 @@ If unnamed Series are passed they will be numbered consecutively. .. 
ipython:: python - s2 = Series(['_0', '_1', '_2', '_3']) - result = concat([df1, s2, s2, s2], axis=1) + s2 = pd.Series(['_0', '_1', '_2', '_3']) + result = pd.concat([df1, s2, s2, s2], axis=1) .. ipython:: python :suppress: @@ -340,7 +339,7 @@ Passing ``ignore_index=True`` will drop all name references. .. ipython:: python - result = concat([df1, s1], axis=1, ignore_index=True) + result = pd.concat([df1, s1], axis=1, ignore_index=True) .. ipython:: python :suppress: @@ -357,7 +356,7 @@ Let's consider a variation on the first example presented: .. ipython:: python - result = concat(frames, keys=['x', 'y', 'z']) + result = pd.concat(frames, keys=['x', 'y', 'z']) .. ipython:: python :suppress: @@ -373,7 +372,7 @@ for the ``keys`` argument (unless other keys are specified): .. ipython:: python pieces = {'x': df1, 'y': df2, 'z': df3} - result = concat(pieces) + result = pd.concat(pieces) .. ipython:: python :suppress: @@ -385,7 +384,7 @@ for the ``keys`` argument (unless other keys are specified): .. ipython:: python - result = concat(pieces, keys=['z', 'y']) + result = pd.concat(pieces, keys=['z', 'y']) .. ipython:: python :suppress: @@ -407,7 +406,7 @@ do so using the ``levels`` argument: .. ipython:: python - result = concat(pieces, keys=['x', 'y', 'z'], + result = pd.concat(pieces, keys=['x', 'y', 'z'], levels=[['z', 'y', 'x', 'w']], names=['group_key']) @@ -437,7 +436,7 @@ which returns a new DataFrame as above. .. ipython:: python - s2 = Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) + s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) result = df1.append(s2, ignore_index=True) .. 
ipython:: python @@ -464,7 +463,7 @@ You can also pass a list of dicts or Series: :suppress: @savefig merging_append_dits.png - p.plot([df1, DataFrame(dicts)], result, + p.plot([df1, pd.DataFrame(dicts)], result, labels=['df1', 'dicts'], vertical=True); plt.close('all'); @@ -490,9 +489,9 @@ standard database join operations between DataFrame objects: :: - merge(left, right, how='inner', on=None, left_on=None, right_on=None, - left_index=False, right_index=False, sort=True, - suffixes=('_x', '_y'), copy=True) + pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None, + left_index=False, right_index=False, sort=True, + suffixes=('_x', '_y'), copy=True) Here's a description of what each argument is for: @@ -566,14 +565,14 @@ key combination: .. ipython:: python - left = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], - 'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3']}) + left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) - right = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}) - result = merge(left, right, on='key') + right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) + result = pd.merge(left, right, on='key') .. ipython:: python :suppress: @@ -587,17 +586,17 @@ Here is a more complicated example with multiple join keys: .. 
ipython:: python - left = DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], - 'key2': ['K0', 'K1', 'K0', 'K1'], - 'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3']}) + left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) - right = DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], - 'key2': ['K0', 'K0', 'K0', 'K0'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}) + right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], + 'key2': ['K0', 'K0', 'K0', 'K0'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) - result = merge(left, right, on=['key1', 'key2']) + result = pd.merge(left, right, on=['key1', 'key2']) .. ipython:: python :suppress: @@ -623,7 +622,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='left', on=['key1', 'key2']) + result = pd.merge(left, right, how='left', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -635,7 +634,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='right', on=['key1', 'key2']) + result = pd.merge(left, right, how='right', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -646,7 +645,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='outer', on=['key1', 'key2']) + result = pd.merge(left, right, how='outer', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -658,7 +657,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='inner', on=['key1', 'key2']) + result = pd.merge(left, right, how='inner', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -679,13 +678,13 @@ is a very basic example: .. 
ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=['K0', 'K1', 'K2']) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=['K0', 'K1', 'K2']) - right = DataFrame({'C': ['C0', 'C2', 'C3'], - 'D': ['D0', 'D2', 'D3']}, - index=['K0', 'K2', 'K3']) + right = pd.DataFrame({'C': ['C0', 'C2', 'C3'], + 'D': ['D0', 'D2', 'D3']}, + index=['K0', 'K2', 'K3']) result = left.join(right) @@ -727,7 +726,7 @@ indexes: .. ipython:: python - result = merge(left, right, left_index=True, right_index=True, how='outer') + result = pd.merge(left, right, left_index=True, right_index=True, how='outer') .. ipython:: python :suppress: @@ -739,7 +738,7 @@ indexes: .. ipython:: python - result = merge(left, right, left_index=True, right_index=True, how='inner'); + result = pd.merge(left, right, left_index=True, right_index=True, how='inner'); .. ipython:: python :suppress: @@ -760,7 +759,7 @@ equivalent: :: left.join(right, on=key_or_keys) - merge(left, right, left_on=key_or_keys, right_index=True, + pd.merge(left, right, left_on=key_or_keys, right_index=True, how='left', sort=False) Obviously you can choose whichever form you find more convenient. For @@ -769,13 +768,13 @@ key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'key': ['K0', 'K1', 'K0', 'K1']}) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key': ['K0', 'K1', 'K0', 'K1']}) - right = DataFrame({'C': ['C0', 'C1'], - 'D': ['D0', 'D1']}, - index=['K0', 'K1']) + right = pd.DataFrame({'C': ['C0', 'C1'], + 'D': ['D0', 'D1']}, + index=['K0', 'K1']) result = left.join(right, on='key') @@ -789,8 +788,8 @@ key), using ``join`` may be more convenient. Here is a simple example: .. 
ipython:: python - result = merge(left, right, left_on='key', right_index=True, - how='left', sort=False); + result = pd.merge(left, right, left_on='key', right_index=True, + how='left', sort=False); .. ipython:: python :suppress: @@ -806,14 +805,14 @@ To join on multiple keys, the passed DataFrame must have a ``MultiIndex``: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'key1': ['K0', 'K0', 'K1', 'K2'], - 'key2': ['K0', 'K1', 'K0', 'K1']}) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1']}) - index = MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), - ('K2', 'K0'), ('K2', 'K1')]) - right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + index = pd.MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), + ('K2', 'K0'), ('K2', 'K1')]) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index) @@ -865,16 +864,16 @@ a level name of the multi-indexed frame. .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=Index(['K0', 'K1', 'K2'], name='key')) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=Index(['K0', 'K1', 'K2'], name='key')) - index = MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), - ('K2', 'Y2'), ('K2', 'Y3')], - names=['key', 'Y']) - right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}, - index=index) + index = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), + ('K2', 'Y2'), ('K2', 'Y3')], + names=['key', 'Y']) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=index) result = left.join(right, how='inner') @@ -890,7 +889,7 @@ This is equivalent but less verbose and more memory efficient / faster than this .. 
ipython:: python - result = merge(left.reset_index(), right.reset_index(), + result = pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner').set_index(['key','Y']) .. ipython:: python @@ -908,15 +907,15 @@ This is not Implemented via ``join`` at-the-moment, however it can be done using .. ipython:: python - index = MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), - ('K1', 'X2')], - names=['key', 'X']) - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=index) + index = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), + ('K1', 'X2')], + names=['key', 'X']) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=index) - result = merge(left.reset_index(), right.reset_index(), - on=['key'], how='inner').set_index(['key','X','Y']) + result = pd.merge(left.reset_index(), right.reset_index(), + on=['key'], how='inner').set_index(['key','X','Y']) .. ipython:: python :suppress: @@ -935,10 +934,10 @@ columns: .. ipython:: python - left = DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) - right = DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) + left = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) + right = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) - result = merge(left, right, on='k') + result = pd.merge(left, right, on='k') .. ipython:: python :suppress: @@ -950,7 +949,7 @@ columns: .. ipython:: python - result = merge(left, right, on='k', suffixes=['_l', '_r']) + result = pd.merge(left, right, on='k', suffixes=['_l', '_r']) .. ipython:: python :suppress: @@ -987,7 +986,7 @@ them together on their indexes. The same is true for ``Panel.join``. .. ipython:: python - right2 = DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) + right2 = pd.DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) result = left.join([right, right2]) .. ipython:: python @@ -1037,10 +1036,10 @@ object from values for matching indices in the other. Here is an example: .. 
ipython:: python - df1 = DataFrame([[nan, 3., 5.], [-4.6, np.nan, nan], - [nan, 7., nan]]) - df2 = DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], - index=[1, 2]) + df1 = pd.DataFrame([[np.nan, 3., 5.], [-4.6, np.nan, np.nan], + [np.nan, 7., np.nan]]) + df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], + index=[1, 2]) For this, use the ``combine_first`` method: @@ -1075,4 +1074,4 @@ values inplace: @savefig merging_update.png p.plot([df1_copy, df2], df1, labels=['df1', 'df2'], vertical=False); - plt.close('all'); + plt.close('all'); \ No newline at end of file diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt deleted file mode 100644 index 3e81a923a114c..0000000000000 --- a/doc/source/whatsnew/v0.17.0.txt +++ /dev/null @@ -1,824 +0,0 @@ -.. _whatsnew_0170: - -v0.17.0 (September ??, 2015) ----------------------------- - -This is a major release from 0.16.2 and includes a small number of API changes, several new features, -enhancements, and performance improvements along with a large number of bug fixes. We recommend that all -users upgrade to this version. - -.. warning:: - - pandas >= 0.17.0 will no longer support compatibility with Python version 3.2 (:issue:`9118`) - -.. warning:: - - The ``pandas.io.data`` package is deprecated and will be replaced by the - `pandas-datareader package <https://github.com/pydata/pandas-datareader>`_. - This will allow the data modules to be independently updated to your pandas - installation. The API for ``pandas-datareader v0.1.1`` is exactly the same - as in ``pandas v0.17.0`` (:issue:`8961`, :issue:`10861`). - - After installing pandas-datareader, you can easily change your imports: - - .. 
code-block:: Python - - from pandas.io import data, wb # becomes - from pandas_datareader import data, wb - -Highlights include: - -- Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>` -- The sorting API has been revamped to remove some long-time inconsistencies, see :ref:`here <whatsnew_0170.api_breaking.sorting>` -- The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats, - previously this would return the original input, see :ref:`here <whatsnew_0170.api_breaking.to_datetime>` -- The default for ``dropna`` in ``HDFStore`` has changed to ``False``, to store by default all rows even - if they are all ``NaN``, see :ref:`here <whatsnew_0170.api_breaking.hdf_dropna>` -- Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>` -- Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`) -- Development support for benchmarking with the `Air Speed Velocity library <https://github.com/spacetelescope/asv/>`_ (:issue:`8316`) -- Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>` -- Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>` - -Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating. - -.. contents:: What's new in v0.17.0 - :local: - :backlinks: none - -.. _whatsnew_0170.enhancements: - -New features -~~~~~~~~~~~~ - -- ``DataFrame`` has the ``nlargest`` and ``nsmallest`` methods (:issue:`10393`) -- SQL io functions now accept a SQLAlchemy connectable. 
(:issue:`7877`) -- Enable writing complex values to HDF stores when using table format (:issue:`10447`) -- Enable reading gzip compressed files via URL, either by explicitly setting the compression parameter or by inferring from the presence of the HTTP Content-Encoding header in the response (:issue:`8685`) - -.. _whatsnew_0170.gil: - -Releasing the GIL -^^^^^^^^^^^^^^^^^ - -We are releasing the global-interpreter-lock (GIL) on some cython operations. -This will allow other threads to run simultaneously during computation, potentially allowing performance improvements -from multi-threading. Notably ``groupby`` and some indexing operations are a benefit from this. (:issue:`8882`) - -For example the groupby expression in the following code will have the GIL released during the factorization step, e.g. ``df.groupby('key')`` -as well as the ``.sum()`` operation. - -.. code-block:: python - - N = 1e6 - df = DataFrame({'key' : np.random.randint(0,ngroups,size=N), - 'data' : np.random.randn(N) }) - df.groupby('key')['data'].sum() - -Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performaning multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library. - -.. _dask: https://dask.readthedocs.org/en/latest/ -.. _QT: https://wiki.python.org/moin/PyQt - -.. _whatsnew_0170.strftime: - -Support strftime for Datetimelikes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -We are now supporting a ``Series.dt.strftime`` method for datetime-likes to generate a formatted string (:issue:`10110`). Examples: - -.. ipython:: python - - # DatetimeIndex - s = pd.Series(pd.date_range('20130101', periods=4)) - s - s.dt.strftime('%Y/%m/%d') - -.. 
ipython:: python - - # PeriodIndex - s = pd.Series(pd.period_range('20130101', periods=4)) - s - s.dt.strftime('%Y/%m/%d') - -The string format is as the python standard library and details can be found `here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_ - -.. _whatsnew_0170.enhancements.sas_xport: - -Support for SAS XPORT files -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:meth:`~pandas.io.read_sas` provides support for reading *SAS XPORT* format files. (:issue:`4052`). - -.. code-block:: python - - df = pd.read_sas('sas_xport.xpt') - -It is also possible to obtain an iterator and read an XPORT file -incrementally. - -.. code-block:: python - - for df in pd.read_sas('sas_xport.xpt', chunksize=10000) - do_something(df) - -See the :ref:`docs <io.sas>` for more details. - -.. _whatsnew_0170.enhancements.other: - -Other enhancements -^^^^^^^^^^^^^^^^^^ - -- `read_sql` and `to_sql` can accept database URI as con parameter (:issue:`10214`) -- Enable `read_hdf` to be used without specifying a key when the HDF file contains a single dataset (:issue:`10443`) -- Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`) -- Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`) -- Added functionality to use the ``base`` argument when resampling a ``TimeDeltaIndex`` (:issue:`10530`) -- ``DatetimeIndex`` can be instantiated using strings contains ``NaT`` (:issue:`7599`) -- The string parsing of ``to_datetime``, ``Timestamp`` and ``DatetimeIndex`` has been made consistent. (:issue:`7599`) - - Prior to v0.17.0, ``Timestamp`` and ``to_datetime`` may parse year-only datetime-string incorrectly using today's date, otherwise ``DatetimeIndex`` - uses the beginning of the year. ``Timestamp`` and ``to_datetime`` may raise ``ValueError`` in some types of datetime-string which ``DatetimeIndex`` - can parse, such as a quarterly string. - - Previous Behavior - - .. 
code-block:: python - - In [1]: Timestamp('2012Q2') - Traceback - ... - ValueError: Unable to parse 2012Q2 - - # Results in today's date. - In [2]: Timestamp('2014') - Out [2]: 2014-08-12 00:00:00 - - v0.17.0 can parse them as below. It works on ``DatetimeIndex`` also. - - New Behaviour - - .. ipython:: python - - Timestamp('2012Q2') - Timestamp('2014') - DatetimeIndex(['2012Q2', '2014']) - - .. note:: If you want to perform calculations based on today's date, use ``Timestamp.now()`` and ``pandas.tseries.offsets``. - - .. ipython:: python - - import pandas.tseries.offsets as offsets - Timestamp.now() - Timestamp.now() + offsets.DateOffset(years=1) - -- ``to_datetime`` can now accept ``yearfirst`` keyword (:issue:`7599`) - -- ``pandas.tseries.offsets`` larger than the ``Day`` offset can now be used with with ``Series`` for addition/subtraction (:issue:`10699`). See the :ref:`Documentation <timeseries.offsetseries>` for more details. - -- ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`) - -- ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`). -- ``pd.read_stata`` will now read Stata 118 type files. (:issue:`9882`) - -- ``pd.merge`` will now allow duplicate column names if they are not merged upon (:issue:`10639`). - -- ``pd.pivot`` will now allow passing index as ``None`` (:issue:`3962`). - -- ``read_sql_table`` will now allow reading from views (:issue:`10750`). - -- ``msgpack`` submodule has been updated to 0.4.6 with backward compatibility (:issue:`10581`) - -- ``DataFrame.to_dict`` now accepts the *index* option in ``orient`` keyword argument (:issue:`10844`). - -- ``drop_duplicates`` and ``duplicated`` now accept ``keep`` keyword to target first, last, and all duplicates. 
``take_last`` keyword is deprecated, see :ref:`deprecations <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`) - - .. ipython :: python - - s = pd.Series(['A', 'B', 'C', 'A', 'B', 'D']) - s.drop_duplicates() - s.drop_duplicates(keep='last') - s.drop_duplicates(keep=False) - -- Reindex now has a ``tolerance`` argument that allows for finer control of :ref:`basics.limits_on_reindex_fill`: - - .. ipython:: python - - df = pd.DataFrame({'x': range(5), 't': pd.date_range('2000-01-01', periods=5)}) - df.reindex([0.1, 1.9, 3.5], method='nearest', tolerance=0.2) - - When used on a ``DatetimeIndex``, ``TimedeltaIndex`` or ``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible. This allows you to specify tolerance with a string: - - .. ipython:: python - - df = df.set_index('t') - df.reindex(pd.to_datetime(['1999-12-31']), method='nearest', tolerance='1 day') - - ``tolerance`` is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods. - -- Support pickling of ``Period`` objects (:issue:`10439`) - -- ``DataFrame.apply`` will return a Series of dicts if the passed function returns a dict and ``reduce=True`` (:issue:`8735`). - -.. _whatsnew_0170.api: - -.. _whatsnew_0170.api_breaking: - -Backwards incompatible API changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. _whatsnew_0170.api_breaking.sorting: - -Changes to sorting API -^^^^^^^^^^^^^^^^^^^^^^ - -The sorting API has had some longtime inconsistencies. (:issue:`9816`, :issue:`8239`). - -Here is a summary of the API **PRIOR** to 0.17.0: - -- ``Series.sort`` is **INPLACE** while ``DataFrame.sort`` returns a new object. -- ``Series.order`` returns a new object -- It was possible to use ``Series/DataFrame.sort_index`` to sort by **values** by passing the ``by`` keyword. -- ``Series/DataFrame.sortlevel`` worked only on a ``MultiIndex`` for sorting by index. 
- -To address these issues, we have revamped the API: - -- We have introduced a new method, :meth:`DataFrame.sort_values`, which is the merger of ``DataFrame.sort()``, ``Series.sort()``, - and ``Series.order()``, to handle sorting of **values**. -- The existing methods ``Series.sort()``, ``Series.order()``, and ``DataFrame.sort()`` has been deprecated and will be removed in a - future version of pandas. -- The ``by`` argument of ``DataFrame.sort_index()`` has been deprecated and will be removed in a future version of pandas. -- The existing method ``.sort_index()`` will gain the ``level`` keyword to enable level sorting. - -We now have two distinct and non-overlapping methods of sorting. A ``*`` marks items that -will show a ``FutureWarning``. - -To sort by the **values**: - -================================== ==================================== -Previous Replacement -================================== ==================================== -\* ``Series.order()`` ``Series.sort_values()`` -\* ``Series.sort()`` ``Series.sort_values(inplace=True)`` -\* ``DataFrame.sort(columns=...)`` ``DataFrame.sort_values(by=...)`` -================================== ==================================== - -To sort by the **index**: - -================================== ==================================== -Previous Replacement -================================== ==================================== -``Series.sort_index()`` ``Series.sort_index()`` -``Series.sortlevel(level=...)`` ``Series.sort_index(level=...``) -``DataFrame.sort_index()`` ``DataFrame.sort_index()`` -``DataFrame.sortlevel(level=...)`` ``DataFrame.sort_index(level=...)`` -\* ``DataFrame.sort()`` ``DataFrame.sort_index()`` -================================== ==================================== - -We have also deprecated and changed similar methods in two Series-like classes, ``Index`` and ``Categorical``. 
- -================================== ==================================== -Previous Replacement -================================== ==================================== -\* ``Index.order()`` ``Index.sort_values()`` -\* ``Categorical.order()`` ``Categorical.sort_values`` -================================== ==================================== - -.. _whatsnew_0170.api_breaking.to_datetime: - -Changes to to_datetime and to_timedelta -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default for ``pd.to_datetime`` error handling has changed to ``errors='raise'``. In prior versions it was ``errors='ignore'``. -Furthermore, the ``coerce`` argument has been deprecated in favor of ``errors='coerce'``. This means that invalid parsing will raise rather that return the original -input as in previous versions. (:issue:`10636`) - -Previous Behavior: - -.. code-block:: python - - In [2]: pd.to_datetime(['2009-07-31', 'asd']) - Out[2]: array(['2009-07-31', 'asd'], dtype=object) - -New Behavior: - -.. code-block:: python - - In [3]: pd.to_datetime(['2009-07-31', 'asd']) - ValueError: Unknown string format - -.. ipython:: python - -Of course you can coerce this as well. - -.. ipython:: python - - to_datetime(['2009-07-31', 'asd'], errors='coerce') - -To keep the previous behaviour, you can use ``errors='ignore'``: - -.. ipython:: python - - to_datetime(['2009-07-31', 'asd'], errors='ignore') - -Furthermore, ``pd.to_timedelta`` has gained a similar API, of ``errors='raise'|'ignore'|'coerce'``, and the ``coerce`` keyword -has been deprecated in favor of ``errors='coerce'``. - -.. _whatsnew_0170.api_breaking.convert_objects: - -Changes to convert_objects -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``DataFrame.convert_objects`` keyword arguments have been shortened. 
(:issue:`10265`) - -===================== ============= -Previous Replacement -===================== ============= -``convert_dates`` ``datetime`` -``convert_numeric`` ``numeric`` -``convert_timedelta`` ``timedelta`` -===================== ============= - -Coercing types with ``DataFrame.convert_objects`` is now implemented using the -keyword argument ``coerce=True``. Previously types were coerced by setting a -keyword argument to ``'coerce'`` instead of ``True``, as in ``convert_dates='coerce'``. - -.. ipython:: python - - df = pd.DataFrame({'i': ['1','2'], - 'f': ['apple', '4.2'], - 's': ['apple','banana']}) - df - -The old usage of ``DataFrame.convert_objects`` used ``'coerce'`` along with the -type. - -.. code-block:: python - - In [2]: df.convert_objects(convert_numeric='coerce') - -Now the ``coerce`` keyword must be explicitly used. - -.. ipython:: python - - df.convert_objects(numeric=True, coerce=True) - -In earlier versions of pandas, ``DataFrame.convert_objects`` would not coerce -numeric types when there were no values convertible to a numeric type. This returns -the original DataFrame with no conversion. - -.. code-block:: python - - In [1]: df = pd.DataFrame({'s': ['a','b']}) - In [2]: df.convert_objects(convert_numeric='coerce') - Out[2]: - s - 0 a - 1 b - -THe new behavior will convert all non-number-like strings to ``NaN``, -when ``coerce=True`` is passed explicity. - -.. ipython:: python - - pd.DataFrame({'s': ['a','b']}) - df.convert_objects(numeric=True, coerce=True) - -In earlier versions of pandas, the default behavior was to try and convert -datetimes and timestamps. The new default is for ``DataFrame.convert_objects`` -to do nothing, and so it is necessary to pass at least one conversion target -in the method call. 
- -Changes to Index Comparisons -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Operator equal on ``Index`` should behavior similarly to ``Series`` (:issue:`9947`, :issue:`10637`) - -Starting in v0.17.0, comparing ``Index`` objects of different lengths will raise -a ``ValueError``. This is to be consistent with the behavior of ``Series``. - -Previous behavior: - -.. code-block:: python - - In [2]: pd.Index([1, 2, 3]) == pd.Index([1, 4, 5]) - Out[2]: array([ True, False, False], dtype=bool) - - In [3]: pd.Index([1, 2, 3]) == pd.Index([2]) - Out[3]: array([False, True, False], dtype=bool) - - In [4]: pd.Index([1, 2, 3]) == pd.Index([1, 2]) - Out[4]: False - -New behavior: - -.. code-block:: python - - In [8]: pd.Index([1, 2, 3]) == pd.Index([1, 4, 5]) - Out[8]: array([ True, False, False], dtype=bool) - - In [9]: pd.Index([1, 2, 3]) == pd.Index([2]) - ValueError: Lengths must match to compare - - In [10]: pd.Index([1, 2, 3]) == pd.Index([1, 2]) - ValueError: Lengths must match to compare - -Note that this is different from the ``numpy`` behavior where a comparison can -be broadcast: - -.. ipython:: python - - np.array([1, 2, 3]) == np.array([1]) - -or it can return False if broadcasting can not be done: - -.. ipython:: python - - np.array([1, 2, 3]) == np.array([1, 2]) - -Changes to Boolean Comparisons vs. None -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Boolean comparisons of a ``Series`` vs ``None`` will now be equivalent to comparing with ``np.nan``, rather than raise ``TypeError``. xref (:issue:`1079`). - -.. ipython:: python - - s = Series(range(3)) - s.iloc[1] = None - s - -Previous behavior: - -.. code-block:: python - - In [5]: s==None - TypeError: Could not compare <type 'NoneType'> type with Series - -New behavior: - -.. ipython:: python - - s==None - -Usually you simply want to know which values are null. - -.. ipython:: python - - s.isnull() - -.. 
warning:: - - You generally will want to use ``isnull/notnull`` for these types of comparisons, as ``isnull/notnull`` tells you which elements are null. One has to be - mindful that ``nan's`` don't compare equal, but ``None's`` do. Note that Pandas/numpy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``. - - .. ipython:: python - - None == None - np.nan == np.nan - -.. _whatsnew_0170.api_breaking.hdf_dropna: - -HDFStore dropna behavior -^^^^^^^^^^^^^^^^^^^^^^^^ - -The default behavior for HDFStore write functions with ``format='table'`` is now to keep rows that are all missing. Previously, the behavior was to drop rows that were all missing save the index. The previous behavior can be replicated using the ``dropna=True`` option. (:issue:`9382`) - -Previously: - -.. ipython:: python - - df_with_missing = pd.DataFrame({'col1':[0, np.nan, 2], - 'col2':[1, np.nan, np.nan]}) - - df_with_missing - - -.. code-block:: python - - In [28]: - df_with_missing.to_hdf('file.h5', - 'df_with_missing', - format='table', - mode='w') - - pd.read_hdf('file.h5', 'df_with_missing') - - Out [28]: - col1 col2 - 0 0 1 - 2 2 NaN - - -New behavior: - -.. ipython:: python - :suppress: - - import os - -.. ipython:: python - - df_with_missing.to_hdf('file.h5', - 'df_with_missing', - format='table', - mode='w') - - pd.read_hdf('file.h5', 'df_with_missing') - -.. ipython:: python - :suppress: - - os.remove('file.h5') - -See :ref:`documentation <io.hdf5>` for more details. - -.. _whatsnew_0170.api_breaking.display_precision: - -Changes to ``display.precision`` option -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``display.precision`` option has been clarified to refer to decimal places (:issue:`10451`). - -Earlier versions of pandas would format floating point numbers to have one less decimal place than the value in -``display.precision``. - -.. 
code-block:: python - - In [1]: pd.set_option('display.precision', 2) - - In [2]: pd.DataFrame({'x': [123.456789]}) - Out[2]: - x - 0 123.5 - -If interpreting precision as "significant figures" this did work for scientific notation but that same interpretation -did not work for values with standard formatting. It was also out of step with how numpy handles formatting. - -Going forward the value of ``display.precision`` will directly control the number of places after the decimal, for -regular formatting as well as scientific notation, similar to how numpy's ``precision`` print option works. - -.. ipython:: python - - pd.set_option('display.precision', 2) - pd.DataFrame({'x': [123.456789]}) - -To preserve output behavior with prior versions the default value of ``display.precision`` has been reduced to ``6`` -from ``7``. - -.. ipython:: python - :suppress: - - pd.set_option('display.precision', 6) - -.. _whatsnew_0170.api_breaking.categorical_unique: - -Changes to ``Categorical.unique`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``Categorical.unique`` now returns new ``Categoricals`` with ``categories`` and ``codes`` that are unique, rather than returning ``np.array`` (:issue:`10508`) - -- unordered category: values and categories are sorted by appearance order. -- ordered category: values are sorted by appearance order, categories keep existing order. - -.. ipython :: python - - cat = pd.Categorical(['C', 'A', 'B', 'C'], - categories=['A', 'B', 'C'], - ordered=True) - cat - cat.unique() - - cat = pd.Categorical(['C', 'A', 'B', 'C'], - categories=['A', 'B', 'C']) - cat - cat.unique() - - -.. _whatsnew_0170.api_breaking.other: - -Other API Changes -^^^^^^^^^^^^^^^^^ - -- Line and kde plot with ``subplots=True`` now uses default colors, not all black. 
Specify ``color='k'`` to draw all lines in black (:issue:`9894`) -- Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a Series with a ``CategoricalIndex`` (:issue:`10704`) -- Allow passing `kwargs` to the interpolation methods (:issue:`10378`). -- The metadata properties of subclasses of pandas objects will now be serialized (:issue:`10553`). -- Allow ``DataFrame`` with ``MultiIndex`` columns to be written to Excel (:issue:`10564`). This was changed in 0.16.2 as the read-back method could not always guarantee perfect fidelity (:issue:`9794`). -- ``groupby`` using ``Categorical`` follows the same rule as ``Categorical.unique`` described above (:issue:`10508`) -- Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`) - -- ``NaT``'s methods now either raise ``ValueError``, or return ``np.nan`` or ``NaT`` (:issue:`9513`) - - =============================== =============================================================== - Behavior Methods - =============================== =============================================================== - return ``np.nan`` ``weekday``, ``isoweekday`` - return ``NaT`` ``date``, ``now``, ``replace``, ``to_datetime``, ``today`` - return ``np.datetime64('NaT')`` ``to_datetime64`` (unchanged) - raise ``ValueError`` All other public methods (names not beginning with underscores) - =============================== =============================================================== - -.. _whatsnew_0170.deprecations: - -Deprecations -^^^^^^^^^^^^ - -.. note:: These indexing function have been deprecated in the documentation since 0.11.0. - -- For ``Series`` the following indexing functions are deprecated (:issue:`10177`). 
- - ===================== ================================= - Deprecated Function Replacement - ===================== ================================= - ``.irow(i)`` ``.iloc[i]`` or ``.iat[i]`` - ``.iget(i)`` ``.iloc[i]`` - ``.iget_value(i)`` ``.iloc[i]`` or ``.iat[i]`` - ===================== ================================= - -- For ``DataFrame`` the following indexing functions are deprecated (:issue:`10177`). - - ===================== ================================= - Deprecated Function Replacement - ===================== ================================= - ``.irow(i)`` ``.iloc[i]`` - ``.iget_value(i, j)`` ``.iloc[i, j]`` or ``.iat[i, j]`` - ``.icol(j)`` ``.iloc[:, j]`` - ===================== ================================= - -- ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`). -- ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`6511`, :issue:`8505`) -- ``Series.nsmallest`` and ``nlargest``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`10792`) -- ``DataFrame.combineAdd`` and ``DataFrame.combineMult`` are deprecated. They - can easily be replaced by using the ``add`` and ``mul`` methods: - ``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)`` - (:issue:`10735`). -- ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`) -- ``WidePanel`` deprecated in favor of ``Panel``, ``LongPanel`` in favor of ``DataFrame`` (note these have been aliases since < 0.11.0), (:issue:`10892`) - -.. _whatsnew_0170.prior_deprecations: - -Removal of prior version deprecations/changes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Remove use of some deprecated numpy comparison operations, mainly in tests. 
(:issue:`10569`) -- Removal of ``na_last`` parameters from ``Series.order()`` and ``Series.sort()``, in favor of ``na_position``, xref (:issue:`5231`) -- Remove of ``percentile_width`` from ``.describe()``, in favor of ``percentiles``. (:issue:`7088`) -- Removal of ``colSpace`` parameter from ``DataFrame.to_string()``, in favor of ``col_space``, circa 0.8.0 version. -- Removal of automatic time-series broadcasting (:issue:`2304`) - - .. ipython :: python - - np.random.seed(1234) - df = DataFrame(np.random.randn(5,2),columns=list('AB'),index=date_range('20130101',periods=5)) - df - - Previously - - .. code-block:: python - - In [3]: df + df.A - FutureWarning: TimeSeries broadcasting along DataFrame index by default is deprecated. - Please use DataFrame.<op> to explicitly broadcast arithmetic operations along the index - - Out[3]: - A B - 2013-01-01 0.942870 -0.719541 - 2013-01-02 2.865414 1.120055 - 2013-01-03 -1.441177 0.166574 - 2013-01-04 1.719177 0.223065 - 2013-01-05 0.031393 -2.226989 - - Current - - .. ipython :: python - - df.add(df.A,axis='index') - - -- Remove ``table`` keyword in ``HDFStore.put/append``, in favor of using ``format=`` (:issue:`4645`) -- Remove ``kind`` in ``read_excel/ExcelFile`` as its unused (:issue:`4712`) -- Remove ``infer_type`` keyword from ``pd.read_html`` as its unused (:issue:`4770`, :issue:`7032`) -- Remove ``offset`` and ``timeRule`` keywords from ``Series.tshift/shift``, in favor of ``freq`` (:issue:`4853`, :issue:`4864`) -- Remove ``pd.load/pd.save`` aliases in favor of ``pd.to_pickle/pd.read_pickle`` (:issue:`3787`) - -.. 
_whatsnew_0170.performance: - -Performance Improvements -~~~~~~~~~~~~~~~~~~~~~~~~ - -- Development support for benchmarking with the `Air Speed Velocity library <https://github.com/spacetelescope/asv/>`_ (:issue:`8316`) -- Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`) -- Performance improvements in ``Categorical.value_counts`` (:issue:`10804`) -- Performance improvements in ``SeriesGroupBy.nunique`` and ``SeriesGroupBy.value_counts`` (:issue:`10820`) -- Performance improvements in ``DataFrame.drop_duplicates`` with integer dtypes (:issue:`10917`) -- 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`) -- 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`) -- Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`) -- 8x improvement in ``iloc`` using list-like input (:issue:`10791`) -- Improved performance of ``Series.isin`` for datetimelike/integer Series (:issue:`10287`) -- 20x improvement in ``concat`` of Categoricals when categories are identical (:issue:`10587`) -- Improved performance of ``to_datetime`` when specified format string is ISO8601 (:issue:`10178`) -- 2x improvement of ``Series.value_counts`` for float dtype (:issue:`10821`) - -.. _whatsnew_0170.bug_fixes: - -Bug Fixes -~~~~~~~~~ - -- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) -- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) -- Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) -- Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) -- Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`) -- Bug in ``Series.quantile`` dropping name (:issue:`10881`) -- Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. 
(:issue:`10193`) -- Bug in ``pd.Series.interpolate`` with invalid ``order`` keyword values. (:issue:`10633`) -- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`) -- Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`) -- Bug in ``DataFrame.reset_index`` when index contains ``NaT``. (:issue:`10388`) -- Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`) -- Bug in ``BinGrouper.group_info`` where returned values are not compatible with base class (:issue:`10914`) -- Bug in clearing the cache on ``DataFrame.pop`` and a subsequent inplace op (:issue:`10912`) - -- Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`) - -- Bug in ``Table.select_column`` where name is not preserved (:issue:`10392`) -- Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`) -- Bug in ``pd.rolling_*`` where ``Series.name`` would be lost in the output (:issue:`10565`) -- Bug in ``stack`` when index or columns are not unique. (:issue:`10417`) -- Bug in setting a ``Panel`` when an axis has a multi-index (:issue:`10360`) -- Bug in ``USFederalHolidayCalendar`` where ``USMemorialDay`` and ``USMartinLutherKingJr`` were incorrect (:issue:`10278` and :issue:`9760` ) -- Bug in ``.sample()`` where returned object, if set, gives unnecessary ``SettingWithCopyWarning`` (:issue:`10738`) -- Bug in ``.sample()`` where weights passed as ``Series`` were not aligned along axis before being treated positionally, potentially causing problems if weight indices were not aligned with sampled object. (:issue:`10738`) - - - -- Bug in ``DataFrame.interpolate`` with ``axis=1`` and ``inplace=True`` (:issue:`10395`) -- Bug in ``io.sql.get_schema`` when specifying multiple columns as primary - key (:issue:`10385`). 
- -- Bug in ``groupby(sort=False)`` with datetime-like ``Categorical`` raises ``ValueError`` (:issue:`10505`) - -- Bug in ``test_categorical`` on big-endian builds (:issue:`10425`) -- Bug in ``Series.shift`` and ``DataFrame.shift`` not supporting categorical data (:issue:`9416`) -- Bug in ``Series.map`` using categorical ``Series`` raises ``AttributeError`` (:issue:`10324`) -- Bug in ``MultiIndex.get_level_values`` including ``Categorical`` raises ``AttributeError`` (:issue:`10460`) -- Bug in ``pd.get_dummies`` with `sparse=True` not returning ``SparseDataFrame`` (:issue:`10531`) -- Bug in ``Index`` subtypes (such as ``PeriodIndex``) not returning their own type for ``.drop`` and ``.insert`` methods (:issue:`10620`) -- Bug in ``algos.outer_join_indexer`` when ``right`` array is empty (:issue:`10618`) - -- Bug in ``filter`` (regression from 0.16.0) and ``transform`` when grouping on multiple keys, one of which is datetime-like (:issue:`10114`) - - - - - -- Bug that caused segfault when resampling an empty Series (:issue:`10228`) -- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. 
(:issue:`10150`) -- Bug in ``pd.eval`` using ``numexpr`` engine coerces 1 element numpy array to scalar (:issue:`10546`) -- Bug in ``pd.concat`` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`) -- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`, :issue:`10630`) -- Bug in ``pd.read_csv`` with kwargs ``index_col=False``, ``index_col=['a', 'b']`` or ``dtype`` - (:issue:`10413`, :issue:`10467`, :issue:`10577`) -- Bug in ``Series.from_csv`` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`) -- Bug in ``groupby.var`` which caused variance to be inaccurate for small float values (:issue:`10448`) -- Bug in ``Series.plot(kind='hist')`` Y Label not informative (:issue:`10485`) -- Bug in ``read_csv`` when using a converter which generates a ``uint8`` type (:issue:`9266`) - -- Bug causes memory leak in time-series line and area plot (:issue:`9003`) - - -- Bug in line and kde plot cannot accept multiple colors when ``subplots=True`` (:issue:`9894`) -- Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`) - -- Bug in left and right ``align`` of ``Series`` with ``MultiIndex`` may be inverted (:issue:`10665`) -- Bug in left and right ``join`` of with ``MultiIndex`` may be inverted (:issue:`10741`) - -- Bug in ``read_stata`` when reading a file with a different order set in ``columns`` (:issue:`10757`) -- Bug in ``Categorical`` may not representing properly when category contains ``tz`` or ``Period`` (:issue:`10713`) -- Bug in ``Categorical.__iter__`` may not returning correct ``datetime`` and ``Period`` (:issue:`10713`) - -- Bug in ``read_csv`` with ``engine='c'``: EOF preceded by a comment, blank line, etc. was not handled correctly (:issue:`10728`, :issue:`10548`) - -- Reading "famafrench" data via ``DataReader`` results in HTTP 404 error because of the website url is changed (:issue:`10591`). 
-- Bug in ``read_msgpack`` where DataFrame to decode has duplicate column names (:issue:`9618`) -- Bug in ``io.common.get_filepath_or_buffer`` which caused reading of valid S3 files to fail if the bucket also contained keys for which the user does not have read permission (:issue:`10604`) -- Bug in vectorised setting of timestamp columns with python ``datetime.date`` and numpy ``datetime64`` (:issue:`10408`, :issue:`10412`) -- Bug in ``Index.take`` may add unnecessary ``freq`` attribute (:issue:`10791`) -- Bug in ``merge`` with empty ``DataFrame`` may raise ``IndexError`` (:issue:`10824`) - - -- Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`) -- Bug in serialization of ``category`` types in HDF5 in presence of alternate encodings. (:issue:`10366`) -- Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`) -- Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue:`9431`) -- Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`) -- Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`) -- Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`) -- Bug in ``date_range`` when ``freq`` divides ``end`` as nanos (:issue:`10885`) -- Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`) -- Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`) -- Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`) -- Bug in ``TimedeltaIndex`` formatter causing error while trying to save ``DataFrame`` with ``TimedeltaIndex`` using ``to_csv`` (:issue:`10833`) -- Bug in 
``DataFrame.where`` when handling Series slicing (:issue:`10218`, :issue:`9558`) -- Bug where ``pd.read_gbq`` throws ``ValueError`` when Bigquery returns zero rows (:issue:`10273`) diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py deleted file mode 100644 index 0ca5ced1b8d1a..0000000000000 --- a/pandas/io/tests/generate_legacy_storage_files.py +++ /dev/null @@ -1,213 +0,0 @@ -""" self-contained to write legacy storage (pickle/msgpack) files """ -from __future__ import print_function -from distutils.version import LooseVersion -from pandas import (Series, DataFrame, Panel, - SparseSeries, SparseDataFrame, SparsePanel, - Index, MultiIndex, PeriodIndex, bdate_range, to_msgpack, - date_range, period_range, bdate_range, Timestamp, Categorical, - Period) -import os -import sys -import numpy as np -import pandas -import pandas.util.testing as tm -import platform as pl - - -def _create_sp_series(): - nan = np.nan - - # nan-based - arr = np.arange(15, dtype=np.float64) - arr[7:12] = nan - arr[-1:] = nan - - bseries = SparseSeries(arr, kind='block') - bseries.name = 'bseries' - return bseries - - -def _create_sp_tsseries(): - nan = np.nan - - # nan-based - arr = np.arange(15, dtype=np.float64) - arr[7:12] = nan - arr[-1:] = nan - - date_index = bdate_range('1/1/2011', periods=len(arr)) - bseries = SparseSeries(arr, index=date_index, kind='block') - bseries.name = 'btsseries' - return bseries - - -def _create_sp_frame(): - nan = np.nan - - data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], - 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], - 'C': np.arange(10).astype(np.int64), - 'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]} - - dates = bdate_range('1/1/2011', periods=10) - return SparseDataFrame(data, index=dates) - - -def create_data(): - """ create the pickle/msgpack data """ - - data = { - 'A': [0., 1., 2., 3., np.nan], - 'B': [0, 1, 0, 1, 0], - 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], - 'D': 
date_range('1/1/2009', periods=5), - 'E': [0., 1, Timestamp('20100101'), 'foo', 2.] - } - - scalars = dict(timestamp=Timestamp('20130101')) - if LooseVersion(pandas.__version__) >= '0.17.0': - scalars['period'] = Period('2012','M') - - index = dict(int=Index(np.arange(10)), - date=date_range('20130101', periods=10), - period=period_range('2013-01-01', freq='M', periods=10)) - - mi = dict(reg2=MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])), - names=['first', 'second'])) - series = dict(float=Series(data['A']), - int=Series(data['B']), - mixed=Series(data['E']), - ts=Series(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)), - mi=Series(np.arange(5).astype(np.float64), - index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), - names=['one', 'two'])), - dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']), - cat=Series(Categorical(['foo', 'bar', 'baz']))) - if LooseVersion(pandas.__version__) >= '0.17.0': - series['period'] = Series([Period('2000Q1')] * 5) - - mixed_dup_df = DataFrame(data) - mixed_dup_df.columns = list("ABCDA") - frame = dict(float=DataFrame(dict(A=series['float'], B=series['float'] + 1)), - int=DataFrame(dict(A=series['int'], B=series['int'] + 1)), - mixed=DataFrame(dict([(k, data[k]) for k in ['A', 'B', 'C', 'D']])), - mi=DataFrame(dict(A=np.arange(5).astype(np.float64), B=np.arange(5).astype(np.int64)), - index=MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'baz'], - ['one', 'two', 'one', 'two', 'three']])), - names=['first', 'second'])), - dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64), - columns=['A', 'B', 'A']), - cat_onecol=DataFrame(dict(A=Categorical(['foo', 'bar']))), - cat_and_float=DataFrame(dict(A=Categorical(['foo', 'bar', 'baz']), - B=np.arange(3).astype(np.int64))), - mixed_dup=mixed_dup_df) - - mixed_dup_panel = 
Panel(dict(ItemA=frame['float'], ItemB=frame['int'])) - mixed_dup_panel.items = ['ItemA', 'ItemA'] - panel = dict(float=Panel(dict(ItemA=frame['float'], ItemB=frame['float'] + 1)), - dup=Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64), - items=['A', 'B', 'A']), - mixed_dup=mixed_dup_panel) - - return dict(series=series, - frame=frame, - panel=panel, - index=index, - scalars=scalars, - mi=mi, - sp_series=dict(float=_create_sp_series(), - ts=_create_sp_tsseries()), - sp_frame=dict(float=_create_sp_frame())) - - -def create_pickle_data(): - data = create_data() - - # Pre-0.14.1 versions generated non-unpicklable mixed-type frames and - # panels if their columns/items were non-unique. - if LooseVersion(pandas.__version__) < '0.14.1': - del data['frame']['mixed_dup'] - del data['panel']['mixed_dup'] - return data - - -def create_msgpack_data(): - data = create_data() - if LooseVersion(pandas.__version__) < '0.17.0': - del data['frame']['mixed_dup'] - del data['panel']['mixed_dup'] - del data['frame']['dup'] - del data['panel']['dup'] - # Not supported - del data['sp_series'] - del data['sp_frame'] - del data['series']['cat'] - del data['frame']['cat_onecol'] - del data['frame']['cat_and_float'] - return data - - -def platform_name(): - return '_'.join([str(pandas.__version__), str(pl.machine()), str(pl.system().lower()), str(pl.python_version())]) - - -def write_legacy_pickles(output_dir): - - # make sure we are < 0.13 compat (in py3) - try: - from pandas.compat import zip, cPickle as pickle - except: - import pickle - - version = pandas.__version__ - - print("This script generates a storage file for the current arch, system, and python version") - print(" pandas version: {0}".format(version)) - print(" output dir : {0}".format(output_dir)) - print(" storage format: pickle") - - pth = '{0}.pickle'.format(platform_name()) - - fh = open(os.path.join(output_dir, pth), 'wb') - pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL) - fh.close() - - 
print("created pickle file: %s" % pth) - - -def write_legacy_msgpack(output_dir): - - version = pandas.__version__ - - print("This script generates a storage file for the current arch, system, and python version") - print(" pandas version: {0}".format(version)) - print(" output dir : {0}".format(output_dir)) - print(" storage format: msgpack") - - pth = '{0}.msgpack'.format(platform_name()) - to_msgpack(os.path.join(output_dir, pth), create_msgpack_data()) - - print("created msgpack file: %s" % pth) - - -def write_legacy_file(): - # force our cwd to be the first searched - sys.path.insert(0, '.') - - if len(sys.argv) != 3: - exit("Specify output directory and storage type: generate_legacy_storage_files.py <output_dir> <storage_type>") - - output_dir = str(sys.argv[1]) - storage_type = str(sys.argv[2]) - - if storage_type == 'pickle': - write_legacy_pickles(output_dir=output_dir) - elif storage_type == 'msgpack': - write_legacy_msgpack(output_dir=output_dir) - else: - exit("storage_type must be one of {'pickle', 'msgpack'}") - - -if __name__ == '__main__': - write_legacy_file() diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py deleted file mode 100644 index 7ed8799dd6ded..0000000000000 --- a/pandas/tests/test_generic.py +++ /dev/null @@ -1,1769 +0,0 @@ -# -*- coding: utf-8 -*- -# pylint: disable-msg=E1101,W0612 - -from datetime import datetime, timedelta -import nose -import numpy as np -from numpy import nan -import pandas as pd - -from pandas import (Index, Series, DataFrame, Panel, - isnull, notnull, date_range, period_range) -from pandas.core.index import Index, MultiIndex - -import pandas.core.common as com - -from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long -from pandas import compat -from pandas.util.testing import (assert_series_equal, - assert_frame_equal, - assert_panel_equal, - assert_almost_equal, - assert_equal, - ensure_clean) -import pandas.util.testing as tm - - -def _skip_if_no_pchip(): - try: - 
from scipy.interpolate import pchip_interpolate - except ImportError: - raise nose.SkipTest('scipy.interpolate.pchip missing') - -#------------------------------------------------------------------------------ -# Generic types test cases - - -class Generic(object): - - _multiprocess_can_split_ = True - - def setUp(self): - import warnings - warnings.filterwarnings(action='ignore', category=FutureWarning) - - @property - def _ndim(self): - return self._typ._AXIS_LEN - - def _axes(self): - """ return the axes for my object typ """ - return self._typ._AXIS_ORDERS - - def _construct(self, shape, value=None, dtype=None, **kwargs): - """ construct an object for the given shape - if value is specified use that if its a scalar - if value is an array, repeat it as needed """ - - if isinstance(shape,int): - shape = tuple([shape] * self._ndim) - if value is not None: - if np.isscalar(value): - if value == 'empty': - arr = None - - # remove the info axis - kwargs.pop(self._typ._info_axis_name,None) - else: - arr = np.empty(shape,dtype=dtype) - arr.fill(value) - else: - fshape = np.prod(shape) - arr = value.ravel() - new_shape = fshape/arr.shape[0] - if fshape % arr.shape[0] != 0: - raise Exception("invalid value passed in _construct") - - arr = np.repeat(arr,new_shape).reshape(shape) - else: - arr = np.random.randn(*shape) - return self._typ(arr,dtype=dtype,**kwargs) - - def _compare(self, result, expected): - self._comparator(result,expected) - - def test_rename(self): - - # single axis - for axis in self._axes(): - kwargs = { axis : list('ABCD') } - obj = self._construct(4,**kwargs) - - # no values passed - #self.assertRaises(Exception, o.rename(str.lower)) - - # rename a single axis - result = obj.rename(**{ axis : str.lower }) - expected = obj.copy() - setattr(expected,axis,list('abcd')) - self._compare(result, expected) - - # multiple axes at once - - def test_get_numeric_data(self): - - n = 4 - kwargs = { } - for i in range(self._ndim): - kwargs[self._typ._AXIS_NAMES[i]] 
= list(range(n)) - - # get the numeric data - o = self._construct(n,**kwargs) - result = o._get_numeric_data() - self._compare(result, o) - - # non-inclusion - result = o._get_bool_data() - expected = self._construct(n,value='empty',**kwargs) - self._compare(result,expected) - - # get the bool data - arr = np.array([True,True,False,True]) - o = self._construct(n,value=arr,**kwargs) - result = o._get_numeric_data() - self._compare(result, o) - - # _get_numeric_data is includes _get_bool_data, so can't test for non-inclusion - - def test_get_default(self): - - # GH 7725 - d0 = "a", "b", "c", "d" - d1 = np.arange(4, dtype='int64') - others = "e", 10 - - for data, index in ((d0, d1), (d1, d0)): - s = Series(data, index=index) - for i,d in zip(index, data): - self.assertEqual(s.get(i), d) - self.assertEqual(s.get(i, d), d) - self.assertEqual(s.get(i, "z"), d) - for other in others: - self.assertEqual(s.get(other, "z"), "z") - self.assertEqual(s.get(other, other), other) - - def test_nonzero(self): - - # GH 4633 - # look at the boolean/nonzero behavior for objects - obj = self._construct(shape=4) - self.assertRaises(ValueError, lambda : bool(obj == 0)) - self.assertRaises(ValueError, lambda : bool(obj == 1)) - self.assertRaises(ValueError, lambda : bool(obj)) - - obj = self._construct(shape=4,value=1) - self.assertRaises(ValueError, lambda : bool(obj == 0)) - self.assertRaises(ValueError, lambda : bool(obj == 1)) - self.assertRaises(ValueError, lambda : bool(obj)) - - obj = self._construct(shape=4,value=np.nan) - self.assertRaises(ValueError, lambda : bool(obj == 0)) - self.assertRaises(ValueError, lambda : bool(obj == 1)) - self.assertRaises(ValueError, lambda : bool(obj)) - - # empty - obj = self._construct(shape=0) - self.assertRaises(ValueError, lambda : bool(obj)) - - # invalid behaviors - - obj1 = self._construct(shape=4,value=1) - obj2 = self._construct(shape=4,value=1) - - def f(): - if obj1: - com.pprint_thing("this works and shouldn't") - 
self.assertRaises(ValueError, f) - self.assertRaises(ValueError, lambda : obj1 and obj2) - self.assertRaises(ValueError, lambda : obj1 or obj2) - self.assertRaises(ValueError, lambda : not obj1) - - def test_numpy_1_7_compat_numeric_methods(self): - # GH 4435 - # numpy in 1.7 tries to pass addtional arguments to pandas functions - - o = self._construct(shape=4) - for op in ['min','max','max','var','std','prod','sum','cumsum','cumprod', - 'median','skew','kurt','compound','cummax','cummin','all','any']: - f = getattr(np,op,None) - if f is not None: - f(o) - - def test_downcast(self): - # test close downcasting - - o = self._construct(shape=4, value=9, dtype=np.int64) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - self._compare(result, o) - - o = self._construct(shape=4, value=9.) - expected = o.astype(np.int64) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - self._compare(result, expected) - - o = self._construct(shape=4, value=9.5) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - self._compare(result, o) - - # are close - o = self._construct(shape=4, value=9.000000000005) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - expected = o.astype(np.int64) - self._compare(result, expected) - - def test_constructor_compound_dtypes(self): - # GH 5191 - # compound dtypes should raise not-implementederror - - def f(dtype): - return self._construct(shape=3, dtype=dtype) - - self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")]) - - # these work (though results may be unexpected) - f('int64') - f('float64') - f('M8[ns]') - - def check_metadata(self, x, y=None): - for m in x._metadata: - v = getattr(x,m,None) - if y is None: - self.assertIsNone(v) - else: - self.assertEqual(v, getattr(y,m,None)) - - def test_metadata_propagation(self): - # check that the metadata matches up on the resulting ops - - o = self._construct(shape=3) - o.name = 'foo' - 
o2 = self._construct(shape=3) - o2.name = 'bar' - - # TODO - # Once panel can do non-trivial combine operations - # (currently there is an a raise in the Panel arith_ops to prevent - # this, though it actually does work) - # can remove all of these try: except: blocks on the actual operations - - - # ---------- - # preserving - # ---------- - - # simple ops with scalars - for op in [ '__add__','__sub__','__truediv__','__mul__' ]: - result = getattr(o,op)(1) - self.check_metadata(o,result) - - # ops with like - for op in [ '__add__','__sub__','__truediv__','__mul__' ]: - try: - result = getattr(o,op)(o) - self.check_metadata(o,result) - except (ValueError, AttributeError): - pass - - # simple boolean - for op in [ '__eq__','__le__', '__ge__' ]: - v1 = getattr(o,op)(o) - self.check_metadata(o,v1) - - try: - self.check_metadata(o, v1 & v1) - except (ValueError): - pass - - try: - self.check_metadata(o, v1 | v1) - except (ValueError): - pass - - # combine_first - try: - result = o.combine_first(o2) - self.check_metadata(o,result) - except (AttributeError): - pass - - # --------------------------- - # non-preserving (by default) - # --------------------------- - - # add non-like - try: - result = o + o2 - self.check_metadata(result) - except (ValueError, AttributeError): - pass - - # simple boolean - for op in [ '__eq__','__le__', '__ge__' ]: - - # this is a name matching op - v1 = getattr(o,op)(o) - - v2 = getattr(o,op)(o2) - self.check_metadata(v2) - - try: - self.check_metadata(v1 & v2) - except (ValueError): - pass - - try: - self.check_metadata(v1 | v2) - except (ValueError): - pass - - def test_head_tail(self): - # GH5370 - - o = self._construct(shape=10) - - # check all index types - for index in [ tm.makeFloatIndex, tm.makeIntIndex, - tm.makeStringIndex, tm.makeUnicodeIndex, - tm.makeDateIndex, tm.makePeriodIndex ]: - axis = o._get_axis_name(0) - setattr(o,axis,index(len(getattr(o,axis)))) - - # Panel + dims - try: - o.head() - except (NotImplementedError): - 
raise nose.SkipTest('not implemented on {0}'.format(o.__class__.__name__)) - - self._compare(o.head(), o.iloc[:5]) - self._compare(o.tail(), o.iloc[-5:]) - - # 0-len - self._compare(o.head(0), o.iloc[:]) - self._compare(o.tail(0), o.iloc[0:]) - - # bounded - self._compare(o.head(len(o)+1), o) - self._compare(o.tail(len(o)+1), o) - - # neg index - self._compare(o.head(-3), o.head(7)) - self._compare(o.tail(-3), o.tail(7)) - - def test_sample(self): - # Fixes issue: 2419 - - o = self._construct(shape=10) - - ### - # Check behavior of random_state argument - ### - - # Check for stability when receives seed or random state -- run 10 times. - for test in range(10): - seed = np.random.randint(0,100) - self._compare(o.sample(n=4, random_state=seed), o.sample(n=4, random_state=seed)) - self._compare(o.sample(frac=0.7,random_state=seed), o.sample(frac=0.7, random_state=seed)) - - self._compare(o.sample(n=4, random_state=np.random.RandomState(test)), - o.sample(n=4, random_state=np.random.RandomState(test))) - - self._compare(o.sample(frac=0.7,random_state=np.random.RandomState(test)), - o.sample(frac=0.7, random_state=np.random.RandomState(test))) - - - # Check for error when random_state argument invalid. 
- with tm.assertRaises(ValueError): - o.sample(random_state='astring!') - - ### - # Check behavior of `frac` and `N` - ### - - # Giving both frac and N throws error - with tm.assertRaises(ValueError): - o.sample(n=3, frac=0.3) - - # Check that raises right error for negative lengths - with tm.assertRaises(ValueError): - o.sample(n=-3) - with tm.assertRaises(ValueError): - o.sample(frac=-0.3) - - # Make sure float values of `n` give error - with tm.assertRaises(ValueError): - o.sample(n= 3.2) - - # Check lengths are right - self.assertTrue(len(o.sample(n=4) == 4)) - self.assertTrue(len(o.sample(frac=0.34) == 3)) - self.assertTrue(len(o.sample(frac=0.36) == 4)) - - ### - # Check weights - ### - - # Weight length must be right - with tm.assertRaises(ValueError): - o.sample(n=3, weights=[0,1]) - - with tm.assertRaises(ValueError): - bad_weights = [0.5]*11 - o.sample(n=3, weights=bad_weights) - - with tm.assertRaises(ValueError): - bad_weight_series = Series([0,0,0.2]) - o.sample(n=4, weights=bad_weight_series) - - # Check won't accept negative weights - with tm.assertRaises(ValueError): - bad_weights = [-0.1]*10 - o.sample(n=3, weights=bad_weights) - - # Check inf and -inf throw errors: - with tm.assertRaises(ValueError): - weights_with_inf = [0.1]*10 - weights_with_inf[0] = np.inf - o.sample(n=3, weights=weights_with_inf) - - with tm.assertRaises(ValueError): - weights_with_ninf = [0.1]*10 - weights_with_ninf[0] = -np.inf - o.sample(n=3, weights=weights_with_ninf) - - # All zeros raises errors - zero_weights = [0]*10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=zero_weights) - - # All missing weights - nan_weights = [np.nan]*10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=nan_weights) - - - # A few dataframe test with degenerate weights. 
- easy_weight_list = [0]*10 - easy_weight_list[5] = 1 - - df = pd.DataFrame({'col1':range(10,20), - 'col2':range(20,30), - 'colString': ['a']*10, - 'easyweights':easy_weight_list}) - sample1 = df.sample(n=1, weights='easyweights') - assert_frame_equal(sample1, df.iloc[5:6]) - - # Ensure proper error if string given as weight for Series, panel, or - # DataFrame with axis = 1. - s = Series(range(10)) - with tm.assertRaises(ValueError): - s.sample(n=3, weights='weight_column') - - panel = pd.Panel(items = [0,1,2], major_axis = [2,3,4], minor_axis = [3,4,5]) - with tm.assertRaises(ValueError): - panel.sample(n=1, weights='weight_column') - - with tm.assertRaises(ValueError): - df.sample(n=1, weights='weight_column', axis = 1) - - # Check weighting key error - with tm.assertRaises(KeyError): - df.sample(n=3, weights='not_a_real_column_name') - - # Check np.nan are replaced by zeros. - weights_with_nan = [np.nan]*10 - weights_with_nan[5] = 0.5 - self._compare(o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6]) - - # Check None are also replaced by zeros. - weights_with_None = [None]*10 - weights_with_None[5] = 0.5 - self._compare(o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6]) - - # Check that re-normalizes weights that don't sum to one. 
- weights_less_than_1 = [0]*10 - weights_less_than_1[0] = 0.5 - tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1]) - - - ### - # Test axis argument - ### - - # Test axis argument - df = pd.DataFrame({'col1':range(10), 'col2':['a']*10}) - second_column_weight = [0,1] - assert_frame_equal(df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']]) - - # Different axis arg types - assert_frame_equal(df.sample(n=1, axis='columns', weights=second_column_weight), - df[['col2']]) - - weight = [0]*10 - weight[5] = 0.5 - assert_frame_equal(df.sample(n=1, axis='rows', weights=weight), - df.iloc[5:6]) - assert_frame_equal(df.sample(n=1, axis='index', weights=weight), - df.iloc[5:6]) - - # Check out of range axis values - with tm.assertRaises(ValueError): - df.sample(n=1, axis=2) - - with tm.assertRaises(ValueError): - df.sample(n=1, axis='not_a_name') - - with tm.assertRaises(ValueError): - s = pd.Series(range(10)) - s.sample(n=1, axis=1) - - # Test weight length compared to correct axis - with tm.assertRaises(ValueError): - df.sample(n=1, axis=1, weights=[0.5]*10) - - # Check weights with axis = 1 - easy_weight_list = [0]*3 - easy_weight_list[2] = 1 - - df = pd.DataFrame({'col1':range(10,20), - 'col2':range(20,30), - 'colString': ['a']*10}) - sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) - assert_frame_equal(sample1, df[['colString']]) - - # Test default axes - p = pd.Panel(items = ['a','b','c'], major_axis=[2,4,6], minor_axis=[1,3,5]) - assert_panel_equal(p.sample(n=3, random_state=42), p.sample(n=3, axis=1, random_state=42)) - assert_frame_equal(df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)) - - # Test that function aligns weights with frame - df = DataFrame({'col1':[5,6,7], 'col2':['a','b','c'], }, index = [9,5,3]) - s = Series([1,0,0], index=[3,5,9]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s)) - - # Weights have index values to be dropped because not in - # sampled DataFrame - s2 
= Series([0.001,0,10000], index=[3,5,10]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2)) - - # Weights have empty values to be filed with zeros - s3 = Series([0.01,0], index=[3,5]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3)) - - # No overlap in weight and sampled DataFrame indices - s4 = Series([1,0], index=[1,2]) - with tm.assertRaises(ValueError): - df.sample(1, weights=s4) - - - def test_size_compat(self): - # GH8846 - # size property should be defined - - o = self._construct(shape=10) - self.assertTrue(o.size == np.prod(o.shape)) - self.assertTrue(o.size == 10**len(o.axes)) - - def test_split_compat(self): - # xref GH8846 - o = self._construct(shape=10) - self.assertTrue(len(np.array_split(o,5)) == 5) - self.assertTrue(len(np.array_split(o,2)) == 2) - - def test_unexpected_keyword(self): # GH8597 - from pandas.util.testing import assertRaisesRegexp - - df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe']) - ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) - ts = df['joe'].copy() - ts[2] = np.nan - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - df.drop('joe', axis=1, in_place=True) - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - df.reindex([1, 0], inplace=True) - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - ca.fillna(0, inplace=True) - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - ts.fillna(0, in_place=True) - -class TestSeries(tm.TestCase, Generic): - _typ = Series - _comparator = lambda self, x, y: assert_series_equal(x,y) - - def setUp(self): - self.ts = tm.makeTimeSeries() # Was at top level in test_series - self.ts.name = 'ts' - - self.series = tm.makeStringSeries() - self.series.name = 'series' - - def test_rename_mi(self): - s = Series([11,21,31], - index=MultiIndex.from_tuples([("A",x) for x in ["a","B","c"]])) - result = s.rename(str.lower) - - def test_get_numeric_data_preserve_dtype(self): - - # get the numeric data - o = Series([1,2,3]) - result = 
o._get_numeric_data() - self._compare(result, o) - - o = Series([1,'2',3.]) - result = o._get_numeric_data() - expected = Series([],dtype=object) - self._compare(result, expected) - - o = Series([True,False,True]) - result = o._get_numeric_data() - self._compare(result, o) - - o = Series([True,False,True]) - result = o._get_bool_data() - self._compare(result, o) - - o = Series(date_range('20130101',periods=3)) - result = o._get_numeric_data() - expected = Series([],dtype='M8[ns]') - self._compare(result, expected) - - def test_nonzero_single_element(self): - - # allow single item via bool method - s = Series([True]) - self.assertTrue(s.bool()) - - s = Series([False]) - self.assertFalse(s.bool()) - - # single item nan to raise - for s in [ Series([np.nan]), Series([pd.NaT]), Series([True]), Series([False]) ]: - self.assertRaises(ValueError, lambda : bool(s)) - - for s in [ Series([np.nan]), Series([pd.NaT])]: - self.assertRaises(ValueError, lambda : s.bool()) - - # multiple bool are still an error - for s in [Series([True,True]), Series([False, False])]: - self.assertRaises(ValueError, lambda : bool(s)) - self.assertRaises(ValueError, lambda : s.bool()) - - # single non-bool are an error - for s in [Series([1]), Series([0]), - Series(['a']), Series([0.0])]: - self.assertRaises(ValueError, lambda : bool(s)) - self.assertRaises(ValueError, lambda : s.bool()) - - def test_metadata_propagation_indiv(self): - # check that the metadata matches up on the resulting ops - - o = Series(range(3),range(3)) - o.name = 'foo' - o2 = Series(range(3),range(3)) - o2.name = 'bar' - - result = o.T - self.check_metadata(o,result) - - # resample - ts = Series(np.random.rand(1000), - index=date_range('20130101',periods=1000,freq='s'), - name='foo') - result = ts.resample('1T') - self.check_metadata(ts,result) - - result = ts.resample('1T',how='min') - self.check_metadata(ts,result) - - result = ts.resample('1T',how=lambda x: x.sum()) - self.check_metadata(ts,result) - - _metadata = 
Series._metadata - _finalize = Series.__finalize__ - Series._metadata = ['name','filename'] - o.filename = 'foo' - o2.filename = 'bar' - - def finalize(self, other, method=None, **kwargs): - for name in self._metadata: - if method == 'concat' and name == 'filename': - value = '+'.join([ getattr(o,name) for o in other.objs if getattr(o,name,None) ]) - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, None)) - - return self - - Series.__finalize__ = finalize - - result = pd.concat([o, o2]) - self.assertEqual(result.filename,'foo+bar') - self.assertIsNone(result.name) - - # reset - Series._metadata = _metadata - Series.__finalize__ = _finalize - - def test_interpolate(self): - ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index) - - ts_copy = ts.copy() - ts_copy[5:10] = np.NaN - - linear_interp = ts_copy.interpolate(method='linear') - self.assert_numpy_array_equal(linear_interp, ts) - - ord_ts = Series([d.toordinal() for d in self.ts.index], - index=self.ts.index).astype(float) - - ord_ts_copy = ord_ts.copy() - ord_ts_copy[5:10] = np.NaN - - time_interp = ord_ts_copy.interpolate(method='time') - self.assert_numpy_array_equal(time_interp, ord_ts) - - # try time interpolation on a non-TimeSeries - # Only raises ValueError if there are NaNs. 
- non_ts = self.series.copy() - non_ts[0] = np.NaN - self.assertRaises(ValueError, non_ts.interpolate, method='time') - - def test_interp_regression(self): - tm._skip_if_no_scipy() - _skip_if_no_pchip() - - ser = Series(np.sort(np.random.uniform(size=100))) - - # interpolate at new_index - new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])) - interp_s = ser.reindex(new_index).interpolate(method='pchip') - # does not blow up, GH5977 - interp_s[49:51] - - def test_interpolate_corners(self): - s = Series([np.nan, np.nan]) - assert_series_equal(s.interpolate(), s) - - s = Series([]).interpolate() - assert_series_equal(s.interpolate(), s) - - tm._skip_if_no_scipy() - s = Series([np.nan, np.nan]) - assert_series_equal(s.interpolate(method='polynomial', order=1), s) - - s = Series([]).interpolate() - assert_series_equal(s.interpolate(method='polynomial', order=1), s) - - def test_interpolate_index_values(self): - s = Series(np.nan, index=np.sort(np.random.rand(30))) - s[::3] = np.random.randn(10) - - vals = s.index.values.astype(float) - - result = s.interpolate(method='index') - - expected = s.copy() - bad = isnull(expected.values) - good = ~bad - expected = Series( - np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]) - - assert_series_equal(result[bad], expected) - - # 'values' is synonymous with 'index' for the method kwarg - other_result = s.interpolate(method='values') - - assert_series_equal(other_result, result) - assert_series_equal(other_result[bad], expected) - - def test_interpolate_non_ts(self): - s = Series([1, 3, np.nan, np.nan, np.nan, 11]) - with tm.assertRaises(ValueError): - s.interpolate(method='time') - - # New interpolation tests - def test_nan_interpolate(self): - s = Series([0, 1, np.nan, 3]) - result = s.interpolate() - expected = Series([0., 1., 2., 3.]) - assert_series_equal(result, expected) - - tm._skip_if_no_scipy() - result = s.interpolate(method='polynomial', order=1) - assert_series_equal(result, 
expected) - - def test_nan_irregular_index(self): - s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) - result = s.interpolate() - expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9]) - assert_series_equal(result, expected) - - def test_nan_str_index(self): - s = Series([0, 1, 2, np.nan], index=list('abcd')) - result = s.interpolate() - expected = Series([0., 1., 2., 2.], index=list('abcd')) - assert_series_equal(result, expected) - - def test_interp_quad(self): - tm._skip_if_no_scipy() - sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) - result = sq.interpolate(method='quadratic') - expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4]) - assert_series_equal(result, expected) - - def test_interp_scipy_basic(self): - tm._skip_if_no_scipy() - s = Series([1, 3, np.nan, 12, np.nan, 25]) - # slinear - expected = Series([1., 3., 7.5, 12., 18.5, 25.]) - result = s.interpolate(method='slinear') - assert_series_equal(result, expected) - - result = s.interpolate(method='slinear', downcast='infer') - assert_series_equal(result, expected) - # nearest - expected = Series([1, 3, 3, 12, 12, 25]) - result = s.interpolate(method='nearest') - assert_series_equal(result, expected.astype('float')) - - result = s.interpolate(method='nearest', downcast='infer') - assert_series_equal(result, expected) - # zero - expected = Series([1, 3, 3, 12, 12, 25]) - result = s.interpolate(method='zero') - assert_series_equal(result, expected.astype('float')) - - result = s.interpolate(method='zero', downcast='infer') - assert_series_equal(result, expected) - # quadratic - expected = Series([1, 3., 6.769231, 12., 18.230769, 25.]) - result = s.interpolate(method='quadratic') - assert_series_equal(result, expected) - - result = s.interpolate(method='quadratic', downcast='infer') - assert_series_equal(result, expected) - # cubic - expected = Series([1., 3., 6.8, 12., 18.2, 25.]) - result = s.interpolate(method='cubic') - assert_series_equal(result, expected) - - def test_interp_limit(self): - s 
= Series([1, 3, np.nan, np.nan, np.nan, 11]) - expected = Series([1., 3., 5., 7., np.nan, 11.]) - result = s.interpolate(method='linear', limit=2) - assert_series_equal(result, expected) - - def test_interp_all_good(self): - # scipy - tm._skip_if_no_scipy() - s = Series([1, 2, 3]) - result = s.interpolate(method='polynomial', order=1) - assert_series_equal(result, s) - - # non-scipy - result = s.interpolate() - assert_series_equal(result, s) - - def test_interp_multiIndex(self): - idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')]) - s = Series([1, 2, np.nan], index=idx) - - expected = s.copy() - expected.loc[2] = 2 - result = s.interpolate() - assert_series_equal(result, expected) - - tm._skip_if_no_scipy() - with tm.assertRaises(ValueError): - s.interpolate(method='polynomial', order=1) - - def test_interp_nonmono_raise(self): - tm._skip_if_no_scipy() - s = Series([1, np.nan, 3], index=[0, 2, 1]) - with tm.assertRaises(ValueError): - s.interpolate(method='krogh') - - def test_interp_datetime64(self): - tm._skip_if_no_scipy() - df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3)) - result = df.interpolate(method='nearest') - expected = Series([1., 1., 3.], index=date_range('1/1/2000', periods=3)) - assert_series_equal(result, expected) - - def test_interp_limit_no_nans(self): - # GH 7173 - s = pd.Series([1., 2., 3.]) - result = s.interpolate(limit=1) - expected = s - assert_series_equal(result, expected) - - def test_describe(self): - _ = self.series.describe() - _ = self.ts.describe() - - def test_describe_objects(self): - s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a']) - result = s.describe() - expected = Series({'count': 7, 'unique': 4, - 'top': 'a', 'freq': 3}, index=result.index) - assert_series_equal(result, expected) - - dt = list(self.ts.index) - dt.append(dt[0]) - ser = Series(dt) - rs = ser.describe() - min_date = min(dt) - max_date = max(dt) - xp = Series({'count': len(dt), - 'unique': len(self.ts.index), 
- 'first': min_date, 'last': max_date, 'freq': 2, - 'top': min_date}, index=rs.index) - assert_series_equal(rs, xp) - - def test_describe_empty(self): - result = pd.Series().describe() - - self.assertEqual(result['count'], 0) - self.assertTrue(result.drop('count').isnull().all()) - - nanSeries = Series([np.nan]) - nanSeries.name = 'NaN' - result = nanSeries.describe() - self.assertEqual(result['count'], 0) - self.assertTrue(result.drop('count').isnull().all()) - - def test_describe_none(self): - noneSeries = Series([None]) - noneSeries.name = 'None' - expected = Series([0, 0], index=['count', 'unique'], name='None') - assert_series_equal(noneSeries.describe(), expected) - - -class TestDataFrame(tm.TestCase, Generic): - _typ = DataFrame - _comparator = lambda self, x, y: assert_frame_equal(x,y) - - def test_rename_mi(self): - df = DataFrame([11,21,31], - index=MultiIndex.from_tuples([("A",x) for x in ["a","B","c"]])) - result = df.rename(str.lower) - - def test_nonzero_single_element(self): - - # allow single item via bool method - df = DataFrame([[True]]) - self.assertTrue(df.bool()) - - df = DataFrame([[False]]) - self.assertFalse(df.bool()) - - df = DataFrame([[False, False]]) - self.assertRaises(ValueError, lambda : df.bool()) - self.assertRaises(ValueError, lambda : bool(df)) - - def test_get_numeric_data_preserve_dtype(self): - - # get the numeric data - o = DataFrame({'A': [1, '2', 3.]}) - result = o._get_numeric_data() - expected = DataFrame(index=[0, 1, 2], dtype=object) - self._compare(result, expected) - - def test_interp_basic(self): - df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [1, 4, 9, np.nan], - 'C': [1, 2, 3, 5], 'D': list('abcd')}) - expected = DataFrame({'A': [1., 2., 3., 4.], 'B': [1., 4., 9., 9.], - 'C': [1, 2, 3, 5], 'D': list('abcd')}) - result = df.interpolate() - assert_frame_equal(result, expected) - - result = df.set_index('C').interpolate() - expected = df.set_index('C') - expected.loc[3,'A'] = 3 - expected.loc[5,'B'] = 9 - 
assert_frame_equal(result, expected) - - def test_interp_bad_method(self): - df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [1, 4, 9, np.nan], - 'C': [1, 2, 3, 5], 'D': list('abcd')}) - with tm.assertRaises(ValueError): - df.interpolate(method='not_a_method') - - def test_interp_combo(self): - df = DataFrame({'A': [1., 2., np.nan, 4.], 'B': [1, 4, 9, np.nan], - 'C': [1, 2, 3, 5], 'D': list('abcd')}) - - result = df['A'].interpolate() - expected = Series([1., 2., 3., 4.], name='A') - assert_series_equal(result, expected) - - result = df['A'].interpolate(downcast='infer') - expected = Series([1, 2, 3, 4], name='A') - assert_series_equal(result, expected) - - def test_interp_nan_idx(self): - df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]}) - df = df.set_index('A') - with tm.assertRaises(NotImplementedError): - df.interpolate(method='values') - - def test_interp_various(self): - tm._skip_if_no_scipy() - df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7], - 'C': [1, 2, 3, 5, 8, 13, 21]}) - df = df.set_index('C') - expected = df.copy() - result = df.interpolate(method='polynomial', order=1) - - expected.A.loc[3] = 2.66666667 - expected.A.loc[13] = 5.76923076 - assert_frame_equal(result, expected) - - result = df.interpolate(method='cubic') - expected.A.loc[3] = 2.81621174 - expected.A.loc[13] = 5.64146581 - assert_frame_equal(result, expected) - - result = df.interpolate(method='nearest') - expected.A.loc[3] = 2 - expected.A.loc[13] = 5 - assert_frame_equal(result, expected, check_dtype=False) - - result = df.interpolate(method='quadratic') - expected.A.loc[3] = 2.82533638 - expected.A.loc[13] = 6.02817974 - assert_frame_equal(result, expected) - - result = df.interpolate(method='slinear') - expected.A.loc[3] = 2.66666667 - expected.A.loc[13] = 5.76923077 - assert_frame_equal(result, expected) - - result = df.interpolate(method='zero') - expected.A.loc[3] = 2. 
- expected.A.loc[13] = 5 - assert_frame_equal(result, expected, check_dtype=False) - - result = df.interpolate(method='quadratic') - expected.A.loc[3] = 2.82533638 - expected.A.loc[13] = 6.02817974 - assert_frame_equal(result, expected) - - def test_interp_alt_scipy(self): - tm._skip_if_no_scipy() - df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7], - 'C': [1, 2, 3, 5, 8, 13, 21]}) - result = df.interpolate(method='barycentric') - expected = df.copy() - expected.ix[2,'A'] = 3 - expected.ix[5,'A'] = 6 - assert_frame_equal(result, expected) - - result = df.interpolate(method='barycentric', downcast='infer') - assert_frame_equal(result, expected.astype(np.int64)) - - result = df.interpolate(method='krogh') - expectedk = df.copy() - expectedk['A'] = expected['A'] - assert_frame_equal(result, expectedk) - - _skip_if_no_pchip() - result = df.interpolate(method='pchip') - expected.ix[2,'A'] = 3 - expected.ix[5,'A'] = 6.125 - assert_frame_equal(result, expected) - - def test_interp_rowwise(self): - df = DataFrame({0: [1, 2, np.nan, 4], - 1: [2, 3, 4, np.nan], - 2: [np.nan, 4, 5, 6], - 3: [4, np.nan, 6, 7], - 4: [1, 2, 3, 4]}) - result = df.interpolate(axis=1) - expected = df.copy() - expected.loc[3,1] = 5 - expected.loc[0,2] = 3 - expected.loc[1,3] = 3 - expected[4] = expected[4].astype(np.float64) - assert_frame_equal(result, expected) - - # scipy route - tm._skip_if_no_scipy() - result = df.interpolate(axis=1, method='values') - assert_frame_equal(result, expected) - - result = df.interpolate(axis=0) - expected = df.interpolate() - assert_frame_equal(result, expected) - - def test_rowwise_alt(self): - df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64], - 1: [1, 2, 3, 4, 3, 2, 1, 0, -1]}) - df.interpolate(axis=0) - - def test_interp_leading_nans(self): - df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0], - "B": [np.nan, -3, -3.5, np.nan, -4]}) - result = df.interpolate() - expected = df.copy() - expected['B'].loc[3] = -3.75 - assert_frame_equal(result, 
expected) - - tm._skip_if_no_scipy() - result = df.interpolate(method='polynomial', order=1) - assert_frame_equal(result, expected) - - def test_interp_raise_on_only_mixed(self): - df = DataFrame({'A': [1, 2, np.nan, 4], 'B': ['a', 'b', 'c', 'd'], - 'C': [np.nan, 2, 5, 7], 'D': [np.nan, np.nan, 9, 9], - 'E': [1, 2, 3, 4]}) - with tm.assertRaises(TypeError): - df.interpolate(axis=1) - - def test_interp_inplace(self): - df = DataFrame({'a': [1., 2., np.nan, 4.]}) - expected = DataFrame({'a': [1., 2., 3., 4.]}) - result = df.copy() - result['a'].interpolate(inplace=True) - assert_frame_equal(result, expected) - - result = df.copy() - result['a'].interpolate(inplace=True, downcast='infer') - assert_frame_equal(result, expected.astype('int64')) - - def test_interp_inplace_row(self): - # GH 10395 - result = DataFrame({'a': [1.,2.,3.,4.], 'b': [np.nan, 2., 3., 4.], - 'c': [3, 2, 2, 2]}) - expected = result.interpolate(method='linear', axis=1, inplace=False) - result.interpolate(method='linear', axis=1, inplace=True) - assert_frame_equal(result, expected) - - def test_interp_ignore_all_good(self): - # GH - df = DataFrame({'A': [1, 2, np.nan, 4], - 'B': [1, 2, 3, 4], - 'C': [1., 2., np.nan, 4.], - 'D': [1., 2., 3., 4.]}) - expected = DataFrame({'A': np.array([1, 2, 3, 4], dtype='float64'), - 'B': np.array([1, 2, 3, 4], dtype='int64'), - 'C': np.array([1., 2., 3, 4.], dtype='float64'), - 'D': np.array([1., 2., 3., 4.], dtype='float64')}) - - result = df.interpolate(downcast=None) - assert_frame_equal(result, expected) - - # all good - result = df[['B', 'D']].interpolate(downcast=None) - assert_frame_equal(result, df[['B', 'D']]) - - def test_describe(self): - desc = tm.makeDataFrame().describe() - desc = tm.makeMixedDataFrame().describe() - desc = tm.makeTimeDataFrame().describe() - - def test_describe_percentiles_percent_or_raw(self): - msg = 'percentiles should all be in the interval \\[0, 1\\]' - - df = tm.makeDataFrame() - with tm.assertRaisesRegexp(ValueError, msg): - 
df.describe(percentiles=[10, 50, 100]) - - with tm.assertRaisesRegexp(ValueError, msg): - df.describe(percentiles=[2]) - - with tm.assertRaisesRegexp(ValueError, msg): - df.describe(percentiles=[-2]) - - def test_describe_percentiles_equivalence(self): - df = tm.makeDataFrame() - d1 = df.describe() - d2 = df.describe(percentiles=[.25, .75]) - assert_frame_equal(d1, d2) - - def test_describe_percentiles_insert_median(self): - df = tm.makeDataFrame() - d1 = df.describe(percentiles=[.25, .75]) - d2 = df.describe(percentiles=[.25, .5, .75]) - assert_frame_equal(d1, d2) - self.assertTrue('25%' in d1.index) - self.assertTrue('75%' in d2.index) - - # none above - d1 = df.describe(percentiles=[.25, .45]) - d2 = df.describe(percentiles=[.25, .45, .5]) - assert_frame_equal(d1, d2) - self.assertTrue('25%' in d1.index) - self.assertTrue('45%' in d2.index) - - # none below - d1 = df.describe(percentiles=[.75, 1]) - d2 = df.describe(percentiles=[.5, .75, 1]) - assert_frame_equal(d1, d2) - self.assertTrue('75%' in d1.index) - self.assertTrue('100%' in d2.index) - - # edge - d1 = df.describe(percentiles=[0, 1]) - d2 = df.describe(percentiles=[0, .5, 1]) - assert_frame_equal(d1, d2) - self.assertTrue('0%' in d1.index) - self.assertTrue('100%' in d2.index) - - def test_describe_no_numeric(self): - df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8, - 'B': ['a', 'b', 'c', 'd'] * 6}) - desc = df.describe() - expected = DataFrame(dict((k, v.describe()) - for k, v in compat.iteritems(df)), - columns=df.columns) - assert_frame_equal(desc, expected) - - ts = tm.makeTimeSeries() - df = DataFrame({'time': ts.index}) - desc = df.describe() - self.assertEqual(desc.time['first'], min(ts.index)) - - def test_describe_empty_int_columns(self): - df = DataFrame([[0, 1], [1, 2]]) - desc = df[df[0] < 0].describe() # works - assert_series_equal(desc.xs('count'), - Series([0, 0], dtype=float, name='count')) - self.assertTrue(isnull(desc.ix[1:]).all().all()) - - def test_describe_objects(self): - df = 
DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']}) - result = df.describe() - expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]}, - index=['count', 'unique', 'top', 'freq']) - assert_frame_equal(result, expected) - - df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')}) - df.loc[4] = pd.Timestamp('2010-01-04') - result = df.describe() - expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2, - pd.Timestamp('2010-01-01'), - pd.Timestamp('2010-01-04')]}, - index=['count', 'unique', 'top', 'freq', - 'first', 'last']) - assert_frame_equal(result, expected) - - # mix time and str - df['C2'] = ['a', 'a', 'b', 'c', 'a'] - result = df.describe() - expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan] - assert_frame_equal(result, expected) - - # just str - expected = DataFrame({'C2': [5, 3, 'a', 4]}, - index=['count', 'unique', 'top', 'freq']) - result = df[['C2']].describe() - - # mix of time, str, numeric - df['C3'] = [2, 4, 6, 8, 2] - result = df.describe() - expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]}, - index=['count', 'mean', 'std', 'min', '25%', - '50%', '75%', 'max']) - assert_frame_equal(result, expected) - assert_frame_equal(df.describe(), df[['C3']].describe()) - - assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe()) - assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe()) - - def test_describe_typefiltering(self): - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24, dtype='int64'), - 'numD': np.arange(24.) 
+ .5, - 'ts': tm.makeTimeSeries()[:24].index}) - - descN = df.describe() - expected_cols = ['numC', 'numD',] - expected = DataFrame(dict((k, df[k].describe()) - for k in expected_cols), - columns=expected_cols) - assert_frame_equal(descN, expected) - - desc = df.describe(include=['number']) - assert_frame_equal(desc, descN) - desc = df.describe(exclude=['object', 'datetime']) - assert_frame_equal(desc, descN) - desc = df.describe(include=['float']) - assert_frame_equal(desc, descN.drop('numC',1)) - - descC = df.describe(include=['O']) - expected_cols = ['catA', 'catB'] - expected = DataFrame(dict((k, df[k].describe()) - for k in expected_cols), - columns=expected_cols) - assert_frame_equal(descC, expected) - - descD = df.describe(include=['datetime']) - assert_series_equal( descD.ts, df.ts.describe()) - - desc = df.describe(include=['object','number', 'datetime']) - assert_frame_equal(desc.loc[:,["numC","numD"]].dropna(), descN) - assert_frame_equal(desc.loc[:,["catA","catB"]].dropna(), descC) - descDs = descD.sort_index() # the index order change for mixed-types - assert_frame_equal(desc.loc[:,"ts":].dropna().sort_index(), descDs) - - desc = df.loc[:,'catA':'catB'].describe(include='all') - assert_frame_equal(desc, descC) - desc = df.loc[:,'numC':'numD'].describe(include='all') - assert_frame_equal(desc, descN) - - desc = df.describe(percentiles = [], include='all') - cnt = Series(data=[4,4,6,6,6], index=['catA','catB','numC','numD','ts']) - assert_series_equal( desc.count(), cnt) - self.assertTrue('count' in desc.index) - self.assertTrue('unique' in desc.index) - self.assertTrue('50%' in desc.index) - self.assertTrue('first' in desc.index) - - desc = df.drop("ts", 1).describe(percentiles = [], include='all') - assert_series_equal( desc.count(), cnt.drop("ts")) - self.assertTrue('first' not in desc.index) - desc = df.drop(["numC","numD"], 1).describe(percentiles = [], include='all') - assert_series_equal( desc.count(), cnt.drop(["numC","numD"])) - 
self.assertTrue('50%' not in desc.index) - - def test_describe_typefiltering_category_bool(self): - df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8), - 'B_str': ['a', 'b', 'c', 'd'] * 6, - 'C_bool': [True] * 12 + [False] * 12, - 'D_num': np.arange(24.) + .5, - 'E_ts': tm.makeTimeSeries()[:24].index}) - - # bool is considered numeric in describe, although not an np.number - desc = df.describe() - expected_cols = ['C_bool', 'D_num'] - expected = DataFrame(dict((k, df[k].describe()) - for k in expected_cols), - columns=expected_cols) - assert_frame_equal(desc, expected) - - desc = df.describe(include=["category"]) - self.assertTrue(desc.columns.tolist() == ["A_cat"]) - - # 'all' includes numpy-dtypes + category - desc1 = df.describe(include="all") - desc2 = df.describe(include=[np.generic, "category"]) - assert_frame_equal(desc1, desc2) - - def test_describe_timedelta(self): - df = DataFrame({"td": pd.to_timedelta(np.arange(24)%20,"D")}) - self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta("8d4h")) - - def test_describe_typefiltering_dupcol(self): - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24), - 'numD': np.arange(24.) + .5, - 'ts': tm.makeTimeSeries()[:24].index}) - s = df.describe(include='all').shape[1] - df = pd.concat([df, df], axis=1) - s2 = df.describe(include='all').shape[1] - self.assertTrue(s2 == 2 * s) - - def test_describe_typefiltering_groupby(self): - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24), - 'numD': np.arange(24.) 
+ .5, - 'ts': tm.makeTimeSeries()[:24].index}) - G = df.groupby('catA') - self.assertTrue(G.describe(include=['number']).shape == (16, 2)) - self.assertTrue(G.describe(include=['number', 'object']).shape == (22, 3)) - self.assertTrue(G.describe(include='all').shape == (26, 4)) - - def test_no_order(self): - tm._skip_if_no_scipy() - s = Series([0, 1, np.nan, 3]) - with tm.assertRaises(ValueError): - s.interpolate(method='polynomial') - with tm.assertRaises(ValueError): - s.interpolate(method='spline') - - def test_spline(self): - tm._skip_if_no_scipy() - s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) - result = s.interpolate(method='spline', order=1) - expected = Series([1., 2., 3., 4., 5., 6., 7.]) - assert_series_equal(result, expected) - - def test_spline_extrapolate(self): - tm.skip_if_no_package('scipy', '0.15', 'setting ext on scipy.interpolate.UnivariateSpline') - s = Series([1, 2, 3, 4, np.nan, 6, np.nan]) - result3 = s.interpolate(method='spline', order=1, ext=3) - expected3 = Series([1., 2., 3., 4., 5., 6., 6.]) - assert_series_equal(result3, expected3) - - result1 = s.interpolate(method='spline', order=1, ext=0) - expected1 = Series([1., 2., 3., 4., 5., 6., 7.]) - assert_series_equal(result1, expected1) - - def test_spline_smooth(self): - tm._skip_if_no_scipy() - s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7]) - self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5], - s.interpolate(method='spline', order=3)[5]) - - def test_spline_interpolation(self): - tm._skip_if_no_scipy() - - s = Series(np.arange(10)**2) - s[np.random.randint(0,9,3)] = np.nan - result1 = s.interpolate(method='spline', order=1) - expected1 = s.interpolate(method='spline', order=1) - assert_series_equal(result1, expected1) - - # GH #10633 - def test_spline_error(self): - tm._skip_if_no_scipy() - - s = pd.Series(np.arange(10)**2) - s[np.random.randint(0,9,3)] = np.nan - with tm.assertRaises(ValueError): - s.interpolate(method='spline') - - with tm.assertRaises(ValueError): - 
s.interpolate(method='spline', order=0) - - def test_metadata_propagation_indiv(self): - - # groupby - df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) - result = df.groupby('A').sum() - self.check_metadata(df,result) - - # resample - df = DataFrame(np.random.randn(1000,2), - index=date_range('20130101',periods=1000,freq='s')) - result = df.resample('1T') - self.check_metadata(df,result) - - # merging with override - # GH 6923 - _metadata = DataFrame._metadata - _finalize = DataFrame.__finalize__ - - np.random.seed(10) - df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) - df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) - DataFrame._metadata = ['filename'] - df1.filename = 'fname1.csv' - df2.filename = 'fname2.csv' - - def finalize(self, other, method=None, **kwargs): - - for name in self._metadata: - if method == 'merge': - left, right = other.left, other.right - value = getattr(left, name, '') + '|' + getattr(right, name, '') - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, '')) - - return self - - DataFrame.__finalize__ = finalize - result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') - self.assertEqual(result.filename,'fname1.csv|fname2.csv') - - # concat - # GH 6927 - DataFrame._metadata = ['filename'] - df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab')) - df1.filename = 'foo' - - def finalize(self, other, method=None, **kwargs): - for name in self._metadata: - if method == 'concat': - value = '+'.join([ getattr(o,name) for o in other.objs if getattr(o,name,None) ]) - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, None)) - - return self - - DataFrame.__finalize__ = finalize - - result = pd.concat([df1, df1]) - 
self.assertEqual(result.filename,'foo+foo') - - # reset - DataFrame._metadata = _metadata - DataFrame.__finalize__ = _finalize - - def test_tz_convert_and_localize(self): - l0 = date_range('20140701', periods=5, freq='D') - - # TODO: l1 should be a PeriodIndex for testing - # after GH2106 is addressed - with tm.assertRaises(NotImplementedError): - period_range('20140701', periods=1).tz_convert('UTC') - with tm.assertRaises(NotImplementedError): - period_range('20140701', periods=1).tz_localize('UTC') - # l1 = period_range('20140701', periods=5, freq='D') - l1 = date_range('20140701', periods=5, freq='D') - - int_idx = Index(range(5)) - - for fn in ['tz_localize', 'tz_convert']: - - if fn == 'tz_convert': - l0 = l0.tz_localize('UTC') - l1 = l1.tz_localize('UTC') - - for idx in [l0, l1]: - - l0_expected = getattr(idx, fn)('US/Pacific') - l1_expected = getattr(idx, fn)('US/Pacific') - - df1 = DataFrame(np.ones(5), index=l0) - df1 = getattr(df1, fn)('US/Pacific') - self.assertTrue(df1.index.equals(l0_expected)) - - # MultiIndex - # GH7846 - df2 = DataFrame(np.ones(5), - MultiIndex.from_arrays([l0, l1])) - - df3 = getattr(df2, fn)('US/Pacific', level=0) - self.assertFalse(df3.index.levels[0].equals(l0)) - self.assertTrue(df3.index.levels[0].equals(l0_expected)) - self.assertTrue(df3.index.levels[1].equals(l1)) - self.assertFalse(df3.index.levels[1].equals(l1_expected)) - - df3 = getattr(df2, fn)('US/Pacific', level=1) - self.assertTrue(df3.index.levels[0].equals(l0)) - self.assertFalse(df3.index.levels[0].equals(l0_expected)) - self.assertTrue(df3.index.levels[1].equals(l1_expected)) - self.assertFalse(df3.index.levels[1].equals(l1)) - - df4 = DataFrame(np.ones(5), - MultiIndex.from_arrays([int_idx, l0])) - - df5 = getattr(df4, fn)('US/Pacific', level=1) - self.assertTrue(df3.index.levels[0].equals(l0)) - self.assertFalse(df3.index.levels[0].equals(l0_expected)) - self.assertTrue(df3.index.levels[1].equals(l1_expected)) - self.assertFalse(df3.index.levels[1].equals(l1)) 
- - # Bad Inputs - for fn in ['tz_localize', 'tz_convert']: - # Not DatetimeIndex / PeriodIndex - with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'): - df = DataFrame(index=int_idx) - df = getattr(df, fn)('US/Pacific') - - # Not DatetimeIndex / PeriodIndex - with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'): - df = DataFrame(np.ones(5), - MultiIndex.from_arrays([int_idx, l0])) - df = getattr(df, fn)('US/Pacific', level=0) - - # Invalid level - with tm.assertRaisesRegexp(ValueError, 'not valid'): - df = DataFrame(index=l0) - df = getattr(df, fn)('US/Pacific', level=1) - - def test_set_attribute(self): - # Test for consistent setattr behavior when an attribute and a column - # have the same name (Issue #8994) - df = DataFrame({'x':[1, 2, 3]}) - - df.y = 2 - df['y'] = [2, 4, 6] - df.y = 5 - - assert_equal(df.y, 5) - assert_series_equal(df['y'], Series([2, 4, 6], name='y')) - - -class TestPanel(tm.TestCase, Generic): - _typ = Panel - _comparator = lambda self, x, y: assert_panel_equal(x, y) - - -class TestNDFrame(tm.TestCase): - # tests that don't fit elsewhere - - def test_squeeze(self): - # noop - for s in [ tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries() ]: - tm.assert_series_equal(s.squeeze(),s) - for df in [ tm.makeTimeDataFrame() ]: - tm.assert_frame_equal(df.squeeze(),df) - for p in [ tm.makePanel() ]: - tm.assert_panel_equal(p.squeeze(),p) - for p4d in [ tm.makePanel4D() ]: - tm.assert_panel4d_equal(p4d.squeeze(),p4d) - - # squeezing - df = tm.makeTimeDataFrame().reindex(columns=['A']) - tm.assert_series_equal(df.squeeze(),df['A']) - - p = tm.makePanel().reindex(items=['ItemA']) - tm.assert_frame_equal(p.squeeze(),p['ItemA']) - - p = tm.makePanel().reindex(items=['ItemA'],minor_axis=['A']) - tm.assert_series_equal(p.squeeze(),p.ix['ItemA',:,'A']) - - p4d = tm.makePanel4D().reindex(labels=['label1']) - tm.assert_panel_equal(p4d.squeeze(),p4d['label1']) - - p4d = tm.makePanel4D().reindex(labels=['label1'],items=['ItemA']) - 
tm.assert_frame_equal(p4d.squeeze(),p4d.ix['label1','ItemA']) - - def test_equals(self): - s1 = pd.Series([1, 2, 3], index=[0, 2, 1]) - s2 = s1.copy() - self.assertTrue(s1.equals(s2)) - - s1[1] = 99 - self.assertFalse(s1.equals(s2)) - - # NaNs compare as equal - s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3]) - s2 = s1.copy() - self.assertTrue(s1.equals(s2)) - - s2[0] = 9.9 - self.assertFalse(s1.equals(s2)) - - idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')]) - s1 = Series([1, 2, np.nan], index=idx) - s2 = s1.copy() - self.assertTrue(s1.equals(s2)) - - # Add object dtype column with nans - index = np.random.random(10) - df1 = DataFrame(np.random.random(10,), index=index, columns=['floats']) - df1['text'] = 'the sky is so blue. we could use more chocolate.'.split() - df1['start'] = date_range('2000-1-1', periods=10, freq='T') - df1['end'] = date_range('2000-1-1', periods=10, freq='D') - df1['diff'] = df1['end'] - df1['start'] - df1['bool'] = (np.arange(10) % 3 == 0) - df1.ix[::2] = nan - df2 = df1.copy() - self.assertTrue(df1['text'].equals(df2['text'])) - self.assertTrue(df1['start'].equals(df2['start'])) - self.assertTrue(df1['end'].equals(df2['end'])) - self.assertTrue(df1['diff'].equals(df2['diff'])) - self.assertTrue(df1['bool'].equals(df2['bool'])) - self.assertTrue(df1.equals(df2)) - self.assertFalse(df1.equals(object)) - - # different dtype - different = df1.copy() - different['floats'] = different['floats'].astype('float32') - self.assertFalse(df1.equals(different)) - - # different index - different_index = -index - different = df2.set_index(different_index) - self.assertFalse(df1.equals(different)) - - # different columns - different = df2.copy() - different.columns = df2.columns[::-1] - self.assertFalse(df1.equals(different)) - - # DatetimeIndex - index = pd.date_range('2000-1-1', periods=10, freq='T') - df1 = df1.set_index(index) - df2 = df1.copy() - self.assertTrue(df1.equals(df2)) - - # MultiIndex - df3 = df1.set_index(['text'], 
append=True) - df2 = df1.set_index(['text'], append=True) - self.assertTrue(df3.equals(df2)) - - df2 = df1.set_index(['floats'], append=True) - self.assertFalse(df3.equals(df2)) - - # NaN in index - df3 = df1.set_index(['floats'], append=True) - df2 = df1.set_index(['floats'], append=True) - self.assertTrue(df3.equals(df2)) - - # GH 8437 - a = pd.Series([False, np.nan]) - b = pd.Series([False, np.nan]) - c = pd.Series(index=range(2)) - d = pd.Series(index=range(2)) - e = pd.Series(index=range(2)) - f = pd.Series(index=range(2)) - c[:-1] = d[:-1] = e[0] = f[0] = False - self.assertTrue(a.equals(a)) - self.assertTrue(a.equals(b)) - self.assertTrue(a.equals(c)) - self.assertTrue(a.equals(d)) - self.assertFalse(a.equals(e)) - self.assertTrue(e.equals(f)) - - def test_describe_raises(self): - with tm.assertRaises(NotImplementedError): - tm.makePanel().describe() - - def test_pipe(self): - df = DataFrame({'A': [1, 2, 3]}) - f = lambda x, y: x ** y - result = df.pipe(f, 2) - expected = DataFrame({'A': [1, 4, 9]}) - self.assert_frame_equal(result, expected) - - result = df.A.pipe(f, 2) - self.assert_series_equal(result, expected.A) - - def test_pipe_tuple(self): - df = DataFrame({'A': [1, 2, 3]}) - f = lambda x, y: y - result = df.pipe((f, 'y'), 0) - self.assert_frame_equal(result, df) - - result = df.A.pipe((f, 'y'), 0) - self.assert_series_equal(result, df.A) - - def test_pipe_tuple_error(self): - df = DataFrame({"A": [1, 2, 3]}) - f = lambda x, y: y - with tm.assertRaises(ValueError): - result = df.pipe((f, 'y'), x=1, y=0) - - with tm.assertRaises(ValueError): - result = df.A.pipe((f, 'y'), x=1, y=0) - - def test_pipe_panel(self): - wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})}) - f = lambda x, y: x + y - result = wp.pipe(f, 2) - expected = wp + 2 - assert_panel_equal(result, expected) - - result = wp.pipe((f, 'y'), x=1) - expected = wp + 1 - assert_panel_equal(result, expected) - - with tm.assertRaises(ValueError): - result = wp.pipe((f, 'y'), x=1, y=1) - -if __name__ 
== '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py deleted file mode 100644 index dca37d9ce164c..0000000000000 --- a/pandas/tseries/tests/test_period.py +++ /dev/null @@ -1,2930 +0,0 @@ -"""Tests suite for Period handling. - -Parts derived from scikits.timeseries code, original authors: -- Pierre Gerard-Marchant & Matt Knox -- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com - -""" - -from datetime import datetime, date, timedelta - -from numpy.ma.testutils import assert_equal - -from pandas import Timestamp -from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map -from pandas.tseries.period import Period, PeriodIndex, period_range -from pandas.tseries.index import DatetimeIndex, date_range, Index -from pandas.tseries.tools import to_datetime -import pandas.tseries.period as period -import pandas.tseries.offsets as offsets - -import pandas.core.datetools as datetools -import pandas as pd -import numpy as np -from numpy.random import randn -from pandas.compat import range, lrange, lmap, zip - -from pandas import Series, DataFrame, _np_version_under1p9 -from pandas import tslib -from pandas.util.testing import(assert_series_equal, assert_almost_equal, - assertRaisesRegexp) -import pandas.util.testing as tm -from pandas import compat - - -class TestPeriodProperties(tm.TestCase): - "Test properties such as year, month, weekday, etc...." 
- # - - def test_quarterly_negative_ordinals(self): - p = Period(ordinal=-1, freq='Q-DEC') - self.assertEqual(p.year, 1969) - self.assertEqual(p.quarter, 4) - - p = Period(ordinal=-2, freq='Q-DEC') - self.assertEqual(p.year, 1969) - self.assertEqual(p.quarter, 3) - - p = Period(ordinal=-2, freq='M') - self.assertEqual(p.year, 1969) - self.assertEqual(p.month, 11) - - def test_period_cons_quarterly(self): - # bugs in scikits.timeseries - for month in MONTHS: - freq = 'Q-%s' % month - exp = Period('1989Q3', freq=freq) - self.assertIn('1989Q3', str(exp)) - stamp = exp.to_timestamp('D', how='end') - p = Period(stamp, freq=freq) - self.assertEqual(p, exp) - - def test_period_cons_annual(self): - # bugs in scikits.timeseries - for month in MONTHS: - freq = 'A-%s' % month - exp = Period('1989', freq=freq) - stamp = exp.to_timestamp('D', how='end') + timedelta(days=30) - p = Period(stamp, freq=freq) - self.assertEqual(p, exp + 1) - - def test_period_cons_weekly(self): - for num in range(10, 17): - daystr = '2011-02-%d' % num - for day in DAYS: - freq = 'W-%s' % day - - result = Period(daystr, freq=freq) - expected = Period(daystr, freq='D').asfreq(freq) - self.assertEqual(result, expected) - - def test_period_cons_nat(self): - p = Period('NaT', freq='M') - self.assertEqual(p.ordinal, tslib.iNaT) - self.assertEqual(p.freq, 'M') - - p = Period('nat', freq='W-SUN') - self.assertEqual(p.ordinal, tslib.iNaT) - self.assertEqual(p.freq, 'W-SUN') - - p = Period(tslib.iNaT, freq='D') - self.assertEqual(p.ordinal, tslib.iNaT) - self.assertEqual(p.freq, 'D') - - self.assertRaises(ValueError, Period, 'NaT') - - def test_timestamp_tz_arg(self): - import pytz - p = Period('1/1/2005', freq='M').to_timestamp(tz='Europe/Brussels') - self.assertEqual(p.tz, - pytz.timezone('Europe/Brussels').normalize(p).tzinfo) - - def test_timestamp_tz_arg_dateutil(self): - from pandas.tslib import _dateutil_gettz as gettz - from pandas.tslib import maybe_get_tz - p = Period('1/1/2005', 
freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels')) - self.assertEqual(p.tz, gettz('Europe/Brussels')) - - def test_timestamp_tz_arg_dateutil_from_string(self): - from pandas.tslib import _dateutil_gettz as gettz - p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels') - self.assertEqual(p.tz, gettz('Europe/Brussels')) - - def test_timestamp_nat_tz(self): - t = Period('NaT', freq='M').to_timestamp() - self.assertTrue(t is tslib.NaT) - - t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo') - self.assertTrue(t is tslib.NaT) - - def test_period_constructor(self): - i1 = Period('1/1/2005', freq='M') - i2 = Period('Jan 2005') - - self.assertEqual(i1, i2) - - i1 = Period('2005', freq='A') - i2 = Period('2005') - i3 = Period('2005', freq='a') - - self.assertEqual(i1, i2) - self.assertEqual(i1, i3) - - i4 = Period('2005', freq='M') - i5 = Period('2005', freq='m') - - self.assertRaises(ValueError, i1.__ne__, i4) - self.assertEqual(i4, i5) - - i1 = Period.now('Q') - i2 = Period(datetime.now(), freq='Q') - i3 = Period.now('q') - - self.assertEqual(i1, i2) - self.assertEqual(i1, i3) - - # Biz day construction, roll forward if non-weekday - i1 = Period('3/10/12', freq='B') - i2 = Period('3/10/12', freq='D') - self.assertEqual(i1, i2.asfreq('B')) - i2 = Period('3/11/12', freq='D') - self.assertEqual(i1, i2.asfreq('B')) - i2 = Period('3/12/12', freq='D') - self.assertEqual(i1, i2.asfreq('B')) - - i3 = Period('3/10/12', freq='b') - self.assertEqual(i1, i3) - - i1 = Period(year=2005, quarter=1, freq='Q') - i2 = Period('1/1/2005', freq='Q') - self.assertEqual(i1, i2) - - i1 = Period(year=2005, quarter=3, freq='Q') - i2 = Period('9/1/2005', freq='Q') - self.assertEqual(i1, i2) - - i1 = Period(year=2005, month=3, day=1, freq='D') - i2 = Period('3/1/2005', freq='D') - self.assertEqual(i1, i2) - - i3 = Period(year=2005, month=3, day=1, freq='d') - self.assertEqual(i1, i3) - - i1 = Period(year=2012, month=3, day=10, freq='B') - i2 = 
Period('3/12/12', freq='B') - self.assertEqual(i1, i2) - - i1 = Period('2005Q1') - i2 = Period(year=2005, quarter=1, freq='Q') - i3 = Period('2005q1') - self.assertEqual(i1, i2) - self.assertEqual(i1, i3) - - i1 = Period('05Q1') - self.assertEqual(i1, i2) - lower = Period('05q1') - self.assertEqual(i1, lower) - - i1 = Period('1Q2005') - self.assertEqual(i1, i2) - lower = Period('1q2005') - self.assertEqual(i1, lower) - - i1 = Period('1Q05') - self.assertEqual(i1, i2) - lower = Period('1q05') - self.assertEqual(i1, lower) - - i1 = Period('4Q1984') - self.assertEqual(i1.year, 1984) - lower = Period('4q1984') - self.assertEqual(i1, lower) - - i1 = Period('1982', freq='min') - i2 = Period('1982', freq='MIN') - self.assertEqual(i1, i2) - i2 = Period('1982', freq=('Min', 1)) - self.assertEqual(i1, i2) - - expected = Period('2007-01', freq='M') - i1 = Period('200701', freq='M') - self.assertEqual(i1, expected) - - i1 = Period('200701', freq='M') - self.assertEqual(i1, expected) - - i1 = Period(200701, freq='M') - self.assertEqual(i1, expected) - - i1 = Period(ordinal=200701, freq='M') - self.assertEqual(i1.year, 18695) - - i1 = Period(datetime(2007, 1, 1), freq='M') - i2 = Period('200701', freq='M') - self.assertEqual(i1, i2) - - i1 = Period(date(2007, 1, 1), freq='M') - i2 = Period(datetime(2007, 1, 1), freq='M') - i3 = Period(np.datetime64('2007-01-01'), freq='M') - i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M') - i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M') - self.assertEqual(i1, i2) - self.assertEqual(i1, i3) - self.assertEqual(i1, i4) - self.assertEqual(i1, i5) - - i1 = Period('2007-01-01 09:00:00.001') - expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L') - self.assertEqual(i1, expected) - - expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L') - self.assertEqual(i1, expected) - - i1 = Period('2007-01-01 09:00:00.00101') - expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U') - 
self.assertEqual(i1, expected) - - expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'), - freq='U') - self.assertEqual(i1, expected) - - self.assertRaises(ValueError, Period, ordinal=200701) - - self.assertRaises(ValueError, Period, '2007-1-1', freq='X') - - def test_freq_str(self): - i1 = Period('1982', freq='Min') - self.assertNotEqual(i1.freq[0], '1') - - def test_repr(self): - p = Period('Jan-2000') - self.assertIn('2000-01', repr(p)) - - p = Period('2000-12-15') - self.assertIn('2000-12-15', repr(p)) - - def test_repr_nat(self): - p = Period('nat', freq='M') - self.assertIn(repr(tslib.NaT), repr(p)) - - def test_millisecond_repr(self): - p = Period('2000-01-01 12:15:02.123') - - self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p)) - - def test_microsecond_repr(self): - p = Period('2000-01-01 12:15:02.123567') - - self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p)) - - def test_strftime(self): - p = Period('2000-1-1 12:34:12', freq='S') - res = p.strftime('%Y-%m-%d %H:%M:%S') - self.assertEqual(res, '2000-01-01 12:34:12') - tm.assertIsInstance(res, compat.text_type) # GH3363 - - def test_sub_delta(self): - left, right = Period('2011', freq='A'), Period('2007', freq='A') - result = left - right - self.assertEqual(result, 4) - - self.assertRaises(ValueError, left.__sub__, - Period('2007-01', freq='M')) - - def test_to_timestamp(self): - p = Period('1982', freq='A') - start_ts = p.to_timestamp(how='S') - aliases = ['s', 'StarT', 'BEGIn'] - for a in aliases: - self.assertEqual(start_ts, p.to_timestamp('D', how=a)) - - end_ts = p.to_timestamp(how='E') - aliases = ['e', 'end', 'FINIsH'] - for a in aliases: - self.assertEqual(end_ts, p.to_timestamp('D', how=a)) - - from_lst = ['A', 'Q', 'M', 'W', 'B', - 'D', 'H', 'Min', 'S'] - - def _ex(p): - return Timestamp((p + 1).start_time.value - 1) - - for i, fcode in enumerate(from_lst): - p = Period('1982', freq=fcode) - result = p.to_timestamp().to_period(fcode) - 
self.assertEqual(result, p) - - self.assertEqual(p.start_time, p.to_timestamp(how='S')) - - self.assertEqual(p.end_time, _ex(p)) - - # Frequency other than daily - - p = Period('1985', freq='A') - - result = p.to_timestamp('H', how='end') - expected = datetime(1985, 12, 31, 23) - self.assertEqual(result, expected) - - result = p.to_timestamp('T', how='end') - expected = datetime(1985, 12, 31, 23, 59) - self.assertEqual(result, expected) - - result = p.to_timestamp(how='end') - expected = datetime(1985, 12, 31) - self.assertEqual(result, expected) - - expected = datetime(1985, 1, 1) - result = p.to_timestamp('H', how='start') - self.assertEqual(result, expected) - result = p.to_timestamp('T', how='start') - self.assertEqual(result, expected) - result = p.to_timestamp('S', how='start') - self.assertEqual(result, expected) - - assertRaisesRegexp(ValueError, 'Only mult == 1', p.to_timestamp, '5t') - - p = Period('NaT', freq='W') - self.assertTrue(p.to_timestamp() is tslib.NaT) - - def test_start_time(self): - freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S'] - xp = datetime(2012, 1, 1) - for f in freq_lst: - p = Period('2012', freq=f) - self.assertEqual(p.start_time, xp) - self.assertEqual(Period('2012', freq='B').start_time, - datetime(2012, 1, 2)) - self.assertEqual(Period('2012', freq='W').start_time, - datetime(2011, 12, 26)) - - p = Period('NaT', freq='W') - self.assertTrue(p.start_time is tslib.NaT) - - def test_end_time(self): - p = Period('2012', freq='A') - - def _ex(*args): - return Timestamp(Timestamp(datetime(*args)).value - 1) - - xp = _ex(2013, 1, 1) - self.assertEqual(xp, p.end_time) - - p = Period('2012', freq='Q') - xp = _ex(2012, 4, 1) - self.assertEqual(xp, p.end_time) - - p = Period('2012', freq='M') - xp = _ex(2012, 2, 1) - self.assertEqual(xp, p.end_time) - - xp = _ex(2012, 1, 2) - p = Period('2012', freq='D') - self.assertEqual(p.end_time, xp) - - xp = _ex(2012, 1, 1, 1) - p = Period('2012', freq='H') - self.assertEqual(p.end_time, xp) - - xp = 
_ex(2012, 1, 3) - self.assertEqual(Period('2012', freq='B').end_time, xp) - - xp = _ex(2012, 1, 2) - self.assertEqual(Period('2012', freq='W').end_time, xp) - - p = Period('NaT', freq='W') - self.assertTrue(p.end_time is tslib.NaT) - - def test_anchor_week_end_time(self): - def _ex(*args): - return Timestamp(Timestamp(datetime(*args)).value - 1) - - p = Period('2013-1-1', 'W-SAT') - xp = _ex(2013, 1, 6) - self.assertEqual(p.end_time, xp) - - def test_properties_annually(self): - # Test properties on Periods with annually frequency. - a_date = Period(freq='A', year=2007) - assert_equal(a_date.year, 2007) - - def test_properties_quarterly(self): - # Test properties on Periods with daily frequency. - qedec_date = Period(freq="Q-DEC", year=2007, quarter=1) - qejan_date = Period(freq="Q-JAN", year=2007, quarter=1) - qejun_date = Period(freq="Q-JUN", year=2007, quarter=1) - # - for x in range(3): - for qd in (qedec_date, qejan_date, qejun_date): - assert_equal((qd + x).qyear, 2007) - assert_equal((qd + x).quarter, x + 1) - - def test_properties_monthly(self): - # Test properties on Periods with daily frequency. - m_date = Period(freq='M', year=2007, month=1) - for x in range(11): - m_ival_x = m_date + x - assert_equal(m_ival_x.year, 2007) - if 1 <= x + 1 <= 3: - assert_equal(m_ival_x.quarter, 1) - elif 4 <= x + 1 <= 6: - assert_equal(m_ival_x.quarter, 2) - elif 7 <= x + 1 <= 9: - assert_equal(m_ival_x.quarter, 3) - elif 10 <= x + 1 <= 12: - assert_equal(m_ival_x.quarter, 4) - assert_equal(m_ival_x.month, x + 1) - - def test_properties_weekly(self): - # Test properties on Periods with daily frequency. 
- w_date = Period(freq='WK', year=2007, month=1, day=7) - # - assert_equal(w_date.year, 2007) - assert_equal(w_date.quarter, 1) - assert_equal(w_date.month, 1) - assert_equal(w_date.week, 1) - assert_equal((w_date - 1).week, 52) - assert_equal(w_date.days_in_month, 31) - assert_equal(Period(freq='WK', year=2012, month=2, day=1).days_in_month, 29) - - def test_properties_daily(self): - # Test properties on Periods with daily frequency. - b_date = Period(freq='B', year=2007, month=1, day=1) - # - assert_equal(b_date.year, 2007) - assert_equal(b_date.quarter, 1) - assert_equal(b_date.month, 1) - assert_equal(b_date.day, 1) - assert_equal(b_date.weekday, 0) - assert_equal(b_date.dayofyear, 1) - assert_equal(b_date.days_in_month, 31) - assert_equal(Period(freq='B', year=2012, month=2, day=1).days_in_month, 29) - # - d_date = Period(freq='D', year=2007, month=1, day=1) - # - assert_equal(d_date.year, 2007) - assert_equal(d_date.quarter, 1) - assert_equal(d_date.month, 1) - assert_equal(d_date.day, 1) - assert_equal(d_date.weekday, 0) - assert_equal(d_date.dayofyear, 1) - assert_equal(d_date.days_in_month, 31) - assert_equal(Period(freq='D', year=2012, month=2, - day=1).days_in_month, 29) - - def test_properties_hourly(self): - # Test properties on Periods with hourly frequency. - h_date = Period(freq='H', year=2007, month=1, day=1, hour=0) - # - assert_equal(h_date.year, 2007) - assert_equal(h_date.quarter, 1) - assert_equal(h_date.month, 1) - assert_equal(h_date.day, 1) - assert_equal(h_date.weekday, 0) - assert_equal(h_date.dayofyear, 1) - assert_equal(h_date.hour, 0) - assert_equal(h_date.days_in_month, 31) - assert_equal(Period(freq='H', year=2012, month=2, day=1, - hour=0).days_in_month, 29) - # - - def test_properties_minutely(self): - # Test properties on Periods with minutely frequency. 
- t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0, - minute=0) - # - assert_equal(t_date.quarter, 1) - assert_equal(t_date.month, 1) - assert_equal(t_date.day, 1) - assert_equal(t_date.weekday, 0) - assert_equal(t_date.dayofyear, 1) - assert_equal(t_date.hour, 0) - assert_equal(t_date.minute, 0) - assert_equal(t_date.days_in_month, 31) - assert_equal(Period(freq='D', year=2012, month=2, day=1, hour=0, - minute=0).days_in_month, 29) - - def test_properties_secondly(self): - # Test properties on Periods with secondly frequency. - s_date = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - # - assert_equal(s_date.year, 2007) - assert_equal(s_date.quarter, 1) - assert_equal(s_date.month, 1) - assert_equal(s_date.day, 1) - assert_equal(s_date.weekday, 0) - assert_equal(s_date.dayofyear, 1) - assert_equal(s_date.hour, 0) - assert_equal(s_date.minute, 0) - assert_equal(s_date.second, 0) - assert_equal(s_date.days_in_month, 31) - assert_equal(Period(freq='Min', year=2012, month=2, day=1, hour=0, - minute=0, second=0).days_in_month, 29) - - def test_properties_nat(self): - p_nat = Period('NaT', freq='M') - t_nat = pd.Timestamp('NaT') - # confirm Period('NaT') work identical with Timestamp('NaT') - for f in ['year', 'month', 'day', 'hour', 'minute', 'second', - 'week', 'dayofyear', 'quarter', 'days_in_month']: - self.assertTrue(np.isnan(getattr(p_nat, f))) - self.assertTrue(np.isnan(getattr(t_nat, f))) - - for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']: - self.assertTrue(np.isnan(getattr(p_nat, f))) - - def test_pnow(self): - dt = datetime.now() - - val = period.pnow('D') - exp = Period(dt, freq='D') - self.assertEqual(val, exp) - - def test_constructor_corner(self): - self.assertRaises(ValueError, Period, year=2007, month=1, - freq='2M') - - self.assertRaises(ValueError, Period, datetime.now()) - self.assertRaises(ValueError, Period, datetime.now().date()) - self.assertRaises(ValueError, Period, 1.6, freq='D') - 
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D') - self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D') - self.assertRaises(ValueError, Period) - self.assertRaises(ValueError, Period, month=1) - - p = Period('2007-01-01', freq='D') - - result = Period(p, freq='A') - exp = Period('2007', freq='A') - self.assertEqual(result, exp) - - def test_constructor_infer_freq(self): - p = Period('2007-01-01') - self.assertEqual(p.freq, 'D') - - p = Period('2007-01-01 07') - self.assertEqual(p.freq, 'H') - - p = Period('2007-01-01 07:10') - self.assertEqual(p.freq, 'T') - - p = Period('2007-01-01 07:10:15') - self.assertEqual(p.freq, 'S') - - p = Period('2007-01-01 07:10:15.123') - self.assertEqual(p.freq, 'L') - - p = Period('2007-01-01 07:10:15.123000') - self.assertEqual(p.freq, 'L') - - p = Period('2007-01-01 07:10:15.123400') - self.assertEqual(p.freq, 'U') - - def test_asfreq_MS(self): - initial = Period("2013") - - self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M')) - self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S") - tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS') - self.assertTrue(_period_code_map.get("MS") is None) - -def noWrap(item): - return item - - -class TestFreqConversion(tm.TestCase): - "Test frequency conversion of date objects" - - def test_asfreq_corner(self): - val = Period(freq='A', year=2007) - self.assertRaises(ValueError, val.asfreq, '5t') - - def test_conv_annual(self): - # frequency conversion tests: from Annual Frequency - - ival_A = Period(freq='A', year=2007) - - ival_AJAN = Period(freq="A-JAN", year=2007) - ival_AJUN = Period(freq="A-JUN", year=2007) - ival_ANOV = Period(freq="A-NOV", year=2007) - - ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1) - ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4) - ival_A_to_M_start = Period(freq='M', year=2007, month=1) - ival_A_to_M_end = Period(freq='M', year=2007, month=12) - 
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1) - ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31) - ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1) - ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31) - ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1) - ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31) - ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31, - hour=23) - ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31, - hour=23, minute=59) - ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31, - hour=23, minute=59, second=59) - - ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31) - ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1) - ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30) - ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1) - ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30) - ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1) - - assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start) - assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end) - assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start) - assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end) - assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start) - assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end) - assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start) - assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end) - assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start) - assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end) - assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start) - 
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end) - assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start) - assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end) - assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start) - assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end) - assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start) - assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end) - - assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start) - assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end) - - assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start) - assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end) - - assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start) - assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end) - - assert_equal(ival_A.asfreq('A'), ival_A) - - def test_conv_quarterly(self): - # frequency conversion tests: from Quarterly Frequency - - ival_Q = Period(freq='Q', year=2007, quarter=1) - ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4) - - ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1) - ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1) - - ival_Q_to_A = Period(freq='A', year=2007) - ival_Q_to_M_start = Period(freq='M', year=2007, month=1) - ival_Q_to_M_end = Period(freq='M', year=2007, month=3) - ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1) - ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31) - ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1) - ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30) - ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1) - ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31) - ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, - hour=23) - ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_Q_to_T_end = 
Period(freq='Min', year=2007, month=3, day=31, - hour=23, minute=59) - ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, - hour=23, minute=59, second=59) - - ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1) - ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30) - - ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1) - ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30) - - assert_equal(ival_Q.asfreq('A'), ival_Q_to_A) - assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A) - - assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start) - assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end) - assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start) - assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end) - assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start) - assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end) - assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start) - assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end) - assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start) - assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end) - assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start) - assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end) - assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start) - assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end) - - assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start) - assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end) - assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start) - assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end) - - assert_equal(ival_Q.asfreq('Q'), ival_Q) - - def test_conv_monthly(self): - # frequency conversion tests: from Monthly Frequency - - ival_M = Period(freq='M', year=2007, month=1) - ival_M_end_of_year = Period(freq='M', year=2007, month=12) - 
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3) - ival_M_to_A = Period(freq='A', year=2007) - ival_M_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1) - ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31) - ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1) - ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31) - ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1) - ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31) - ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, - hour=23) - ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31, - hour=23, minute=59) - ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, - hour=23, minute=59, second=59) - - assert_equal(ival_M.asfreq('A'), ival_M_to_A) - assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A) - assert_equal(ival_M.asfreq('Q'), ival_M_to_Q) - assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q) - - assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start) - assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end) - assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start) - assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end) - assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start) - assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end) - assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start) - assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end) - assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start) - assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end) - assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start) - assert_equal(ival_M.asfreq('S', 'E'), 
ival_M_to_S_end) - - assert_equal(ival_M.asfreq('M'), ival_M) - - def test_conv_weekly(self): - # frequency conversion tests: from Weekly Frequency - - ival_W = Period(freq='WK', year=2007, month=1, day=1) - - ival_WSUN = Period(freq='WK', year=2007, month=1, day=7) - ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6) - ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5) - ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4) - ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3) - ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2) - ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1) - - ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1) - ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7) - ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31) - ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6) - ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30) - ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5) - ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29) - ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4) - ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28) - ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3) - ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27) - ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2) - ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26) - ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1) - - ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31) - ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31) - ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31) - ival_W_to_A = Period(freq='A', year=2007) - ival_W_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_W_to_M = Period(freq='M', year=2007, month=1) - - if 
Period(freq='D', year=2007, month=12, day=31).weekday == 6: - ival_W_to_A_end_of_year = Period(freq='A', year=2007) - else: - ival_W_to_A_end_of_year = Period(freq='A', year=2008) - - if Period(freq='D', year=2007, month=3, day=31).weekday == 6: - ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, - quarter=1) - else: - ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, - quarter=2) - - if Period(freq='D', year=2007, month=1, day=31).weekday == 6: - ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1) - else: - ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2) - - ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1) - ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5) - ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1) - ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7) - ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, - hour=23) - ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7, - hour=23, minute=59) - ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, - hour=23, minute=59, second=59) - - assert_equal(ival_W.asfreq('A'), ival_W_to_A) - assert_equal(ival_W_end_of_year.asfreq('A'), - ival_W_to_A_end_of_year) - assert_equal(ival_W.asfreq('Q'), ival_W_to_Q) - assert_equal(ival_W_end_of_quarter.asfreq('Q'), - ival_W_to_Q_end_of_quarter) - assert_equal(ival_W.asfreq('M'), ival_W_to_M) - assert_equal(ival_W_end_of_month.asfreq('M'), - ival_W_to_M_end_of_month) - - assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start) - assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end) - - assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start) - assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end) - 
- assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start) - assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end) - assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start) - assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end) - assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start) - assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end) - assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start) - assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end) - assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start) - assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end) - assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start) - assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end) - assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start) - assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end) - - assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start) - assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end) - assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start) - assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end) - assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start) - assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end) - - assert_equal(ival_W.asfreq('WK'), ival_W) - - def test_conv_business(self): - # frequency conversion tests: from Business Frequency" - - ival_B = Period(freq='B', year=2007, month=1, day=1) - ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31) - ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30) - ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31) - ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5) - - ival_B_to_A = Period(freq='A', year=2007) - ival_B_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_B_to_M = Period(freq='M', year=2007, month=1) - ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7) - ival_B_to_D = Period(freq='D', year=2007, month=1, day=1) - 
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, - hour=23) - ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1, - hour=23, minute=59) - ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, - hour=23, minute=59, second=59) - - assert_equal(ival_B.asfreq('A'), ival_B_to_A) - assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A) - assert_equal(ival_B.asfreq('Q'), ival_B_to_Q) - assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q) - assert_equal(ival_B.asfreq('M'), ival_B_to_M) - assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M) - assert_equal(ival_B.asfreq('WK'), ival_B_to_W) - assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W) - - assert_equal(ival_B.asfreq('D'), ival_B_to_D) - - assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start) - assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end) - assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start) - assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end) - assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start) - assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end) - - assert_equal(ival_B.asfreq('B'), ival_B) - - def test_conv_daily(self): - # frequency conversion tests: from Business Frequency" - - ival_D = Period(freq='D', year=2007, month=1, day=1) - ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31) - ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31) - ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31) - ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7) - - ival_D_friday = Period(freq='D', year=2007, month=1, day=5) - ival_D_saturday = Period(freq='D', year=2007, month=1, day=6) - ival_D_sunday = Period(freq='D', 
year=2007, month=1, day=7) - ival_D_monday = Period(freq='D', year=2007, month=1, day=8) - - ival_B_friday = Period(freq='B', year=2007, month=1, day=5) - ival_B_monday = Period(freq='B', year=2007, month=1, day=8) - - ival_D_to_A = Period(freq='A', year=2007) - - ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008) - ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007) - ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007) - - ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4) - ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3) - ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1) - - ival_D_to_M = Period(freq='M', year=2007, month=1) - ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7) - - ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1, - hour=23) - ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1, - hour=23, minute=59) - ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1, - hour=23, minute=59, second=59) - - assert_equal(ival_D.asfreq('A'), ival_D_to_A) - - assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'), - ival_Deoq_to_AJAN) - assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'), - ival_Deoq_to_AJUN) - assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'), - ival_Deoq_to_ADEC) - - assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A) - assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC) - assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN) - assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN) - assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC) - assert_equal(ival_D.asfreq('M'), ival_D_to_M) - assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M) - assert_equal(ival_D.asfreq('WK'), ival_D_to_W) - 
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W) - - assert_equal(ival_D_friday.asfreq('B'), ival_B_friday) - assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday) - assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday) - assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday) - assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday) - - assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start) - assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end) - assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start) - assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end) - assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start) - assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end) - - assert_equal(ival_D.asfreq('D'), ival_D) - - def test_conv_hourly(self): - # frequency conversion tests: from Hourly Frequency" - - ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0) - ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31, - hour=23) - ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31, - hour=23) - ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31, - hour=23) - ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7, - hour=23) - ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1, - hour=23) - ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1, - hour=23) - - ival_H_to_A = Period(freq='A', year=2007) - ival_H_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_H_to_M = Period(freq='M', year=2007, month=1) - ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7) - ival_H_to_D = Period(freq='D', year=2007, month=1, day=1) - ival_H_to_B = Period(freq='B', year=2007, month=1, day=1) - - ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=59) - ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, 
second=0) - ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=59, second=59) - - assert_equal(ival_H.asfreq('A'), ival_H_to_A) - assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A) - assert_equal(ival_H.asfreq('Q'), ival_H_to_Q) - assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q) - assert_equal(ival_H.asfreq('M'), ival_H_to_M) - assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M) - assert_equal(ival_H.asfreq('WK'), ival_H_to_W) - assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W) - assert_equal(ival_H.asfreq('D'), ival_H_to_D) - assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D) - assert_equal(ival_H.asfreq('B'), ival_H_to_B) - assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B) - - assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start) - assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end) - assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start) - assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end) - - assert_equal(ival_H.asfreq('H'), ival_H) - - def test_conv_minutely(self): - # frequency conversion tests: from Minutely Frequency" - - ival_T = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31, - hour=23, minute=59) - ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31, - hour=23, minute=59) - ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31, - hour=23, minute=59) - ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7, - hour=23, minute=59) - ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1, - hour=23, minute=59) - ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1, - hour=23, minute=59) - ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=59) - - ival_T_to_A = Period(freq='A', year=2007) - ival_T_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_T_to_M = Period(freq='M', year=2007, 
month=1) - ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7) - ival_T_to_D = Period(freq='D', year=2007, month=1, day=1) - ival_T_to_B = Period(freq='B', year=2007, month=1, day=1) - ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0) - - ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=59) - - assert_equal(ival_T.asfreq('A'), ival_T_to_A) - assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A) - assert_equal(ival_T.asfreq('Q'), ival_T_to_Q) - assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q) - assert_equal(ival_T.asfreq('M'), ival_T_to_M) - assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M) - assert_equal(ival_T.asfreq('WK'), ival_T_to_W) - assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W) - assert_equal(ival_T.asfreq('D'), ival_T_to_D) - assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D) - assert_equal(ival_T.asfreq('B'), ival_T_to_B) - assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B) - assert_equal(ival_T.asfreq('H'), ival_T_to_H) - assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H) - - assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start) - assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end) - - assert_equal(ival_T.asfreq('Min'), ival_T) - - def test_conv_secondly(self): - # frequency conversion tests: from Secondly Frequency" - - ival_S = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=0) - ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31, - hour=23, minute=59, second=59) - ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31, - hour=23, minute=59, second=59) - ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31, - hour=23, minute=59, second=59) - ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7, - hour=23, minute=59, second=59) - ival_S_end_of_day = 
Period(freq='S', year=2007, month=1, day=1, - hour=23, minute=59, second=59) - ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1, - hour=23, minute=59, second=59) - ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=59, second=59) - ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1, - hour=0, minute=0, second=59) - - ival_S_to_A = Period(freq='A', year=2007) - ival_S_to_Q = Period(freq='Q', year=2007, quarter=1) - ival_S_to_M = Period(freq='M', year=2007, month=1) - ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7) - ival_S_to_D = Period(freq='D', year=2007, month=1, day=1) - ival_S_to_B = Period(freq='B', year=2007, month=1, day=1) - ival_S_to_H = Period(freq='H', year=2007, month=1, day=1, - hour=0) - ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1, - hour=0, minute=0) - - assert_equal(ival_S.asfreq('A'), ival_S_to_A) - assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A) - assert_equal(ival_S.asfreq('Q'), ival_S_to_Q) - assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q) - assert_equal(ival_S.asfreq('M'), ival_S_to_M) - assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M) - assert_equal(ival_S.asfreq('WK'), ival_S_to_W) - assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W) - assert_equal(ival_S.asfreq('D'), ival_S_to_D) - assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D) - assert_equal(ival_S.asfreq('B'), ival_S_to_B) - assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B) - assert_equal(ival_S.asfreq('H'), ival_S_to_H) - assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H) - assert_equal(ival_S.asfreq('Min'), ival_S_to_T) - assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T) - - assert_equal(ival_S.asfreq('S'), ival_S) - - def test_asfreq_nat(self): - p = Period('NaT', freq='A') - result = p.asfreq('M') - self.assertEqual(result.ordinal, tslib.iNaT) - self.assertEqual(result.freq, 'M') - - -class TestPeriodIndex(tm.TestCase): - - 
def setUp(self): - pass - - def test_hash_error(self): - index = period_range('20010101', periods=10) - with tm.assertRaisesRegexp(TypeError, - "unhashable type: %r" % - type(index).__name__): - hash(index) - - def test_make_time_series(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - series = Series(1, index=index) - tm.assertIsInstance(series, Series) - - def test_astype(self): - idx = period_range('1990', '2009', freq='A') - - result = idx.astype('i8') - self.assert_numpy_array_equal(result, idx.values) - - def test_constructor_use_start_freq(self): - # GH #1118 - p = Period('4/2/2012', freq='B') - index = PeriodIndex(start=p, periods=10) - expected = PeriodIndex(start='4/2/2012', periods=10, freq='B') - self.assertTrue(index.equals(expected)) - - def test_constructor_field_arrays(self): - # GH #1264 - - years = np.arange(1990, 2010).repeat(4)[2:-2] - quarters = np.tile(np.arange(1, 5), 20)[2:-2] - - index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC') - expected = period_range('1990Q3', '2009Q2', freq='Q-DEC') - self.assertTrue(index.equals(expected)) - - self.assertRaises( - ValueError, PeriodIndex, year=years, quarter=quarters, - freq='2Q-DEC') - - index = PeriodIndex(year=years, quarter=quarters) - self.assertTrue(index.equals(expected)) - - years = [2007, 2007, 2007] - months = [1, 2] - self.assertRaises(ValueError, PeriodIndex, year=years, month=months, - freq='M') - self.assertRaises(ValueError, PeriodIndex, year=years, month=months, - freq='2M') - self.assertRaises(ValueError, PeriodIndex, year=years, month=months, - freq='M', start=Period('2007-01', freq='M')) - - years = [2007, 2007, 2007] - months = [1, 2, 3] - idx = PeriodIndex(year=years, month=months, freq='M') - exp = period_range('2007-01', periods=3, freq='M') - self.assertTrue(idx.equals(exp)) - - def test_constructor_U(self): - # U was used as undefined period - self.assertRaises(ValueError, period_range, '2007-1-1', periods=500, - freq='X') - - def 
test_constructor_arrays_negative_year(self): - years = np.arange(1960, 2000).repeat(4) - quarters = np.tile(lrange(1, 5), 40) - - pindex = PeriodIndex(year=years, quarter=quarters) - - self.assert_numpy_array_equal(pindex.year, years) - self.assert_numpy_array_equal(pindex.quarter, quarters) - - def test_constructor_invalid_quarters(self): - self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004), - quarter=lrange(4), freq='Q-DEC') - - def test_constructor_corner(self): - self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A') - - start = Period('2007', freq='A-JUN') - end = Period('2010', freq='A-DEC') - self.assertRaises(ValueError, PeriodIndex, start=start, end=end) - self.assertRaises(ValueError, PeriodIndex, start=start) - self.assertRaises(ValueError, PeriodIndex, end=end) - - result = period_range('2007-01', periods=10.5, freq='M') - exp = period_range('2007-01', periods=10, freq='M') - self.assertTrue(result.equals(exp)) - - def test_constructor_fromarraylike(self): - idx = period_range('2007-01', periods=20, freq='M') - - self.assertRaises(ValueError, PeriodIndex, idx.values) - self.assertRaises(ValueError, PeriodIndex, list(idx.values)) - self.assertRaises(ValueError, PeriodIndex, - data=Period('2007', freq='A')) - - result = PeriodIndex(iter(idx)) - self.assertTrue(result.equals(idx)) - - result = PeriodIndex(idx) - self.assertTrue(result.equals(idx)) - - result = PeriodIndex(idx, freq='M') - self.assertTrue(result.equals(idx)) - - result = PeriodIndex(idx, freq='D') - exp = idx.asfreq('D', 'e') - self.assertTrue(result.equals(exp)) - - def test_constructor_datetime64arr(self): - vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64) - vals = vals.view(np.dtype('M8[us]')) - - self.assertRaises(ValueError, PeriodIndex, vals, freq='D') - - def test_constructor_simple_new(self): - idx = period_range('2007-01', name='p', periods=20, freq='M') - result = idx._simple_new(idx, 'p', freq=idx.freq) - self.assertTrue(result.equals(idx)) 
- - result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq) - self.assertTrue(result.equals(idx)) - - def test_constructor_nat(self): - self.assertRaises( - ValueError, period_range, start='NaT', end='2011-01-01', freq='M') - self.assertRaises( - ValueError, period_range, start='2011-01-01', end='NaT', freq='M') - - def test_constructor_year_and_quarter(self): - year = pd.Series([2001, 2002, 2003]) - quarter = year - 2000 - idx = PeriodIndex(year=year, quarter=quarter) - strs = ['%dQ%d' % t for t in zip(quarter, year)] - lops = list(map(Period, strs)) - p = PeriodIndex(lops) - tm.assert_index_equal(p, idx) - - def test_is_(self): - create_index = lambda: PeriodIndex(freq='A', start='1/1/2001', - end='12/1/2009') - index = create_index() - self.assertEqual(index.is_(index), True) - self.assertEqual(index.is_(create_index()), False) - self.assertEqual(index.is_(index.view()), True) - self.assertEqual(index.is_(index.view().view().view().view().view()), True) - self.assertEqual(index.view().is_(index), True) - ind2 = index.view() - index.name = "Apple" - self.assertEqual(ind2.is_(index), True) - self.assertEqual(index.is_(index[:]), False) - self.assertEqual(index.is_(index.asfreq('M')), False) - self.assertEqual(index.is_(index.asfreq('A')), False) - self.assertEqual(index.is_(index - 2), False) - self.assertEqual(index.is_(index - 0), False) - - def test_comp_period(self): - idx = period_range('2007-01', periods=20, freq='M') - - result = idx < idx[10] - exp = idx.values < idx.values[10] - self.assert_numpy_array_equal(result, exp) - - def test_getitem_ndim2(self): - idx = period_range('2007-01', periods=3, freq='M') - - result = idx[:, None] - # MPL kludge - tm.assertIsInstance(result, PeriodIndex) - - def test_getitem_partial(self): - rng = period_range('2007-01', periods=50, freq='M') - ts = Series(np.random.randn(len(rng)), rng) - - self.assertRaises(KeyError, ts.__getitem__, '2006') - - result = ts['2008'] - self.assertTrue((result.index.year == 
2008).all()) - - result = ts['2008':'2009'] - self.assertEqual(len(result), 24) - - result = ts['2008-1':'2009-12'] - self.assertEqual(len(result), 24) - - result = ts['2008Q1':'2009Q4'] - self.assertEqual(len(result), 24) - - result = ts[:'2009'] - self.assertEqual(len(result), 36) - - result = ts['2009':] - self.assertEqual(len(result), 50 - 24) - - exp = result - result = ts[24:] - assert_series_equal(exp, result) - - ts = ts[10:].append(ts[10:]) - self.assertRaisesRegexp( - KeyError, "left slice bound for non-unique label: '2008'", - ts.__getitem__, slice('2008', '2009')) - - def test_getitem_datetime(self): - rng = period_range(start='2012-01-01', periods=10, freq='W-MON') - ts = Series(lrange(len(rng)), index=rng) - - dt1 = datetime(2011, 10, 2) - dt4 = datetime(2012, 4, 20) - - rs = ts[dt1:dt4] - assert_series_equal(rs, ts) - - def test_slice_with_negative_step(self): - ts = Series(np.arange(20), - period_range('2014-01', periods=20, freq='M')) - SLC = pd.IndexSlice - - def assert_slices_equivalent(l_slc, i_slc): - assert_series_equal(ts[l_slc], ts.iloc[i_slc]) - assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) - assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc]) - - assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1]) - assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1]) - - assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1]) - assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1]) - - assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1], SLC[13:8:-1]) - - assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0]) - - def test_slice_with_zero_step_raises(self): - ts = Series(np.arange(20), - period_range('2014-01', periods=20, freq='M')) - self.assertRaisesRegexp(ValueError, 'slice 
step cannot be zero', - lambda: ts[::0]) - self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', - lambda: ts.loc[::0]) - self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', - lambda: ts.ix[::0]) - - def test_sub(self): - rng = period_range('2007-01', periods=50) - - result = rng - 5 - exp = rng + (-5) - self.assertTrue(result.equals(exp)) - - def test_periods_number_check(self): - self.assertRaises( - ValueError, period_range, '2011-1-1', '2012-1-1', 'B') - - def test_tolist(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - rs = index.tolist() - [tm.assertIsInstance(x, Period) for x in rs] - - recon = PeriodIndex(rs) - self.assertTrue(index.equals(recon)) - - def test_to_timestamp(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - series = Series(1, index=index, name='foo') - - exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') - result = series.to_timestamp(how='end') - self.assertTrue(result.index.equals(exp_index)) - self.assertEqual(result.name, 'foo') - - exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN') - result = series.to_timestamp(how='start') - self.assertTrue(result.index.equals(exp_index)) - - def _get_with_delta(delta, freq='A-DEC'): - return date_range(to_datetime('1/1/2001') + delta, - to_datetime('12/31/2009') + delta, freq=freq) - - delta = timedelta(hours=23) - result = series.to_timestamp('H', 'end') - exp_index = _get_with_delta(delta) - self.assertTrue(result.index.equals(exp_index)) - - delta = timedelta(hours=23, minutes=59) - result = series.to_timestamp('T', 'end') - exp_index = _get_with_delta(delta) - self.assertTrue(result.index.equals(exp_index)) - - result = series.to_timestamp('S', 'end') - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - self.assertTrue(result.index.equals(exp_index)) - - self.assertRaises(ValueError, index.to_timestamp, '5t') - - index = PeriodIndex(freq='H', 
start='1/1/2001', end='1/2/2001') - series = Series(1, index=index, name='foo') - - exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59', - freq='H') - result = series.to_timestamp(how='end') - self.assertTrue(result.index.equals(exp_index)) - self.assertEqual(result.name, 'foo') - - def test_to_timestamp_quarterly_bug(self): - years = np.arange(1960, 2000).repeat(4) - quarters = np.tile(lrange(1, 5), 40) - - pindex = PeriodIndex(year=years, quarter=quarters) - - stamps = pindex.to_timestamp('D', 'end') - expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex]) - self.assertTrue(stamps.equals(expected)) - - def test_to_timestamp_preserve_name(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009', - name='foo') - self.assertEqual(index.name, 'foo') - - conv = index.to_timestamp('D') - self.assertEqual(conv.name, 'foo') - - def test_to_timestamp_repr_is_code(self): - zs=[Timestamp('99-04-17 00:00:00',tz='UTC'), - Timestamp('2001-04-17 00:00:00',tz='UTC'), - Timestamp('2001-04-17 00:00:00',tz='America/Los_Angeles'), - Timestamp('2001-04-17 00:00:00',tz=None)] - for z in zs: - self.assertEqual( eval(repr(z)), z) - - def test_to_timestamp_period_nat(self): - # GH 7228 - index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx') - - result = index.to_timestamp('D') - expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1), - datetime(2011, 2, 1)], name='idx') - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, 'idx') - - result2 = result.to_period(freq='M') - self.assertTrue(result2.equals(index)) - self.assertEqual(result2.name, 'idx') - - def test_as_frame_columns(self): - rng = period_range('1/1/2000', periods=5) - df = DataFrame(randn(10, 5), columns=rng) - - ts = df[rng[0]] - assert_series_equal(ts, df.ix[:, 0]) - - # GH # 1211 - repr(df) - - ts = df['1/1/2000'] - assert_series_equal(ts, df.ix[:, 0]) - - def test_indexing(self): - - # GH 4390, iat incorrectly indexing - index = 
period_range('1/1/2001', periods=10) - s = Series(randn(10), index=index) - expected = s[index[0]] - result = s.iat[0] - self.assertEqual(expected, result) - - def test_frame_setitem(self): - rng = period_range('1/1/2000', periods=5) - rng.name = 'index' - df = DataFrame(randn(5, 3), index=rng) - - df['Index'] = rng - rs = Index(df['Index']) - self.assertTrue(rs.equals(rng)) - - rs = df.reset_index().set_index('index') - tm.assertIsInstance(rs.index, PeriodIndex) - self.assertTrue(rs.index.equals(rng)) - - def test_period_set_index_reindex(self): - # GH 6631 - df = DataFrame(np.random.random(6)) - idx1 = period_range('2011/01/01', periods=6, freq='M') - idx2 = period_range('2013', periods=6, freq='A') - - df = df.set_index(idx1) - self.assertTrue(df.index.equals(idx1)) - df = df.set_index(idx2) - self.assertTrue(df.index.equals(idx2)) - - def test_nested_dict_frame_constructor(self): - rng = period_range('1/1/2000', periods=5) - df = DataFrame(randn(10, 5), columns=rng) - - data = {} - for col in df.columns: - for row in df.index: - data.setdefault(col, {})[row] = df.get_value(row, col) - - result = DataFrame(data, columns=rng) - tm.assert_frame_equal(result, df) - - data = {} - for col in df.columns: - for row in df.index: - data.setdefault(row, {})[col] = df.get_value(row, col) - - result = DataFrame(data, index=rng).T - tm.assert_frame_equal(result, df) - - def test_frame_to_time_stamp(self): - K = 5 - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - df = DataFrame(randn(len(index), K), index=index) - df['mix'] = 'a' - - exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') - result = df.to_timestamp('D', 'end') - self.assertTrue(result.index.equals(exp_index)) - assert_almost_equal(result.values, df.values) - - exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN') - result = df.to_timestamp('D', 'start') - self.assertTrue(result.index.equals(exp_index)) - - def _get_with_delta(delta, freq='A-DEC'): - return 
date_range(to_datetime('1/1/2001') + delta, - to_datetime('12/31/2009') + delta, freq=freq) - - delta = timedelta(hours=23) - result = df.to_timestamp('H', 'end') - exp_index = _get_with_delta(delta) - self.assertTrue(result.index.equals(exp_index)) - - delta = timedelta(hours=23, minutes=59) - result = df.to_timestamp('T', 'end') - exp_index = _get_with_delta(delta) - self.assertTrue(result.index.equals(exp_index)) - - result = df.to_timestamp('S', 'end') - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - self.assertTrue(result.index.equals(exp_index)) - - # columns - df = df.T - - exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') - result = df.to_timestamp('D', 'end', axis=1) - self.assertTrue(result.columns.equals(exp_index)) - assert_almost_equal(result.values, df.values) - - exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN') - result = df.to_timestamp('D', 'start', axis=1) - self.assertTrue(result.columns.equals(exp_index)) - - delta = timedelta(hours=23) - result = df.to_timestamp('H', 'end', axis=1) - exp_index = _get_with_delta(delta) - self.assertTrue(result.columns.equals(exp_index)) - - delta = timedelta(hours=23, minutes=59) - result = df.to_timestamp('T', 'end', axis=1) - exp_index = _get_with_delta(delta) - self.assertTrue(result.columns.equals(exp_index)) - - result = df.to_timestamp('S', 'end', axis=1) - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - self.assertTrue(result.columns.equals(exp_index)) - - # invalid axis - assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2) - assertRaisesRegexp(ValueError, 'Only mult == 1', df.to_timestamp, '5t', axis=1) - - def test_index_duplicate_periods(self): - # monotonic - idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN') - ts = Series(np.random.randn(len(idx)), index=idx) - - result = ts[2007] - expected = ts[1:3] - assert_series_equal(result, expected) - result[:] = 1 
- self.assertTrue((ts[1:3] == 1).all()) - - # not monotonic - idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN') - ts = Series(np.random.randn(len(idx)), index=idx) - - result = ts[2007] - expected = ts[idx == 2007] - assert_series_equal(result, expected) - - def test_index_unique(self): - idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN') - expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN') - self.assert_numpy_array_equal(idx.unique(), expected.values) - self.assertEqual(idx.nunique(), 3) - - idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern') - expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern') - self.assert_numpy_array_equal(idx.unique(), expected.values) - self.assertEqual(idx.nunique(), 3) - - def test_constructor(self): - pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - assert_equal(len(pi), 9) - - pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009') - assert_equal(len(pi), 4 * 9) - - pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') - assert_equal(len(pi), 12 * 9) - - pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009') - assert_equal(len(pi), 365 * 9 + 2) - - pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009') - assert_equal(len(pi), 261 * 9) - - pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00') - assert_equal(len(pi), 365 * 24) - - pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59') - assert_equal(len(pi), 24 * 60) - - pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59') - assert_equal(len(pi), 24 * 60 * 60) - - start = Period('02-Apr-2005', 'B') - i1 = PeriodIndex(start=start, periods=20) - assert_equal(len(i1), 20) - assert_equal(i1.freq, start.freq) - assert_equal(i1[0], start) - - end_intv = Period('2006-12-31', 'W') - i1 = PeriodIndex(end=end_intv, periods=10) - assert_equal(len(i1), 10) - assert_equal(i1.freq, end_intv.freq) - assert_equal(i1[-1], end_intv) - 
- end_intv = Period('2006-12-31', '1w') - i2 = PeriodIndex(end=end_intv, periods=10) - assert_equal(len(i1), len(i2)) - self.assertTrue((i1 == i2).all()) - assert_equal(i1.freq, i2.freq) - - end_intv = Period('2006-12-31', ('w', 1)) - i2 = PeriodIndex(end=end_intv, periods=10) - assert_equal(len(i1), len(i2)) - self.assertTrue((i1 == i2).all()) - assert_equal(i1.freq, i2.freq) - - try: - PeriodIndex(start=start, end=end_intv) - raise AssertionError('Cannot allow mixed freq for start and end') - except ValueError: - pass - - end_intv = Period('2005-05-01', 'B') - i1 = PeriodIndex(start=start, end=end_intv) - - try: - PeriodIndex(start=start) - raise AssertionError( - 'Must specify periods if missing start or end') - except ValueError: - pass - - # infer freq from first element - i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')]) - assert_equal(len(i2), 2) - assert_equal(i2[0], end_intv) - - i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')])) - assert_equal(len(i2), 2) - assert_equal(i2[0], end_intv) - - # Mixed freq should fail - vals = [end_intv, Period('2006-12-31', 'w')] - self.assertRaises(ValueError, PeriodIndex, vals) - vals = np.array(vals) - self.assertRaises(ValueError, PeriodIndex, vals) - - def test_shift(self): - pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010') - - self.assertTrue(pi1.shift(0).equals(pi1)) - - assert_equal(len(pi1), len(pi2)) - assert_equal(pi1.shift(1).values, pi2.values) - - pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008') - assert_equal(len(pi1), len(pi2)) - assert_equal(pi1.shift(-1).values, pi2.values) - - pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010') - assert_equal(len(pi1), len(pi2)) - assert_equal(pi1.shift(1).values, pi2.values) - - pi1 = PeriodIndex(freq='M', 
start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009') - assert_equal(len(pi1), len(pi2)) - assert_equal(pi1.shift(-1).values, pi2.values) - - pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009') - assert_equal(len(pi1), len(pi2)) - assert_equal(pi1.shift(1).values, pi2.values) - - pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009') - pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009') - assert_equal(len(pi1), len(pi2)) - assert_equal(pi1.shift(-1).values, pi2.values) - - def test_shift_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx') - result = idx.shift(1) - expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx') - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - - def test_asfreq(self): - pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001') - pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001') - pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001') - pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001') - pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00') - pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00') - pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00') - - self.assertEqual(pi1.asfreq('Q', 'S'), pi2) - self.assertEqual(pi1.asfreq('Q', 's'), pi2) - self.assertEqual(pi1.asfreq('M', 'start'), pi3) - self.assertEqual(pi1.asfreq('D', 'StarT'), pi4) - self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5) - self.assertEqual(pi1.asfreq('Min', 'S'), pi6) - self.assertEqual(pi1.asfreq('S', 'S'), pi7) - - self.assertEqual(pi2.asfreq('A', 'S'), pi1) - self.assertEqual(pi2.asfreq('M', 'S'), pi3) - self.assertEqual(pi2.asfreq('D', 'S'), pi4) - self.assertEqual(pi2.asfreq('H', 'S'), pi5) - self.assertEqual(pi2.asfreq('Min', 
'S'), pi6) - self.assertEqual(pi2.asfreq('S', 'S'), pi7) - - self.assertEqual(pi3.asfreq('A', 'S'), pi1) - self.assertEqual(pi3.asfreq('Q', 'S'), pi2) - self.assertEqual(pi3.asfreq('D', 'S'), pi4) - self.assertEqual(pi3.asfreq('H', 'S'), pi5) - self.assertEqual(pi3.asfreq('Min', 'S'), pi6) - self.assertEqual(pi3.asfreq('S', 'S'), pi7) - - self.assertEqual(pi4.asfreq('A', 'S'), pi1) - self.assertEqual(pi4.asfreq('Q', 'S'), pi2) - self.assertEqual(pi4.asfreq('M', 'S'), pi3) - self.assertEqual(pi4.asfreq('H', 'S'), pi5) - self.assertEqual(pi4.asfreq('Min', 'S'), pi6) - self.assertEqual(pi4.asfreq('S', 'S'), pi7) - - self.assertEqual(pi5.asfreq('A', 'S'), pi1) - self.assertEqual(pi5.asfreq('Q', 'S'), pi2) - self.assertEqual(pi5.asfreq('M', 'S'), pi3) - self.assertEqual(pi5.asfreq('D', 'S'), pi4) - self.assertEqual(pi5.asfreq('Min', 'S'), pi6) - self.assertEqual(pi5.asfreq('S', 'S'), pi7) - - self.assertEqual(pi6.asfreq('A', 'S'), pi1) - self.assertEqual(pi6.asfreq('Q', 'S'), pi2) - self.assertEqual(pi6.asfreq('M', 'S'), pi3) - self.assertEqual(pi6.asfreq('D', 'S'), pi4) - self.assertEqual(pi6.asfreq('H', 'S'), pi5) - self.assertEqual(pi6.asfreq('S', 'S'), pi7) - - self.assertEqual(pi7.asfreq('A', 'S'), pi1) - self.assertEqual(pi7.asfreq('Q', 'S'), pi2) - self.assertEqual(pi7.asfreq('M', 'S'), pi3) - self.assertEqual(pi7.asfreq('D', 'S'), pi4) - self.assertEqual(pi7.asfreq('H', 'S'), pi5) - self.assertEqual(pi7.asfreq('Min', 'S'), pi6) - - self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo') - self.assertRaises(ValueError, pi1.asfreq, '5t') - - def test_asfreq_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M') - result = idx.asfreq(freq='Q') - expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q') - self.assertTrue(result.equals(expected)) - - def test_period_index_length(self): - pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - assert_equal(len(pi), 9) - - pi = PeriodIndex(freq='Q', start='1/1/2001', 
end='12/1/2009') - assert_equal(len(pi), 4 * 9) - - pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') - assert_equal(len(pi), 12 * 9) - - start = Period('02-Apr-2005', 'B') - i1 = PeriodIndex(start=start, periods=20) - assert_equal(len(i1), 20) - assert_equal(i1.freq, start.freq) - assert_equal(i1[0], start) - - end_intv = Period('2006-12-31', 'W') - i1 = PeriodIndex(end=end_intv, periods=10) - assert_equal(len(i1), 10) - assert_equal(i1.freq, end_intv.freq) - assert_equal(i1[-1], end_intv) - - end_intv = Period('2006-12-31', '1w') - i2 = PeriodIndex(end=end_intv, periods=10) - assert_equal(len(i1), len(i2)) - self.assertTrue((i1 == i2).all()) - assert_equal(i1.freq, i2.freq) - - end_intv = Period('2006-12-31', ('w', 1)) - i2 = PeriodIndex(end=end_intv, periods=10) - assert_equal(len(i1), len(i2)) - self.assertTrue((i1 == i2).all()) - assert_equal(i1.freq, i2.freq) - - try: - PeriodIndex(start=start, end=end_intv) - raise AssertionError('Cannot allow mixed freq for start and end') - except ValueError: - pass - - end_intv = Period('2005-05-01', 'B') - i1 = PeriodIndex(start=start, end=end_intv) - - try: - PeriodIndex(start=start) - raise AssertionError( - 'Must specify periods if missing start or end') - except ValueError: - pass - - # infer freq from first element - i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')]) - assert_equal(len(i2), 2) - assert_equal(i2[0], end_intv) - - i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')])) - assert_equal(len(i2), 2) - assert_equal(i2[0], end_intv) - - # Mixed freq should fail - vals = [end_intv, Period('2006-12-31', 'w')] - self.assertRaises(ValueError, PeriodIndex, vals) - vals = np.array(vals) - self.assertRaises(ValueError, PeriodIndex, vals) - - def test_frame_index_to_string(self): - index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M') - frame = DataFrame(np.random.randn(3, 4), index=index) - - # it works! 
- frame.to_string() - - def test_asfreq_ts(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010') - ts = Series(np.random.randn(len(index)), index=index) - df = DataFrame(np.random.randn(len(index), 3), index=index) - - result = ts.asfreq('D', how='end') - df_result = df.asfreq('D', how='end') - exp_index = index.asfreq('D', how='end') - self.assertEqual(len(result), len(ts)) - self.assertTrue(result.index.equals(exp_index)) - self.assertTrue(df_result.index.equals(exp_index)) - - result = ts.asfreq('D', how='start') - self.assertEqual(len(result), len(ts)) - self.assertTrue(result.index.equals(index.asfreq('D', how='start'))) - - def test_badinput(self): - self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A') - # self.assertRaises(datetools.DateParseError, Period, '-2000', 'A') - # self.assertRaises(datetools.DateParseError, Period, '0', 'A') - - def test_negative_ordinals(self): - p = Period(ordinal=-1000, freq='A') - p = Period(ordinal=0, freq='A') - - idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A') - idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A') - tm.assert_numpy_array_equal(idx1,idx2) - - def test_dti_to_period(self): - dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') - pi1 = dti.to_period() - pi2 = dti.to_period(freq='D') - - self.assertEqual(pi1[0], Period('Jan 2005', freq='M')) - self.assertEqual(pi2[0], Period('1/31/2005', freq='D')) - - self.assertEqual(pi1[-1], Period('Nov 2005', freq='M')) - self.assertEqual(pi2[-1], Period('11/30/2005', freq='D')) - - def test_pindex_slice_index(self): - pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M') - s = Series(np.random.rand(len(pi)), index=pi) - res = s['2010'] - exp = s[0:12] - assert_series_equal(res, exp) - res = s['2011'] - exp = s[12:24] - assert_series_equal(res, exp) - - def test_getitem_day(self): - # GH 6716 - # Confirm DatetimeIndex and PeriodIndex works identically - didx = DatetimeIndex(start='2013/01/01', freq='D', 
periods=400) - pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400) - - for idx in [didx, pidx]: - # getitem against index should raise ValueError - values = ['2014', '2013/02', '2013/01/02', - '2013/02/01 9H', '2013/02/01 09:00'] - for v in values: - - if _np_version_under1p9: - with tm.assertRaises(ValueError): - idx[v] - else: - # GH7116 - # these show deprecations as we are trying - # to slice with non-integer indexers - #with tm.assertRaises(IndexError): - # idx[v] - continue - - s = Series(np.random.rand(len(idx)), index=idx) - assert_series_equal(s['2013/01'], s[0:31]) - assert_series_equal(s['2013/02'], s[31:59]) - assert_series_equal(s['2014'], s[365:]) - - invalid = ['2013/02/01 9H', '2013/02/01 09:00'] - for v in invalid: - with tm.assertRaises(KeyError): - s[v] - - def test_range_slice_day(self): - # GH 6716 - didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400) - pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400) - - for idx in [didx, pidx]: - # slices against index should raise IndexError - values = ['2014', '2013/02', '2013/01/02', - '2013/02/01 9H', '2013/02/01 09:00'] - for v in values: - with tm.assertRaises(IndexError): - idx[v:] - - s = Series(np.random.rand(len(idx)), index=idx) - - assert_series_equal(s['2013/01/02':], s[1:]) - assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5]) - assert_series_equal(s['2013/02':], s[31:]) - assert_series_equal(s['2014':], s[365:]) - - invalid = ['2013/02/01 9H', '2013/02/01 09:00'] - for v in invalid: - with tm.assertRaises(IndexError): - idx[v:] - - def test_getitem_seconds(self): - # GH 6716 - didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) - pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) - - for idx in [didx, pidx]: - # getitem against index should raise ValueError - values = ['2014', '2013/02', '2013/01/02', - '2013/02/01 9H', '2013/02/01 09:00'] - for v in values: - if _np_version_under1p9: - with 
tm.assertRaises(ValueError): - idx[v] - else: - # GH7116 - # these show deprecations as we are trying - # to slice with non-integer indexers - #with tm.assertRaises(IndexError): - # idx[v] - continue - - s = Series(np.random.rand(len(idx)), index=idx) - - assert_series_equal(s['2013/01/01 10:00'], s[3600:3660]) - assert_series_equal(s['2013/01/01 9H'], s[:3600]) - for d in ['2013/01/01', '2013/01', '2013']: - assert_series_equal(s[d], s) - - def test_range_slice_seconds(self): - # GH 6716 - didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) - pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000) - - for idx in [didx, pidx]: - # slices against index should raise IndexError - values = ['2014', '2013/02', '2013/01/02', - '2013/02/01 9H', '2013/02/01 09:00'] - for v in values: - with tm.assertRaises(IndexError): - idx[v:] - - s = Series(np.random.rand(len(idx)), index=idx) - - assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660]) - assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'], s[3600:3960]) - assert_series_equal(s['2013/01/01 10H':], s[3600:]) - assert_series_equal(s[:'2013/01/01 09:30'], s[:1860]) - for d in ['2013/01/01', '2013/01', '2013']: - assert_series_equal(s[d:], s) - - def test_range_slice_outofbounds(self): - # GH 5407 - didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10) - pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10) - - for idx in [didx, pidx]: - df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx) - empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units']) - empty['units'] = empty['units'].astype('int64') - - tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty) - tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2]) - tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2]) - tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty) - tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty) - 
tm.assert_frame_equal(df['2013-06':'2013-09'], empty) - tm.assert_frame_equal(df['2013-11':'2013-12'], empty) - - def test_pindex_fieldaccessor_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D') - self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012])) - self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4])) - - def test_pindex_qaccess(self): - pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q') - s = Series(np.random.rand(len(pi)), index=pi).cumsum() - # Todo: fix these accessors! - self.assertEqual(s['05Q4'], s[2]) - - def test_period_dt64_round_trip(self): - dti = date_range('1/1/2000', '1/7/2002', freq='B') - pi = dti.to_period() - self.assertTrue(pi.to_timestamp().equals(dti)) - - dti = date_range('1/1/2000', '1/7/2002', freq='B') - pi = dti.to_period(freq='H') - self.assertTrue(pi.to_timestamp().equals(dti)) - - def test_to_period_quarterly(self): - # make sure we can make the round trip - for month in MONTHS: - freq = 'Q-%s' % month - rng = period_range('1989Q3', '1991Q3', freq=freq) - stamps = rng.to_timestamp() - result = stamps.to_period(freq) - self.assertTrue(rng.equals(result)) - - def test_to_period_quarterlyish(self): - offsets = ['BQ', 'QS', 'BQS'] - for off in offsets: - rng = date_range('01-Jan-2012', periods=8, freq=off) - prng = rng.to_period() - self.assertEqual(prng.freq, 'Q-DEC') - - def test_to_period_annualish(self): - offsets = ['BA', 'AS', 'BAS'] - for off in offsets: - rng = date_range('01-Jan-2012', periods=8, freq=off) - prng = rng.to_period() - self.assertEqual(prng.freq, 'A-DEC') - - def test_to_period_monthish(self): - offsets = ['MS', 'EOM', 'BM'] - for off in offsets: - rng = date_range('01-Jan-2012', periods=8, freq=off) - prng = rng.to_period() - self.assertEqual(prng.freq, 'M') - - def test_no_multiples(self): - self.assertRaises(ValueError, period_range, '1989Q3', periods=10, - freq='2Q') - - self.assertRaises(ValueError, 
period_range, '1989', periods=10, - freq='2A') - self.assertRaises(ValueError, Period, '1989', freq='2A') - - # def test_pindex_multiples(self): - # pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M') - # self.assertEqual(pi[0], Period('1/1/10', '2M')) - # self.assertEqual(pi[1], Period('3/1/10', '2M')) - - # self.assertEqual(pi[0].asfreq('6M'), pi[2].asfreq('6M')) - # self.assertEqual(pi[0].asfreq('A'), pi[2].asfreq('A')) - - # self.assertEqual(pi[0].asfreq('M', how='S'), - # Period('Jan 2010', '1M')) - # self.assertEqual(pi[0].asfreq('M', how='E'), - # Period('Feb 2010', '1M')) - # self.assertEqual(pi[1].asfreq('M', how='S'), - # Period('Mar 2010', '1M')) - - # i = Period('1/1/2010 12:05:18', '5S') - # self.assertEqual(i, Period('1/1/2010 12:05:15', '5S')) - - # i = Period('1/1/2010 12:05:18', '5S') - # self.assertEqual(i.asfreq('1S', how='E'), - # Period('1/1/2010 12:05:19', '1S')) - - def test_iteration(self): - index = PeriodIndex(start='1/1/10', periods=4, freq='B') - - result = list(index) - tm.assertIsInstance(result[0], Period) - self.assertEqual(result[0].freq, index.freq) - - def test_take(self): - index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx') - expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7), - datetime(2010, 1, 9), datetime(2010, 1, 13)], - freq='D', name='idx') - - taken1 = index.take([5, 6, 8, 12]) - taken2 = index[[5, 6, 8, 12]] - - for taken in [taken1, taken2]: - self.assertTrue(taken.equals(expected)) - tm.assertIsInstance(taken, PeriodIndex) - self.assertEqual(taken.freq, index.freq) - self.assertEqual(taken.name, expected.name) - - def test_joins(self): - index = period_range('1/1/2000', '1/20/2000', freq='D') - - for kind in ['inner', 'outer', 'left', 'right']: - joined = index.join(index[:-5], how=kind) - - tm.assertIsInstance(joined, PeriodIndex) - self.assertEqual(joined.freq, index.freq) - - def test_join_self(self): - index = period_range('1/1/2000', '1/20/2000', freq='D') - - for 
kind in ['inner', 'outer', 'left', 'right']: - res = index.join(index, how=kind) - self.assertIs(index, res) - - def test_join_does_not_recur(self): - df = tm.makeCustomDataframe(3, 2, data_gen_f=lambda *args: - np.random.randint(2), c_idx_type='p', - r_idx_type='dt') - s = df.iloc[:2, 0] - - res = s.index.join(df.columns, how='outer') - expected = Index([s.index[0], s.index[1], - df.columns[0], df.columns[1]], object) - tm.assert_index_equal(res, expected) - - def test_align_series(self): - rng = period_range('1/1/2000', '1/1/2010', freq='A') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts + ts[::2] - expected = ts + ts - expected[1::2] = np.nan - assert_series_equal(result, expected) - - result = ts + _permute(ts[::2]) - assert_series_equal(result, expected) - - # it works! - for kind in ['inner', 'outer', 'left', 'right']: - ts.align(ts[::2], join=kind) - with assertRaisesRegexp(ValueError, 'Only like-indexed'): - ts + ts.asfreq('D', how="end") - - def test_align_frame(self): - rng = period_range('1/1/2000', '1/1/2010', freq='A') - ts = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts + ts[::2] - expected = ts + ts - expected.values[1::2] = np.nan - tm.assert_frame_equal(result, expected) - - result = ts + _permute(ts[::2]) - tm.assert_frame_equal(result, expected) - - def test_union(self): - index = period_range('1/1/2000', '1/20/2000', freq='D') - - result = index[:-5].union(index[10:]) - self.assertTrue(result.equals(index)) - - # not in order - result = _permute(index[:-5]).union(_permute(index[10:])) - self.assertTrue(result.equals(index)) - - # raise if different frequencies - index = period_range('1/1/2000', '1/20/2000', freq='D') - index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED') - self.assertRaises(ValueError, index.union, index2) - - self.assertRaises(ValueError, index.join, index.to_timestamp()) - - def test_intersection(self): - index = period_range('1/1/2000', '1/20/2000', freq='D') - - result = 
index[:-5].intersection(index[10:]) - self.assertTrue(result.equals(index[10:-5])) - - # not in order - left = _permute(index[:-5]) - right = _permute(index[10:]) - result = left.intersection(right).sort_values() - self.assertTrue(result.equals(index[10:-5])) - - # raise if different frequencies - index = period_range('1/1/2000', '1/20/2000', freq='D') - index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED') - self.assertRaises(ValueError, index.intersection, index2) - - def test_fields(self): - # year, month, day, hour, minute - # second, weekofyear, week, dayofweek, weekday, dayofyear, quarter - # qyear - pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20') - self._check_all_fields(pi) - - pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00', - end='12/31/2001 00:05:00') - self._check_all_fields(pi) - - end_intv = Period('2006-12-31', 'W') - i1 = PeriodIndex(end=end_intv, periods=10) - self._check_all_fields(i1) - - def _check_all_fields(self, periodindex): - fields = ['year', 'month', 'day', 'hour', 'minute', - 'second', 'weekofyear', 'week', 'dayofweek', - 'weekday', 'dayofyear', 'quarter', 'qyear', 'days_in_month'] - - periods = list(periodindex) - - for field in fields: - field_idx = getattr(periodindex, field) - assert_equal(len(periodindex), len(field_idx)) - for x, val in zip(periods, field_idx): - assert_equal(getattr(x, field), val) - - def test_is_full(self): - 
index = PeriodIndex([2005, 2007, 2009], freq='A') - self.assertFalse(index.is_full) - - index = PeriodIndex([2005, 2006, 2007], freq='A') - self.assertTrue(index.is_full) - - index = PeriodIndex([2005, 2005, 2007], freq='A') - self.assertFalse(index.is_full) - - index = PeriodIndex([2005, 2005, 2006], freq='A') - self.assertTrue(index.is_full) - - index = PeriodIndex([2006, 2005, 2005], freq='A') - self.assertRaises(ValueError, getattr, index, 'is_full') - - self.assertTrue(index[:0].is_full) - - def test_map(self): - index = PeriodIndex([2005, 2007, 2009], freq='A') - result = index.map(lambda x: x + 1) - expected = index + 1 - self.assertTrue(result.equals(expected)) - - result = index.map(lambda x: x.ordinal) - exp = [x.ordinal for x in index] - tm.assert_numpy_array_equal(result, exp) - - def test_map_with_string_constructor(self): - raw = [2005, 2007, 2009] - index = PeriodIndex(raw, freq='A') - types = str, - - if compat.PY3: - # unicode - types += compat.text_type, - - for t in types: - expected = np.array(lmap(t, raw), dtype=object) - res = index.map(t) - - # should return an array - tm.assertIsInstance(res, np.ndarray) - - # preserve element types - self.assertTrue(all(isinstance(resi, t) for resi in res)) - - # dtype should be object - self.assertEqual(res.dtype, np.dtype('object').type) - - # lastly, values should compare equal - tm.assert_numpy_array_equal(res, expected) - - def test_convert_array_of_periods(self): - rng = period_range('1/1/2000', periods=20, freq='D') - periods = list(rng) - - result = pd.Index(periods) - tm.assertIsInstance(result, PeriodIndex) - - def test_with_multi_index(self): - # #1705 - index = date_range('1/1/2012', periods=4, freq='12H') - index_as_arrays = [index.to_period(freq='D'), index.hour] - - s = Series([0, 1, 2, 3], index_as_arrays) - - tm.assertIsInstance(s.index.levels[0], PeriodIndex) - - tm.assertIsInstance(s.index.values[0][0], Period) - - def test_to_datetime_1703(self): - index = period_range('1/1/2012', 
periods=4, freq='D') - - result = index.to_datetime() - self.assertEqual(result[0], Timestamp('1/1/2012')) - - def test_get_loc_msg(self): - idx = period_range('2000-1-1', freq='A', periods=10) - bad_period = Period('2012', 'A') - self.assertRaises(KeyError, idx.get_loc, bad_period) - - try: - idx.get_loc(bad_period) - except KeyError as inst: - self.assertEqual(inst.args[0], bad_period) - - def test_append_concat(self): - # #1815 - d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC') - d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC') - - s1 = Series(np.random.randn(10), d1) - s2 = Series(np.random.randn(10), d2) - - s1 = s1.to_period() - s2 = s2.to_period() - - # drops index - result = pd.concat([s1, s2]) - tm.assertIsInstance(result.index, PeriodIndex) - self.assertEqual(result.index[0], s1.index[0]) - - def test_pickle_freq(self): - # GH2891 - prng = period_range('1/1/2011', '1/1/2012', freq='M') - new_prng = self.round_trip_pickle(prng) - self.assertEqual(new_prng.freq,'M') - - def test_slice_keep_name(self): - idx = period_range('20010101', periods=10, freq='D', name='bob') - self.assertEqual(idx.name, idx[1:].name) - - def test_factorize(self): - idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', - '2014-03', '2014-03'], freq='M') - - exp_arr = np.array([0, 0, 1, 1, 2, 2]) - exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') - - arr, idx = idx1.factorize() - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - arr, idx = idx1.factorize(sort=True) - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', - '2014-03', '2014-01'], freq='M') - - exp_arr = np.array([2, 2, 1, 0, 2, 0]) - arr, idx = idx2.factorize(sort=True) - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - exp_arr = np.array([0, 0, 1, 2, 0, 2]) - exp_idx = PeriodIndex(['2014-03', 
'2014-02', '2014-01'], freq='M') - arr, idx = idx2.factorize() - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - def test_recreate_from_data(self): - for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']: - org = PeriodIndex(start='2001/04/01', freq=o, periods=1) - idx = PeriodIndex(org.values, freq=o) - self.assertTrue(idx.equals(org)) - - def test_combine_first(self): - # GH 3367 - didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M') - pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M') - # check to be consistent with DatetimeIndex - for idx in [didx, pidx]: - a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx) - b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx) - - result = a.combine_first(b) - expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64) - tm.assert_series_equal(result, expected) - - def test_searchsorted(self): - pidx = pd.period_range('2014-01-01', periods=10, freq='D') - self.assertEqual( - pidx.searchsorted(pd.Period('2014-01-01', freq='D')), 0) - self.assertRaisesRegexp( - ValueError, 'Different period frequency: H', - lambda: pidx.searchsorted(pd.Period('2014-01-01', freq='H'))) - - def test_round_trip(self): - - p = Period('2000Q1') - new_p = self.round_trip_pickle(p) - self.assertEqual(new_p, p) - -def _permute(obj): - return obj.take(np.random.permutation(len(obj))) - - -class TestMethods(tm.TestCase): - "Base test class for MaskedArrays." 
- - def test_add(self): - dt1 = Period(freq='D', year=2008, month=1, day=1) - dt2 = Period(freq='D', year=2008, month=1, day=2) - assert_equal(dt1 + 1, dt2) - # - # GH 4731 - msg = "unsupported operand type\(s\)" - with tm.assertRaisesRegexp(TypeError, msg): - dt1 + "str" - - with tm.assertRaisesRegexp(TypeError, msg): - dt1 + dt2 - - def test_add_offset(self): - # freq is DateOffset - p = Period('2011', freq='A') - self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq='A')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - p = Period('2011-03', freq='M') - self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq='M')) - self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq='M')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - # freq is Tick - p = Period('2011-04-01', freq='D') - self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq='D')) - self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq='D')) - self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq='D')) - self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq='D')) - self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq='D')) - self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq='D')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - p = Period('2011-04-01 09:00', freq='H') - self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq='H')) - self.assertEqual(p + offsets.Hour(3), 
Period('2011-04-01 12:00', freq='H')) - self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq='H')) - self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq='H')) - self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq='H')) - self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq='H')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - def test_add_offset_nat(self): - # freq is DateOffset - p = Period('NaT', freq='A') - for o in [offsets.YearEnd(2)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p + o - - p = Period('NaT', freq='M') - for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - # freq is Tick - p = Period('NaT', freq='D') - for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), - np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - p = Period('NaT', freq='H') - for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), - np.timedelta64(3600, 's'), timedelta(minutes=120), - timedelta(days=4, minutes=180)]: - self.assertEqual((p + o).ordinal, tslib.iNaT) - - for 
o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): - p + o - - def test_sub_offset(self): - # freq is DateOffset - p = Period('2011', freq='A') - self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq='A')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o - - p = Period('2011-03', freq='M') - self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq='M')) - self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq='M')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o - - # freq is Tick - p = Period('2011-04-01', freq='D') - self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq='D')) - self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq='D')) - self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq='D')) - self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq='D')) - self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq='D')) - self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq='D')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaises(ValueError): - p - o - - p = Period('2011-04-01 09:00', freq='H') - self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq='H')) - self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq='H')) - self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq='H')) - self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq='H')) - self.assertEqual(p - timedelta(minutes=120), 
Period('2011-04-01 07:00', freq='H')) - self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq='H')) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaises(ValueError): - p - o - - def test_sub_offset_nat(self): - # freq is DateOffset - p = Period('NaT', freq='A') - for o in [offsets.YearEnd(2)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o - - p = Period('NaT', freq='M') - for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(365, 'D'), timedelta(365)]: - with tm.assertRaises(ValueError): - p - o - - # freq is Tick - p = Period('NaT', freq='D') - for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'), - np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(4, 'h'), timedelta(hours=23)]: - with tm.assertRaises(ValueError): - p - o - - p = Period('NaT', freq='H') - for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'), - np.timedelta64(3600, 's'), timedelta(minutes=120), - timedelta(days=4, minutes=180)]: - self.assertEqual((p - o).ordinal, tslib.iNaT) - - for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(), - np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]: - with tm.assertRaises(ValueError): - p - o - - def test_nat_ops(self): - p = Period('NaT', freq='M') - self.assertEqual((p + 1).ordinal, tslib.iNaT) - self.assertEqual((p - 1).ordinal, tslib.iNaT) - self.assertEqual((p - Period('2011-01', freq='M')).ordinal, 
tslib.iNaT) - self.assertEqual((Period('2011-01', freq='M') - p).ordinal, tslib.iNaT) - - def test_pi_ops_nat(self): - idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx') - result = idx + 2 - expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx') - self.assertTrue(result.equals(expected)) - - result2 = result - 2 - self.assertTrue(result2.equals(idx)) - - msg = "unsupported operand type\(s\)" - with tm.assertRaisesRegexp(TypeError, msg): - idx + "str" - - -class TestPeriodRepresentation(tm.TestCase): - """ - Wish to match NumPy units - """ - - def test_annual(self): - self._check_freq('A', 1970) - - def test_monthly(self): - self._check_freq('M', '1970-01') - - def test_weekly(self): - self._check_freq('W-THU', '1970-01-01') - - def test_daily(self): - self._check_freq('D', '1970-01-01') - - def test_business_daily(self): - self._check_freq('B', '1970-01-01') - - def test_hourly(self): - self._check_freq('H', '1970-01-01') - - def test_minutely(self): - self._check_freq('T', '1970-01-01') - - def test_secondly(self): - self._check_freq('S', '1970-01-01') - - def test_millisecondly(self): - self._check_freq('L', '1970-01-01') - - def test_microsecondly(self): - self._check_freq('U', '1970-01-01') - - def test_nanosecondly(self): - self._check_freq('N', '1970-01-01') - - def _check_freq(self, freq, base_date): - rng = PeriodIndex(start=base_date, periods=10, freq=freq) - exp = np.arange(10, dtype=np.int64) - self.assert_numpy_array_equal(rng.values, exp) - - def test_negone_ordinals(self): - freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S'] - - period = Period(ordinal=-1, freq='D') - for freq in freqs: - repr(period.asfreq(freq)) - - for freq in freqs: - period = Period(ordinal=-1, freq=freq) - repr(period) - self.assertEqual(period.year, 1969) - - period = Period(ordinal=-1, freq='B') - repr(period) - period = Period(ordinal=-1, freq='W') - repr(period) - - -class TestComparisons(tm.TestCase): - def 
setUp(self): - self.january1 = Period('2000-01', 'M') - self.january2 = Period('2000-01', 'M') - self.february = Period('2000-02', 'M') - self.march = Period('2000-03', 'M') - self.day = Period('2012-01-01', 'D') - - def test_equal(self): - self.assertEqual(self.january1, self.january2) - - def test_equal_Raises_Value(self): - with tm.assertRaises(ValueError): - self.january1 == self.day - - def test_notEqual(self): - self.assertNotEqual(self.january1, 1) - self.assertNotEqual(self.january1, self.february) - - def test_greater(self): - self.assertTrue(self.february > self.january1) - - def test_greater_Raises_Value(self): - with tm.assertRaises(ValueError): - self.january1 > self.day - - def test_greater_Raises_Type(self): - with tm.assertRaises(TypeError): - self.january1 > 1 - - def test_greaterEqual(self): - self.assertTrue(self.january1 >= self.january2) - - def test_greaterEqual_Raises_Value(self): - with tm.assertRaises(ValueError): - self.january1 >= self.day - with tm.assertRaises(TypeError): - print(self.january1 >= 1) - - def test_smallerEqual(self): - self.assertTrue(self.january1 <= self.january2) - - def test_smallerEqual_Raises_Value(self): - with tm.assertRaises(ValueError): - self.january1 <= self.day - - def test_smallerEqual_Raises_Type(self): - with tm.assertRaises(TypeError): - self.january1 <= 1 - - def test_smaller(self): - self.assertTrue(self.january1 < self.february) - - def test_smaller_Raises_Value(self): - with tm.assertRaises(ValueError): - self.january1 < self.day - - def test_smaller_Raises_Type(self): - with tm.assertRaises(TypeError): - self.january1 < 1 - - def test_sort(self): - periods = [self.march, self.january1, self.february] - correctPeriods = [self.january1, self.february, self.march] - self.assertEqual(sorted(periods), correctPeriods) - - def test_period_nat_comp(self): - p_nat = Period('NaT', freq='D') - p = Period('2011-01-01', freq='D') - - nat = pd.Timestamp('NaT') - t = pd.Timestamp('2011-01-01') - # confirm 
Period('NaT') work identical with Timestamp('NaT') - for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), - (nat, t), (t, nat), (nat, nat)]: - self.assertEqual(left < right, False) - self.assertEqual(left > right, False) - self.assertEqual(left == right, False) - self.assertEqual(left != right, True) - self.assertEqual(left <= right, False) - self.assertEqual(left >= right, False) - - def test_pi_nat_comp(self): - idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq='M') - - result = idx1 > Period('2011-02', freq='M') - self.assert_numpy_array_equal(result, np.array([False, False, False, True])) - - result = idx1 == Period('NaT', freq='M') - self.assert_numpy_array_equal(result, np.array([False, False, False, False])) - - result = idx1 != Period('NaT', freq='M') - self.assert_numpy_array_equal(result, np.array([True, True, True, True])) - - idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='M') - result = idx1 < idx2 - self.assert_numpy_array_equal(result, np.array([True, False, False, False])) - - result = idx1 == idx1 - self.assert_numpy_array_equal(result, np.array([True, True, False, True])) - - result = idx1 != idx1 - self.assert_numpy_array_equal(result, np.array([False, False, True, False])) - - -if __name__ == '__main__': - import nose - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py deleted file mode 100644 index a9837e2794d58..0000000000000 --- a/pandas/tseries/tests/test_timeseries.py +++ /dev/null @@ -1,4655 +0,0 @@ -# pylint: disable-msg=E1101,W0612 -import calendar -from datetime import datetime, time, timedelta -import sys -import operator - -import nose - -import numpy as np -randn = np.random.randn - -from pandas import (Index, Series, DataFrame, - isnull, date_range, Timestamp, Period, DatetimeIndex, - Int64Index, to_datetime, bdate_range, Float64Index, TimedeltaIndex, NaT) - 
-import pandas.core.datetools as datetools -import pandas.tseries.offsets as offsets -import pandas.tseries.tools as tools -import pandas.tseries.frequencies as frequencies -import pandas as pd - -from pandas.util.testing import assert_series_equal, assert_almost_equal -import pandas.util.testing as tm - -from pandas.tslib import NaT, iNaT -import pandas.lib as lib -import pandas.tslib as tslib - -import pandas.index as _index - -from pandas.compat import range, long, StringIO, lrange, lmap, zip, product -from numpy.random import rand -from pandas.util.testing import assert_frame_equal -from pandas.io.common import PerformanceWarning -import pandas.compat as compat -import pandas.core.common as com -from pandas import concat -from pandas import _np_version_under1p8 - -from numpy.testing.decorators import slow - - -def _skip_if_has_locale(): - import locale - lang, _ = locale.getlocale() - if lang is not None: - raise nose.SkipTest("Specific locale is set {0}".format(lang)) - - -class TestTimeSeriesDuplicates(tm.TestCase): - _multiprocess_can_split_ = True - - def setUp(self): - dates = [datetime(2000, 1, 2), datetime(2000, 1, 2), - datetime(2000, 1, 2), datetime(2000, 1, 3), - datetime(2000, 1, 3), datetime(2000, 1, 3), - datetime(2000, 1, 4), datetime(2000, 1, 4), - datetime(2000, 1, 4), datetime(2000, 1, 5)] - - self.dups = Series(np.random.randn(len(dates)), index=dates) - - def test_constructor(self): - tm.assertIsInstance(self.dups, Series) - tm.assertIsInstance(self.dups.index, DatetimeIndex) - - def test_is_unique_monotonic(self): - self.assertFalse(self.dups.index.is_unique) - - def test_index_unique(self): - uniques = self.dups.index.unique() - expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3), - datetime(2000, 1, 4), datetime(2000, 1, 5)]) - self.assertEqual(uniques.dtype, 'M8[ns]') # sanity - self.assertTrue(uniques.equals(expected)) - self.assertEqual(self.dups.index.nunique(), 4) - - # #2563 - self.assertTrue(isinstance(uniques, 
DatetimeIndex)) - - dups_local = self.dups.index.tz_localize('US/Eastern') - dups_local.name = 'foo' - result = dups_local.unique() - expected = DatetimeIndex(expected, tz='US/Eastern') - self.assertTrue(result.tz is not None) - self.assertEqual(result.name, 'foo') - self.assertTrue(result.equals(expected)) - - # NaT, note this is excluded - arr = [ 1370745748 + t for t in range(20) ] + [iNaT] - idx = DatetimeIndex(arr * 3) - self.assertTrue(idx.unique().equals(DatetimeIndex(arr))) - self.assertEqual(idx.nunique(), 20) - self.assertEqual(idx.nunique(dropna=False), 21) - - arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT] - idx = DatetimeIndex(arr * 3) - self.assertTrue(idx.unique().equals(DatetimeIndex(arr))) - self.assertEqual(idx.nunique(), 20) - self.assertEqual(idx.nunique(dropna=False), 21) - - - def test_index_dupes_contains(self): - d = datetime(2011, 12, 5, 20, 30) - ix = DatetimeIndex([d, d]) - self.assertTrue(d in ix) - - def test_duplicate_dates_indexing(self): - ts = self.dups - - uniques = ts.index.unique() - for date in uniques: - result = ts[date] - - mask = ts.index == date - total = (ts.index == date).sum() - expected = ts[mask] - if total > 1: - assert_series_equal(result, expected) - else: - assert_almost_equal(result, expected[0]) - - cp = ts.copy() - cp[date] = 0 - expected = Series(np.where(mask, 0, ts), index=ts.index) - assert_series_equal(cp, expected) - - self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6)) - - # new index - ts[datetime(2000,1,6)] = 0 - self.assertEqual(ts[datetime(2000,1,6)], 0) - - def test_range_slice(self): - idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000', - '1/4/2000']) - - ts = Series(np.random.randn(len(idx)), index=idx) - - result = ts['1/2/2000':] - expected = ts[1:] - assert_series_equal(result, expected) - - result = ts['1/2/2000':'1/3/2000'] - expected = ts[1:4] - assert_series_equal(result, expected) - - def 
test_groupby_average_dup_values(self): - result = self.dups.groupby(level=0).mean() - expected = self.dups.groupby(self.dups.index).mean() - assert_series_equal(result, expected) - - def test_indexing_over_size_cutoff(self): - import datetime - # #1821 - - old_cutoff = _index._SIZE_CUTOFF - try: - _index._SIZE_CUTOFF = 1000 - - # create large list of non periodic datetime - dates = [] - sec = datetime.timedelta(seconds=1) - half_sec = datetime.timedelta(microseconds=500000) - d = datetime.datetime(2011, 12, 5, 20, 30) - n = 1100 - for i in range(n): - dates.append(d) - dates.append(d + sec) - dates.append(d + sec + half_sec) - dates.append(d + sec + sec + half_sec) - d += 3 * sec - - # duplicate some values in the list - duplicate_positions = np.random.randint(0, len(dates) - 1, 20) - for p in duplicate_positions: - dates[p + 1] = dates[p] - - df = DataFrame(np.random.randn(len(dates), 4), - index=dates, - columns=list('ABCD')) - - pos = n * 3 - timestamp = df.index[pos] - self.assertIn(timestamp, df.index) - - # it works! 
- df.ix[timestamp] - self.assertTrue(len(df.ix[[timestamp]]) > 0) - finally: - _index._SIZE_CUTOFF = old_cutoff - - def test_indexing_unordered(self): - # GH 2437 - rng = date_range(start='2011-01-01', end='2011-01-15') - ts = Series(randn(len(rng)), index=rng) - ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]]) - - for t in ts.index: - s = str(t) - expected = ts[t] - result = ts2[t] - self.assertTrue(expected == result) - - # GH 3448 (ranges) - def compare(slobj): - result = ts2[slobj].copy() - result = result.sort_index() - expected = ts[slobj] - assert_series_equal(result,expected) - - compare(slice('2011-01-01','2011-01-15')) - compare(slice('2010-12-30','2011-01-15')) - compare(slice('2011-01-01','2011-01-16')) - - # partial ranges - compare(slice('2011-01-01','2011-01-6')) - compare(slice('2011-01-06','2011-01-8')) - compare(slice('2011-01-06','2011-01-12')) - - # single values - result = ts2['2011'].sort_index() - expected = ts['2011'] - assert_series_equal(result,expected) - - # diff freq - rng = date_range(datetime(2005, 1, 1), periods=20, freq='M') - ts = Series(np.arange(len(rng)), index=rng) - ts = ts.take(np.random.permutation(20)) - - result = ts['2005'] - for t in result.index: - self.assertTrue(t.year == 2005) - - def test_indexing(self): - - idx = date_range("2001-1-1", periods=20, freq='M') - ts = Series(np.random.rand(len(idx)),index=idx) - - # getting - - # GH 3070, make sure semantics work on Series/Frame - expected = ts['2001'] - expected.name = 'A' - - df = DataFrame(dict(A = ts)) - result = df['2001']['A'] - assert_series_equal(expected, result) - - # setting - ts['2001'] = 1 - expected = ts['2001'] - expected.name = 'A' - - df.loc['2001','A'] = 1 - - result = df['2001']['A'] - assert_series_equal(expected, result) - - # GH3546 (not including times on the last day) - idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H') - ts = Series(lrange(len(idx)), index=idx) - expected = ts['2013-05'] - assert_series_equal(expected, ts) - - 
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S') - ts = Series(lrange(len(idx)), index=idx) - expected = ts['2013-05'] - assert_series_equal(expected,ts) - - idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))] - ts = Series(lrange(len(idx)), index=idx) - expected = ts['2013'] - assert_series_equal(expected,ts) - - # GH 3925, indexing with a seconds resolution string / datetime object - df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s')) - expected = df.loc[[df.index[2]]] - result = df['2012-01-02 18:01:02'] - assert_frame_equal(result,expected) - - # this is a single date, so will raise - self.assertRaises(KeyError, df.__getitem__, df.index[2],) - - def test_recreate_from_data(self): - freqs = ['M', 'Q', 'A', 'D', 'B', 'BH', 'T', 'S', 'L', 'U', 'H', 'N', 'C'] - - for f in freqs: - org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1) - idx = DatetimeIndex(org, freq=f) - self.assertTrue(idx.equals(org)) - - org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1) - idx = DatetimeIndex(org, freq=f, tz='US/Pacific') - self.assertTrue(idx.equals(org)) - - -def assert_range_equal(left, right): - assert(left.equals(right)) - assert(left.freq == right.freq) - assert(left.tz == right.tz) - - -class TestTimeSeries(tm.TestCase): - _multiprocess_can_split_ = True - - def test_is_(self): - dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') - self.assertTrue(dti.is_(dti)) - self.assertTrue(dti.is_(dti.view())) - self.assertFalse(dti.is_(dti.copy())) - - def test_dti_slicing(self): - dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') - dti2 = dti[[1, 3, 5]] - - v1 = dti2[0] - v2 = dti2[1] - v3 = dti2[2] - - self.assertEqual(v1, Timestamp('2/28/2005')) - self.assertEqual(v2, Timestamp('4/30/2005')) - self.assertEqual(v3, Timestamp('6/30/2005')) - - # don't 
carry freq through irregular slicing - self.assertIsNone(dti2.freq) - - def test_pass_datetimeindex_to_index(self): - # Bugs in #1396 - rng = date_range('1/1/2000', '3/1/2000') - idx = Index(rng, dtype=object) - - expected = Index(rng.to_pydatetime(), dtype=object) - - self.assert_numpy_array_equal(idx.values, expected.values) - - def test_contiguous_boolean_preserve_freq(self): - rng = date_range('1/1/2000', '3/1/2000', freq='B') - - mask = np.zeros(len(rng), dtype=bool) - mask[10:20] = True - - masked = rng[mask] - expected = rng[10:20] - self.assertIsNotNone(expected.freq) - assert_range_equal(masked, expected) - - mask[22] = True - masked = rng[mask] - self.assertIsNone(masked.freq) - - def test_getitem_median_slice_bug(self): - index = date_range('20090415', '20090519', freq='2B') - s = Series(np.random.randn(13), index=index) - - indexer = [slice(6, 7, None)] - result = s[indexer] - expected = s[indexer[0]] - assert_series_equal(result, expected) - - def test_series_box_timestamp(self): - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng) - - tm.assertIsInstance(s[5], Timestamp) - - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng, index=rng) - tm.assertIsInstance(s[5], Timestamp) - - tm.assertIsInstance(s.iat[5], Timestamp) - - def test_date_range_ambiguous_arguments(self): - # #2538 - start = datetime(2011, 1, 1, 5, 3, 40) - end = datetime(2011, 1, 1, 8, 9, 40) - - self.assertRaises(ValueError, date_range, start, end, - freq='s', periods=10) - - def test_timestamp_to_datetime(self): - tm._skip_if_no_pytz() - rng = date_range('20090415', '20090519', - tz='US/Eastern') - - stamp = rng[0] - dtval = stamp.to_pydatetime() - self.assertEqual(stamp, dtval) - self.assertEqual(stamp.tzinfo, dtval.tzinfo) - - def test_timestamp_to_datetime_dateutil(self): - tm._skip_if_no_pytz() - rng = date_range('20090415', '20090519', - tz='dateutil/US/Eastern') - - stamp = rng[0] - dtval = stamp.to_pydatetime() - self.assertEqual(stamp, dtval) 
- self.assertEqual(stamp.tzinfo, dtval.tzinfo) - - def test_timestamp_to_datetime_explicit_pytz(self): - tm._skip_if_no_pytz() - import pytz - rng = date_range('20090415', '20090519', - tz=pytz.timezone('US/Eastern')) - - stamp = rng[0] - dtval = stamp.to_pydatetime() - self.assertEqual(stamp, dtval) - self.assertEqual(stamp.tzinfo, dtval.tzinfo) - - def test_timestamp_to_datetime_explicit_dateutil(self): - tm._skip_if_windows_python_3() - tm._skip_if_no_dateutil() - from pandas.tslib import _dateutil_gettz as gettz - rng = date_range('20090415', '20090519', - tz=gettz('US/Eastern')) - - stamp = rng[0] - dtval = stamp.to_pydatetime() - self.assertEqual(stamp, dtval) - self.assertEqual(stamp.tzinfo, dtval.tzinfo) - - def test_index_convert_to_datetime_array(self): - tm._skip_if_no_pytz() - - def _check_rng(rng): - converted = rng.to_pydatetime() - tm.assertIsInstance(converted, np.ndarray) - for x, stamp in zip(converted, rng): - tm.assertIsInstance(x, datetime) - self.assertEqual(x, stamp.to_pydatetime()) - self.assertEqual(x.tzinfo, stamp.tzinfo) - - rng = date_range('20090415', '20090519') - rng_eastern = date_range('20090415', '20090519', tz='US/Eastern') - rng_utc = date_range('20090415', '20090519', tz='utc') - - _check_rng(rng) - _check_rng(rng_eastern) - _check_rng(rng_utc) - - def test_index_convert_to_datetime_array_explicit_pytz(self): - tm._skip_if_no_pytz() - import pytz - - def _check_rng(rng): - converted = rng.to_pydatetime() - tm.assertIsInstance(converted, np.ndarray) - for x, stamp in zip(converted, rng): - tm.assertIsInstance(x, datetime) - self.assertEqual(x, stamp.to_pydatetime()) - self.assertEqual(x.tzinfo, stamp.tzinfo) - - rng = date_range('20090415', '20090519') - rng_eastern = date_range('20090415', '20090519', tz=pytz.timezone('US/Eastern')) - rng_utc = date_range('20090415', '20090519', tz=pytz.utc) - - _check_rng(rng) - _check_rng(rng_eastern) - _check_rng(rng_utc) - - def test_index_convert_to_datetime_array_dateutil(self): - 
tm._skip_if_no_dateutil() - import dateutil - - def _check_rng(rng): - converted = rng.to_pydatetime() - tm.assertIsInstance(converted, np.ndarray) - for x, stamp in zip(converted, rng): - tm.assertIsInstance(x, datetime) - self.assertEqual(x, stamp.to_pydatetime()) - self.assertEqual(x.tzinfo, stamp.tzinfo) - - rng = date_range('20090415', '20090519') - rng_eastern = date_range('20090415', '20090519', tz='dateutil/US/Eastern') - rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.tzutc()) - - _check_rng(rng) - _check_rng(rng_eastern) - _check_rng(rng_utc) - - def test_ctor_str_intraday(self): - rng = DatetimeIndex(['1-1-2000 00:00:01']) - self.assertEqual(rng[0].second, 1) - - def test_series_ctor_plus_datetimeindex(self): - rng = date_range('20090415', '20090519', freq='B') - data = dict((k, 1) for k in rng) - - result = Series(data, index=rng) - self.assertIs(result.index, rng) - - def test_series_pad_backfill_limit(self): - index = np.arange(10) - s = Series(np.random.randn(10), index=index) - - result = s[:2].reindex(index, method='pad', limit=5) - - expected = s[:2].reindex(index).fillna(method='pad') - expected[-3:] = np.nan - assert_series_equal(result, expected) - - result = s[-2:].reindex(index, method='backfill', limit=5) - - expected = s[-2:].reindex(index).fillna(method='backfill') - expected[:3] = np.nan - assert_series_equal(result, expected) - - def test_series_fillna_limit(self): - index = np.arange(10) - s = Series(np.random.randn(10), index=index) - - result = s[:2].reindex(index) - result = result.fillna(method='pad', limit=5) - - expected = s[:2].reindex(index).fillna(method='pad') - expected[-3:] = np.nan - assert_series_equal(result, expected) - - result = s[-2:].reindex(index) - result = result.fillna(method='bfill', limit=5) - - expected = s[-2:].reindex(index).fillna(method='backfill') - expected[:3] = np.nan - assert_series_equal(result, expected) - - def test_frame_pad_backfill_limit(self): - index = np.arange(10) - df = 
DataFrame(np.random.randn(10, 4), index=index) - - result = df[:2].reindex(index, method='pad', limit=5) - - expected = df[:2].reindex(index).fillna(method='pad') - expected.values[-3:] = np.nan - tm.assert_frame_equal(result, expected) - - result = df[-2:].reindex(index, method='backfill', limit=5) - - expected = df[-2:].reindex(index).fillna(method='backfill') - expected.values[:3] = np.nan - tm.assert_frame_equal(result, expected) - - def test_frame_fillna_limit(self): - index = np.arange(10) - df = DataFrame(np.random.randn(10, 4), index=index) - - result = df[:2].reindex(index) - result = result.fillna(method='pad', limit=5) - - expected = df[:2].reindex(index).fillna(method='pad') - expected.values[-3:] = np.nan - tm.assert_frame_equal(result, expected) - - result = df[-2:].reindex(index) - result = result.fillna(method='backfill', limit=5) - - expected = df[-2:].reindex(index).fillna(method='backfill') - expected.values[:3] = np.nan - tm.assert_frame_equal(result, expected) - - def test_frame_setitem_timestamp(self): - # 2155 - columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', - freq=datetools.bday) - index = lrange(10) - data = DataFrame(columns=columns, index=index) - t = datetime(2012, 11, 1) - ts = Timestamp(t) - data[ts] = np.nan # works - - def test_sparse_series_fillna_limit(self): - index = np.arange(10) - s = Series(np.random.randn(10), index=index) - - ss = s[:2].reindex(index).to_sparse() - result = ss.fillna(method='pad', limit=5) - expected = ss.fillna(method='pad', limit=5) - expected = expected.to_dense() - expected[-3:] = np.nan - expected = expected.to_sparse() - assert_series_equal(result, expected) - - ss = s[-2:].reindex(index).to_sparse() - result = ss.fillna(method='backfill', limit=5) - expected = ss.fillna(method='backfill') - expected = expected.to_dense() - expected[:3] = np.nan - expected = expected.to_sparse() - assert_series_equal(result, expected) - - def test_sparse_series_pad_backfill_limit(self): - index = 
np.arange(10) - s = Series(np.random.randn(10), index=index) - s = s.to_sparse() - - result = s[:2].reindex(index, method='pad', limit=5) - expected = s[:2].reindex(index).fillna(method='pad') - expected = expected.to_dense() - expected[-3:] = np.nan - expected = expected.to_sparse() - assert_series_equal(result, expected) - - result = s[-2:].reindex(index, method='backfill', limit=5) - expected = s[-2:].reindex(index).fillna(method='backfill') - expected = expected.to_dense() - expected[:3] = np.nan - expected = expected.to_sparse() - assert_series_equal(result, expected) - - def test_sparse_frame_pad_backfill_limit(self): - index = np.arange(10) - df = DataFrame(np.random.randn(10, 4), index=index) - sdf = df.to_sparse() - - result = sdf[:2].reindex(index, method='pad', limit=5) - - expected = sdf[:2].reindex(index).fillna(method='pad') - expected = expected.to_dense() - expected.values[-3:] = np.nan - expected = expected.to_sparse() - tm.assert_frame_equal(result, expected) - - result = sdf[-2:].reindex(index, method='backfill', limit=5) - - expected = sdf[-2:].reindex(index).fillna(method='backfill') - expected = expected.to_dense() - expected.values[:3] = np.nan - expected = expected.to_sparse() - tm.assert_frame_equal(result, expected) - - def test_sparse_frame_fillna_limit(self): - index = np.arange(10) - df = DataFrame(np.random.randn(10, 4), index=index) - sdf = df.to_sparse() - - result = sdf[:2].reindex(index) - result = result.fillna(method='pad', limit=5) - - expected = sdf[:2].reindex(index).fillna(method='pad') - expected = expected.to_dense() - expected.values[-3:] = np.nan - expected = expected.to_sparse() - tm.assert_frame_equal(result, expected) - - result = sdf[-2:].reindex(index) - result = result.fillna(method='backfill', limit=5) - - expected = sdf[-2:].reindex(index).fillna(method='backfill') - expected = expected.to_dense() - expected.values[:3] = np.nan - expected = expected.to_sparse() - tm.assert_frame_equal(result, expected) - - def 
test_pad_require_monotonicity(self): - rng = date_range('1/1/2000', '3/1/2000', freq='B') - - # neither monotonic increasing or decreasing - rng2 = rng[[1, 0, 2]] - - self.assertRaises(ValueError, rng2.get_indexer, rng, - method='pad') - - def test_frame_ctor_datetime64_column(self): - rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', - freq='10s') - dates = np.asarray(rng) - - df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates}) - self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))) - - def test_frame_add_datetime64_column(self): - rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', - freq='10s') - df = DataFrame(index=np.arange(len(rng))) - - df['A'] = rng - self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))) - - def test_frame_datetime64_pre1900_repr(self): - df = DataFrame({'year': date_range('1/1/1700', periods=50, - freq='A-DEC')}) - # it works! - repr(df) - - def test_frame_add_datetime64_col_other_units(self): - n = 100 - - units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y'] - - ns_dtype = np.dtype('M8[ns]') - - for unit in units: - dtype = np.dtype('M8[%s]' % unit) - vals = np.arange(n, dtype=np.int64).view(dtype) - - df = DataFrame({'ints': np.arange(n)}, index=np.arange(n)) - df[unit] = vals - - ex_vals = to_datetime(vals.astype('O')).values - - self.assertEqual(df[unit].dtype, ns_dtype) - self.assertTrue((df[unit].values == ex_vals).all()) - - # Test insertion into existing datetime64 column - df = DataFrame({'ints': np.arange(n)}, index=np.arange(n)) - df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype) - - for unit in units: - dtype = np.dtype('M8[%s]' % unit) - vals = np.arange(n, dtype=np.int64).view(dtype) - - tmp = df.copy() - - tmp['dates'] = vals - ex_vals = to_datetime(vals.astype('O')).values - - self.assertTrue((tmp['dates'].values == ex_vals).all()) - - def test_to_datetime_unit(self): - - epoch = 1370745748 - s = Series([ epoch + t for t in range(20) ]) - result = to_datetime(s,unit='s') - 
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ]) - assert_series_equal(result,expected) - - s = Series([ epoch + t for t in range(20) ]).astype(float) - result = to_datetime(s,unit='s') - expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ]) - assert_series_equal(result,expected) - - s = Series([ epoch + t for t in range(20) ] + [iNaT]) - result = to_datetime(s,unit='s') - expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]) - assert_series_equal(result,expected) - - s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float) - result = to_datetime(s,unit='s') - expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]) - assert_series_equal(result,expected) - - s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True) - result = to_datetime(s,unit='s') - expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]) - assert_series_equal(result,expected) - - def test_series_ctor_datetime64(self): - rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', - freq='10s') - dates = np.asarray(rng) - - series = Series(dates) - self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]'))) - - def test_index_cast_datetime64_other_units(self): - arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]') - - idx = Index(arr) - - self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all()) - - def test_reindex_series_add_nat(self): - rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s') - series = Series(rng) - - result = series.reindex(lrange(15)) - self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]'))) - - mask = result.isnull() - self.assertTrue(mask[-5:].all()) - self.assertFalse(mask[:-5].any()) - - def test_reindex_frame_add_nat(self): - rng = 
date_range('1/1/2000 00:00:00', periods=10, freq='10s') - df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng}) - - result = df.reindex(lrange(15)) - self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))) - - mask = com.isnull(result)['B'] - self.assertTrue(mask[-5:].all()) - self.assertFalse(mask[:-5].any()) - - def test_series_repr_nat(self): - series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]') - - result = repr(series) - expected = ('0 1970-01-01 00:00:00.000000\n' - '1 1970-01-01 00:00:00.000001\n' - '2 1970-01-01 00:00:00.000002\n' - '3 NaT\n' - 'dtype: datetime64[ns]') - self.assertEqual(result, expected) - - def test_fillna_nat(self): - series = Series([0, 1, 2, iNaT], dtype='M8[ns]') - - filled = series.fillna(method='pad') - filled2 = series.fillna(value=series.values[2]) - - expected = series.copy() - expected.values[3] = expected.values[2] - - assert_series_equal(filled, expected) - assert_series_equal(filled2, expected) - - df = DataFrame({'A': series}) - filled = df.fillna(method='pad') - filled2 = df.fillna(value=series.values[2]) - expected = DataFrame({'A': expected}) - assert_frame_equal(filled, expected) - assert_frame_equal(filled2, expected) - - series = Series([iNaT, 0, 1, 2], dtype='M8[ns]') - - filled = series.fillna(method='bfill') - filled2 = series.fillna(value=series[1]) - - expected = series.copy() - expected[0] = expected[1] - - assert_series_equal(filled, expected) - assert_series_equal(filled2, expected) - - df = DataFrame({'A': series}) - filled = df.fillna(method='bfill') - filled2 = df.fillna(value=series[1]) - expected = DataFrame({'A': expected}) - assert_frame_equal(filled, expected) - assert_frame_equal(filled2, expected) - - def test_string_na_nat_conversion(self): - # GH #999, #858 - - from pandas.compat import parse_date - - strings = np.array(['1/1/2000', '1/2/2000', np.nan, - '1/4/2000, 12:34:56'], dtype=object) - - expected = np.empty(4, dtype='M8[ns]') - for i, val in enumerate(strings): - if 
com.isnull(val): - expected[i] = iNaT - else: - expected[i] = parse_date(val) - - result = tslib.array_to_datetime(strings) - assert_almost_equal(result, expected) - - result2 = to_datetime(strings) - tm.assertIsInstance(result2, DatetimeIndex) - tm.assert_numpy_array_equal(result, result2) - - malformed = np.array(['1/100/2000', np.nan], dtype=object) - - # GH 10636, default is now 'raise' - self.assertRaises(ValueError, lambda : to_datetime(malformed, errors='raise')) - - result = to_datetime(malformed, errors='ignore') - tm.assert_numpy_array_equal(result, malformed) - - self.assertRaises(ValueError, to_datetime, malformed, - errors='raise') - - idx = ['a', 'b', 'c', 'd', 'e'] - series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan, - '1/5/2000'], index=idx, name='foo') - dseries = Series([to_datetime('1/1/2000'), np.nan, - to_datetime('1/3/2000'), np.nan, - to_datetime('1/5/2000')], index=idx, name='foo') - - result = to_datetime(series) - dresult = to_datetime(dseries) - - expected = Series(np.empty(5, dtype='M8[ns]'), index=idx) - for i in range(5): - x = series[i] - if isnull(x): - expected[i] = iNaT - else: - expected[i] = to_datetime(x) - - assert_series_equal(result, expected, check_names=False) - self.assertEqual(result.name, 'foo') - - assert_series_equal(dresult, expected, check_names=False) - self.assertEqual(dresult.name, 'foo') - - def test_to_datetime_iso8601(self): - result = to_datetime(["2012-01-01 00:00:00"]) - exp = Timestamp("2012-01-01 00:00:00") - self.assertEqual(result[0], exp) - - result = to_datetime(['20121001']) # bad iso 8601 - exp = Timestamp('2012-10-01') - self.assertEqual(result[0], exp) - - def test_to_datetime_default(self): - rs = to_datetime('2001') - xp = datetime(2001, 1, 1) - self.assertTrue(rs, xp) - - #### dayfirst is essentially broken - #### to_datetime('01-13-2012', dayfirst=True) - #### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True)) - - def test_to_datetime_on_datetime64_series(self): - # 
#2699 - s = Series(date_range('1/1/2000', periods=10)) - - result = to_datetime(s) - self.assertEqual(result[0], s[0]) - - def test_to_datetime_with_apply(self): - # this is only locale tested with US/None locales - _skip_if_has_locale() - - # GH 5195 - # with a format and coerce a single item to_datetime fails - td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3]) - expected = pd.to_datetime(td, format='%b %y') - result = td.apply(pd.to_datetime, format='%b %y') - assert_series_equal(result, expected) - - td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3]) - self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y', errors='raise')) - self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) - expected = pd.to_datetime(td, format='%b %y', errors='coerce') - - result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', errors='coerce')) - assert_series_equal(result, expected) - - def test_nat_vector_field_access(self): - idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000']) - - fields = ['year', 'quarter', 'month', 'day', 'hour', - 'minute', 'second', 'microsecond', 'nanosecond', - 'week', 'dayofyear', 'days_in_month'] - for field in fields: - result = getattr(idx, field) - expected = [getattr(x, field) if x is not NaT else np.nan - for x in idx] - self.assert_numpy_array_equal(result, np.array(expected)) - - def test_nat_scalar_field_access(self): - fields = ['year', 'quarter', 'month', 'day', 'hour', - 'minute', 'second', 'microsecond', 'nanosecond', - 'week', 'dayofyear', 'days_in_month', 'daysinmonth', - 'dayofweek'] - for field in fields: - result = getattr(NaT, field) - self.assertTrue(np.isnan(result)) - - def test_NaT_methods(self): - # GH 9513 - raise_methods = ['astimezone', 'combine', 'ctime', 'dst', 'fromordinal', - 'fromtimestamp', 'isocalendar', 'isoformat', - 'strftime', 'strptime', - 'time', 'timestamp', 'timetuple', 'timetz', - 'toordinal', 'tzname', 'utcfromtimestamp', - 
'utcnow', 'utcoffset', 'utctimetuple'] - nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today'] - nan_methods = ['weekday', 'isoweekday'] - - for method in raise_methods: - if hasattr(NaT, method): - self.assertRaises(ValueError, getattr(NaT, method)) - - for method in nan_methods: - if hasattr(NaT, method): - self.assertTrue(np.isnan(getattr(NaT, method)())) - - for method in nat_methods: - if hasattr(NaT, method): - self.assertIs(getattr(NaT, method)(), NaT) - - def test_to_datetime_types(self): - - # empty string - result = to_datetime('') - self.assertIs(result, NaT) - - result = to_datetime(['', '']) - self.assertTrue(isnull(result).all()) - - # ints - result = Timestamp(0) - expected = to_datetime(0) - self.assertEqual(result, expected) - - # GH 3888 (strings) - expected = to_datetime(['2012'])[0] - result = to_datetime('2012') - self.assertEqual(result, expected) - - ### array = ['2012','20120101','20120101 12:01:01'] - array = ['20120101','20120101 12:01:01'] - expected = list(to_datetime(array)) - result = lmap(Timestamp,array) - tm.assert_almost_equal(result,expected) - - ### currently fails ### - ### result = Timestamp('2012') - ### expected = to_datetime('2012') - ### self.assertEqual(result, expected) - - def test_to_datetime_unprocessable_input(self): - # GH 4928 - self.assert_numpy_array_equal( - to_datetime([1, '1'], errors='ignore'), - np.array([1, '1'], dtype='O') - ) - self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise') - - def test_to_datetime_other_datetime64_units(self): - # 5/25/2012 - scalar = np.int64(1337904000000000).view('M8[us]') - as_obj = scalar.astype('O') - - index = DatetimeIndex([scalar]) - self.assertEqual(index[0], scalar.astype('O')) - - value = Timestamp(scalar) - self.assertEqual(value, as_obj) - - def test_to_datetime_list_of_integers(self): - rng = date_range('1/1/2000', periods=20) - rng = DatetimeIndex(rng.values) - - ints = list(rng.asi8) - - result = DatetimeIndex(ints) - - 
self.assertTrue(rng.equals(result)) - - def test_to_datetime_dt64s(self): - in_bound_dts = [ - np.datetime64('2000-01-01'), - np.datetime64('2000-01-02'), - ] - - for dt in in_bound_dts: - self.assertEqual( - pd.to_datetime(dt), - Timestamp(dt) - ) - - oob_dts = [ - np.datetime64('1000-01-01'), - np.datetime64('5000-01-02'), - ] - - for dt in oob_dts: - self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise') - self.assertRaises(ValueError, tslib.Timestamp, dt) - self.assertIs(pd.to_datetime(dt, errors='coerce'), NaT) - - def test_to_datetime_array_of_dt64s(self): - dts = [ - np.datetime64('2000-01-01'), - np.datetime64('2000-01-02'), - ] - - # Assuming all datetimes are in bounds, to_datetime() returns - # an array that is equal to Timestamp() parsing - self.assert_numpy_array_equal( - pd.to_datetime(dts, box=False), - np.array([Timestamp(x).asm8 for x in dts]) - ) - - # A list of datetimes where the last one is out of bounds - dts_with_oob = dts + [np.datetime64('9999-01-01')] - - self.assertRaises( - ValueError, - pd.to_datetime, - dts_with_oob, - errors='raise' - ) - - self.assert_numpy_array_equal( - pd.to_datetime(dts_with_oob, box=False, errors='coerce'), - np.array( - [ - Timestamp(dts_with_oob[0]).asm8, - Timestamp(dts_with_oob[1]).asm8, - iNaT, - ], - dtype='M8' - ) - ) - - # With errors='ignore', out of bounds datetime64s - # are converted to their .item(), which depending on the version of - # numpy is either a python datetime.datetime or datetime.date - self.assert_numpy_array_equal( - pd.to_datetime(dts_with_oob, box=False, errors='ignore'), - np.array( - [dt.item() for dt in dts_with_oob], - dtype='O' - ) - ) - - def test_index_to_datetime(self): - idx = Index(['1/1/2000', '1/2/2000', '1/3/2000']) - - result = idx.to_datetime() - expected = DatetimeIndex(datetools.to_datetime(idx.values)) - self.assertTrue(result.equals(expected)) - - today = datetime.today() - idx = Index([today], dtype=object) - result = idx.to_datetime() - expected = 
DatetimeIndex([today]) - self.assertTrue(result.equals(expected)) - - def test_to_datetime_freq(self): - xp = bdate_range('2000-1-1', periods=10, tz='UTC') - rs = xp.to_datetime() - self.assertEqual(xp.freq, rs.freq) - self.assertEqual(xp.tzinfo, rs.tzinfo) - - def test_range_misspecified(self): - # GH #1095 - - self.assertRaises(ValueError, date_range, '1/1/2000') - self.assertRaises(ValueError, date_range, end='1/1/2000') - self.assertRaises(ValueError, date_range, periods=10) - - self.assertRaises(ValueError, date_range, '1/1/2000', freq='H') - self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H') - self.assertRaises(ValueError, date_range, periods=10, freq='H') - - def test_reasonable_keyerror(self): - # GH #1062 - index = DatetimeIndex(['1/3/2000']) - try: - index.get_loc('1/1/2000') - except KeyError as e: - self.assertIn('2000', str(e)) - - def test_reindex_with_datetimes(self): - rng = date_range('1/1/2000', periods=20) - ts = Series(np.random.randn(20), index=rng) - - result = ts.reindex(list(ts.index[5:10])) - expected = ts[5:10] - tm.assert_series_equal(result, expected) - - result = ts[list(ts.index[5:10])] - tm.assert_series_equal(result, expected) - - def test_asfreq_keep_index_name(self): - # GH #9854 - index_name = 'bar' - index = pd.date_range('20130101',periods=20,name=index_name) - df = pd.DataFrame([x for x in range(20)],columns=['foo'],index=index) - - tm.assert_equal(index_name, df.index.name) - tm.assert_equal(index_name, df.asfreq('10D').index.name) - - def test_promote_datetime_date(self): - rng = date_range('1/1/2000', periods=20) - ts = Series(np.random.randn(20), index=rng) - - ts_slice = ts[5:] - ts2 = ts_slice.copy() - ts2.index = [x.date() for x in ts2.index] - - result = ts + ts2 - result2 = ts2 + ts - expected = ts + ts[5:] - assert_series_equal(result, expected) - assert_series_equal(result2, expected) - - # test asfreq - result = ts2.asfreq('4H', method='ffill') - expected = ts[5:].asfreq('4H', method='ffill') - 
assert_series_equal(result, expected) - - result = rng.get_indexer(ts2.index) - expected = rng.get_indexer(ts_slice.index) - self.assert_numpy_array_equal(result, expected) - - def test_asfreq_normalize(self): - rng = date_range('1/1/2000 09:30', periods=20) - norm = date_range('1/1/2000', periods=20) - vals = np.random.randn(20) - ts = Series(vals, index=rng) - - result = ts.asfreq('D', normalize=True) - norm = date_range('1/1/2000', periods=20) - expected = Series(vals, index=norm) - - assert_series_equal(result, expected) - - vals = np.random.randn(20, 3) - ts = DataFrame(vals, index=rng) - - result = ts.asfreq('D', normalize=True) - expected = DataFrame(vals, index=norm) - - assert_frame_equal(result, expected) - - def test_date_range_gen_error(self): - rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min') - self.assertEqual(len(rng), 4) - - def test_first_subset(self): - ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h') - result = ts.first('10d') - self.assertEqual(len(result), 20) - - ts = _simple_ts('1/1/2000', '1/1/2010') - result = ts.first('10d') - self.assertEqual(len(result), 10) - - result = ts.first('3M') - expected = ts[:'3/31/2000'] - assert_series_equal(result, expected) - - result = ts.first('21D') - expected = ts[:21] - assert_series_equal(result, expected) - - result = ts[:0].first('3M') - assert_series_equal(result, ts[:0]) - - def test_last_subset(self): - ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h') - result = ts.last('10d') - self.assertEqual(len(result), 20) - - ts = _simple_ts('1/1/2000', '1/1/2010') - result = ts.last('10d') - self.assertEqual(len(result), 10) - - result = ts.last('21D') - expected = ts['12/12/2009':] - assert_series_equal(result, expected) - - result = ts.last('21D') - expected = ts[-21:] - assert_series_equal(result, expected) - - result = ts[:0].last('3M') - assert_series_equal(result, ts[:0]) - - def test_format_pre_1900_dates(self): - rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC') - 
rng.format() - ts = Series(1, index=rng) - repr(ts) - - def test_repeat(self): - rng = date_range('1/1/2000', '1/1/2001') - - result = rng.repeat(5) - self.assertIsNone(result.freq) - self.assertEqual(len(result), 5 * len(rng)) - - def test_at_time(self): - rng = date_range('1/1/2000', '1/5/2000', freq='5min') - ts = Series(np.random.randn(len(rng)), index=rng) - rs = ts.at_time(rng[1]) - self.assertTrue((rs.index.hour == rng[1].hour).all()) - self.assertTrue((rs.index.minute == rng[1].minute).all()) - self.assertTrue((rs.index.second == rng[1].second).all()) - - result = ts.at_time('9:30') - expected = ts.at_time(time(9, 30)) - assert_series_equal(result, expected) - - df = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts[time(9, 30)] - result_df = df.ix[time(9, 30)] - expected = ts[(rng.hour == 9) & (rng.minute == 30)] - exp_df = df[(rng.hour == 9) & (rng.minute == 30)] - - # expected.index = date_range('1/1/2000', '1/4/2000') - - assert_series_equal(result, expected) - tm.assert_frame_equal(result_df, exp_df) - - chunk = df.ix['1/4/2000':] - result = chunk.ix[time(9, 30)] - expected = result_df[-1:] - tm.assert_frame_equal(result, expected) - - # midnight, everything - rng = date_range('1/1/2000', '1/31/2000') - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.at_time(time(0, 0)) - assert_series_equal(result, ts) - - # time doesn't exist - rng = date_range('1/1/2012', freq='23Min', periods=384) - ts = Series(np.random.randn(len(rng)), rng) - rs = ts.at_time('16:00') - self.assertEqual(len(rs), 0) - - def test_at_time_frame(self): - rng = date_range('1/1/2000', '1/5/2000', freq='5min') - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - rs = ts.at_time(rng[1]) - self.assertTrue((rs.index.hour == rng[1].hour).all()) - self.assertTrue((rs.index.minute == rng[1].minute).all()) - self.assertTrue((rs.index.second == rng[1].second).all()) - - result = ts.at_time('9:30') - expected = ts.at_time(time(9, 30)) - 
assert_frame_equal(result, expected) - - result = ts.ix[time(9, 30)] - expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)] - - assert_frame_equal(result, expected) - - # midnight, everything - rng = date_range('1/1/2000', '1/31/2000') - ts = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts.at_time(time(0, 0)) - assert_frame_equal(result, ts) - - # time doesn't exist - rng = date_range('1/1/2012', freq='23Min', periods=384) - ts = DataFrame(np.random.randn(len(rng), 2), rng) - rs = ts.at_time('16:00') - self.assertEqual(len(rs), 0) - - def test_between_time(self): - rng = date_range('1/1/2000', '1/5/2000', freq='5min') - ts = Series(np.random.randn(len(rng)), index=rng) - stime = time(0, 0) - etime = time(1, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = 13 * 4 + 1 - if not inc_start: - exp_len -= 5 - if not inc_end: - exp_len -= 4 - - self.assertEqual(len(filtered), exp_len) - for rs in filtered.index: - t = rs.time() - if inc_start: - self.assertTrue(t >= stime) - else: - self.assertTrue(t > stime) - - if inc_end: - self.assertTrue(t <= etime) - else: - self.assertTrue(t < etime) - - result = ts.between_time('00:00', '01:00') - expected = ts.between_time(stime, etime) - assert_series_equal(result, expected) - - # across midnight - rng = date_range('1/1/2000', '1/5/2000', freq='5min') - ts = Series(np.random.randn(len(rng)), index=rng) - stime = time(22, 0) - etime = time(9, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = (12 * 11 + 1) * 4 + 1 - if not inc_start: - exp_len -= 4 - if not inc_end: - exp_len -= 4 - - self.assertEqual(len(filtered), exp_len) - for rs in filtered.index: - t = rs.time() - if inc_start: - self.assertTrue((t >= stime) or (t <= etime)) - else: - self.assertTrue((t > stime) 
or (t <= etime)) - - if inc_end: - self.assertTrue((t <= etime) or (t >= stime)) - else: - self.assertTrue((t < etime) or (t >= stime)) - - def test_between_time_frame(self): - rng = date_range('1/1/2000', '1/5/2000', freq='5min') - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - stime = time(0, 0) - etime = time(1, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = 13 * 4 + 1 - if not inc_start: - exp_len -= 5 - if not inc_end: - exp_len -= 4 - - self.assertEqual(len(filtered), exp_len) - for rs in filtered.index: - t = rs.time() - if inc_start: - self.assertTrue(t >= stime) - else: - self.assertTrue(t > stime) - - if inc_end: - self.assertTrue(t <= etime) - else: - self.assertTrue(t < etime) - - result = ts.between_time('00:00', '01:00') - expected = ts.between_time(stime, etime) - assert_frame_equal(result, expected) - - # across midnight - rng = date_range('1/1/2000', '1/5/2000', freq='5min') - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - stime = time(22, 0) - etime = time(9, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = (12 * 11 + 1) * 4 + 1 - if not inc_start: - exp_len -= 4 - if not inc_end: - exp_len -= 4 - - self.assertEqual(len(filtered), exp_len) - for rs in filtered.index: - t = rs.time() - if inc_start: - self.assertTrue((t >= stime) or (t <= etime)) - else: - self.assertTrue((t > stime) or (t <= etime)) - - if inc_end: - self.assertTrue((t <= etime) or (t >= stime)) - else: - self.assertTrue((t < etime) or (t >= stime)) - - def test_dti_constructor_preserve_dti_freq(self): - rng = date_range('1/1/2000', '1/2/2000', freq='5min') - - rng2 = DatetimeIndex(rng) - self.assertEqual(rng.freq, rng2.freq) - - def test_dti_constructor_years_only(self): - # GH 6961 - for tz in [None, 'UTC', 
'Asia/Tokyo', 'dateutil/US/Pacific']: - rng1 = date_range('2014', '2015', freq='M', tz=tz) - expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz) - - rng2 = date_range('2014', '2015', freq='MS', tz=tz) - expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz) - - rng3 = date_range('2014', '2020', freq='A', tz=tz) - expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz) - - rng4 = date_range('2014', '2020', freq='AS', tz=tz) - expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz) - - for rng, expected in [(rng1, expected1), (rng2, expected2), - (rng3, expected3), (rng4, expected4)]: - tm.assert_index_equal(rng, expected) - - def test_normalize(self): - rng = date_range('1/1/2000 9:30', periods=10, freq='D') - - result = rng.normalize() - expected = date_range('1/1/2000', periods=10, freq='D') - self.assertTrue(result.equals(expected)) - - rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]")) - rng_ns_normalized = rng_ns.normalize() - expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]")) - self.assertTrue(rng_ns_normalized.equals(expected)) - - self.assertTrue(result.is_normalized) - self.assertFalse(rng.is_normalized) - - def test_to_period(self): - from pandas.tseries.period import period_range - - ts = _simple_ts('1/1/2000', '1/1/2001') - - pts = ts.to_period() - exp = ts.copy() - exp.index = period_range('1/1/2000', '1/1/2001') - assert_series_equal(pts, exp) - - pts = ts.to_period('M') - exp.index = exp.index.asfreq('M') - self.assertTrue(pts.index.equals(exp.index.asfreq('M'))) - assert_series_equal(pts, exp) - - # GH 7606 without freq - idx = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04']) - exp_idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03', - '2011-01-04'], freq='D') - - s = Series(np.random.randn(4), index=idx) - expected = s.copy() - expected.index = exp_idx - 
assert_series_equal(s.to_period(), expected) - - df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx) - expected = df.copy() - expected.index = exp_idx - assert_frame_equal(df.to_period(), expected) - - expected = df.copy() - expected.columns = exp_idx - assert_frame_equal(df.to_period(axis=1), expected) - - def create_dt64_based_index(self): - data = [Timestamp('2007-01-01 10:11:12.123456Z'), - Timestamp('2007-01-01 10:11:13.789123Z')] - index = DatetimeIndex(data) - return index - - def test_to_period_millisecond(self): - index = self.create_dt64_based_index() - - period = index.to_period(freq='L') - self.assertEqual(2, len(period)) - self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L')) - self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L')) - - def test_to_period_microsecond(self): - index = self.create_dt64_based_index() - - period = index.to_period(freq='U') - self.assertEqual(2, len(period)) - self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U')) - self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U')) - - def test_to_period_tz_pytz(self): - tm._skip_if_no_pytz() - from dateutil.tz import tzlocal - from pytz import utc as UTC - - xp = date_range('1/1/2000', '4/1/2000').to_period() - - ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern') - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertEqual(result, expected) - self.assertTrue(ts.to_period().equals(xp)) - - ts = date_range('1/1/2000', '4/1/2000', tz=UTC) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertEqual(result, expected) - self.assertTrue(ts.to_period().equals(xp)) - - ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal()) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertEqual(result, expected) - self.assertTrue(ts.to_period().equals(xp)) - - def test_to_period_tz_explicit_pytz(self): - tm._skip_if_no_pytz() - import pytz - from dateutil.tz 
import tzlocal - - xp = date_range('1/1/2000', '4/1/2000').to_period() - - ts = date_range('1/1/2000', '4/1/2000', tz=pytz.timezone('US/Eastern')) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertTrue(result == expected) - self.assertTrue(ts.to_period().equals(xp)) - - ts = date_range('1/1/2000', '4/1/2000', tz=pytz.utc) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertTrue(result == expected) - self.assertTrue(ts.to_period().equals(xp)) - - ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal()) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertTrue(result == expected) - self.assertTrue(ts.to_period().equals(xp)) - - def test_to_period_tz_dateutil(self): - tm._skip_if_no_dateutil() - import dateutil - from dateutil.tz import tzlocal - - xp = date_range('1/1/2000', '4/1/2000').to_period() - - ts = date_range('1/1/2000', '4/1/2000', tz='dateutil/US/Eastern') - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertTrue(result == expected) - self.assertTrue(ts.to_period().equals(xp)) - - ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.tzutc()) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertTrue(result == expected) - self.assertTrue(ts.to_period().equals(xp)) - - ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal()) - - result = ts.to_period()[0] - expected = ts[0].to_period() - - self.assertTrue(result == expected) - self.assertTrue(ts.to_period().equals(xp)) - - def test_frame_to_period(self): - K = 5 - from pandas.tseries.period import period_range - - dr = date_range('1/1/2000', '1/1/2001') - pr = period_range('1/1/2000', '1/1/2001') - df = DataFrame(randn(len(dr), K), index=dr) - df['mix'] = 'a' - - pts = df.to_period() - exp = df.copy() - exp.index = pr - assert_frame_equal(pts, exp) - - pts = df.to_period('M') - self.assertTrue(pts.index.equals(exp.index.asfreq('M'))) - - df = df.T - pts = df.to_period(axis=1) - exp = 
df.copy() - exp.columns = pr - assert_frame_equal(pts, exp) - - pts = df.to_period('M', axis=1) - self.assertTrue(pts.columns.equals(exp.columns.asfreq('M'))) - - self.assertRaises(ValueError, df.to_period, axis=2) - - def test_timestamp_fields(self): - # extra fields from DatetimeIndex like quarter and week - idx = tm.makeDateIndex(100) - - fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'days_in_month', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end'] - for f in fields: - expected = getattr(idx, f)[-1] - result = getattr(Timestamp(idx[-1]), f) - self.assertEqual(result, expected) - - self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq) - self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr) - - def test_woy_boundary(self): - # make sure weeks at year boundaries are correct - d = datetime(2013,12,31) - result = Timestamp(d).week - expected = 1 # ISO standard - self.assertEqual(result, expected) - - d = datetime(2008,12,28) - result = Timestamp(d).week - expected = 52 # ISO standard - self.assertEqual(result, expected) - - d = datetime(2009,12,31) - result = Timestamp(d).week - expected = 53 # ISO standard - self.assertEqual(result, expected) - - d = datetime(2010,1,1) - result = Timestamp(d).week - expected = 53 # ISO standard - self.assertEqual(result, expected) - - d = datetime(2010,1,3) - result = Timestamp(d).week - expected = 53 # ISO standard - self.assertEqual(result, expected) - - result = np.array([Timestamp(datetime(*args)).week for args in - [(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]]) - self.assertTrue((result == [52, 52, 53, 53]).all()) - - def test_timestamp_date_out_of_range(self): - self.assertRaises(ValueError, Timestamp, '1676-01-01') - self.assertRaises(ValueError, Timestamp, '2263-01-01') - - # 1475 - self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01']) - self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) - - def 
test_timestamp_repr(self): - # pre-1900 - stamp = Timestamp('1850-01-01', tz='US/Eastern') - repr(stamp) - - iso8601 = '1850-01-01 01:23:45.012345' - stamp = Timestamp(iso8601, tz='US/Eastern') - result = repr(stamp) - self.assertIn(iso8601, result) - - def test_timestamp_from_ordinal(self): - - # GH 3042 - dt = datetime(2011, 4, 16, 0, 0) - ts = Timestamp.fromordinal(dt.toordinal()) - self.assertEqual(ts.to_pydatetime(), dt) - - # with a tzinfo - stamp = Timestamp('2011-4-16', tz='US/Eastern') - dt_tz = stamp.to_pydatetime() - ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern') - self.assertEqual(ts.to_pydatetime(), dt_tz) - - def test_datetimeindex_integers_shift(self): - rng = date_range('1/1/2000', periods=20) - - result = rng + 5 - expected = rng.shift(5) - self.assertTrue(result.equals(expected)) - - result = rng - 5 - expected = rng.shift(-5) - self.assertTrue(result.equals(expected)) - - def test_astype_object(self): - # NumPy 1.6.1 weak ns support - rng = date_range('1/1/2000', periods=20) - - casted = rng.astype('O') - exp_values = list(rng) - - self.assert_numpy_array_equal(casted, exp_values) - - def test_catch_infinite_loop(self): - offset = datetools.DateOffset(minute=5) - # blow up, don't loop forever - self.assertRaises(Exception, date_range, datetime(2011, 11, 11), - datetime(2011, 11, 12), freq=offset) - - def test_append_concat(self): - rng = date_range('5/8/2012 1:45', periods=10, freq='5T') - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - - result = ts.append(ts) - result_df = df.append(df) - ex_index = DatetimeIndex(np.tile(rng.values, 2)) - self.assertTrue(result.index.equals(ex_index)) - self.assertTrue(result_df.index.equals(ex_index)) - - appended = rng.append(rng) - self.assertTrue(appended.equals(ex_index)) - - appended = rng.append([rng, rng]) - ex_index = DatetimeIndex(np.tile(rng.values, 3)) - self.assertTrue(appended.equals(ex_index)) - - # different index names - 
rng1 = rng.copy() - rng2 = rng.copy() - rng1.name = 'foo' - rng2.name = 'bar' - self.assertEqual(rng1.append(rng1).name, 'foo') - self.assertIsNone(rng1.append(rng2).name) - - def test_append_concat_tz(self): - #GH 2938 - tm._skip_if_no_pytz() - - rng = date_range('5/8/2012 1:45', periods=10, freq='5T', - tz='US/Eastern') - rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T', - tz='US/Eastern') - rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', - tz='US/Eastern') - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts.append(ts2) - result_df = df.append(df2) - self.assertTrue(result.index.equals(rng3)) - self.assertTrue(result_df.index.equals(rng3)) - - appended = rng.append(rng2) - self.assertTrue(appended.equals(rng3)) - - def test_append_concat_tz_explicit_pytz(self): - # GH 2938 - tm._skip_if_no_pytz() - from pytz import timezone as timezone - - rng = date_range('5/8/2012 1:45', periods=10, freq='5T', - tz=timezone('US/Eastern')) - rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T', - tz=timezone('US/Eastern')) - rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', - tz=timezone('US/Eastern')) - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts.append(ts2) - result_df = df.append(df2) - self.assertTrue(result.index.equals(rng3)) - self.assertTrue(result_df.index.equals(rng3)) - - appended = rng.append(rng2) - self.assertTrue(appended.equals(rng3)) - - def test_append_concat_tz_dateutil(self): - # GH 2938 - tm._skip_if_no_dateutil() - from pandas.tslib import _dateutil_gettz as timezone - - rng = date_range('5/8/2012 1:45', periods=10, freq='5T', - tz='dateutil/US/Eastern') - rng2 = 
date_range('5/8/2012 2:35', periods=10, freq='5T', - tz='dateutil/US/Eastern') - rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', - tz='dateutil/US/Eastern') - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts.append(ts2) - result_df = df.append(df2) - self.assertTrue(result.index.equals(rng3)) - self.assertTrue(result_df.index.equals(rng3)) - - appended = rng.append(rng2) - self.assertTrue(appended.equals(rng3)) - - def test_set_dataframe_column_ns_dtype(self): - x = DataFrame([datetime.now(), datetime.now()]) - self.assertEqual(x[0].dtype, np.dtype('M8[ns]')) - - def test_groupby_count_dateparseerror(self): - dr = date_range(start='1/1/2012', freq='5min', periods=10) - - # BAD Example, datetimes first - s = Series(np.arange(10), index=[dr, lrange(10)]) - grouped = s.groupby(lambda x: x[1] % 2 == 0) - result = grouped.count() - - s = Series(np.arange(10), index=[lrange(10), dr]) - grouped = s.groupby(lambda x: x[0] % 2 == 0) - expected = grouped.count() - - assert_series_equal(result, expected) - - def test_datetimeindex_repr_short(self): - dr = date_range(start='1/1/2012', periods=1) - repr(dr) - - dr = date_range(start='1/1/2012', periods=2) - repr(dr) - - dr = date_range(start='1/1/2012', periods=3) - repr(dr) - - def test_constructor_int64_nocopy(self): - # #1624 - arr = np.arange(1000, dtype=np.int64) - index = DatetimeIndex(arr) - - arr[50:100] = -1 - self.assertTrue((index.asi8[50:100] == -1).all()) - - arr = np.arange(1000, dtype=np.int64) - index = DatetimeIndex(arr, copy=True) - - arr[50:100] = -1 - self.assertTrue((index.asi8[50:100] != -1).all()) - - def test_series_interpolate_method_values(self): - # #1646 - ts = _simple_ts('1/1/2000', '1/20/2000') - ts[::2] = np.nan - - result = ts.interpolate(method='values') - exp = ts.interpolate() - assert_series_equal(result, 
exp) - - def test_frame_datetime64_handling_groupby(self): - # it works! - df = DataFrame([(3, np.datetime64('2012-07-03')), - (3, np.datetime64('2012-07-04'))], - columns=['a', 'date']) - result = df.groupby('a').first() - self.assertEqual(result['date'][3], Timestamp('2012-07-03')) - - def test_series_interpolate_intraday(self): - # #1698 - index = pd.date_range('1/1/2012', periods=4, freq='12D') - ts = pd.Series([0, 12, 24, 36], index) - new_index = index.append(index + pd.DateOffset(days=1)).sort_values() - - exp = ts.reindex(new_index).interpolate(method='time') - - index = pd.date_range('1/1/2012', periods=4, freq='12H') - ts = pd.Series([0, 12, 24, 36], index) - new_index = index.append(index + pd.DateOffset(hours=1)).sort_values() - result = ts.reindex(new_index).interpolate(method='time') - - self.assert_numpy_array_equal(result.values, exp.values) - - def test_frame_dict_constructor_datetime64_1680(self): - dr = date_range('1/1/2012', periods=10) - s = Series(dr, index=dr) - - # it works! - DataFrame({'a': 'foo', 'b': s}, index=dr) - DataFrame({'a': 'foo', 'b': s.values}, index=dr) - - def test_frame_datetime64_mixed_index_ctor_1681(self): - dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - ts = Series(dr) - - # it works! 
- d = DataFrame({'A': 'foo', 'B': ts}, index=dr) - self.assertTrue(d['B'].isnull().all()) - - def test_frame_timeseries_to_records(self): - index = date_range('1/1/2000', periods=10) - df = DataFrame(np.random.randn(10, 3), index=index, - columns=['a', 'b', 'c']) - - result = df.to_records() - result['index'].dtype == 'M8[ns]' - - result = df.to_records(index=False) - - def test_frame_datetime64_duplicated(self): - dates = date_range('2010-07-01', end='2010-08-05') - - tst = DataFrame({'symbol': 'AAA', 'date': dates}) - result = tst.duplicated(['date', 'symbol']) - self.assertTrue((-result).all()) - - tst = DataFrame({'date': dates}) - result = tst.duplicated() - self.assertTrue((-result).all()) - - def test_timestamp_compare_with_early_datetime(self): - # e.g. datetime.min - stamp = Timestamp('2012-01-01') - - self.assertFalse(stamp == datetime.min) - self.assertFalse(stamp == datetime(1600, 1, 1)) - self.assertFalse(stamp == datetime(2700, 1, 1)) - self.assertNotEqual(stamp, datetime.min) - self.assertNotEqual(stamp, datetime(1600, 1, 1)) - self.assertNotEqual(stamp, datetime(2700, 1, 1)) - self.assertTrue(stamp > datetime(1600, 1, 1)) - self.assertTrue(stamp >= datetime(1600, 1, 1)) - self.assertTrue(stamp < datetime(2700, 1, 1)) - self.assertTrue(stamp <= datetime(2700, 1, 1)) - - def test_to_html_timestamp(self): - rng = date_range('2000-01-01', periods=10) - df = DataFrame(np.random.randn(10, 4), index=rng) - - result = df.to_html() - self.assertIn('2000-01-01', result) - - def test_to_csv_numpy_16_bug(self): - frame = DataFrame({'a': date_range('1/1/2000', periods=10)}) - - buf = StringIO() - frame.to_csv(buf) - - result = buf.getvalue() - self.assertIn('2000-01-01', result) - - def test_series_map_box_timestamps(self): - # #2689, #2627 - s = Series(date_range('1/1/2000', periods=10)) - - def f(x): - return (x.hour, x.day, x.month) - - # it works! 
- s.map(f) - s.apply(f) - DataFrame(s).applymap(f) - - def test_concat_datetime_datetime64_frame(self): - # #2624 - rows = [] - rows.append([datetime(2010, 1, 1), 1]) - rows.append([datetime(2010, 1, 2), 'hi']) - - df2_obj = DataFrame.from_records(rows, columns=['date', 'test']) - - ind = date_range(start="2000/1/1", freq="D", periods=10) - df1 = DataFrame({'date': ind, 'test':lrange(10)}) - - # it works! - pd.concat([df1, df2_obj]) - - def test_period_resample(self): - # GH3609 - s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float') - s[10:30] = np.nan - expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')]) - result = s.to_period().resample('T', kind='period') - assert_series_equal(result, expected) - result2 = s.resample('T', kind='period') - assert_series_equal(result2, expected) - - def test_period_resample_with_local_timezone_pytz(self): - # GH5430 - tm._skip_if_no_pytz() - import pytz - - local_timezone = pytz.timezone('America/Los_Angeles') - - start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc) - # 1 day later - end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc) - - index = pd.date_range(start, end, freq='H') - - series = pd.Series(1, index=index) - series = series.tz_convert(local_timezone) - result = series.resample('D', kind='period') - # Create the expected series - expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific - expected = pd.Series(1, index=expected_index) - assert_series_equal(result, expected) - - def test_period_resample_with_local_timezone_dateutil(self): - # GH5430 - tm._skip_if_no_dateutil() - import dateutil - - local_timezone = 'dateutil/America/Los_Angeles' - - start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=dateutil.tz.tzutc()) - # 1 day later - end = datetime(year=2013, 
month=11, day=2, hour=0, minute=0, tzinfo=dateutil.tz.tzutc()) - - index = pd.date_range(start, end, freq='H') - - series = pd.Series(1, index=index) - series = series.tz_convert(local_timezone) - result = series.resample('D', kind='period') - # Create the expected series - expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific - expected = pd.Series(1, index=expected_index) - assert_series_equal(result, expected) - - - def test_pickle(self): - #GH4606 - - p = self.round_trip_pickle(NaT) - self.assertTrue(p is NaT) - - idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06']) - idx_p = self.round_trip_pickle(idx) - self.assertTrue(idx_p[0] == idx[0]) - self.assertTrue(idx_p[1] is NaT) - self.assertTrue(idx_p[2] == idx[2]) - - -def _simple_ts(start, end, freq='D'): - rng = date_range(start, end, freq=freq) - return Series(np.random.randn(len(rng)), index=rng) - - -class TestDatetimeIndex(tm.TestCase): - _multiprocess_can_split_ = True - - def test_hash_error(self): - index = date_range('20010101', periods=10) - with tm.assertRaisesRegexp(TypeError, - "unhashable type: %r" % - type(index).__name__): - hash(index) - - def test_stringified_slice_with_tz(self): - #GH2658 - import datetime - start=datetime.datetime.now() - idx=DatetimeIndex(start=start,freq="1d",periods=10) - df=DataFrame(lrange(10),index=idx) - df["2013-01-14 23:44:34.437768-05:00":] # no exception here - - def test_append_join_nondatetimeindex(self): - rng = date_range('1/1/2000', periods=10) - idx = Index(['a', 'b', 'c', 'd']) - - result = rng.append(idx) - tm.assertIsInstance(result[0], Timestamp) - - # it works - rng.join(idx, how='outer') - - def test_astype(self): - rng = date_range('1/1/2000', periods=10) - - result = rng.astype('i8') - self.assert_numpy_array_equal(result, rng.asi8) - - def test_to_period_nofreq(self): - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) - 
self.assertRaises(ValueError, idx.to_period) - - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], - freq='infer') - self.assertEqual(idx.freqstr, 'D') - expected = pd.PeriodIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='D') - self.assertTrue(idx.to_period().equals(expected)) - - # GH 7606 - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) - self.assertEqual(idx.freqstr, None) - self.assertTrue(idx.to_period().equals(expected)) - - def test_000constructor_resolution(self): - # 2252 - t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1) - idx = DatetimeIndex([t1]) - - self.assertEqual(idx.nanosecond[0], t1.nanosecond) - - def test_constructor_coverage(self): - rng = date_range('1/1/2000', periods=10.5) - exp = date_range('1/1/2000', periods=10) - self.assertTrue(rng.equals(exp)) - - self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000', - periods='foo', freq='D') - - self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000', - end='1/10/2000') - - self.assertRaises(ValueError, DatetimeIndex, '1/1/2000') - - # generator expression - gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10)) - result = DatetimeIndex(gen) - expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i) - for i in range(10)]) - self.assertTrue(result.equals(expected)) - - # NumPy string array - strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03']) - result = DatetimeIndex(strings) - expected = DatetimeIndex(strings.astype('O')) - self.assertTrue(result.equals(expected)) - - from_ints = DatetimeIndex(expected.asi8) - self.assertTrue(from_ints.equals(expected)) - - # string with NaT - strings = np.array(['2000-01-01', '2000-01-02', 'NaT']) - result = DatetimeIndex(strings) - expected = DatetimeIndex(strings.astype('O')) - self.assertTrue(result.equals(expected)) - - from_ints = DatetimeIndex(expected.asi8) - self.assertTrue(from_ints.equals(expected)) - - # non-conforming - self.assertRaises(ValueError, DatetimeIndex, 
- ['2000-01-01', '2000-01-02', '2000-01-04'], - freq='D') - - self.assertRaises(ValueError, DatetimeIndex, - start='2011-01-01', freq='b') - self.assertRaises(ValueError, DatetimeIndex, - end='2011-01-01', freq='B') - self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D') - - def test_constructor_datetime64_tzformat(self): - # GH 6572 - tm._skip_if_no_pytz() - import pytz - # ISO 8601 format results in pytz.FixedOffset - for freq in ['AS', 'W-SUN']: - idx = date_range('2013-01-01T00:00:00-05:00', '2016-01-01T23:59:59-05:00', freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=pytz.FixedOffset(-300)) - tm.assert_index_equal(idx, expected) - # Unable to use `US/Eastern` because of DST - expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz='America/Lima') - self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) - - idx = date_range('2013-01-01T00:00:00+09:00', '2016-01-01T23:59:59+09:00', freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=pytz.FixedOffset(540)) - tm.assert_index_equal(idx, expected) - expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz='Asia/Tokyo') - self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) - - tm._skip_if_no_dateutil() - from dateutil.tz import tzoffset - # Non ISO 8601 format results in dateutil.tz.tzoffset - for freq in ['AS', 'W-SUN']: - idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00', freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=tzoffset(None, -18000)) - tm.assert_index_equal(idx, expected) - # Unable to use `US/Eastern` because of DST - expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz='America/Lima') - self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) - - idx = date_range('2013/1/1 0:00:00+9:00', '2016/1/1 23:59:59+09:00', freq=freq) - 
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=tzoffset(None, 32400)) - tm.assert_index_equal(idx, expected) - expected_i8 = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz='Asia/Tokyo') - self.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) - - def test_constructor_name(self): - idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A', - name='TEST') - self.assertEqual(idx.name, 'TEST') - - def test_comparisons_coverage(self): - rng = date_range('1/1/2000', periods=10) - - # raise TypeError for now - self.assertRaises(TypeError, rng.__lt__, rng[3].value) - - result = rng == list(rng) - exp = rng == rng - self.assert_numpy_array_equal(result, exp) - - def test_comparisons_nat(self): - - fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) - fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) - - didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, - '2014-05-01', '2014-07-01']) - didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT, - '2014-06-01', '2014-07-01']) - darr = np.array([np.datetime64('2014-02-01 00:00Z'), - np.datetime64('2014-03-01 00:00Z'), - np.datetime64('nat'), np.datetime64('nat'), - np.datetime64('2014-06-01 00:00Z'), - np.datetime64('2014-07-01 00:00Z')]) - - if _np_version_under1p8: - # cannot test array because np.datetime('nat') returns today's date - cases = [(fidx1, fidx2), (didx1, didx2)] - else: - cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)] - - # Check pd.NaT is handles as the same as np.nan - for idx1, idx2 in cases: - - result = idx1 < idx2 - expected = np.array([True, False, False, False, True, False]) - self.assert_numpy_array_equal(result, expected) - - result = idx2 > idx1 - expected = np.array([True, False, False, False, True, False]) - self.assert_numpy_array_equal(result, expected) - - result = idx1 <= idx2 - expected = np.array([True, False, False, False, True, True]) - self.assert_numpy_array_equal(result, 
expected) - - result = idx2 >= idx1 - expected = np.array([True, False, False, False, True, True]) - self.assert_numpy_array_equal(result, expected) - - result = idx1 == idx2 - expected = np.array([False, False, False, False, False, True]) - self.assert_numpy_array_equal(result, expected) - - result = idx1 != idx2 - expected = np.array([True, True, True, True, True, False]) - self.assert_numpy_array_equal(result, expected) - - for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]: - result = idx1 < val - expected = np.array([False, False, False, False, False, False]) - self.assert_numpy_array_equal(result, expected) - result = idx1 > val - self.assert_numpy_array_equal(result, expected) - - result = idx1 <= val - self.assert_numpy_array_equal(result, expected) - result = idx1 >= val - self.assert_numpy_array_equal(result, expected) - - result = idx1 == val - self.assert_numpy_array_equal(result, expected) - - result = idx1 != val - expected = np.array([True, True, True, True, True, True]) - self.assert_numpy_array_equal(result, expected) - - # Check pd.NaT is handles as the same as np.nan - for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]: - result = idx1 < val - expected = np.array([True, False, False, False, False, False]) - self.assert_numpy_array_equal(result, expected) - result = idx1 > val - expected = np.array([False, False, False, False, True, True]) - self.assert_numpy_array_equal(result, expected) - - result = idx1 <= val - expected = np.array([True, False, True, False, False, False]) - self.assert_numpy_array_equal(result, expected) - result = idx1 >= val - expected = np.array([False, False, True, False, True, True]) - self.assert_numpy_array_equal(result, expected) - - result = idx1 == val - expected = np.array([False, False, True, False, False, False]) - self.assert_numpy_array_equal(result, expected) - - result = idx1 != val - expected = np.array([True, True, False, True, True, True]) - self.assert_numpy_array_equal(result, expected) - - def 
test_map(self): - rng = date_range('1/1/2000', periods=10) - - f = lambda x: x.strftime('%Y%m%d') - result = rng.map(f) - exp = [f(x) for x in rng] - tm.assert_almost_equal(result, exp) - - - def test_iteration_preserves_tz(self): - - tm._skip_if_no_dateutil() - - # GH 8890 - import dateutil - index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern') - - for i, ts in enumerate(index): - result = ts - expected = index[i] - self.assertEqual(result, expected) - - index = date_range("2012-01-01", periods=3, freq='H', tz=dateutil.tz.tzoffset(None, -28800)) - - for i, ts in enumerate(index): - result = ts - expected = index[i] - self.assertEqual(result._repr_base, expected._repr_base) - self.assertEqual(result, expected) - - # 9100 - index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00','2014-12-01 04:12:34.987000-08:00']) - for i, ts in enumerate(index): - result = ts - expected = index[i] - self.assertEqual(result._repr_base, expected._repr_base) - self.assertEqual(result, expected) - - - def test_misc_coverage(self): - rng = date_range('1/1/2000', periods=5) - result = rng.groupby(rng.day) - tm.assertIsInstance(list(result.values())[0][0], Timestamp) - - idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02']) - self.assertTrue(idx.equals(list(idx))) - - non_datetime = Index(list('abc')) - self.assertFalse(idx.equals(list(non_datetime))) - - def test_union_coverage(self): - idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02']) - ordered = DatetimeIndex(idx.sort_values(), freq='infer') - result = ordered.union(idx) - self.assertTrue(result.equals(ordered)) - - result = ordered[:0].union(ordered) - self.assertTrue(result.equals(ordered)) - self.assertEqual(result.freq, ordered.freq) - - def test_union_bug_1730(self): - rng_a = date_range('1/1/2012', periods=4, freq='3H') - rng_b = date_range('1/1/2012', periods=4, freq='4H') - - result = rng_a.union(rng_b) - exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b)))) - 
self.assertTrue(result.equals(exp)) - - def test_union_bug_1745(self): - left = DatetimeIndex(['2012-05-11 15:19:49.695000']) - right = DatetimeIndex(['2012-05-29 13:04:21.322000', - '2012-05-11 15:27:24.873000', - '2012-05-11 15:31:05.350000']) - - result = left.union(right) - exp = DatetimeIndex(sorted(set(list(left)) | set(list(right)))) - self.assertTrue(result.equals(exp)) - - def test_union_bug_4564(self): - from pandas import DateOffset - left = date_range("2013-01-01", "2013-02-01") - right = left + DateOffset(minutes=15) - - result = left.union(right) - exp = DatetimeIndex(sorted(set(list(left)) | set(list(right)))) - self.assertTrue(result.equals(exp)) - - def test_intersection_bug_1708(self): - from pandas import DateOffset - index_1 = date_range('1/1/2012', periods=4, freq='12H') - index_2 = index_1 + DateOffset(hours=1) - - result = index_1 & index_2 - self.assertEqual(len(result), 0) - - # GH 10699 - def test_datetime64_with_DateOffset(self): - for klass, assert_func in zip([Series, DatetimeIndex], - [self.assert_series_equal, - tm.assert_index_equal]): - s = klass(date_range('2000-01-01', '2000-01-31')) - result = s + pd.DateOffset(years=1) - result2 = pd.DateOffset(years=1) + s - exp = klass(date_range('2001-01-01', '2001-01-31')) - assert_func(result, exp) - assert_func(result2, exp) - - result = s - pd.DateOffset(years=1) - exp = klass(date_range('1999-01-01', '1999-01-31')) - assert_func(result, exp) - - s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'), - pd.Timestamp('2000-02-15', tz='US/Central')]) - result = s + pd.offsets.MonthEnd() - result2 = pd.offsets.MonthEnd() + s - exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'), - Timestamp('2000-02-29', tz='US/Central')]) - assert_func(result, exp) - assert_func(result2, exp) - - # array of offsets - valid for Series only - if klass is Series: - with tm.assert_produces_warning(PerformanceWarning): - s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')]) - result = s + 
Series([pd.offsets.DateOffset(years=1), - pd.offsets.MonthEnd()]) - exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')]) - assert_func(result, exp) - - # same offset - result = s + Series([pd.offsets.DateOffset(years=1), - pd.offsets.DateOffset(years=1)]) - exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')]) - assert_func(result, exp) - - s = klass([Timestamp('2000-01-05 00:15:00'), Timestamp('2000-01-31 00:23:00'), - Timestamp('2000-01-01'), Timestamp('2000-02-29'), Timestamp('2000-12-31')]) - - #DateOffset relativedelta fastpath - relative_kwargs = [('years', 2), ('months', 5), ('days', 3), - ('hours', 5), ('minutes', 10), ('seconds', 2), - ('microseconds', 5)] - for i, kwd in enumerate(relative_kwargs): - op = pd.DateOffset(**dict([kwd])) - assert_func(klass([x + op for x in s]), s + op) - assert_func(klass([x - op for x in s]), s - op) - op = pd.DateOffset(**dict(relative_kwargs[:i+1])) - assert_func(klass([x + op for x in s]), s + op) - assert_func(klass([x - op for x in s]), s - op) - - - # split by fast/slow path to test perf warning - off = {False: - ['YearBegin', ('YearBegin', {'month': 5}), - 'YearEnd', ('YearEnd', {'month': 5}), - 'MonthBegin', 'MonthEnd', 'Week', ('Week', {'weekday': 3}), - 'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin'], - PerformanceWarning: - ['CustomBusinessDay', 'CDay', 'CBMonthEnd','CBMonthBegin', - 'BMonthBegin', 'BMonthEnd', 'BusinessHour', 'BYearBegin', - 'BYearEnd','BQuarterBegin', ('LastWeekOfMonth', {'weekday':2}), - ('FY5253Quarter', {'qtr_with_extra_week': 1, 'startingMonth': 1, - 'weekday': 2, 'variation': 'nearest'}), - ('FY5253',{'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}), - ('WeekOfMonth', {'weekday': 2, 'week': 2}), 'Easter', - ('DateOffset', {'day': 4}), ('DateOffset', {'month': 5})]} - - for normalize in (True, False): - for warning, offsets in off.items(): - for do in offsets: - if isinstance(do, tuple): - do, kwargs = do - else: - do = do - kwargs = {} - op = 
getattr(pd.offsets,do)(5, normalize=normalize, **kwargs) - with tm.assert_produces_warning(warning): - assert_func(klass([x + op for x in s]), s + op) - assert_func(klass([x - op for x in s]), s - op) - assert_func(klass([op + x for x in s]), op + s) - # def test_add_timedelta64(self): - # rng = date_range('1/1/2000', periods=5) - # delta = rng.values[3] - rng.values[1] - - # result = rng + delta - # expected = rng + timedelta(2) - # self.assertTrue(result.equals(expected)) - - def test_get_duplicates(self): - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02', - '2000-01-03', '2000-01-03', '2000-01-04']) - - result = idx.get_duplicates() - ex = DatetimeIndex(['2000-01-02', '2000-01-03']) - self.assertTrue(result.equals(ex)) - - def test_argmin_argmax(self): - idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) - self.assertEqual(idx.argmin(), 1) - self.assertEqual(idx.argmax(), 0) - - def test_sort_values(self): - idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) - - ordered = idx.sort_values() - self.assertTrue(ordered.is_monotonic) - - ordered = idx.sort_values(ascending=False) - self.assertTrue(ordered[::-1].is_monotonic) - - ordered, dexer = idx.sort_values(return_indexer=True) - self.assertTrue(ordered.is_monotonic) - self.assert_numpy_array_equal(dexer, [1, 2, 0]) - - ordered, dexer = idx.sort_values(return_indexer=True, ascending=False) - self.assertTrue(ordered[::-1].is_monotonic) - self.assert_numpy_array_equal(dexer, [0, 2, 1]) - - def test_insert(self): - idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'], name='idx') - - result = idx.insert(2, datetime(2000, 1, 5)) - exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05', - '2000-01-02'], name='idx') - self.assertTrue(result.equals(exp)) - - # insertion of non-datetime should coerce to object index - result = idx.insert(1, 'inserted') - expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1), - datetime(2000, 1, 2)], name='idx') - 
self.assertNotIsInstance(result, DatetimeIndex) - tm.assert_index_equal(result, expected) - self.assertEqual(result.name, expected.name) - - idx = date_range('1/1/2000', periods=3, freq='M', name='idx') - - # preserve freq - expected_0 = DatetimeIndex(['1999-12-31', '2000-01-31', '2000-02-29', - '2000-03-31'], name='idx', freq='M') - expected_3 = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', - '2000-04-30'], name='idx', freq='M') - - # reset freq to None - expected_1_nofreq = DatetimeIndex(['2000-01-31', '2000-01-31', '2000-02-29', - '2000-03-31'], name='idx', freq=None) - expected_3_nofreq = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', - '2000-01-02'], name='idx', freq=None) - - cases = [(0, datetime(1999, 12, 31), expected_0), - (-3, datetime(1999, 12, 31), expected_0), - (3, datetime(2000, 4, 30), expected_3), - (1, datetime(2000, 1, 31), expected_1_nofreq), - (3, datetime(2000, 1, 2), expected_3_nofreq)] - - for n, d, expected in cases: - result = idx.insert(n, d) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - - # reset freq to None - result = idx.insert(3, datetime(2000, 1, 2)) - expected = DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31', - '2000-01-02'], name='idx', freq=None) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertTrue(result.freq is None) - - # GH 7299 - tm._skip_if_no_pytz() - import pytz - - idx = date_range('1/1/2000', periods=3, freq='D', tz='Asia/Tokyo', name='idx') - with tm.assertRaises(ValueError): - result = idx.insert(3, pd.Timestamp('2000-01-04')) - with tm.assertRaises(ValueError): - result = idx.insert(3, datetime(2000, 1, 4)) - with tm.assertRaises(ValueError): - result = idx.insert(3, pd.Timestamp('2000-01-04', tz='US/Eastern')) - with tm.assertRaises(ValueError): - result = idx.insert(3, datetime(2000, 1, 4, tzinfo=pytz.timezone('US/Eastern'))) - - for tz in 
['US/Pacific', 'Asia/Singapore']: - idx = date_range('1/1/2000 09:00', periods=6, freq='H', tz=tz, name='idx') - # preserve freq - expected = date_range('1/1/2000 09:00', periods=7, freq='H', tz=tz, name='idx') - for d in [pd.Timestamp('2000-01-01 15:00', tz=tz), - pytz.timezone(tz).localize(datetime(2000, 1, 1, 15))]: - - result = idx.insert(6, d) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - self.assertEqual(result.tz, expected.tz) - - expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 10:00', '2000-01-01 11:00', - '2000-01-01 12:00', '2000-01-01 13:00', '2000-01-01 14:00', - '2000-01-01 10:00'], name='idx', - tz=tz, freq=None) - # reset freq to None - for d in [pd.Timestamp('2000-01-01 10:00', tz=tz), - pytz.timezone(tz).localize(datetime(2000, 1, 1, 10))]: - result = idx.insert(6, d) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertTrue(result.freq is None) - self.assertEqual(result.tz, expected.tz) - - def test_delete(self): - idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx') - - # prserve freq - expected_0 = date_range(start='2000-02-01', periods=4, freq='M', name='idx') - expected_4 = date_range(start='2000-01-01', periods=4, freq='M', name='idx') - - # reset freq to None - expected_1 = DatetimeIndex(['2000-01-31', '2000-03-31', '2000-04-30', - '2000-05-31'], freq=None, name='idx') - - cases ={0: expected_0, -5: expected_0, - -1: expected_4, 4: expected_4, - 1: expected_1} - for n, expected in compat.iteritems(cases): - result = idx.delete(n) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - - with tm.assertRaises((IndexError, ValueError)): - # either depeidnig on numpy version - result = idx.delete(5) - - for tz in [None, 'Asia/Tokyo', 'US/Pacific']: - idx = date_range(start='2000-01-01 09:00', 
periods=10, - freq='H', name='idx', tz=tz) - - expected = date_range(start='2000-01-01 10:00', periods=9, - freq='H', name='idx', tz=tz) - result = idx.delete(0) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, 'H') - self.assertEqual(result.tz, expected.tz) - - expected = date_range(start='2000-01-01 09:00', periods=9, - freq='H', name='idx', tz=tz) - result = idx.delete(-1) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freqstr, 'H') - self.assertEqual(result.tz, expected.tz) - - def test_delete_slice(self): - idx = date_range(start='2000-01-01', periods=10, freq='D', name='idx') - - # prserve freq - expected_0_2 = date_range(start='2000-01-04', periods=7, freq='D', name='idx') - expected_7_9 = date_range(start='2000-01-01', periods=7, freq='D', name='idx') - - # reset freq to None - expected_3_5 = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03', - '2000-01-07', '2000-01-08', '2000-01-09', - '2000-01-10'], freq=None, name='idx') - - cases ={(0, 1, 2): expected_0_2, - (7, 8, 9): expected_7_9, - (3, 4, 5): expected_3_5} - for n, expected in compat.iteritems(cases): - result = idx.delete(n) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - - result = idx.delete(slice(n[0], n[-1] + 1)) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - - for tz in [None, 'Asia/Tokyo', 'US/Pacific']: - ts = pd.Series(1, index=pd.date_range('2000-01-01 09:00', periods=10, - freq='H', name='idx', tz=tz)) - # preserve freq - result = ts.drop(ts.index[:5]).index - expected = pd.date_range('2000-01-01 14:00', periods=5, freq='H', name='idx', tz=tz) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - 
self.assertEqual(result.freq, expected.freq) - self.assertEqual(result.tz, expected.tz) - - # reset freq to None - result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index - expected = DatetimeIndex(['2000-01-01 09:00', '2000-01-01 11:00', '2000-01-01 13:00', - '2000-01-01 15:00', '2000-01-01 17:00'], - freq=None, name='idx', tz=tz) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - self.assertEqual(result.tz, expected.tz) - - def test_take(self): - dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), - datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] - - for tz in [None, 'US/Eastern', 'Asia/Tokyo']: - idx = DatetimeIndex(start='2010-01-01 09:00', end='2010-02-01 09:00', - freq='H', tz=tz, name='idx') - expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) - - taken1 = idx.take([5, 6, 8, 12]) - taken2 = idx[[5, 6, 8, 12]] - - for taken in [taken1, taken2]: - self.assertTrue(taken.equals(expected)) - tm.assertIsInstance(taken, DatetimeIndex) - self.assertIsNone(taken.freq) - self.assertEqual(taken.tz, expected.tz) - self.assertEqual(taken.name, expected.name) - - def test_map_bug_1677(self): - index = DatetimeIndex(['2012-04-25 09:30:00.393000']) - f = index.asof - - result = index.map(f) - expected = np.array([f(index[0])]) - self.assert_numpy_array_equal(result, expected) - - def test_groupby_function_tuple_1677(self): - df = DataFrame(np.random.rand(100), - index=date_range("1/1/2000", periods=100)) - monthly_group = df.groupby(lambda x: (x.year, x.month)) - - result = monthly_group.mean() - tm.assertIsInstance(result.index[0], tuple) - - def test_append_numpy_bug_1681(self): - # another datetime64 bug - dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') - a = DataFrame() - c = DataFrame({'A': 'foo', 'B': dr}, index=dr) - - result = a.append(c) - self.assertTrue((result['B'] == dr).all()) - - def test_isin(self): - index = tm.makeDateIndex(4) - result = 
index.isin(index) - self.assertTrue(result.all()) - - result = index.isin(list(index)) - self.assertTrue(result.all()) - - assert_almost_equal(index.isin([index[2], 5]), - [False, False, True, False]) - - def test_union(self): - i1 = Int64Index(np.arange(0, 20, 2)) - i2 = Int64Index(np.arange(10, 30, 2)) - result = i1.union(i2) - expected = Int64Index(np.arange(0, 30, 2)) - self.assert_numpy_array_equal(result, expected) - - def test_union_with_DatetimeIndex(self): - i1 = Int64Index(np.arange(0, 20, 2)) - i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D') - i1.union(i2) # Works - i2.union(i1) # Fails with "AttributeError: can't set attribute" - - def test_time(self): - rng = pd.date_range('1/1/2000', freq='12min', periods=10) - result = pd.Index(rng).time - expected = [t.time() for t in rng] - self.assertTrue((result == expected).all()) - - def test_date(self): - rng = pd.date_range('1/1/2000', freq='12H', periods=10) - result = pd.Index(rng).date - expected = [t.date() for t in rng] - self.assertTrue((result == expected).all()) - - def test_does_not_convert_mixed_integer(self): - df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs: - randn(), r_idx_type='i', c_idx_type='dt') - cols = df.columns.join(df.index, how='outer') - joined = cols.join(df.columns) - self.assertEqual(cols.dtype, np.dtype('O')) - self.assertEqual(cols.dtype, joined.dtype) - tm.assert_numpy_array_equal(cols.values, joined.values) - - def test_slice_keeps_name(self): - # GH4226 - st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') - et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') - dr = pd.date_range(st, et, freq='H', name='timebucket') - self.assertEqual(dr[1:].name, dr.name) - - def test_join_self(self): - index = date_range('1/1/2000', periods=10) - kinds = 'outer', 'inner', 'left', 'right' - for kind in kinds: - joined = index.join(index, how=kind) - self.assertIs(index, joined) - - def assert_index_parameters(self, 
index): - assert index.freq == '40960N' - assert index.inferred_freq == '40960N' - - def test_ns_index(self): - nsamples = 400 - ns = int(1e9 / 24414) - dtstart = np.datetime64('2012-09-20T00:00:00') - - dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns') - freq = ns * pd.datetools.Nano() - index = pd.DatetimeIndex(dt, freq=freq, name='time') - self.assert_index_parameters(index) - - new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq) - self.assert_index_parameters(new_index) - - def test_join_with_period_index(self): - df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args: - np.random.randint(2), c_idx_type='p', - r_idx_type='dt') - s = df.iloc[:5, 0] - joins = 'left', 'right', 'inner', 'outer' - - for join in joins: - with tm.assertRaisesRegexp(ValueError, 'can only call with other ' - 'PeriodIndex-ed objects'): - df.columns.join(s.index, how=join) - - def test_factorize(self): - idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', - '2014-02', '2014-03', '2014-03']) - - exp_arr = np.array([0, 0, 1, 1, 2, 2]) - exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) - - arr, idx = idx1.factorize() - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - arr, idx = idx1.factorize(sort=True) - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - # tz must be preserved - idx1 = idx1.tz_localize('Asia/Tokyo') - exp_idx = exp_idx.tz_localize('Asia/Tokyo') - - arr, idx = idx1.factorize() - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01', - '2014-03', '2014-01']) - - exp_arr = np.array([2, 2, 1, 0, 2, 0]) - exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) - arr, idx = idx2.factorize(sort=True) - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - exp_arr = np.array([0, 0, 1, 2, 0, 2]) - exp_idx = 
DatetimeIndex(['2014-03', '2014-02', '2014-01']) - arr, idx = idx2.factorize() - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(exp_idx)) - - # freq must be preserved - idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo') - exp_arr = np.array([0, 1, 2, 3]) - arr, idx = idx3.factorize() - self.assert_numpy_array_equal(arr, exp_arr) - self.assertTrue(idx.equals(idx3)) - - - def test_slice_with_negative_step(self): - ts = Series(np.arange(20), - date_range('2014-01-01', periods=20, freq='MS')) - SLC = pd.IndexSlice - - def assert_slices_equivalent(l_slc, i_slc): - assert_series_equal(ts[l_slc], ts.iloc[i_slc]) - assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) - assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc]) - - assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1]) - assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1]) - - assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1]) - assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1]) - - assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp('2014-10-01'):-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1], SLC[13:8:-1]) - - assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0]) - - def test_slice_with_zero_step_raises(self): - ts = Series(np.arange(20), - date_range('2014-01-01', periods=20, freq='MS')) - self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', - lambda: ts[::0]) - self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', - lambda: ts.loc[::0]) - self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', - lambda: ts.ix[::0]) - - - -class TestDatetime64(tm.TestCase): - """ - Also test support for datetime64[ns] in Series / DataFrame - """ - - def setUp(self): - dti = 
DatetimeIndex(start=datetime(2005, 1, 1), - end=datetime(2005, 1, 10), freq='Min') - self.series = Series(rand(len(dti)), dti) - - def test_datetimeindex_accessors(self): - dti = DatetimeIndex( - freq='D', start=datetime(1998, 1, 1), periods=365) - - self.assertEqual(dti.year[0], 1998) - self.assertEqual(dti.month[0], 1) - self.assertEqual(dti.day[0], 1) - self.assertEqual(dti.hour[0], 0) - self.assertEqual(dti.minute[0], 0) - self.assertEqual(dti.second[0], 0) - self.assertEqual(dti.microsecond[0], 0) - self.assertEqual(dti.dayofweek[0], 3) - - self.assertEqual(dti.dayofyear[0], 1) - self.assertEqual(dti.dayofyear[120], 121) - - self.assertEqual(dti.weekofyear[0], 1) - self.assertEqual(dti.weekofyear[120], 18) - - self.assertEqual(dti.quarter[0], 1) - self.assertEqual(dti.quarter[120], 2) - - self.assertEqual(dti.days_in_month[0], 31) - self.assertEqual(dti.days_in_month[90], 30) - - self.assertEqual(dti.is_month_start[0], True) - self.assertEqual(dti.is_month_start[1], False) - self.assertEqual(dti.is_month_start[31], True) - self.assertEqual(dti.is_quarter_start[0], True) - self.assertEqual(dti.is_quarter_start[90], True) - self.assertEqual(dti.is_year_start[0], True) - self.assertEqual(dti.is_year_start[364], False) - self.assertEqual(dti.is_month_end[0], False) - self.assertEqual(dti.is_month_end[30], True) - self.assertEqual(dti.is_month_end[31], False) - self.assertEqual(dti.is_month_end[364], True) - self.assertEqual(dti.is_quarter_end[0], False) - self.assertEqual(dti.is_quarter_end[30], False) - self.assertEqual(dti.is_quarter_end[89], True) - self.assertEqual(dti.is_quarter_end[364], True) - self.assertEqual(dti.is_year_end[0], False) - self.assertEqual(dti.is_year_end[364], True) - - self.assertEqual(len(dti.year), 365) - self.assertEqual(len(dti.month), 365) - self.assertEqual(len(dti.day), 365) - self.assertEqual(len(dti.hour), 365) - self.assertEqual(len(dti.minute), 365) - self.assertEqual(len(dti.second), 365) - 
self.assertEqual(len(dti.microsecond), 365) - self.assertEqual(len(dti.dayofweek), 365) - self.assertEqual(len(dti.dayofyear), 365) - self.assertEqual(len(dti.weekofyear), 365) - self.assertEqual(len(dti.quarter), 365) - self.assertEqual(len(dti.is_month_start), 365) - self.assertEqual(len(dti.is_month_end), 365) - self.assertEqual(len(dti.is_quarter_start), 365) - self.assertEqual(len(dti.is_quarter_end), 365) - self.assertEqual(len(dti.is_year_start), 365) - self.assertEqual(len(dti.is_year_end), 365) - - dti = DatetimeIndex( - freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4) - - self.assertEqual(sum(dti.is_quarter_start), 0) - self.assertEqual(sum(dti.is_quarter_end), 4) - self.assertEqual(sum(dti.is_year_start), 0) - self.assertEqual(sum(dti.is_year_end), 1) - - # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7 - bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu') - dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) - self.assertRaises(ValueError, lambda: dti.is_month_start) - - dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) - - self.assertEqual(dti.is_month_start[0], 1) - - tests = [ - (Timestamp('2013-06-01', offset='M').is_month_start, 1), - (Timestamp('2013-06-01', offset='BM').is_month_start, 0), - (Timestamp('2013-06-03', offset='M').is_month_start, 0), - (Timestamp('2013-06-03', offset='BM').is_month_start, 1), - (Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1), - (Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1), - (Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1), - (Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1), - (Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1), - (Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1), - (Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1), - (Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0), - (Timestamp('2013-03-31', 
offset='QS-FEB').is_year_end, 0), - (Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1), - (Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1), - (Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1), - (Timestamp('2013-06-30', offset='BQ').is_month_end, 0), - (Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0), - (Timestamp('2013-06-30', offset='BQ').is_year_end, 0), - (Timestamp('2013-06-28', offset='BQ').is_month_end, 1), - (Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1), - (Timestamp('2013-06-28', offset='BQ').is_year_end, 0), - (Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0), - (Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0), - (Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0), - (Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1), - (Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1), - (Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1), - (Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1), - (Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1), - (Timestamp('2012-02-01').days_in_month, 29), - (Timestamp('2013-02-01').days_in_month, 28)] - - for ts, value in tests: - self.assertEqual(ts, value) - - - def test_nanosecond_field(self): - dti = DatetimeIndex(np.arange(10)) - - self.assert_numpy_array_equal(dti.nanosecond, np.arange(10)) - - def test_datetimeindex_diff(self): - dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31), - periods=100) - dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31), - periods=98) - self.assertEqual(len(dti1.difference(dti2)), 2) - - def test_fancy_getitem(self): - dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1), - end=datetime(2010, 1, 1)) - - s = Series(np.arange(len(dti)), index=dti) - - self.assertEqual(s[48], 48) - self.assertEqual(s['1/2/2009'], 48) - self.assertEqual(s['2009-1-2'], 48) - self.assertEqual(s[datetime(2009, 1, 2)], 48) - 
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48) - self.assertRaises(KeyError, s.__getitem__, '2009-1-3') - - assert_series_equal(s['3/6/2009':'2009-06-05'], - s[datetime(2009, 3, 6):datetime(2009, 6, 5)]) - - def test_fancy_setitem(self): - dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1), - end=datetime(2010, 1, 1)) - - s = Series(np.arange(len(dti)), index=dti) - s[48] = -1 - self.assertEqual(s[48], -1) - s['1/2/2009'] = -2 - self.assertEqual(s[48], -2) - s['1/2/2009':'2009-06-05'] = -3 - self.assertTrue((s[48:54] == -3).all()) - - def test_datetimeindex_constructor(self): - arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04'] - self.assertRaises(Exception, DatetimeIndex, arr) - - arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'] - idx1 = DatetimeIndex(arr) - - arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04'] - idx2 = DatetimeIndex(arr) - - arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005', - '2005-01-04'] - idx3 = DatetimeIndex(arr) - - arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005', - '2005-01-04'], dtype='O') - idx4 = DatetimeIndex(arr) - - arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']) - idx5 = DatetimeIndex(arr) - - arr = to_datetime( - ['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04']) - idx6 = DatetimeIndex(arr) - - idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True) - idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False, - yearfirst=True) - self.assertTrue(idx7.equals(idx8)) - - for other in [idx2, idx3, idx4, idx5, idx6]: - self.assertTrue((idx1.values == other.values).all()) - - sdate = datetime(1999, 12, 25) - edate = datetime(2000, 1, 1) - idx = DatetimeIndex(start=sdate, freq='1B', periods=20) - self.assertEqual(len(idx), 20) - self.assertEqual(idx[0], sdate + 0 * datetools.bday) - self.assertEqual(idx.freq, 'B') - - idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20) - self.assertEqual(len(idx), 20) - 
self.assertEqual(idx[-1], edate) - self.assertEqual(idx.freq, '5D') - - idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN') - idx2 = DatetimeIndex(start=sdate, end=edate, - freq=datetools.Week(weekday=6)) - self.assertEqual(len(idx1), len(idx2)) - self.assertEqual(idx1.offset, idx2.offset) - - idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS') - idx2 = DatetimeIndex(start=sdate, end=edate, - freq=datetools.QuarterBegin(startingMonth=1)) - self.assertEqual(len(idx1), len(idx2)) - self.assertEqual(idx1.offset, idx2.offset) - - idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ') - idx2 = DatetimeIndex(start=sdate, end=edate, - freq=datetools.BQuarterEnd(startingMonth=12)) - self.assertEqual(len(idx1), len(idx2)) - self.assertEqual(idx1.offset, idx2.offset) - - def test_dayfirst(self): - # GH 5917 - arr = ['10/02/2014', '11/02/2014', '12/02/2014'] - expected = DatetimeIndex([datetime(2014, 2, 10), - datetime(2014, 2, 11), - datetime(2014, 2, 12)]) - idx1 = DatetimeIndex(arr, dayfirst=True) - idx2 = DatetimeIndex(np.array(arr), dayfirst=True) - idx3 = to_datetime(arr, dayfirst=True) - idx4 = to_datetime(np.array(arr), dayfirst=True) - idx5 = DatetimeIndex(Index(arr), dayfirst=True) - idx6 = DatetimeIndex(Series(arr), dayfirst=True) - self.assertTrue(expected.equals(idx1)) - self.assertTrue(expected.equals(idx2)) - self.assertTrue(expected.equals(idx3)) - self.assertTrue(expected.equals(idx4)) - self.assertTrue(expected.equals(idx5)) - self.assertTrue(expected.equals(idx6)) - - def test_dti_snap(self): - dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002', - '1/5/2002', '1/6/2002', '1/7/2002'], freq='D') - - res = dti.snap(freq='W-MON') - exp = date_range('12/31/2001', '1/7/2002', freq='w-mon') - exp = exp.repeat([3, 4]) - self.assertTrue((res == exp).all()) - - res = dti.snap(freq='B') - - exp = date_range('1/1/2002', '1/7/2002', freq='b') - exp = exp.repeat([1, 1, 1, 2, 2]) - self.assertTrue((res == exp).all()) - - def 
test_dti_reset_index_round_trip(self): - dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D') - d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti) - d2 = d1.reset_index() - self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]')) - d3 = d2.set_index('index') - assert_frame_equal(d1, d3, check_names=False) - - # #2329 - stamp = datetime(2012, 11, 22) - df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value']) - df = df.set_index('Date') - - self.assertEqual(df.index[0], stamp) - self.assertEqual(df.reset_index()['Date'][0], stamp) - - def test_dti_set_index_reindex(self): - # GH 6631 - df = DataFrame(np.random.random(6)) - idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern') - idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo') - - df = df.set_index(idx1) - self.assertTrue(df.index.equals(idx1)) - df = df.reindex(idx2) - self.assertTrue(df.index.equals(idx2)) - - def test_datetimeindex_union_join_empty(self): - dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D') - empty = Index([]) - - result = dti.union(empty) - tm.assertIsInstance(result, DatetimeIndex) - self.assertIs(result, result) - - result = dti.join(empty) - tm.assertIsInstance(result, DatetimeIndex) - - def test_series_set_value(self): - # #1561 - - dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)] - index = DatetimeIndex(dates) - - s = Series().set_value(dates[0], 1.) 
- s2 = s.set_value(dates[1], np.nan) - - exp = Series([1., np.nan], index=index) - - assert_series_equal(s2, exp) - - # s = Series(index[:1], index[:1]) - # s2 = s.set_value(dates[1], index[1]) - # self.assertEqual(s2.values.dtype, 'M8[ns]') - - @slow - def test_slice_locs_indexerror(self): - times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) - for i in range(100000)] - s = Series(lrange(100000), times) - s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)] - - def test_slicing_datetimes(self): - - # GH 7523 - - # unique - df = DataFrame(np.arange(4.,dtype='float64'), - index=[datetime(2001, 1, i, 10, 00) for i in [1,2,3,4]]) - result = df.ix[datetime(2001,1,1,10):] - assert_frame_equal(result,df) - result = df.ix[:datetime(2001,1,4,10)] - assert_frame_equal(result,df) - result = df.ix[datetime(2001,1,1,10):datetime(2001,1,4,10)] - assert_frame_equal(result,df) - - result = df.ix[datetime(2001,1,1,11):] - expected = df.iloc[1:] - assert_frame_equal(result,expected) - result = df.ix['20010101 11':] - assert_frame_equal(result,expected) - - # duplicates - df = pd.DataFrame(np.arange(5.,dtype='float64'), - index=[datetime(2001, 1, i, 10, 00) for i in [1,2,2,3,4]]) - - result = df.ix[datetime(2001,1,1,10):] - assert_frame_equal(result,df) - result = df.ix[:datetime(2001,1,4,10)] - assert_frame_equal(result,df) - result = df.ix[datetime(2001,1,1,10):datetime(2001,1,4,10)] - assert_frame_equal(result,df) - - result = df.ix[datetime(2001,1,1,11):] - expected = df.iloc[1:] - assert_frame_equal(result,expected) - result = df.ix['20010101 11':] - assert_frame_equal(result,expected) - -class TestSeriesDatetime64(tm.TestCase): - - def setUp(self): - self.series = Series(date_range('1/1/2000', periods=10)) - - def test_auto_conversion(self): - series = Series(list(date_range('1/1/2000', periods=10))) - self.assertEqual(series.dtype, 'M8[ns]') - - def test_constructor_cant_cast_datetime64(self): - self.assertRaises(TypeError, Series, - date_range('1/1/2000', periods=10), 
dtype=float) - - def test_series_comparison_scalars(self): - val = datetime(2000, 1, 4) - result = self.series > val - expected = np.array([x > val for x in self.series]) - self.assert_numpy_array_equal(result, expected) - - val = self.series[5] - result = self.series > val - expected = np.array([x > val for x in self.series]) - self.assert_numpy_array_equal(result, expected) - - def test_between(self): - left, right = self.series[[2, 7]] - - result = self.series.between(left, right) - expected = (self.series >= left) & (self.series <= right) - assert_series_equal(result, expected) - - #---------------------------------------------------------------------- - # NaT support - - def test_NaT_scalar(self): - series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]') - - val = series[3] - self.assertTrue(com.isnull(val)) - - series[2] = val - self.assertTrue(com.isnull(series[2])) - - def test_NaT_cast(self): - # GH10747 - result = Series([np.nan]).astype('M8[ns]') - expected = Series([NaT]) - assert_series_equal(result, expected) - - def test_set_none_nan(self): - self.series[3] = None - self.assertIs(self.series[3], NaT) - - self.series[3:5] = None - self.assertIs(self.series[4], NaT) - - self.series[5] = np.nan - self.assertIs(self.series[5], NaT) - - self.series[5:7] = np.nan - self.assertIs(self.series[6], NaT) - - def test_intercept_astype_object(self): - - # this test no longer makes sense as series is by default already M8[ns] - expected = self.series.astype('object') - - df = DataFrame({'a': self.series, - 'b': np.random.randn(len(self.series))}) - - result = df.values.squeeze() - self.assertTrue((result[:, 0] == expected.values).all()) - - df = DataFrame({'a': self.series, - 'b': ['foo'] * len(self.series)}) - - result = df.values.squeeze() - self.assertTrue((result[:, 0] == expected.values).all()) - - def test_union(self): - rng1 = date_range('1/1/1999', '1/1/2012', freq='MS') - s1 = Series(np.random.randn(len(rng1)), rng1) - - rng2 = date_range('1/1/1980', 
'12/1/2001', freq='MS') - s2 = Series(np.random.randn(len(rng2)), rng2) - df = DataFrame({'s1': s1, 's2': s2}) - self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]')) - - def test_intersection(self): - # GH 4690 (with tz) - for tz in [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']: - base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') - - # if target has the same name, it is preserved - rng2 = date_range('5/15/2000', '6/20/2000', freq='D', name='idx') - expected2 = date_range('6/1/2000', '6/20/2000', freq='D', name='idx') - - # if target name is different, it will be reset - rng3 = date_range('5/15/2000', '6/20/2000', freq='D', name='other') - expected3 = date_range('6/1/2000', '6/20/2000', freq='D', name=None) - - rng4 = date_range('7/1/2000', '7/31/2000', freq='D', name='idx') - expected4 = DatetimeIndex([], name='idx') - - for (rng, expected) in [(rng2, expected2), (rng3, expected3), (rng4, expected4)]: - result = base.intersection(rng) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - self.assertEqual(result.freq, expected.freq) - self.assertEqual(result.tz, expected.tz) - - # non-monotonic - base = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-02', '2011-01-03'], - tz=tz, name='idx') - - rng2 = DatetimeIndex(['2011-01-04', '2011-01-02', '2011-02-02', '2011-02-03'], - tz=tz, name='idx') - expected2 = DatetimeIndex(['2011-01-04', '2011-01-02'], tz=tz, name='idx') - - rng3 = DatetimeIndex(['2011-01-04', '2011-01-02', '2011-02-02', '2011-02-03'], - tz=tz, name='other') - expected3 = DatetimeIndex(['2011-01-04', '2011-01-02'], tz=tz, name=None) - - # GH 7880 - rng4 = date_range('7/1/2000', '7/31/2000', freq='D', tz=tz, name='idx') - expected4 = DatetimeIndex([], tz=tz, name='idx') - - for (rng, expected) in [(rng2, expected2), (rng3, expected3), (rng4, expected4)]: - result = base.intersection(rng) - self.assertTrue(result.equals(expected)) - self.assertEqual(result.name, expected.name) - 
self.assertIsNone(result.freq) - self.assertEqual(result.tz, expected.tz) - - # empty same freq GH2129 - rng = date_range('6/1/2000', '6/15/2000', freq='T') - result = rng[0:0].intersection(rng) - self.assertEqual(len(result), 0) - - result = rng.intersection(rng[0:0]) - self.assertEqual(len(result), 0) - - def test_date_range_bms_bug(self): - # #1645 - rng = date_range('1/1/2000', periods=10, freq='BMS') - - ex_first = Timestamp('2000-01-03') - self.assertEqual(rng[0], ex_first) - - def test_date_range_businesshour(self): - idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', - '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', - '2014-07-04 15:00', '2014-07-04 16:00'], freq='BH') - rng = date_range('2014-07-04 09:00', '2014-07-04 16:00', freq='BH') - tm.assert_index_equal(idx, rng) - - idx = DatetimeIndex(['2014-07-04 16:00', '2014-07-07 09:00'], freq='BH') - rng = date_range('2014-07-04 16:00', '2014-07-07 09:00', freq='BH') - tm.assert_index_equal(idx, rng) - - idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', - '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', - '2014-07-04 15:00', '2014-07-04 16:00', - '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00', - '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00', - '2014-07-07 15:00', '2014-07-07 16:00', - '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00', - '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00', - '2014-07-08 15:00', '2014-07-08 16:00'], freq='BH') - rng = date_range('2014-07-04 09:00', '2014-07-08 16:00', freq='BH') - tm.assert_index_equal(idx, rng) - - def test_string_index_series_name_converted(self): - # #1644 - df = DataFrame(np.random.randn(10, 4), - index=date_range('1/1/2000', periods=10)) - - result = df.ix['1/3/2000'] - self.assertEqual(result.name, df.index[2]) - - result = df.T['1/3/2000'] - self.assertEqual(result.name, df.index[2]) - - -class TestTimestamp(tm.TestCase): - - def 
test_class_ops_pytz(self): - tm._skip_if_no_pytz() - from pytz import timezone - - def compare(x, y): - self.assertEqual(int(Timestamp(x).value / 1e9), int(Timestamp(y).value / 1e9)) - - compare(Timestamp.now(), datetime.now()) - compare(Timestamp.now('UTC'), datetime.now(timezone('UTC'))) - compare(Timestamp.utcnow(), datetime.utcnow()) - compare(Timestamp.today(), datetime.today()) - current_time = calendar.timegm(datetime.now().utctimetuple()) - compare(Timestamp.utcfromtimestamp(current_time), - datetime.utcfromtimestamp(current_time)) - compare(Timestamp.fromtimestamp(current_time), - datetime.fromtimestamp(current_time)) - - date_component = datetime.utcnow() - time_component = (date_component + timedelta(minutes=10)).time() - compare(Timestamp.combine(date_component, time_component), - datetime.combine(date_component, time_component)) - - def test_class_ops_dateutil(self): - tm._skip_if_no_dateutil() - from dateutil.tz import tzutc - - def compare(x,y): - self.assertEqual(int(np.round(Timestamp(x).value/1e9)), int(np.round(Timestamp(y).value/1e9))) - - compare(Timestamp.now(),datetime.now()) - compare(Timestamp.now('UTC'), datetime.now(tzutc())) - compare(Timestamp.utcnow(),datetime.utcnow()) - compare(Timestamp.today(),datetime.today()) - current_time = calendar.timegm(datetime.now().utctimetuple()) - compare(Timestamp.utcfromtimestamp(current_time), - datetime.utcfromtimestamp(current_time)) - compare(Timestamp.fromtimestamp(current_time), - datetime.fromtimestamp(current_time)) - - date_component = datetime.utcnow() - time_component = (date_component + timedelta(minutes=10)).time() - compare(Timestamp.combine(date_component, time_component), - datetime.combine(date_component, time_component)) - - def test_basics_nanos(self): - val = np.int64(946684800000000000).view('M8[ns]') - stamp = Timestamp(val.view('i8') + 500) - self.assertEqual(stamp.year, 2000) - self.assertEqual(stamp.month, 1) - self.assertEqual(stamp.microsecond, 0) - 
self.assertEqual(stamp.nanosecond, 500) - - def test_unit(self): - def check(val,unit=None,h=1,s=1,us=0): - stamp = Timestamp(val, unit=unit) - self.assertEqual(stamp.year, 2000) - self.assertEqual(stamp.month, 1) - self.assertEqual(stamp.day, 1) - self.assertEqual(stamp.hour, h) - if unit != 'D': - self.assertEqual(stamp.minute, 1) - self.assertEqual(stamp.second, s) - self.assertEqual(stamp.microsecond, us) - else: - self.assertEqual(stamp.minute, 0) - self.assertEqual(stamp.second, 0) - self.assertEqual(stamp.microsecond, 0) - self.assertEqual(stamp.nanosecond, 0) - - ts = Timestamp('20000101 01:01:01') - val = ts.value - days = (ts - Timestamp('1970-01-01')).days - - check(val) - check(val/long(1000),unit='us') - check(val/long(1000000),unit='ms') - check(val/long(1000000000),unit='s') - check(days,unit='D',h=0) - - # using truediv, so these are like floats - if compat.PY3: - check((val+500000)/long(1000000000),unit='s',us=500) - check((val+500000000)/long(1000000000),unit='s',us=500000) - check((val+500000)/long(1000000),unit='ms',us=500) - - # get chopped in py2 - else: - check((val+500000)/long(1000000000),unit='s') - check((val+500000000)/long(1000000000),unit='s') - check((val+500000)/long(1000000),unit='ms') - - # ok - check((val+500000)/long(1000),unit='us',us=500) - check((val+500000000)/long(1000000),unit='ms',us=500000) - - # floats - check(val/1000.0 + 5,unit='us',us=5) - check(val/1000.0 + 5000,unit='us',us=5000) - check(val/1000000.0 + 0.5,unit='ms',us=500) - check(val/1000000.0 + 0.005,unit='ms',us=5) - check(val/1000000000.0 + 0.5,unit='s',us=500000) - check(days + 0.5,unit='D',h=12) - - # nan - result = Timestamp(np.nan) - self.assertIs(result, NaT) - - result = Timestamp(None) - self.assertIs(result, NaT) - - result = Timestamp(iNaT) - self.assertIs(result, NaT) - - result = Timestamp(NaT) - self.assertIs(result, NaT) - - result = Timestamp('NaT') - self.assertIs(result, NaT) - - def test_roundtrip(self): - - # test value to string and back 
conversions - # further test accessors - base = Timestamp('20140101 00:00:00') - - result = Timestamp(base.value + pd.Timedelta('5ms').value) - self.assertEqual(result,Timestamp(str(base) + ".005000")) - self.assertEqual(result.microsecond,5000) - - result = Timestamp(base.value + pd.Timedelta('5us').value) - self.assertEqual(result,Timestamp(str(base) + ".000005")) - self.assertEqual(result.microsecond,5) - - result = Timestamp(base.value + pd.Timedelta('5ns').value) - self.assertEqual(result,Timestamp(str(base) + ".000000005")) - self.assertEqual(result.nanosecond,5) - self.assertEqual(result.microsecond,0) - - result = Timestamp(base.value + pd.Timedelta('6ms 5us').value) - self.assertEqual(result,Timestamp(str(base) + ".006005")) - self.assertEqual(result.microsecond,5+6*1000) - - result = Timestamp(base.value + pd.Timedelta('200ms 5us').value) - self.assertEqual(result,Timestamp(str(base) + ".200005")) - self.assertEqual(result.microsecond,5+200*1000) - - def test_comparison(self): - # 5-18-2012 00:00:00.000 - stamp = long(1337299200000000000) - - val = Timestamp(stamp) - - self.assertEqual(val, val) - self.assertFalse(val != val) - self.assertFalse(val < val) - self.assertTrue(val <= val) - self.assertFalse(val > val) - self.assertTrue(val >= val) - - other = datetime(2012, 5, 18) - self.assertEqual(val, other) - self.assertFalse(val != other) - self.assertFalse(val < other) - self.assertTrue(val <= other) - self.assertFalse(val > other) - self.assertTrue(val >= other) - - other = Timestamp(stamp + 100) - - self.assertNotEqual(val, other) - self.assertNotEqual(val, other) - self.assertTrue(val < other) - self.assertTrue(val <= other) - self.assertTrue(other > val) - self.assertTrue(other >= val) - - def test_compare_invalid(self): - - # GH 8058 - val = Timestamp('20130101 12:01:02') - self.assertFalse(val == 'foo') - self.assertFalse(val == 10.0) - self.assertFalse(val == 1) - self.assertFalse(val == long(1)) - self.assertFalse(val == []) - 
self.assertFalse(val == {'foo' : 1}) - self.assertFalse(val == np.float64(1)) - self.assertFalse(val == np.int64(1)) - - self.assertTrue(val != 'foo') - self.assertTrue(val != 10.0) - self.assertTrue(val != 1) - self.assertTrue(val != long(1)) - self.assertTrue(val != []) - self.assertTrue(val != {'foo' : 1}) - self.assertTrue(val != np.float64(1)) - self.assertTrue(val != np.int64(1)) - - # ops testing - df = DataFrame(randn(5,2)) - a = df[0] - b = Series(randn(5)) - b.name = Timestamp('2000-01-01') - tm.assert_series_equal(a / b, 1 / (b / a)) - - def test_cant_compare_tz_naive_w_aware(self): - tm._skip_if_no_pytz() - # #1404 - a = Timestamp('3/12/2012') - b = Timestamp('3/12/2012', tz='utc') - - self.assertRaises(Exception, a.__eq__, b) - self.assertRaises(Exception, a.__ne__, b) - self.assertRaises(Exception, a.__lt__, b) - self.assertRaises(Exception, a.__gt__, b) - self.assertRaises(Exception, b.__eq__, a) - self.assertRaises(Exception, b.__ne__, a) - self.assertRaises(Exception, b.__lt__, a) - self.assertRaises(Exception, b.__gt__, a) - - if sys.version_info < (3, 3): - self.assertRaises(Exception, a.__eq__, b.to_pydatetime()) - self.assertRaises(Exception, a.to_pydatetime().__eq__, b) - else: - self.assertFalse(a == b.to_pydatetime()) - self.assertFalse(a.to_pydatetime() == b) - - def test_cant_compare_tz_naive_w_aware_explicit_pytz(self): - tm._skip_if_no_pytz() - from pytz import utc - # #1404 - a = Timestamp('3/12/2012') - b = Timestamp('3/12/2012', tz=utc) - - self.assertRaises(Exception, a.__eq__, b) - self.assertRaises(Exception, a.__ne__, b) - self.assertRaises(Exception, a.__lt__, b) - self.assertRaises(Exception, a.__gt__, b) - self.assertRaises(Exception, b.__eq__, a) - self.assertRaises(Exception, b.__ne__, a) - self.assertRaises(Exception, b.__lt__, a) - self.assertRaises(Exception, b.__gt__, a) - - if sys.version_info < (3, 3): - self.assertRaises(Exception, a.__eq__, b.to_pydatetime()) - self.assertRaises(Exception, a.to_pydatetime().__eq__, b) 
- else: - self.assertFalse(a == b.to_pydatetime()) - self.assertFalse(a.to_pydatetime() == b) - - def test_cant_compare_tz_naive_w_aware_dateutil(self): - tm._skip_if_no_dateutil() - from dateutil.tz import tzutc - utc = tzutc() - # #1404 - a = Timestamp('3/12/2012') - b = Timestamp('3/12/2012', tz=utc) - - self.assertRaises(Exception, a.__eq__, b) - self.assertRaises(Exception, a.__ne__, b) - self.assertRaises(Exception, a.__lt__, b) - self.assertRaises(Exception, a.__gt__, b) - self.assertRaises(Exception, b.__eq__, a) - self.assertRaises(Exception, b.__ne__, a) - self.assertRaises(Exception, b.__lt__, a) - self.assertRaises(Exception, b.__gt__, a) - - if sys.version_info < (3, 3): - self.assertRaises(Exception, a.__eq__, b.to_pydatetime()) - self.assertRaises(Exception, a.to_pydatetime().__eq__, b) - else: - self.assertFalse(a == b.to_pydatetime()) - self.assertFalse(a.to_pydatetime() == b) - - def test_delta_preserve_nanos(self): - val = Timestamp(long(1337299200000000123)) - result = val + timedelta(1) - self.assertEqual(result.nanosecond, val.nanosecond) - - def test_frequency_misc(self): - self.assertEqual(frequencies.get_freq_group('T'), - frequencies.FreqGroup.FR_MIN) - - code, stride = frequencies.get_freq_code(offsets.Hour()) - self.assertEqual(code, frequencies.FreqGroup.FR_HR) - - code, stride = frequencies.get_freq_code((5, 'T')) - self.assertEqual(code, frequencies.FreqGroup.FR_MIN) - self.assertEqual(stride, 5) - - offset = offsets.Hour() - result = frequencies.to_offset(offset) - self.assertEqual(result, offset) - - result = frequencies.to_offset((5, 'T')) - expected = offsets.Minute(5) - self.assertEqual(result, expected) - - self.assertRaises(ValueError, frequencies.get_freq_code, (5, 'baz')) - - self.assertRaises(ValueError, frequencies.to_offset, '100foo') - - self.assertRaises(ValueError, frequencies.to_offset, ('', '')) - - result = frequencies.get_standard_freq(offsets.Hour()) - self.assertEqual(result, 'H') - - def 
test_hash_equivalent(self): - d = {datetime(2011, 1, 1): 5} - stamp = Timestamp(datetime(2011, 1, 1)) - self.assertEqual(d[stamp], 5) - - def test_timestamp_compare_scalars(self): - # case where ndim == 0 - lhs = np.datetime64(datetime(2013, 12, 6)) - rhs = Timestamp('now') - nat = Timestamp('nat') - - ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq', - 'ne': 'ne'} - - for left, right in ops.items(): - left_f = getattr(operator, left) - right_f = getattr(operator, right) - expected = left_f(lhs, rhs) - - result = right_f(rhs, lhs) - self.assertEqual(result, expected) - - expected = left_f(rhs, nat) - result = right_f(nat, rhs) - self.assertEqual(result, expected) - - def test_timestamp_compare_series(self): - # make sure we can compare Timestamps on the right AND left hand side - # GH4982 - s = Series(date_range('20010101', periods=10), name='dates') - s_nat = s.copy(deep=True) - - s[0] = pd.Timestamp('nat') - s[3] = pd.Timestamp('nat') - - ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} - - for left, right in ops.items(): - left_f = getattr(operator, left) - right_f = getattr(operator, right) - - # no nats - expected = left_f(s, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s) - tm.assert_series_equal(result, expected) - - # nats - expected = left_f(s, Timestamp('nat')) - result = right_f(Timestamp('nat'), s) - tm.assert_series_equal(result, expected) - - # compare to timestamp with series containing nats - expected = left_f(s_nat, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s_nat) - tm.assert_series_equal(result, expected) - - # compare to nat with series containing nats - expected = left_f(s_nat, Timestamp('nat')) - result = right_f(Timestamp('nat'), s_nat) - tm.assert_series_equal(result, expected) - - -class TestSlicing(tm.TestCase): - - def test_slice_year(self): - dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500) - - s = Series(np.arange(len(dti)), index=dti) - result = 
s['2005'] - expected = s[s.index.year == 2005] - assert_series_equal(result, expected) - - df = DataFrame(np.random.rand(len(dti), 5), index=dti) - result = df.ix['2005'] - expected = df[df.index.year == 2005] - assert_frame_equal(result, expected) - - rng = date_range('1/1/2000', '1/1/2010') - - result = rng.get_loc('2009') - expected = slice(3288, 3653) - self.assertEqual(result, expected) - - def test_slice_quarter(self): - dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500) - - s = Series(np.arange(len(dti)), index=dti) - self.assertEqual(len(s['2001Q1']), 90) - - df = DataFrame(np.random.rand(len(dti), 5), index=dti) - self.assertEqual(len(df.ix['1Q01']), 90) - - def test_slice_month(self): - dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500) - s = Series(np.arange(len(dti)), index=dti) - self.assertEqual(len(s['2005-11']), 30) - - df = DataFrame(np.random.rand(len(dti), 5), index=dti) - self.assertEqual(len(df.ix['2005-11']), 30) - - assert_series_equal(s['2005-11'], s['11-2005']) - - def test_partial_slice(self): - rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500) - s = Series(np.arange(len(rng)), index=rng) - - result = s['2005-05':'2006-02'] - expected = s['20050501':'20060228'] - assert_series_equal(result, expected) - - result = s['2005-05':] - expected = s['20050501':] - assert_series_equal(result, expected) - - result = s[:'2006-02'] - expected = s[:'20060228'] - assert_series_equal(result, expected) - - result = s['2005-1-1'] - self.assertEqual(result, s.iloc[0]) - - self.assertRaises(Exception, s.__getitem__, '2004-12-31') - - def test_partial_slice_daily(self): - rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500) - s = Series(np.arange(len(rng)), index=rng) - - result = s['2005-1-31'] - assert_series_equal(result, s.ix[:24]) - - self.assertRaises(Exception, s.__getitem__, '2004-12-31 00') - - def test_partial_slice_hourly(self): - rng = DatetimeIndex(freq='T', 
start=datetime(2005, 1, 1, 20, 0, 0), - periods=500) - s = Series(np.arange(len(rng)), index=rng) - - result = s['2005-1-1'] - assert_series_equal(result, s.ix[:60 * 4]) - - result = s['2005-1-1 20'] - assert_series_equal(result, s.ix[:60]) - - self.assertEqual(s['2005-1-1 20:00'], s.ix[0]) - self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15') - - def test_partial_slice_minutely(self): - rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0), - periods=500) - s = Series(np.arange(len(rng)), index=rng) - - result = s['2005-1-1 23:59'] - assert_series_equal(result, s.ix[:60]) - - result = s['2005-1-1'] - assert_series_equal(result, s.ix[:60]) - - self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0]) - self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00') - - def test_partial_slice_second_precision(self): - rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59, - microsecond=999990), - periods=20, freq='US') - s = Series(np.arange(20), rng) - - assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10]) - assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10]) - - assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:]) - assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:]) - - self.assertEqual(s[Timestamp('2005-1-1 00:00:59.999990')], s.iloc[0]) - self.assertRaisesRegexp(KeyError, '2005-1-1 00:00:00', - lambda: s['2005-1-1 00:00:00']) - - def test_partial_slicing_with_multiindex(self): - - # GH 4758 - # partial string indexing with a multi-index buggy - df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"], - 'TICKER':["ABC", "MNP", "XYZ", "XYZ"], - 'val':[1,2,3,4]}, - index=date_range("2013-06-19 09:30:00", periods=4, freq='5T')) - df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True) - - expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val']) - result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')] - assert_frame_equal(result, expected) - - expected = 
df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')] - result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')] - assert_series_equal(result, expected) - - # this is a KeyError as we don't do partial string selection on multi-levels - def f(): - df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')] - self.assertRaises(KeyError, f) - - # GH 4294 - # partial slice on a series mi - s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack() - - s2 = s[:-1].copy() - expected = s2['2000-1-4'] - result = s2[pd.Timestamp('2000-1-4')] - assert_series_equal(result, expected) - - result = s[pd.Timestamp('2000-1-4')] - expected = s['2000-1-4'] - assert_series_equal(result, expected) - - df2 = pd.DataFrame(s) - expected = df2.ix['2000-1-4'] - result = df2.ix[pd.Timestamp('2000-1-4')] - assert_frame_equal(result, expected) - - def test_date_range_normalize(self): - snap = datetime.today() - n = 50 - - rng = date_range(snap, periods=n, normalize=False, freq='2D') - - offset = timedelta(2) - values = np.array([snap + i * offset for i in range(n)], - dtype='M8[ns]') - - self.assert_numpy_array_equal(rng, values) - - rng = date_range( - '1/1/2000 08:15', periods=n, normalize=False, freq='B') - the_time = time(8, 15) - for val in rng: - self.assertEqual(val.time(), the_time) - - def test_timedelta(self): - # this is valid too - index = date_range('1/1/2000', periods=50, freq='B') - shifted = index + timedelta(1) - back = shifted + timedelta(-1) - self.assertTrue(tm.equalContents(index, back)) - self.assertEqual(shifted.freq, index.freq) - self.assertEqual(shifted.freq, back.freq) - - result = index - timedelta(1) - expected = index + timedelta(-1) - self.assertTrue(result.equals(expected)) - - # GH4134, buggy with timedeltas - rng = date_range('2013', '2014') - s = Series(rng) - result1 = rng - pd.offsets.Hour(1) - result2 = DatetimeIndex(s - np.timedelta64(100000000)) - result3 = rng - np.timedelta64(100000000) - result4 = 
DatetimeIndex(s - pd.offsets.Hour(1)) - self.assertTrue(result1.equals(result4)) - self.assertTrue(result2.equals(result3)) - - def test_shift(self): - ts = Series(np.random.randn(5), - index=date_range('1/1/2000', periods=5, freq='H')) - - result = ts.shift(1, freq='5T') - exp_index = ts.index.shift(1, freq='5T') - self.assertTrue(result.index.equals(exp_index)) - - # GH #1063, multiple of same base - result = ts.shift(1, freq='4H') - exp_index = ts.index + datetools.Hour(4) - self.assertTrue(result.index.equals(exp_index)) - - idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) - self.assertRaises(ValueError, idx.shift, 1) - - def test_setops_preserve_freq(self): - for tz in [None, 'Asia/Tokyo', 'US/Eastern']: - rng = date_range('1/1/2000', '1/1/2002', name='idx', tz=tz) - - result = rng[:50].union(rng[50:100]) - self.assertEqual(result.name, rng.name) - self.assertEqual(result.freq, rng.freq) - self.assertEqual(result.tz, rng.tz) - - result = rng[:50].union(rng[30:100]) - self.assertEqual(result.name, rng.name) - self.assertEqual(result.freq, rng.freq) - self.assertEqual(result.tz, rng.tz) - - result = rng[:50].union(rng[60:100]) - self.assertEqual(result.name, rng.name) - self.assertIsNone(result.freq) - self.assertEqual(result.tz, rng.tz) - - result = rng[:50].intersection(rng[25:75]) - self.assertEqual(result.name, rng.name) - self.assertEqual(result.freqstr, 'D') - self.assertEqual(result.tz, rng.tz) - - nofreq = DatetimeIndex(list(rng[25:75]), name='other') - result = rng[:50].union(nofreq) - self.assertIsNone(result.name) - self.assertEqual(result.freq, rng.freq) - self.assertEqual(result.tz, rng.tz) - - result = rng[:50].intersection(nofreq) - self.assertIsNone(result.name) - self.assertEqual(result.freq, rng.freq) - self.assertEqual(result.tz, rng.tz) - - def test_min_max(self): - rng = date_range('1/1/2000', '12/31/2000') - rng2 = rng.take(np.random.permutation(len(rng))) - - the_min = rng2.min() - the_max = rng2.max() - 
tm.assertIsInstance(the_min, Timestamp) - tm.assertIsInstance(the_max, Timestamp) - self.assertEqual(the_min, rng[0]) - self.assertEqual(the_max, rng[-1]) - - self.assertEqual(rng.min(), rng[0]) - self.assertEqual(rng.max(), rng[-1]) - - def test_min_max_series(self): - rng = date_range('1/1/2000', periods=10, freq='4h') - lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C'] - df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)), - 'L': lvls}) - - result = df.TS.max() - exp = Timestamp(df.TS.iat[-1]) - self.assertTrue(isinstance(result, Timestamp)) - self.assertEqual(result, exp) - - result = df.TS.min() - exp = Timestamp(df.TS.iat[0]) - self.assertTrue(isinstance(result, Timestamp)) - self.assertEqual(result, exp) - - def test_from_M8_structured(self): - dates = [(datetime(2012, 9, 9, 0, 0), - datetime(2012, 9, 8, 15, 10))] - arr = np.array(dates, - dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')]) - df = DataFrame(arr) - - self.assertEqual(df['Date'][0], dates[0][0]) - self.assertEqual(df['Forecasting'][0], dates[0][1]) - - s = Series(arr['Date']) - self.assertTrue(s[0], Timestamp) - self.assertEqual(s[0], dates[0][0]) - - s = Series.from_array(arr['Date'], Index([0])) - self.assertEqual(s[0], dates[0][0]) - - def test_get_level_values_box(self): - from pandas import MultiIndex - - dates = date_range('1/1/2000', periods=4) - levels = [dates, [0, 1]] - labels = [[0, 0, 1, 1, 2, 2, 3, 3], - [0, 1, 0, 1, 0, 1, 0, 1]] - - index = MultiIndex(levels=levels, labels=labels) - - self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp)) - - def test_frame_apply_dont_convert_datetime64(self): - from pandas.tseries.offsets import BDay - df = DataFrame({'x1': [datetime(1996, 1, 1)]}) - - df = df.applymap(lambda x: x + BDay()) - df = df.applymap(lambda x: x + BDay()) - - self.assertTrue(df.x1.dtype == 'M8[ns]') - - def test_date_range_fy5252(self): - dr = date_range(start="2013-01-01", - periods=2, - freq=offsets.FY5253(startingMonth=1, - weekday=3, - 
variation="nearest")) - self.assertEqual(dr[0], Timestamp('2013-01-31')) - self.assertEqual(dr[1], Timestamp('2014-01-30')) - - def test_partial_slice_doesnt_require_monotonicity(self): - # For historical reasons. - s = pd.Series(np.arange(10), - pd.date_range('2014-01-01', periods=10)) - - nonmonotonic = s[[3, 5, 4]] - expected = nonmonotonic.iloc[:0] - timestamp = pd.Timestamp('2014-01-10') - - assert_series_equal(nonmonotonic['2014-01-10':], expected) - self.assertRaisesRegexp(KeyError, "Timestamp\('2014-01-10 00:00:00'\)", - lambda: nonmonotonic[timestamp:]) - - assert_series_equal(nonmonotonic.ix['2014-01-10':], expected) - self.assertRaisesRegexp(KeyError, "Timestamp\('2014-01-10 00:00:00'\)", - lambda: nonmonotonic.ix[timestamp:]) - - -class TimeConversionFormats(tm.TestCase): - def test_to_datetime_format(self): - values = ['1/1/2000', '1/2/2000', '1/3/2000'] - - results1 = [ Timestamp('20000101'), Timestamp('20000201'), - Timestamp('20000301') ] - results2 = [ Timestamp('20000101'), Timestamp('20000102'), - Timestamp('20000103') ] - for vals, expecteds in [ (values, (Index(results1), Index(results2))), - (Series(values),(Series(results1), Series(results2))), - (values[0], (results1[0], results2[0])), - (values[1], (results1[1], results2[1])), - (values[2], (results1[2], results2[2])) ]: - - for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']): - result = to_datetime(vals, format=fmt) - expected = expecteds[i] - - if isinstance(expected, Series): - assert_series_equal(result, Series(expected)) - elif isinstance(expected, Timestamp): - self.assertEqual(result, expected) - else: - self.assertTrue(result.equals(expected)) - - def test_to_datetime_format_YYYYMMDD(self): - s = Series([19801222,19801222] + [19810105]*5) - expected = Series([ Timestamp(x) for x in s.apply(str) ]) - - result = to_datetime(s,format='%Y%m%d') - assert_series_equal(result, expected) - - result = to_datetime(s.apply(str),format='%Y%m%d') - assert_series_equal(result, expected) - - # with 
NaT - expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5) - expected[2] = np.nan - s[2] = np.nan - - result = to_datetime(s,format='%Y%m%d') - assert_series_equal(result, expected) - - # string with NaT - s = s.apply(str) - s[2] = 'nat' - result = to_datetime(s,format='%Y%m%d') - assert_series_equal(result, expected) - - # coercion - # GH 7930 - s = Series([20121231, 20141231, 99991231]) - result = pd.to_datetime(s,format='%Y%m%d',errors='ignore') - expected = np.array([ datetime(2012,12,31), datetime(2014,12,31), datetime(9999,12,31) ], dtype=object) - self.assert_numpy_array_equal(result, expected) - - result = pd.to_datetime(s,format='%Y%m%d', errors='coerce') - expected = Series(['20121231','20141231','NaT'],dtype='M8[ns]') - assert_series_equal(result, expected) - - # GH 10178 - def test_to_datetime_format_integer(self): - s = Series([2000, 2001, 2002]) - expected = Series([ Timestamp(x) for x in s.apply(str) ]) - - result = to_datetime(s,format='%Y') - assert_series_equal(result, expected) - - s = Series([200001, 200105, 200206]) - expected = Series([ Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str) ]) - - result = to_datetime(s,format='%Y%m') - assert_series_equal(result, expected) - - def test_to_datetime_format_microsecond(self): - val = '01-Apr-2011 00:00:01.978' - format = '%d-%b-%Y %H:%M:%S.%f' - result = to_datetime(val, format=format) - exp = datetime.strptime(val, format) - self.assertEqual(result, exp) - - def test_to_datetime_format_time(self): - data = [ - ['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')], - ['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')], - ['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#, - #['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')], - #['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')], - #['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', 
Timestamp('2010-01-10 09:12:56')] - ] - for s, format, dt in data: - self.assertEqual(to_datetime(s, format=format), dt) - - def test_to_datetime_with_non_exact(self): - # GH 10834 - _skip_if_has_locale() - - # 8904 - # exact kw - if sys.version_info < (2, 7): - raise nose.SkipTest('on python version < 2.7') - - s = Series(['19MAY11','foobar19MAY11','19MAY11:00:00:00','19MAY11 00:00:00Z']) - result = to_datetime(s,format='%d%b%y',exact=False) - expected = to_datetime(s.str.extract('(\d+\w+\d+)'),format='%d%b%y') - assert_series_equal(result, expected) - - def test_parse_nanoseconds_with_formula(self): - - # GH8989 - # trunctaing the nanoseconds when a format was provided - for v in ["2012-01-01 09:00:00.000000001", - "2012-01-01 09:00:00.000001", - "2012-01-01 09:00:00.001", - "2012-01-01 09:00:00.001000", - "2012-01-01 09:00:00.001000000", - ]: - expected = pd.to_datetime(v) - result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f") - self.assertEqual(result,expected) - - def test_to_datetime_format_weeks(self): - data = [ - ['2009324', '%Y%W%w', Timestamp('2009-08-13')], - ['2013020', '%Y%U%w', Timestamp('2013-01-13')] - ] - for s, format, dt in data: - self.assertEqual(to_datetime(s, format=format), dt) - -class TestToDatetimeInferFormat(tm.TestCase): - def test_to_datetime_infer_datetime_format_consistent_format(self): - time_series = pd.Series( - pd.date_range('20000101', periods=50, freq='H') - ) - - test_formats = [ - '%m-%d-%Y', - '%m/%d/%Y %H:%M:%S.%f', - '%Y-%m-%dT%H:%M:%S.%f', - ] - - for test_format in test_formats: - s_as_dt_strings = time_series.apply( - lambda x: x.strftime(test_format) - ) - - with_format = pd.to_datetime(s_as_dt_strings, format=test_format) - no_infer = pd.to_datetime( - s_as_dt_strings, infer_datetime_format=False - ) - yes_infer = pd.to_datetime( - s_as_dt_strings, infer_datetime_format=True - ) - - # Whether the format is explicitly passed, it is inferred, or - # it is not inferred, the results should all be the same - 
self.assert_numpy_array_equal(with_format, no_infer) - self.assert_numpy_array_equal(no_infer, yes_infer) - - def test_to_datetime_infer_datetime_format_inconsistent_format(self): - test_series = pd.Series( - np.array([ - '01/01/2011 00:00:00', - '01-02-2011 00:00:00', - '2011-01-03T00:00:00', - ])) - - # When the format is inconsistent, infer_datetime_format should just - # fallback to the default parsing - self.assert_numpy_array_equal( - pd.to_datetime(test_series, infer_datetime_format=False), - pd.to_datetime(test_series, infer_datetime_format=True) - ) - - test_series = pd.Series( - np.array([ - 'Jan/01/2011', - 'Feb/01/2011', - 'Mar/01/2011', - ])) - - self.assert_numpy_array_equal( - pd.to_datetime(test_series, infer_datetime_format=False), - pd.to_datetime(test_series, infer_datetime_format=True) - ) - - def test_to_datetime_infer_datetime_format_series_with_nans(self): - test_series = pd.Series( - np.array([ - '01/01/2011 00:00:00', - np.nan, - '01/03/2011 00:00:00', - np.nan, - ])) - - self.assert_numpy_array_equal( - pd.to_datetime(test_series, infer_datetime_format=False), - pd.to_datetime(test_series, infer_datetime_format=True) - ) - - def test_to_datetime_infer_datetime_format_series_starting_with_nans(self): - test_series = pd.Series( - np.array([ - np.nan, - np.nan, - '01/01/2011 00:00:00', - '01/02/2011 00:00:00', - '01/03/2011 00:00:00', - ])) - - self.assert_numpy_array_equal( - pd.to_datetime(test_series, infer_datetime_format=False), - pd.to_datetime(test_series, infer_datetime_format=True) - ) - - -class TestGuessDatetimeFormat(tm.TestCase): - def test_guess_datetime_format_with_parseable_formats(self): - dt_string_to_format = ( - ('20111230', '%Y%m%d'), - ('2011-12-30', '%Y-%m-%d'), - ('30-12-2011', '%d-%m-%Y'), - ('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'), - ('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'), - ('2011-12-30 00:00:00.000000', '%Y-%m-%d %H:%M:%S.%f'), - ) - - for dt_string, dt_format in dt_string_to_format: - self.assertEqual( - 
tools._guess_datetime_format(dt_string), - dt_format - ) - - def test_guess_datetime_format_with_dayfirst(self): - ambiguous_string = '01/01/2011' - self.assertEqual( - tools._guess_datetime_format(ambiguous_string, dayfirst=True), - '%d/%m/%Y' - ) - self.assertEqual( - tools._guess_datetime_format(ambiguous_string, dayfirst=False), - '%m/%d/%Y' - ) - - def test_guess_datetime_format_with_locale_specific_formats(self): - # The month names will vary depending on the locale, in which - # case these wont be parsed properly (dateutil can't parse them) - _skip_if_has_locale() - - dt_string_to_format = ( - ('30/Dec/2011', '%d/%b/%Y'), - ('30/December/2011', '%d/%B/%Y'), - ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'), - ) - - for dt_string, dt_format in dt_string_to_format: - self.assertEqual( - tools._guess_datetime_format(dt_string), - dt_format - ) - - def test_guess_datetime_format_invalid_inputs(self): - # A datetime string must include a year, month and a day for it - # to be guessable, in addition to being a string that looks like - # a datetime - invalid_dts = [ - '2013', - '01/2013', - '12:00:00', - '1/1/1/1', - 'this_is_not_a_datetime', - '51a', - 9, - datetime(2011, 1, 1), - ] - - for invalid_dt in invalid_dts: - self.assertTrue(tools._guess_datetime_format(invalid_dt) is None) - - def test_guess_datetime_format_for_array(self): - expected_format = '%Y-%m-%d %H:%M:%S.%f' - dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format) - - test_arrays = [ - np.array([dt_string, dt_string, dt_string], dtype='O'), - np.array([np.nan, np.nan, dt_string], dtype='O'), - np.array([dt_string, 'random_string'], dtype='O'), - ] - - for test_array in test_arrays: - self.assertEqual( - tools._guess_datetime_format_for_array(test_array), - expected_format - ) - - format_for_string_of_nans = tools._guess_datetime_format_for_array( - np.array([np.nan, np.nan, np.nan], dtype='O') - ) - self.assertTrue(format_for_string_of_nans is None) - - -class 
TestTimestampToJulianDate(tm.TestCase): - - def test_compare_1700(self): - r = Timestamp('1700-06-23').to_julian_date() - self.assertEqual(r, 2342145.5) - - def test_compare_2000(self): - r = Timestamp('2000-04-12').to_julian_date() - self.assertEqual(r, 2451646.5) - - def test_compare_2100(self): - r = Timestamp('2100-08-12').to_julian_date() - self.assertEqual(r, 2488292.5) - - def test_compare_hour01(self): - r = Timestamp('2000-08-12T01:00:00').to_julian_date() - self.assertEqual(r, 2451768.5416666666666666) - - def test_compare_hour13(self): - r = Timestamp('2000-08-12T13:00:00').to_julian_date() - self.assertEqual(r, 2451769.0416666666666666) - - -class TestDateTimeIndexToJulianDate(tm.TestCase): - def test_1700(self): - r1 = Float64Index([2345897.5, - 2345898.5, - 2345899.5, - 2345900.5, - 2345901.5]) - r2 = date_range(start=Timestamp('1710-10-01'), - periods=5, - freq='D').to_julian_date() - self.assertIsInstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_2000(self): - r1 = Float64Index([2451601.5, - 2451602.5, - 2451603.5, - 2451604.5, - 2451605.5]) - r2 = date_range(start=Timestamp('2000-02-27'), - periods=5, - freq='D').to_julian_date() - self.assertIsInstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_hour(self): - r1 = Float64Index([2451601.5, - 2451601.5416666666666666, - 2451601.5833333333333333, - 2451601.625, - 2451601.6666666666666666]) - r2 = date_range(start=Timestamp('2000-02-27'), - periods=5, - freq='H').to_julian_date() - self.assertIsInstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_minute(self): - r1 = Float64Index([2451601.5, - 2451601.5006944444444444, - 2451601.5013888888888888, - 2451601.5020833333333333, - 2451601.5027777777777777]) - r2 = date_range(start=Timestamp('2000-02-27'), - periods=5, - freq='T').to_julian_date() - self.assertIsInstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - - def test_second(self): - r1 = Float64Index([2451601.5, - 
2451601.500011574074074, - 2451601.5000231481481481, - 2451601.5000347222222222, - 2451601.5000462962962962]) - r2 = date_range(start=Timestamp('2000-02-27'), - periods=5, - freq='S').to_julian_date() - self.assertIsInstance(r2, Float64Index) - tm.assert_index_equal(r1, r2) - -class TestDaysInMonth(tm.TestCase): - - def test_coerce_deprecation(self): - - # deprecation of coerce - with tm.assert_produces_warning(FutureWarning): - to_datetime('2015-02-29', coerce=True) - with tm.assert_produces_warning(FutureWarning): - self.assertRaises(ValueError, lambda : to_datetime('2015-02-29', coerce=False)) - - # multiple arguments - for e, c in zip(['raise','ignore','coerce'],[True,False]): - with tm.assert_produces_warning(FutureWarning): - self.assertRaises(TypeError, lambda : to_datetime('2015-02-29', errors=e, coerce=c)) - - # tests for issue #10154 - def test_day_not_in_month_coerce(self): - self.assertTrue(isnull(to_datetime('2015-02-29', errors='coerce'))) - self.assertTrue(isnull(to_datetime('2015-02-29', format="%Y-%m-%d", errors='coerce'))) - self.assertTrue(isnull(to_datetime('2015-02-32', format="%Y-%m-%d", errors='coerce'))) - self.assertTrue(isnull(to_datetime('2015-04-31', format="%Y-%m-%d", errors='coerce'))) - - def test_day_not_in_month_raise(self): - self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise') - self.assertRaises(ValueError, to_datetime, '2015-02-29', errors='raise', format="%Y-%m-%d") - self.assertRaises(ValueError, to_datetime, '2015-02-32', errors='raise', format="%Y-%m-%d") - self.assertRaises(ValueError, to_datetime, '2015-04-31', errors='raise', format="%Y-%m-%d") - - def test_day_not_in_month_ignore(self): - self.assertEqual(to_datetime('2015-02-29', errors='ignore'), '2015-02-29') - self.assertEqual(to_datetime('2015-02-29', errors='ignore', format="%Y-%m-%d"), '2015-02-29') - self.assertEqual(to_datetime('2015-02-32', errors='ignore', format="%Y-%m-%d"), '2015-02-32') - self.assertEqual(to_datetime('2015-04-31', 
errors='ignore', format="%Y-%m-%d"), '2015-04-31') - -if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False)
I fixed my mess up from earlier.
https://api.github.com/repos/pandas-dev/pandas/pulls/10940
2015-08-30T14:01:20Z
2015-08-30T14:02:58Z
null
2015-08-30T14:03:02Z
ENH: Add Series.dt.total_seconds GH #10817
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3e81a923a114c..b7feec3895f97 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -176,6 +176,10 @@ Other enhancements - ``pandas.tseries.offsets`` larger than the ``Day`` offset can now be used with with ``Series`` for addition/subtraction (:issue:`10699`). See the :ref:`Documentation <timeseries.offsetseries>` for more details. +- ``pd.Series`` of type ``timedelta64`` has new method ``.dt.total_seconds()`` returning the duration of the timedelta in seconds (:issue: `10817`) + +- ``pd.Timedelta.total_seconds()`` now returns Timedelta duration to ns precision (previously microsecond precision) (:issue: `10939`) + - ``.as_blocks`` will now take a ``copy`` optional argument to return a copy of the data, default is to copy (no change in behavior from prior versions), (:issue:`9607`) - ``regex`` argument to ``DataFrame.filter`` now handles numeric column names instead of raising ``ValueError`` (:issue:`10384`). 
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 34ea674fe10c0..86eafdf7ca2c8 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -89,7 +89,7 @@ def test_dt_namespace_accessor(self): 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz'] ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime'] ok_for_td = ['days','seconds','microseconds','nanoseconds'] - ok_for_td_methods = ['components','to_pytimedelta'] + ok_for_td_methods = ['components','to_pytimedelta','total_seconds'] def get_expected(s, name): result = getattr(Index(s.values),prop) @@ -157,6 +157,10 @@ def compare(s, name): result = s.dt.to_pytimedelta() self.assertIsInstance(result,np.ndarray) self.assertTrue(result.dtype == object) + + result = s.dt.total_seconds() + self.assertIsInstance(result,pd.Series) + self.assertTrue(result.dtype == 'float64') freq_result = s.dt.freq self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq) diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py index a4d5939d386ae..9a282bec2e9e4 100644 --- a/pandas/tseries/common.py +++ b/pandas/tseries/common.py @@ -161,7 +161,7 @@ def components(self): accessors=TimedeltaIndex._datetimelike_ops, typ='property') TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex, - accessors=["to_pytimedelta"], + accessors=["to_pytimedelta", "total_seconds"], typ='method') class PeriodProperties(Properties): diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index b0c9d8852f8c9..984f2a1cec706 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -391,6 +391,10 @@ def f(x): result = result.astype('int64') return result + def total_seconds(self): + """ Total duration of each element expressed in seconds. 
""" + return self._maybe_mask_results(1e-9*self.asi8) + def to_pytimedelta(self): """ Return TimedeltaIndex as object ndarray of datetime.timedelta objects diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 4870fbd55f33e..eef0894bdd349 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -19,6 +19,7 @@ assert_almost_equal, assert_index_equal, ensure_clean) +from numpy.testing import assert_allclose from pandas.tseries.offsets import Day, Second, Hour import pandas.util.testing as tm from numpy.random import rand, randn @@ -945,6 +946,36 @@ def test_fields(self): tm.assert_series_equal(s.dt.days,Series([1,np.nan],index=[0,1])) tm.assert_series_equal(s.dt.seconds,Series([10*3600+11*60+12,np.nan],index=[0,1])) + def test_total_seconds(self): + # GH 10939 + # test index + rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, freq='s') + expt = [1*86400+10*3600+11*60+12+100123456./1e9,1*86400+10*3600+11*60+13+100123456./1e9] + assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0) + + # test Series + s = Series(rng) + s_expt = Series(expt,index=[0,1]) + tm.assert_series_equal(s.dt.total_seconds(),s_expt) + + # with nat + s[1] = np.nan + s_expt = Series([1*86400+10*3600+11*60+12+100123456./1e9,np.nan],index=[0,1]) + tm.assert_series_equal(s.dt.total_seconds(),s_expt) + + # with both nat + s = Series([np.nan,np.nan], dtype='timedelta64[ns]') + tm.assert_series_equal(s.dt.total_seconds(),Series([np.nan,np.nan],index=[0,1])) + + def test_total_seconds_scalar(self): + # GH 10939 + rng = Timedelta('1 days, 10:11:12.100123456') + expt = 1*86400+10*3600+11*60+12+100123456./1e9 + assert_allclose(rng.total_seconds(), expt, atol=1e-10, rtol=0) + + rng = Timedelta(np.nan) + self.assertTrue(np.isnan(rng.total_seconds())) + def test_components(self): rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') rng.components diff --git a/pandas/tslib.pyx 
b/pandas/tslib.pyx index 369993b4c54d1..226cfc843b3cf 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -642,6 +642,10 @@ class NaTType(_NaT): def __reduce__(self): return (__nat_unpickle, (None, )) + + def total_seconds(self): + # GH 10939 + return np.nan fields = ['year', 'quarter', 'month', 'day', 'hour', @@ -673,7 +677,7 @@ def _make_nan_func(func_name): _nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today'] -_nan_methods = ['weekday', 'isoweekday'] +_nan_methods = ['weekday', 'isoweekday', 'total_seconds'] _implemented_methods = ['to_datetime64'] _implemented_methods.extend(_nat_methods) @@ -2412,6 +2416,12 @@ class Timedelta(_Timedelta): """ self._ensure_components() return self._ns + + def total_seconds(self): + """ + Total duration of timedelta in seconds (to ns precision) + """ + return 1e-9*self.value def __setstate__(self, state): (value) = state
Implements a Series.dt.total_seconds method for timedelta64 Series. closes #10817
https://api.github.com/repos/pandas-dev/pandas/pulls/10939
2015-08-30T12:44:17Z
2015-09-02T11:52:40Z
2015-09-02T11:52:39Z
2015-09-02T20:13:16Z
consistent imports in text.rst
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2b5a5d28c3906..9580f90c29dcd 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,21 +5,13 @@ .. ipython:: python :suppress: - import os - import csv - from pandas.compat import StringIO import pandas as pd - ExcelWriter = pd.ExcelWriter - import numpy as np np.random.seed(123456) - randn = np.random.randn np.set_printoptions(precision=4, suppress=True) - import matplotlib.pyplot as plt plt.close('all') - from pandas import * options.display.max_rows=15 import pandas.util.testing as tm @@ -1998,9 +1990,9 @@ Improvements to existing features .. ipython:: python - p = Panel(randn(3,4,4),items=['ItemA','ItemB','ItemC'], - major_axis=date_range('20010102',periods=4), - minor_axis=['A','B','C','D']) + p = pd.Panel(np.random.randn(3,4,4),items=['ItemA','ItemB','ItemC'], + major_axis=pd.date_range('20010102',periods=4), + minor_axis=['A','B','C','D']) p p.reindex(items=['ItemA']).squeeze() p.reindex(items=['ItemA'],minor=['B']).squeeze() @@ -2016,11 +2008,11 @@ Improvements to existing features .. ipython:: python - idx = date_range("2001-10-1", periods=5, freq='M') - ts = Series(np.random.rand(len(idx)),index=idx) + idx = pd.date_range("2001-10-1", periods=5, freq='M') + ts = pd.Series(np.random.rand(len(idx)),index=idx) ts['2001'] - df = DataFrame(dict(A = ts)) + df = pd.DataFrame(dict(A = ts)) df['2001'] - added option `display.mpl_style` providing a sleeker visual style for plots. Based on https://gist.github.com/huyng/816622 (:issue:`3075`). diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst index 79def066f0710..257fb2909d42c 100644 --- a/doc/source/sparse.rst +++ b/doc/source/sparse.rst @@ -6,9 +6,8 @@ import numpy as np np.random.seed(123456) - from pandas import * + import pandas as pd import pandas.util.testing as tm - randn = np.random.randn np.set_printoptions(precision=4, suppress=True) options.display.max_rows = 15 @@ -26,7 +25,7 @@ method: .. 
ipython:: python - ts = Series(randn(10)) + ts = pd.Series(randn(10)) ts[2:-2] = np.nan sts = ts.to_sparse() sts @@ -44,7 +43,7 @@ large, mostly NA DataFrame: .. ipython:: python - df = DataFrame(randn(10000, 4)) + df = pd.DataFrame(randn(10000, 4)) df.ix[:9998] = np.nan sdf = df.to_sparse() sdf @@ -75,7 +74,7 @@ distinct from the ``fill_value``: arr = np.random.randn(10) arr[2:5] = np.nan; arr[7:8] = np.nan - sparr = SparseArray(arr) + sparr = pd.SparseArray(arr) sparr Like the indexed objects (SparseSeries, SparseDataFrame, SparsePanel), a @@ -97,7 +96,7 @@ a ``fill_value`` (defaulting to ``NaN``): .. ipython:: python - spl = SparseList() + spl = pd.SparseList() spl The two important methods are ``append`` and ``to_array``. ``append`` can @@ -108,8 +107,7 @@ accept scalar values or any 1-dimensional sequence: .. ipython:: python - from numpy import nan - spl.append(np.array([1., nan, nan, 2., 3.])) + spl.append(np.array([1., np.nan, np.nan, 2., 3.])) spl.append(5) spl.append(sparr) spl @@ -149,15 +147,14 @@ The method requires a ``MultiIndex`` with two or more levels. .. ipython:: python - from numpy import nan - s = Series([3.0, nan, 1.0, 3.0, nan, nan]) - s.index = MultiIndex.from_tuples([(1, 2, 'a', 0), - (1, 2, 'a', 1), - (1, 1, 'b', 0), - (1, 1, 'b', 1), - (2, 1, 'b', 0), - (2, 1, 'b', 1)], - names=['A', 'B', 'C', 'D']) + s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) + s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0), + (1, 2, 'a', 1), + (1, 1, 'b', 0), + (1, 1, 'b', 1), + (2, 1, 'b', 0), + (2, 1, 'b', 1)], + names=['A', 'B', 'C', 'D']) s # SparseSeries @@ -199,7 +196,7 @@ A convenience method :meth:`SparseSeries.from_coo` is implemented for creating a from scipy import sparse A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), - shape=(3, 4)) + shape=(3, 4)) A A.todense() @@ -208,7 +205,7 @@ only the non-null entries. .. 
ipython:: python - ss = SparseSeries.from_coo(A) + ss = pd.SparseSeries.from_coo(A) ss Specifying ``dense_index=True`` will result in an index that is the Cartesian product of the @@ -217,5 +214,5 @@ row and columns coordinates of the matrix. Note that this will consume a signifi .. ipython:: python - ss_dense = SparseSeries.from_coo(A, dense_index=True) + ss_dense = pd.SparseSeries.from_coo(A, dense_index=True) ss_dense diff --git a/doc/source/text.rst b/doc/source/text.rst index 9bbb152f5a69b..ee4f96b41c7de 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -5,7 +5,7 @@ :suppress: import numpy as np - from pandas import * + import pandas as pd randn = np.random.randn np.set_printoptions(precision=4, suppress=True) from pandas.compat import lrange @@ -25,14 +25,14 @@ the equivalent (scalar) built-in string methods: .. ipython:: python - s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) s.str.lower() s.str.upper() s.str.len() .. ipython:: python - idx = Index([' jack', 'jill ', ' jesse ', 'frank']) + idx = pd.Index([' jack', 'jill ', ' jesse ', 'frank']) idx.str.strip() idx.str.lstrip() idx.str.rstrip() @@ -43,8 +43,8 @@ leading or trailing whitespace: .. ipython:: python - df = DataFrame(randn(3, 2), columns=[' Column A ', ' Column B '], - index=range(3)) + df = pd.DataFrame(randn(3, 2), columns=[' Column A ', ' Column B '], + index=range(3)) df Since ``df.columns`` is an Index object, we can use the ``.str`` accessor @@ -72,7 +72,7 @@ Methods like ``split`` return a Series of lists: .. ipython:: python - s2 = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h']) + s2 = pd.Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h']) s2.str.split('_') Elements in the split lists can be accessed using ``get`` or ``[]`` notation: @@ -106,8 +106,8 @@ Methods like ``replace`` and ``findall`` take `regular expressions .. 
ipython:: python - s3 = Series(['A', 'B', 'C', 'Aaba', 'Baca', - '', np.nan, 'CABA', 'dog', 'cat']) + s3 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', + '', np.nan, 'CABA', 'dog', 'cat']) s3 s3.str.replace('^.a|dog', 'XX-XX ', case=False) @@ -118,7 +118,7 @@ following code will cause trouble because of the regular expression meaning of .. ipython:: python # Consider the following badly formatted financial data - dollars = Series(['12', '-$10', '$10,000']) + dollars = pd.Series(['12', '-$10', '$10,000']) # This does what you'd naively expect: dollars.str.replace('$', '') @@ -140,8 +140,8 @@ of the string, the result will be a ``NaN``. .. ipython:: python - s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, - 'CABA', 'dog', 'cat']) + s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, + 'CABA', 'dog', 'cat']) s.str[0] s.str[1] @@ -157,14 +157,14 @@ regular expression with one group returns a Series of strings. .. ipython:: python - Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)') + pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)') Elements that do not match return ``NaN``. Extracting a regular expression with more than one group returns a DataFrame with one column per group. .. ipython:: python - Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)') + pd.Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)') Elements that do not match return a row filled with ``NaN``. Thus, a Series of messy strings can be "converted" into a @@ -178,13 +178,13 @@ Named groups like .. ipython:: python - Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)') + pd.Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)') and optional groups like .. ipython:: python - Series(['a1', 'b2', '3']).str.extract('(?P<letter>[ab])?(?P<digit>\d)') + pd.Series(['a1', 'b2', '3']).str.extract('(?P<letter>[ab])?(?P<digit>\d)') can also be used. @@ -196,14 +196,14 @@ You can check whether elements contain a pattern: .. 
ipython:: python pattern = r'[a-z][0-9]' - Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern) + pd.Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern) or match a pattern: .. ipython:: python - Series(['1', '2', '3a', '3b', '03c']).str.match(pattern, as_indexer=True) + pd.Series(['1', '2', '3a', '3b', '03c']).str.match(pattern, as_indexer=True) The distinction between ``match`` and ``contains`` is strictness: ``match`` relies on strict ``re.match``, while ``contains`` relies on ``re.search``. @@ -225,7 +225,7 @@ Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take .. ipython:: python - s4 = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s4 = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) s4.str.contains('A', na=False) Creating Indicator Variables @@ -236,7 +236,7 @@ For example if they are separated by a ``'|'``: .. ipython:: python - s = Series(['a', 'a|b', np.nan, 'a|c']) + s = pd.Series(['a', 'a|b', np.nan, 'a|c']) s.str.get_dummies(sep='|') See also :func:`~pandas.get_dummies`.
https://api.github.com/repos/pandas-dev/pandas/pulls/10938
2015-08-30T12:15:18Z
2015-09-01T11:56:01Z
null
2015-09-01T11:56:01Z
updating docs for the new sorting mechanisms - GH #10886
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 1714e00030026..359ec76533520 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -157,7 +157,7 @@ Sorting by values .. ipython:: python - df.sort(columns='B') + df.sort_values(by='B') Selection --------- @@ -680,7 +680,7 @@ Sorting is per order in the categories, not lexical order. .. ipython:: python - df.sort("grade") + df.sort_values(by="grade") Grouping by a categorical column shows also empty categories. diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 850f59c2713eb..973674fe62745 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -286,7 +286,7 @@ As usual, **both sides** of the slicers are included as this is label indexing. names=['lvl0', 'lvl1']) dfmi = pd.DataFrame(np.arange(len(miindex)*len(micolumns)).reshape((len(miindex),len(micolumns))), index=miindex, - columns=micolumns).sortlevel().sortlevel(axis=1) + columns=micolumns).sort_index().sort_index(axis=1) dfmi Basic multi-index slicing using slices, lists, and labels. @@ -458,7 +458,7 @@ correctly. You can think about breaking the axis into unique groups, where at the hierarchical level of interest, each distinct group shares a label, but no two have the same label. However, the ``MultiIndex`` does not enforce this: **you are responsible for ensuring that things are properly sorted**. There is -an important new method ``sortlevel`` to sort an axis within a ``MultiIndex`` +an important new method ``sort_index`` to sort an axis within a ``MultiIndex`` so that its labels are grouped and sorted by the original ordering of the associated factor at that level. Note that this does not necessarily mean the labels will be sorted lexicographically! @@ -468,19 +468,19 @@ labels will be sorted lexicographically! 
import random; random.shuffle(tuples) s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples)) s - s.sortlevel(0) - s.sortlevel(1) + s.sort_index(level=0) + s.sort_index(level=1) .. _advanced.sortlevel_byname: -Note, you may also pass a level name to ``sortlevel`` if the MultiIndex levels +Note, you may also pass a level name to ``sort_index`` if the MultiIndex levels are named. .. ipython:: python s.index.set_names(['L1', 'L2'], inplace=True) - s.sortlevel(level='L1') - s.sortlevel(level='L2') + s.sort_index(level='L1') + s.sort_index(level='L2') Some indexing will work even if the data are not sorted, but will be rather inefficient and will also return a copy of the data rather than a view: @@ -488,14 +488,14 @@ inefficient and will also return a copy of the data rather than a view: .. ipython:: python s['qux'] - s.sortlevel(1)['qux'] + s.sort_index(level=1)['qux'] On higher dimensional objects, you can sort any of the other axes by level if they have a MultiIndex: .. ipython:: python - df.T.sortlevel(1, axis=1) + df.T.sort_index(level=1, axis=1) The ``MultiIndex`` object has code to **explicity check the sort depth**. Thus, if you try to index at a depth at which the index is not sorted, it will raise diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 956c90ae63034..3ea90447dd44f 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -328,7 +328,7 @@ equality to be True: df1 = pd.DataFrame({'col':['foo', 0, np.nan]}) df2 = pd.DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0]) df1.equals(df2) - df1.equals(df2.sort()) + df1.equals(df2.sort_index()) Comparing array-like objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1489,7 +1489,7 @@ The ``by`` argument can take a list of column names, e.g.: .. 
ipython:: python - df1[['one', 'two', 'three']].sort_index(by=['one','two']) + df1[['one', 'two', 'three']].sort_values(by=['one','two']) These methods have special treatment of NA values via the ``na_position`` argument: @@ -1497,8 +1497,8 @@ argument: .. ipython:: python s[2] = np.nan - s.order() - s.order(na_position='first') + s.sort_values() + s.sort_values(na_position='first') .. _basics.searchsorted: @@ -1564,7 +1564,7 @@ all levels to ``by``. .. ipython:: python df1.columns = pd.MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')]) - df1.sort_index(by=('a','two')) + df1.sort_values(by=('a','two')) Copying diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 0c63759201517..3c9b538caa555 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -280,9 +280,9 @@ meaning and certain operations are possible. If the categorical is unordered, `` .. ipython:: python s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=False)) - s.sort() + s.sort_values(inplace=True) s = pd.Series(["a","b","c","a"]).astype('category', ordered=True) - s.sort() + s.sort_values(inplace=True) s s.min(), s.max() @@ -302,7 +302,7 @@ This is even true for strings and numeric data: s = pd.Series([1,2,3,1], dtype="category") s = s.cat.set_categories([2,3,1], ordered=True) s - s.sort() + s.sort_values(inplace=True) s s.min(), s.max() @@ -320,7 +320,7 @@ necessarily make the sort order the same as the categories order. s = pd.Series([1,2,3,1], dtype="category") s = s.cat.reorder_categories([2,3,1], ordered=True) s - s.sort() + s.sort_values(inplace=True) s s.min(), s.max() @@ -349,14 +349,14 @@ The ordering of the categorical is determined by the ``categories`` of that colu dfs = pd.DataFrame({'A' : pd.Categorical(list('bbeebbaa'), categories=['e','a','b'], ordered=True), 'B' : [1,2,1,2,2,1,2,1] }) - dfs.sort(['A', 'B']) + dfs.sort_values(by=['A', 'B']) Reordering the ``categories`` changes a future sort. .. 
ipython:: python dfs['A'] = dfs['A'].cat.reorder_categories(['a','b','e']) - dfs.sort(['A','B']) + dfs.sort_values(by=['A','B']) Comparisons ----------- diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 9e7b9ad0b7582..0b05f062f5fce 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -309,7 +309,7 @@ Method 2 : sort then take first of each .. ipython:: python - df.sort("BBB").groupby("AAA", as_index=False).first() + df.sort_values(by="BBB").groupby("AAA", as_index=False).first() Notice the same results, with the exception of the index. @@ -410,7 +410,7 @@ Sorting .. ipython:: python - df.sort(('Labs', 'II'), ascending=False) + df.sort_values(by=('Labs', 'II'), ascending=False) `Partial Selection, the need for sortedness; <https://github.com/pydata/pandas/issues/2995>`__ @@ -547,7 +547,7 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to code_groups = df.groupby('code') - agg_n_sort_order = code_groups[['data']].transform(sum).sort('data') + agg_n_sort_order = code_groups[['data']].transform(sum).sort_values(by='data') sorted_df = df.ix[agg_n_sort_order.index] diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 26aaf9c2be69d..dbf3b838593a9 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -164,9 +164,9 @@ will result in a **sorted** copy of the original DataFrame or Series: index = pd.MultiIndex.from_product([[2,1], ['a', 'b']]) df = pd.DataFrame(np.random.randn(4), index=index, columns=['A']) df - all(df.unstack().stack() == df.sort()) + all(df.unstack().stack() == df.sort_index()) -while the above code will raise a ``TypeError`` if the call to ``sort`` is +while the above code will raise a ``TypeError`` if the call to ``sort_index`` is removed. .. 
_reshaping.stack_multiple: @@ -206,7 +206,7 @@ Missing Data These functions are intelligent about handling missing data and do not expect each subgroup within the hierarchical index to have the same set of labels. They also can handle the index being unsorted (but you can make it sorted by -calling ``sortlevel``, of course). Here is a more complex example: +calling ``sort_index``, of course). Here is a more complex example: .. ipython:: python diff --git a/doc/source/whatsnew/v0.13.1.txt b/doc/source/whatsnew/v0.13.1.txt index 64ca1612f00c1..349acf508bbf3 100644 --- a/doc/source/whatsnew/v0.13.1.txt +++ b/doc/source/whatsnew/v0.13.1.txt @@ -120,7 +120,8 @@ API changes equal. (:issue:`5283`) See also :ref:`the docs<basics.equals>` for a motivating example. .. ipython:: python - + :okwarning: + df = DataFrame({'col':['foo', 0, np.nan]}) df2 = DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0]) df.equals(df2) diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index 01dc8bb080726..a33e0f19961ab 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -67,7 +67,8 @@ For full docs, see the :ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`. .. ipython:: python - + :okwarning: + df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) df["grade"] = df["raw_grade"].astype("category") diff --git a/doc/source/whatsnew/v0.7.3.txt b/doc/source/whatsnew/v0.7.3.txt index afb4b8faac2cc..21aa16e5fcb06 100644 --- a/doc/source/whatsnew/v0.7.3.txt +++ b/doc/source/whatsnew/v0.7.3.txt @@ -83,6 +83,7 @@ When calling ``apply`` on a grouped Series, the return value will also be a Series, to be more consistent with the ``groupby`` behavior with DataFrame: .. 
ipython:: python + :okwarning: df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], @@ -93,4 +94,3 @@ Series, to be more consistent with the ``groupby`` behavior with DataFrame: grouped = df.groupby('A')['C'] grouped.describe() grouped.apply(lambda x: x.order()[-2:]) # top 2 values - diff --git a/doc/source/whatsnew/v0.9.1.txt b/doc/source/whatsnew/v0.9.1.txt index 6718a049a0ab9..ce7439b8ecd92 100644 --- a/doc/source/whatsnew/v0.9.1.txt +++ b/doc/source/whatsnew/v0.9.1.txt @@ -21,6 +21,7 @@ New features specified in a per-column manner to support multiple sort orders (:issue:`928`) .. ipython:: python + :okwarning: df = DataFrame(np.random.randint(0, 2, (6, 3)), columns=['A', 'B', 'C']) @@ -66,7 +67,7 @@ New features .. ipython:: python df[df>0] - + df.where(df>0) df.where(df>0,-df)
closes #10886 I didn't notice warnings or errors associated with those modifications when building the docs. No more `FutureWarning` either (except in a the v. 0.13 what's new, which is legit)...
https://api.github.com/repos/pandas-dev/pandas/pulls/10937
2015-08-30T11:17:01Z
2015-08-31T01:03:13Z
2015-08-31T01:03:13Z
2015-08-31T08:18:13Z
BUG: tests that can't fail
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 619de8d6bad3b..950e4fc48c8e1 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -260,7 +260,7 @@ def _check_iris_loaded_frame(self, iris_frame): self.assertTrue( issubclass(pytype, np.floating), 'Loaded frame has incorrect type') - tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) + self.assertTrue(tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])) def _load_test1_data(self): columns = ['index', 'A', 'B', 'C', 'D'] @@ -434,7 +434,7 @@ def _execute_sql(self): # drop_sql = "DROP TABLE IF EXISTS test" # should already be done iris_results = self.pandasSQL.execute("SELECT * FROM iris") row = iris_results.fetchone() - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) + self.assertTrue(tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])) def _to_sql_save_index(self): df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')], @@ -616,7 +616,7 @@ def test_execute_sql(self): # drop_sql = "DROP TABLE IF EXISTS test" # should already be done iris_results = sql.execute("SELECT * FROM iris", con=self.conn) row = iris_results.fetchone() - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) + self.assertTrue(tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])) def test_date_parsing(self): # Test date parsing in read_sq @@ -1031,7 +1031,7 @@ def test_tquery(self): with tm.assert_produces_warning(FutureWarning): iris_results = sql.tquery("SELECT * FROM iris", con=self.conn) row = iris_results[0] - tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) + self.assertTrue(tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])) def test_uquery(self): with tm.assert_produces_warning(FutureWarning): @@ -1179,8 +1179,8 @@ def test_read_table(self): def test_read_table_columns(self): iris_frame = sql.read_sql_table( "iris", con=self.conn, columns=['SepalLength', 'SepalLength']) - tm.equalContents( - 
iris_frame.columns.values, ['SepalLength', 'SepalLength']) + self.assertTrue(tm.equalContents( + iris_frame.columns.values, ['SepalLength', 'SepalLength'])) def test_read_table_absent(self): self.assertRaises(
tm.equalContent does not raise an exception, it returns a Bool. Therefore these tests need wrapping in self.assertTrue()
https://api.github.com/repos/pandas-dev/pandas/pulls/10936
2015-08-30T11:09:01Z
2015-11-02T12:07:07Z
null
2022-10-13T00:16:47Z
DOC: clarification on -b flag in asv
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 2c9b6a0a889f4..5d26ca2414690 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -517,7 +517,7 @@ performance regressions. *pandas* is in the process of migrating to the `asv library <https://github.com/spacetelescope/asv>`__ to enable easy monitoring of the performance of critical *pandas* operations. These benchmarks are all found in the ``pandas/asv_bench`` directory. *asv* -supports both python2 and python3. +supports both python2 and python3. .. note:: @@ -525,7 +525,10 @@ supports both python2 and python3. so many stylistic issues are likely a result of automated transformation of the code. -To install asv:: +To use ''asv'' you will need either ''conda'' or ''virtualenv''. For more details +please check installation webpage http://asv.readthedocs.org/en/latest/installing.html + +To install ''asv'':: pip install git+https://github.com/spacetelescope/asv @@ -546,6 +549,25 @@ to the Pull Request to show that the committed changes do not cause unexpected performance regressions. You can run specific benchmarks using the *-b* flag which takes a regular expression. +For example this will only run tests from a ``pandas/asv_bench/benchmarks/groupby.py`` +file:: + + asv continuous master -b groupby + +If you want to run only some specific group of tests from a file you can do it +using ``.`` as a separator. For example:: + + asv continuous master -b groupby.groupby_agg_builtins1 + +will only run a ``groupby_agg_builtins1`` test defined in a ``groupby`` file. + +It is also useful to run tests in your current environment. You can simply do it by:: + + asv dev + +which would be equivalent to ``asv run --quick --show-stderr --python=same``. This +will launch every test only once, display stderr from the benchmarks and use your +local ``python'' that comes from your $PATH. 
Information on how to write a benchmark can be found in `*asv*'s documentation http://asv.readthedocs.org/en/latest/writing_benchmarks.html`.
https://api.github.com/repos/pandas-dev/pandas/pulls/10935
2015-08-30T11:06:37Z
2015-08-31T01:05:17Z
2015-08-31T01:05:17Z
2015-08-31T01:05:22Z
DOC: consistent imports (GH9886) part V
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index d51c2f62b8a0c..ce76d8c3abb37 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -6,9 +6,8 @@ import numpy as np np.random.seed(123456) - from numpy import nan - from pandas import * - options.display.max_rows=15 + import pandas as pd + pd.options.display.max_rows=15 randn = np.random.randn np.set_printoptions(precision=4, suppress=True) @@ -43,26 +42,26 @@ a simple example: .. ipython:: python - df1 = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], 'B': ['B0', 'B1', 'B2', 'B3'], 'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=[0, 1, 2, 3]) - df2 = DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], + df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], 'B': ['B4', 'B5', 'B6', 'B7'], 'C': ['C4', 'C5', 'C6', 'C7'], 'D': ['D4', 'D5', 'D6', 'D7']}, index=[4, 5, 6, 7]) - df3 = DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], + df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], 'B': ['B8', 'B9', 'B10', 'B11'], 'C': ['C8', 'C9', 'C10', 'C11'], 'D': ['D8', 'D9', 'D10', 'D11']}, index=[8, 9, 10, 11]) frames = [df1, df2, df3] - result = concat(frames) + result = pd.concat(frames) .. ipython:: python :suppress: @@ -78,7 +77,7 @@ some configurable handling of "what to do with the other axes": :: - concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, + pd.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False) - ``objs``: list or dict of Series, DataFrame, or Panel objects. If a dict is @@ -112,7 +111,7 @@ this using the ``keys`` argument: .. ipython:: python - result = concat(frames, keys=['x', 'y', 'z']) + result = pd.concat(frames, keys=['x', 'y', 'z']) .. ipython:: python :suppress: @@ -163,11 +162,11 @@ behavior: .. 
ipython:: python - df4 = DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], + df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], 'D': ['D2', 'D3', 'D6', 'D7'], 'F': ['F2', 'F3', 'F6', 'F7']}, index=[2, 3, 6, 7]) - result = concat([df1, df4], axis=1) + result = pd.concat([df1, df4], axis=1) .. ipython:: python @@ -183,7 +182,7 @@ with ``join='inner'``: .. ipython:: python - result = concat([df1, df4], axis=1, join='inner') + result = pd.concat([df1, df4], axis=1, join='inner') .. ipython:: python :suppress: @@ -198,7 +197,7 @@ DataFrame: .. ipython:: python - result = concat([df1, df4], axis=1, join_axes=[df1.index]) + result = pd.concat([df1, df4], axis=1, join_axes=[df1.index]) .. ipython:: python :suppress: @@ -275,7 +274,7 @@ To do this, use the ``ignore_index`` argument: .. ipython:: python - result = concat([df1, df4], ignore_index=True) + result = pd.concat([df1, df4], ignore_index=True) .. ipython:: python :suppress: @@ -310,8 +309,8 @@ the name of the Series. .. ipython:: python - s1 = Series(['X0', 'X1', 'X2', 'X3'], name='X') - result = concat([df1, s1], axis=1) + s1 = pd.Series(['X0', 'X1', 'X2', 'X3'], name='X') + result = pd.concat([df1, s1], axis=1) .. ipython:: python :suppress: @@ -325,8 +324,8 @@ If unnamed Series are passed they will be numbered consecutively. .. ipython:: python - s2 = Series(['_0', '_1', '_2', '_3']) - result = concat([df1, s2, s2, s2], axis=1) + s2 = pd.Series(['_0', '_1', '_2', '_3']) + result = pd.concat([df1, s2, s2, s2], axis=1) .. ipython:: python :suppress: @@ -340,7 +339,7 @@ Passing ``ignore_index=True`` will drop all name references. .. ipython:: python - result = concat([df1, s1], axis=1, ignore_index=True) + result = pd.concat([df1, s1], axis=1, ignore_index=True) .. ipython:: python :suppress: @@ -357,7 +356,7 @@ Let's consider a variation on the first example presented: .. ipython:: python - result = concat(frames, keys=['x', 'y', 'z']) + result = pd.concat(frames, keys=['x', 'y', 'z']) .. 
ipython:: python :suppress: @@ -373,7 +372,7 @@ for the ``keys`` argument (unless other keys are specified): .. ipython:: python pieces = {'x': df1, 'y': df2, 'z': df3} - result = concat(pieces) + result = pd.concat(pieces) .. ipython:: python :suppress: @@ -385,7 +384,7 @@ for the ``keys`` argument (unless other keys are specified): .. ipython:: python - result = concat(pieces, keys=['z', 'y']) + result = pd.concat(pieces, keys=['z', 'y']) .. ipython:: python :suppress: @@ -407,7 +406,7 @@ do so using the ``levels`` argument: .. ipython:: python - result = concat(pieces, keys=['x', 'y', 'z'], + result = pd.concat(pieces, keys=['x', 'y', 'z'], levels=[['z', 'y', 'x', 'w']], names=['group_key']) @@ -437,7 +436,7 @@ which returns a new DataFrame as above. .. ipython:: python - s2 = Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) + s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) result = df1.append(s2, ignore_index=True) .. ipython:: python @@ -464,7 +463,7 @@ You can also pass a list of dicts or Series: :suppress: @savefig merging_append_dits.png - p.plot([df1, DataFrame(dicts)], result, + p.plot([df1, pd.DataFrame(dicts)], result, labels=['df1', 'dicts'], vertical=True); plt.close('all'); @@ -490,9 +489,9 @@ standard database join operations between DataFrame objects: :: - merge(left, right, how='inner', on=None, left_on=None, right_on=None, - left_index=False, right_index=False, sort=True, - suffixes=('_x', '_y'), copy=True) + pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None, + left_index=False, right_index=False, sort=True, + suffixes=('_x', '_y'), copy=True) Here's a description of what each argument is for: @@ -566,14 +565,14 @@ key combination: .. 
ipython:: python - left = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], - 'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3']}) + left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) - right = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}) - result = merge(left, right, on='key') + right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) + result = pd.merge(left, right, on='key') .. ipython:: python :suppress: @@ -587,17 +586,17 @@ Here is a more complicated example with multiple join keys: .. ipython:: python - left = DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], - 'key2': ['K0', 'K1', 'K0', 'K1'], - 'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3']}) + left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) - right = DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], - 'key2': ['K0', 'K0', 'K0', 'K0'], - 'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}) + right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], + 'key2': ['K0', 'K0', 'K0', 'K0'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) - result = merge(left, right, on=['key1', 'key2']) + result = pd.merge(left, right, on=['key1', 'key2']) .. ipython:: python :suppress: @@ -623,7 +622,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='left', on=['key1', 'key2']) + result = pd.merge(left, right, how='left', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -635,7 +634,7 @@ either the left or right tables, the values in the joined table will be .. 
ipython:: python - result = merge(left, right, how='right', on=['key1', 'key2']) + result = pd.merge(left, right, how='right', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -646,7 +645,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='outer', on=['key1', 'key2']) + result = pd.merge(left, right, how='outer', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -658,7 +657,7 @@ either the left or right tables, the values in the joined table will be .. ipython:: python - result = merge(left, right, how='inner', on=['key1', 'key2']) + result = pd.merge(left, right, how='inner', on=['key1', 'key2']) .. ipython:: python :suppress: @@ -679,13 +678,13 @@ is a very basic example: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=['K0', 'K1', 'K2']) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=['K0', 'K1', 'K2']) - right = DataFrame({'C': ['C0', 'C2', 'C3'], - 'D': ['D0', 'D2', 'D3']}, - index=['K0', 'K2', 'K3']) + right = pd.DataFrame({'C': ['C0', 'C2', 'C3'], + 'D': ['D0', 'D2', 'D3']}, + index=['K0', 'K2', 'K3']) result = left.join(right) @@ -727,7 +726,7 @@ indexes: .. ipython:: python - result = merge(left, right, left_index=True, right_index=True, how='outer') + result = pd.merge(left, right, left_index=True, right_index=True, how='outer') .. ipython:: python :suppress: @@ -739,7 +738,7 @@ indexes: .. ipython:: python - result = merge(left, right, left_index=True, right_index=True, how='inner'); + result = pd.merge(left, right, left_index=True, right_index=True, how='inner'); .. ipython:: python :suppress: @@ -760,7 +759,7 @@ equivalent: :: left.join(right, on=key_or_keys) - merge(left, right, left_on=key_or_keys, right_index=True, + pd.merge(left, right, left_on=key_or_keys, right_index=True, how='left', sort=False) Obviously you can choose whichever form you find more convenient. 
For @@ -769,13 +768,13 @@ key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'key': ['K0', 'K1', 'K0', 'K1']}) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key': ['K0', 'K1', 'K0', 'K1']}) - right = DataFrame({'C': ['C0', 'C1'], - 'D': ['D0', 'D1']}, - index=['K0', 'K1']) + right = pd.DataFrame({'C': ['C0', 'C1'], + 'D': ['D0', 'D1']}, + index=['K0', 'K1']) result = left.join(right, on='key') @@ -789,8 +788,8 @@ key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python - result = merge(left, right, left_on='key', right_index=True, - how='left', sort=False); + result = pd.merge(left, right, left_on='key', right_index=True, + how='left', sort=False); .. ipython:: python :suppress: @@ -806,14 +805,14 @@ To join on multiple keys, the passed DataFrame must have a ``MultiIndex``: .. ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], - 'B': ['B0', 'B1', 'B2', 'B3'], - 'key1': ['K0', 'K0', 'K1', 'K2'], - 'key2': ['K0', 'K1', 'K0', 'K1']}) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1']}) - index = MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), - ('K2', 'K0'), ('K2', 'K1')]) - right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + index = pd.MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), + ('K2', 'K0'), ('K2', 'K1')]) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}, index=index) @@ -865,16 +864,16 @@ a level name of the multi-indexed frame. .. 
ipython:: python - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=Index(['K0', 'K1', 'K2'], name='key')) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=Index(['K0', 'K1', 'K2'], name='key')) - index = MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), - ('K2', 'Y2'), ('K2', 'Y3')], - names=['key', 'Y']) - right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], - 'D': ['D0', 'D1', 'D2', 'D3']}, - index=index) + index = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), + ('K2', 'Y2'), ('K2', 'Y3')], + names=['key', 'Y']) + right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=index) result = left.join(right, how='inner') @@ -890,7 +889,7 @@ This is equivalent but less verbose and more memory efficient / faster than this .. ipython:: python - result = merge(left.reset_index(), right.reset_index(), + result = pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner').set_index(['key','Y']) .. ipython:: python @@ -908,15 +907,15 @@ This is not Implemented via ``join`` at-the-moment, however it can be done using .. ipython:: python - index = MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), - ('K1', 'X2')], - names=['key', 'X']) - left = DataFrame({'A': ['A0', 'A1', 'A2'], - 'B': ['B0', 'B1', 'B2']}, - index=index) + index = pd.MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), + ('K1', 'X2')], + names=['key', 'X']) + left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=index) - result = merge(left.reset_index(), right.reset_index(), - on=['key'], how='inner').set_index(['key','X','Y']) + result = pd.merge(left.reset_index(), right.reset_index(), + on=['key'], how='inner').set_index(['key','X','Y']) .. ipython:: python :suppress: @@ -935,10 +934,10 @@ columns: .. 
ipython:: python - left = DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) - right = DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) + left = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) + right = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) - result = merge(left, right, on='k') + result = pd.merge(left, right, on='k') .. ipython:: python :suppress: @@ -950,7 +949,7 @@ columns: .. ipython:: python - result = merge(left, right, on='k', suffixes=['_l', '_r']) + result = pd.merge(left, right, on='k', suffixes=['_l', '_r']) .. ipython:: python :suppress: @@ -987,7 +986,7 @@ them together on their indexes. The same is true for ``Panel.join``. .. ipython:: python - right2 = DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) + right2 = pd.DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) result = left.join([right, right2]) .. ipython:: python @@ -1037,10 +1036,10 @@ object from values for matching indices in the other. Here is an example: .. ipython:: python - df1 = DataFrame([[nan, 3., 5.], [-4.6, np.nan, nan], - [nan, 7., nan]]) - df2 = DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], - index=[1, 2]) + df1 = pd.DataFrame([[np.nan, 3., 5.], [-4.6, np.nan, np.nan], + [np.nan, 7., np.nan]]) + df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], + index=[1, 2]) For this, use the ``combine_first`` method: diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3e81a923a114c..879c2a9e6df90 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -724,15 +724,40 @@ Performance Improvements Bug Fixes ~~~~~~~~~ - - Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) +======= +BUG: 10633 and 10800 fix +======= +ENH: pickle support for Period #10439 +======= +BUG: 10633 and 10800 fix +======= +ENH: pickle support for Period #10439 - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) 
+======= + + +- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) updating examples and the bug fix +======= +- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) +ENH: pickle support for Period #10439 +======= +- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) + +updating examples and the bug fix +BUG: 10633 and 10800 fix +======= +ENH: pickle support for Period #10439 - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) - Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`) - Bug in ``Series.quantile`` dropping name (:issue:`10881`) - Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`) + - Bug in ``pd.Series.interpolate`` with invalid ``order`` keyword values. (:issue:`10633`) +======= +- Bug in ``pd.Series.interpolate`` when setting no order value on ``Series.interpolate`` this needs to be at least 1. (:issue:`10633`) and (:issue:`10800`) +BUG: 10633 and 10800 fix - Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`) - Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`) - Bug in ``DataFrame.reset_index`` when index contains ``NaT``. 
(:issue:`10388`) diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py index 0ca5ced1b8d1a..8a2cd621263f1 100644 --- a/pandas/io/tests/generate_legacy_storage_files.py +++ b/pandas/io/tests/generate_legacy_storage_files.py @@ -83,9 +83,20 @@ def create_data(): index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=['one', 'two'])), dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']), +<<<<<<< HEAD +<<<<<<< HEAD cat=Series(Categorical(['foo', 'bar', 'baz']))) if LooseVersion(pandas.__version__) >= '0.17.0': series['period'] = Series([Period('2000Q1')] * 5) +======= + cat=Series(Categorical(['foo', 'bar', 'baz'])), + per=Series([Period('2000Q1')] * 5)) +>>>>>>> 0525684... ENH: pickle support for Period #10439 +======= + cat=Series(Categorical(['foo', 'bar', 'baz']))) + if LooseVersion(pandas.__version__) >= '0.17.0': + series['period'] = Series([Period('2000Q1')] * 5) +>>>>>>> aa04812... 
update legacy_storage for pickles mixed_dup_df = DataFrame(data) mixed_dup_df.columns = list("ABCDA") diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 7ed8799dd6ded..65fa185267f27 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -799,7 +799,17 @@ def test_nan_interpolate(self): tm._skip_if_no_scipy() result = s.interpolate(method='polynomial', order=1) assert_series_equal(result, expected) +ts + + # GH #10633 + def test_interpolate_spline(self): + np.random.seed(1) + t = pd.Series(np.arange(10)**2) + t[np.random.randint(0,9,3)] = np.nan + with tm.assertRaises(ValueError): + t.interpolate(method='spline', order=0) + def test_nan_irregular_index(self): s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) result = s.interpolate() @@ -1392,6 +1402,16 @@ def test_no_order(self): s.interpolate(method='polynomial') with tm.assertRaises(ValueError): s.interpolate(method='spline') + + # GH #10633 + def test_order_spline_interpolation(self): + tm._skip_if_no_scipy() + np.random.seed(1) + s = Series(np.arange(10)**2) + s[np.random.randint(0,9,3)] = np.nan + result1 = s.interpolate(method='spline', order=1) + expected1 = s.interpolate(method='spline', order=1) + assert_series_equal(result1, expected1) def test_spline(self): tm._skip_if_no_scipy() diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index dca37d9ce164c..0030354086c2f 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2537,6 +2537,8 @@ def test_searchsorted(self): def test_round_trip(self): + + import pickle p = Period('2000Q1') new_p = self.round_trip_pickle(p) self.assertEqual(new_p, p) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index a9837e2794d58..417a8f9dc13ba 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -923,8 +923,8 @@ def test_to_datetime_with_apply(self): 
assert_series_equal(result, expected) td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3]) - self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y', errors='raise')) - self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) + self.assertRaises(ValueError, 'Unknown string format. You can coerce errors to NaT by passing coerce', lambda : pd.to_datetime(td,format='%b %y', errors='raise')) + self.assertRaises(ValueError, 'Unknown string format. You can coerce errors to NaT by passing coerce',lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) expected = pd.to_datetime(td, format='%b %y', errors='coerce') result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
I made a few alterations to the consistent imports in the merging.rst file. Does anyone have any objections to what I did? I notice a few problems with the rebase though first - that's 12 commits rather than one.
https://api.github.com/repos/pandas-dev/pandas/pulls/10934
2015-08-30T10:32:25Z
2015-09-01T08:55:40Z
null
2015-09-01T08:55:40Z
ENH: #10143 Function to walk the group hierarchy of a PyTables HDF5 file
diff --git a/doc/source/io.rst b/doc/source/io.rst index ded314229225c..21a5f13d65d73 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2677,6 +2677,19 @@ everything in the sub-store and BELOW, so be *careful*. store.remove('food') store +You can walk through the group hierarchy using the ``walk`` method which +will yield a tuple for each group key along with the relative keys of its contents. + +.. ipython:: python + + for (path, subgroups, subkeys) in store.walk(): + for subgroup in subgroups: + print('GROUP: {}/{}'.format(path, subgroup)) + for subkey in subkeys: + key = '/'.join([path, subkey]) + print('KEY: {}'.format(key)) + print(store.get(key)) + .. _io.hdf5-types: Storing Mixed Types in a Table diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 31b6bb0d5575d..30a268dd20f5b 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -255,6 +255,8 @@ Other enhancements pd.concat([foo, bar, baz], 1) +- New method ``HDFStore.walk`` will recursively walk the group hierarchy of a HDF5 file (:issue:`10932`) + .. _whatsnew_0170.api: .. _whatsnew_0170.api_breaking: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ea0a59ce2ab31..8d2e3c7ab1e25 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1038,6 +1038,38 @@ def groups(self): g._v_name != u('table'))) ] + def walk(self): + """ Walk the pytables group hierarchy yielding the group name and pandas object names + for each group. Any non-pandas PyTables objects that are not a group will be ignored. 
+ + Returns + ------- + A generator yielding tuples (`path`, `groups`, `leaves`) where: + + - `path` is the full path to a group, + - `groups` is a list of group names contained in `path` + - `leaves` is a list of pandas object names contained in `path` + + """ + _tables() + self._check_if_open() + for g in self._handle.walk_groups(): + if getattr(g._v_attrs, 'pandas_type', None) is not None: + continue + + groups = [] + leaves = [] + for child in g._v_children.values(): + pandas_type = getattr(child._v_attrs, 'pandas_type', None) + if pandas_type is None: + if isinstance(child, _table_mod.group.Group): + groups.append(child._v_name) + else: + leaves.append(child._v_name) + + yield (g._v_pathname.rstrip('/'), groups, leaves) + + def get_node(self, key): """ return the node with the key or None if it does not exist """ self._check_if_open() diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index b4f1e6a429198..74821eadf13a6 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -4813,6 +4813,45 @@ def test_read_nokey(self): df.to_hdf(path, 'df2', mode='a') self.assertRaises(ValueError, read_hdf, path) + # GH10143 + def test_walk(self): + + objs = { + 'df1': pd.DataFrame([1,2,3]), + 'df2': pd.DataFrame([4,5,6]), + 'df3': pd.DataFrame([6,7,8]), + 'df4': pd.DataFrame([9,10,11]), + 's1': pd.Series([10,9,8]), + 'a1': np.array([[1,2,3], [4,5,6]]) + } + + with ensure_clean_store('walk_groups.hdf', mode='w') as store: + store.put('/first_group/df1', objs['df1']) + store.put('/first_group/df2', objs['df2']) + store.put('/second_group/df3', objs['df3']) + store.put('/second_group/s1', objs['s1']) + store.put('/second_group/third_group/df4', objs['df4']) + g1 = store._handle.get_node('/first_group') + store._handle.create_array(g1, 'a1', objs['a1']) + + expect = { + '': (set(['first_group', 'second_group']), set()), + '/first_group': (set(), set(['df1', 'df2'])), + '/second_group': (set(['third_group']), 
set(['df3', 's1'])), + '/second_group/third_group': (set(), set(['df4'])), + } + + for path, groups, leaves in store.walk(): + self.assertIn(path, expect) + expect_groups, expect_frames = expect[path] + + self.assertEqual(expect_groups, set(groups)) + self.assertEqual(expect_frames, set(leaves)) + for leaf in leaves: + frame_path = '/'.join([path, leaf]) + df = store.get(frame_path) + self.assert_(df.equals(objs[leaf])) + class TestHDFComplexValues(Base): # GH10447
closes #10143 This implementation is inspired by os.walk and follows the interface as much as possible.
https://api.github.com/repos/pandas-dev/pandas/pulls/10932
2015-08-30T09:14:49Z
2015-10-18T14:02:48Z
null
2023-05-11T01:13:09Z
Add tests to ensure sort preserved by groupby, add docs
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index acddf1bb3fe30..b5a382ce24342 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -160,6 +160,31 @@ only verifies that you've passed a valid mapping. GroupBy operations (though can't be guaranteed to be the most efficient). You can get quite creative with the label mapping functions. +.. _groupby.sorting: + +GroupBy sorting +~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default the group keys are sorted during the ``groupby`` operation. You may however pass ``sort=False`` for potential speedups: + +.. ipython:: python + + df2 = pd.DataFrame({'X' : ['B', 'B', 'A', 'A'], 'Y' : [1, 2, 3, 4]}) + df2.groupby(['X']).sum() + df2.groupby(['X'], sort=False).sum() + + +Note that ``groupby`` will preserve the order in which *observations* are sorted *within* each group. For example, the groups created by ``groupby()`` below are in the order the appeared in the original ``DataFrame``: + +.. ipython:: python + + df3 = pd.DataFrame({'X' : ['A', 'B', 'A', 'B'], 'Y' : [1, 4, 3, 2]}) + df3.groupby(['X']).get_group('A') + + df3.groupby(['X']).get_group('B') + + + .. _groupby.attributes: GroupBy object attributes @@ -183,14 +208,6 @@ the length of the ``groups`` dict, so it is largely just a convenience: grouped.groups len(grouped) -By default the group keys are sorted during the groupby operation. You may -however pass ``sort=False`` for potential speedups: - -.. ipython:: python - - df2 = pd.DataFrame({'X' : ['B', 'B', 'A', 'A'], 'Y' : [1, 2, 3, 4]}) - df2.groupby(['X'], sort=True).sum() - df2.groupby(['X'], sort=False).sum() .. _groupby.tabcompletion: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d3a63f9f5d851..958bd2933d63b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3247,11 +3247,13 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, index. Only relevant for DataFrame input. 
as_index=False is effectively "SQL-style" grouped output sort : boolean, default True - Sort group keys. Get better performance by turning this off + Sort group keys. Get better performance by turning this off. + Note this does not influence the order of observations within each group. + groupby preserves the order of rows within each group. group_keys : boolean, default True When calling apply, add group keys to index to identify pieces squeeze : boolean, default False - reduce the dimensionaility of the return type if possible, + reduce the dimensionality of the return type if possible, otherwise return a consistent type Examples diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index f7b6f947d8924..f5693983f1cc1 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -5436,6 +5436,32 @@ def test_first_last_max_min_on_time_data(self): assert_frame_equal(grouped_ref.first(),grouped_test.first()) assert_frame_equal(grouped_ref.last(),grouped_test.last()) + def test_groupby_preserves_sort(self): + # Test to ensure that groupby always preserves sort order of original + # object. Issue #8588 and #9651 + + df = DataFrame({'int_groups':[3,1,0,1,0,3,3,3], + 'string_groups':['z','a','z','a','a','g','g','g'], + 'ints':[8,7,4,5,2,9,1,1], + 'floats':[2.3,5.3,6.2,-2.4,2.2,1.1,1.1,5], + 'strings':['z','d','a','e','word','word2','42','47']}) + + # Try sorting on different types and with different group types + for sort_column in ['ints', 'floats', 'strings', ['ints','floats'], + ['ints','strings']]: + for group_column in ['int_groups', 'string_groups', + ['int_groups','string_groups']]: + + df = df.sort_values(by=sort_column) + + g = df.groupby(group_column) + + def test_sort(x): + assert_frame_equal(x, x.sort_values(by=sort_column)) + + g.apply(test_sort) + + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all()
xref #9651 closes #8588 Adds test to ensure the sort of a target object is preserved within `groupby()` groups, modifies docs to make it clear sort is preserved within groups.
https://api.github.com/repos/pandas-dev/pandas/pulls/10931
2015-08-29T18:16:32Z
2015-09-05T22:20:25Z
2015-09-05T22:20:25Z
2015-09-05T22:20:28Z
fixed bug in DataFrame.diff - issue #10907
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3e81a923a114c..70a33a6915516 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -811,6 +811,7 @@ Bug Fixes - Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`) - Bug in serialization of ``category`` types in HDF5 in presence of alternate encodings. (:issue:`10366`) - Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`) +- Bug in ``pd.DataFrame.diff`` when DataFrame is not consolidated (:issue:`10907`) - Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue:`9431`) - Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`) - Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 1d6269ae904d2..15069bf23672b 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2414,7 +2414,7 @@ def _verify_integrity(self): 'tot_items: {1}'.format(len(self.items), tot_items)) - def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs): + def apply(self, f, axes=None, filter=None, do_integrity_check=False, consolidate=True, **kwargs): """ iterate over the blocks, collect and create a new block manager @@ -2425,6 +2425,7 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs): filter : list, if supplied, only call the block if the filter is in the block do_integrity_check : boolean, default False. Do the block manager integrity check + consolidate: boolean, default True. 
Join together blocks having same dtype Returns ------- @@ -2443,6 +2444,9 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs): else: kwargs['filter'] = filter_locs + if consolidate: + self._consolidate_inplace() + if f == 'where': align_copy = True if kwargs.get('align', True): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 58c6d15f8ada5..57a43592b3866 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -10771,6 +10771,14 @@ def test_diff(self): assert_series_equal(the_diff['A'], tf['A'] - tf['A'].shift(1)) + # issue 10907 + df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])}) + df.insert(0, 'x', 1) + result = df.diff(axis=1) + expected = pd.DataFrame({'x':np.nan, 'y':pd.Series(1), 'z':pd.Series(1)}).astype('float64') + self.assert_frame_equal(result, expected) + + def test_diff_timedelta(self): # GH 4533 df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Fixed issue #10907
https://api.github.com/repos/pandas-dev/pandas/pulls/10930
2015-08-29T16:31:30Z
2015-09-01T11:01:58Z
null
2015-09-11T13:15:00Z
[DEPR]: Deprecate setting nans in categories
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index a449639f1560e..a0f9383336940 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -1,5 +1,5 @@ from .pandas_vb_common import * - +import string class concat_categorical(object): goal_time = 0.2 @@ -25,3 +25,21 @@ def time_value_counts(self): def time_value_counts_dropna(self): self.ts.value_counts(dropna=True) + +class categorical_constructor(object): + goal_time = 0.2 + + def setup(self): + n = 5 + N = 1e6 + self.categories = list(string.ascii_letters[:n]) + self.cat_idx = Index(self.categories) + self.values = np.tile(self.categories, N) + self.codes = np.tile(range(n), N) + + def time_regular_constructor(self): + Categorical(self.values, self.categories) + + def time_fastpath(self): + Categorical(self.codes, self.cat_idx, fastpath=True) + diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 3c9b538caa555..534ab0e343398 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -632,41 +632,35 @@ Missing Data pandas primarily uses the value `np.nan` to represent missing data. It is by default not included in computations. See the :ref:`Missing Data section -<missing_data>` +<missing_data>`. -There are two ways a `np.nan` can be represented in categorical data: either the value is not -available ("missing value") or `np.nan` is a valid category. +Missing values should **not** be included in the Categorical's ``categories``, +only in the ``values``. +Instead, it is understood that NaN is different, and is always a possibility. +When working with the Categorical's ``codes``, missing values will always have +a code of ``-1``. .. ipython:: python s = pd.Series(["a","b",np.nan,"a"], dtype="category") # only two categories s - s2 = pd.Series(["a","b","c","a"], dtype="category") - s2.cat.categories = [1,2,np.nan] - # three categories, np.nan included - s2 + s.codes -.. 
note:: - As integer `Series` can't include NaN, the categories were converted to `object`. -.. note:: - Missing value methods like ``isnull`` and ``fillna`` will take both missing values as well as - `np.nan` categories into account: +Methods for working with missing data, e.g. :meth:`~Series.isnull`, :meth:`~Series.fillna`, +:meth:`~Series.dropna`, all work normally: .. ipython:: python c = pd.Series(["a","b",np.nan], dtype="category") - c.cat.set_categories(["a","b",np.nan], inplace=True) - # will be inserted as a NA category: - c[0] = np.nan s = pd.Series(c) s pd.isnull(s) s.fillna("a") Differences to R's `factor` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------- The following differences to R's factor functions can be observed: @@ -677,6 +671,9 @@ The following differences to R's factor functions can be observed: * In contrast to R's `factor` function, using categorical data as the sole input to create a new categorical series will *not* remove unused categories but create a new categorical series which is equal to the passed in one! +* R allows for missing values to be included in its `levels` (pandas' `categories`). Pandas + does not allow `NaN` categories, but missing values can still be in the `values`. + Gotchas ------- diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index eae33bc80be32..424be6d949f13 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -652,6 +652,7 @@ Deprecations ===================== ================================= - ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`). +- Setting missing values (NaN) in a ``Categorical``'s ``categories`` will issue a warning (:issue:`10748`). You can still have missing values in the ``values``. - ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was deprecated in favor of ``keep``. 
(:issue:`6511`, :issue:`8505`) - ``Series.nsmallest`` and ``nlargest``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`10792`) - ``DataFrame.combineAdd`` and ``DataFrame.combineMult`` are deprecated. They diff --git a/pandas/core/base.py b/pandas/core/base.py index 6d1c89a7a2f89..fe9bac7f4c68e 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -392,6 +392,7 @@ def argmin(self, axis=None): """ return nanops.nanargmin(self.values) + @cache_readonly def hasnans(self): """ return if I have any nans; enables various perf speedups """ return com.isnull(self).any() diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 9951024ffe218..4a6a26f21b5bf 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -207,7 +207,7 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F if fastpath: # fast path self._codes = _coerce_indexer_dtype(values, categories) - self.categories = categories + self._categories = self._validate_categories(categories, fastpath=isinstance(categories, ABCIndexClass)) self._ordered = ordered return @@ -274,6 +274,8 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F ### FIXME #### raise NotImplementedError("> 1 ndim Categorical are not supported at this time") + categories = self._validate_categories(categories) + else: # there were two ways if categories are present # - the old one, where each value is a int pointer to the levels array -> not anymore @@ -282,7 +284,6 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F # make sure that we always have the same type here, no matter what we get passed in categories = self._validate_categories(categories) - codes = _get_codes_for_values(values, categories) # TODO: check for old style usage. 
These warnings should be removes after 0.18/ in 2016 @@ -295,7 +296,7 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F "'Categorical.from_codes(codes, categories)'?", RuntimeWarning, stacklevel=2) self.set_ordered(ordered or False, inplace=True) - self.categories = categories + self._categories = categories self._codes = _coerce_indexer_dtype(codes, categories) def copy(self): @@ -421,9 +422,15 @@ def _get_labels(self): _categories = None @classmethod - def _validate_categories(cls, categories): + def _validate_categories(cls, categories, fastpath=False): """ Validates that we have good categories + + Parameters + ---------- + fastpath : boolean (default: False) + Don't perform validation of the categories for uniqueness or nulls + """ if not isinstance(categories, ABCIndexClass): dtype = None @@ -439,16 +446,40 @@ def _validate_categories(cls, categories): from pandas import Index categories = Index(categories, dtype=dtype) - if not categories.is_unique: - raise ValueError('Categorical categories must be unique') + + if not fastpath: + + # check properties of the categories + # we don't allow NaNs in the categories themselves + + if categories.hasnans: + # NaNs in cats deprecated in 0.17, remove in 0.18 or 0.19 GH 10748 + msg = ('\nSetting NaNs in `categories` is deprecated and ' + 'will be removed in a future version of pandas.') + warn(msg, FutureWarning, stacklevel=5) + + # categories must be unique + + if not categories.is_unique: + raise ValueError('Categorical categories must be unique') + return categories - def _set_categories(self, categories): - """ Sets new categories """ - categories = self._validate_categories(categories) - if not self._categories is None and len(categories) != len(self._categories): + def _set_categories(self, categories, fastpath=False): + """ Sets new categories + + Parameters + ---------- + fastpath : boolean (default: False) + Don't perform validation of the categories for uniqueness or nulls + 
+ """ + + categories = self._validate_categories(categories, fastpath=fastpath) + if not fastpath and not self._categories is None and len(categories) != len(self._categories): raise ValueError("new categories need to have the same number of items than the old " "categories!") + self._categories = categories def _get_categories(self): @@ -581,11 +612,10 @@ def set_categories(self, new_categories, ordered=None, rename=False, inplace=Fal if not cat._categories is None and len(new_categories) < len(cat._categories): # remove all _codes which are larger and set to -1/NaN self._codes[self._codes >= len(new_categories)] = -1 - cat._categories = new_categories else: values = cat.__array__() cat._codes = _get_codes_for_values(values, new_categories) - cat._categories = new_categories + cat._categories = new_categories if ordered is None: ordered = self.ordered @@ -706,9 +736,8 @@ def add_categories(self, new_categories, inplace=False): msg = "new categories must not include old categories: %s" % str(already_included) raise ValueError(msg) new_categories = list(self._categories) + list(new_categories) - new_categories = self._validate_categories(new_categories) cat = self if inplace else self.copy() - cat._categories = new_categories + cat._categories = self._validate_categories(new_categories) cat._codes = _coerce_indexer_dtype(cat._codes, new_categories) if not inplace: return cat @@ -1171,7 +1200,7 @@ def order(self, inplace=False, ascending=True, na_position='last'): Category.sort """ warn("order is deprecated, use sort_values(...)", - FutureWarning, stacklevel=2) + FutureWarning, stacklevel=3) return self.sort_values(inplace=inplace, ascending=ascending, na_position=na_position) def sort(self, inplace=True, ascending=True, na_position='last'): diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 05da93a4fca0f..d847638ff105e 100755 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -129,7 +129,8 @@ def f(): 
Categorical(["a","b"], ["a","b","b"]) self.assertRaises(ValueError, f) def f(): - Categorical([1,2], [1,2,np.nan, np.nan]) + with tm.assert_produces_warning(FutureWarning): + Categorical([1,2], [1,2,np.nan, np.nan]) self.assertRaises(ValueError, f) # The default should be unordered @@ -187,17 +188,21 @@ def f(): cat = pd.Categorical([np.nan, 1., 2., 3. ]) self.assertTrue(com.is_float_dtype(cat.categories)) + # Deprecating NaNs in categoires (GH #10748) # preserve int as far as possible by converting to object if NaN is in categories - cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3]) + with tm.assert_produces_warning(FutureWarning): + cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3]) self.assertTrue(com.is_object_dtype(cat.categories)) # This doesn't work -> this would probably need some kind of "remember the original type" # feature to try to cast the array interface result to... #vals = np.asarray(cat[cat.notnull()]) #self.assertTrue(com.is_integer_dtype(vals)) - cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"]) + with tm.assert_produces_warning(FutureWarning): + cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"]) self.assertTrue(com.is_object_dtype(cat.categories)) # but don't do it for floats - cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.]) + with tm.assert_produces_warning(FutureWarning): + cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.]) self.assertTrue(com.is_float_dtype(cat.categories)) @@ -465,8 +470,9 @@ def test_describe(self): tm.assert_frame_equal(desc, expected) # NA as a category - cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan]) - result = cat.describe() + with tm.assert_produces_warning(FutureWarning): + cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan]) + result = cat.describe() expected = DataFrame([[0,0],[1,0.25],[2,0.5],[1,0.25]], 
columns=['counts','freqs'], @@ -474,8 +480,9 @@ def test_describe(self): tm.assert_frame_equal(result,expected) # NA as an unused category - cat = pd.Categorical(["a","c","c"], categories=["b","a","c",np.nan]) - result = cat.describe() + with tm.assert_produces_warning(FutureWarning): + cat = pd.Categorical(["a","c","c"], categories=["b","a","c",np.nan]) + result = cat.describe() expected = DataFrame([[0,0],[1,1/3.],[2,2/3.],[0,0]], columns=['counts','freqs'], @@ -827,29 +834,37 @@ def test_nan_handling(self): self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0])) # If categories have nan included, the code should point to that instead - c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]) - self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_)) - self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0])) + with tm.assert_produces_warning(FutureWarning): + c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]) + self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], + dtype=np.object_)) + self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0])) c[1] = np.nan - self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_)) - self.assert_numpy_array_equal(c._codes , np.array([0,2,2,0])) + self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], + dtype=np.object_)) + self.assert_numpy_array_equal(c._codes, np.array([0,2,2,0])) # Changing categories should also make the replaced category np.nan c = Categorical(["a","b","c","a"]) - c.categories = ["a","b",np.nan] - self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_)) - self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0])) + with tm.assert_produces_warning(FutureWarning): + c.categories = ["a","b",np.nan] + self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], + dtype=np.object_)) + self.assert_numpy_array_equal(c._codes, 
np.array([0,1,2,0])) # Adding nan to categories should make assigned nan point to the category! c = Categorical(["a","b",np.nan,"a"]) self.assert_numpy_array_equal(c.categories , np.array(["a","b"])) self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0])) - c.set_categories(["a","b",np.nan], rename=True, inplace=True) - self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_)) - self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0])) + with tm.assert_produces_warning(FutureWarning): + c.set_categories(["a","b",np.nan], rename=True, inplace=True) + self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], + dtype=np.object_)) + self.assert_numpy_array_equal(c._codes, np.array([0,1,-1,0])) c[1] = np.nan - self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_)) - self.assert_numpy_array_equal(c._codes , np.array([0,2,-1,0])) + self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan], + dtype=np.object_)) + self.assert_numpy_array_equal(c._codes, np.array([0,2,-1,0])) # Remove null categories (GH 10156) cases = [ @@ -861,17 +876,22 @@ def test_nan_handling(self): null_values = [np.nan, None, pd.NaT] for with_null, without in cases: - base = Categorical([], with_null) + with tm.assert_produces_warning(FutureWarning): + base = Categorical([], with_null) expected = Categorical([], without) for nullval in null_values: result = base.remove_categories(nullval) - self.assert_categorical_equal(result, expected) + self.assert_categorical_equal(result, expected) # Different null values are indistinguishable for i, j in [(0, 1), (0, 2), (1, 2)]: nulls = [null_values[i], null_values[j]] - self.assertRaises(ValueError, lambda: Categorical([], categories=nulls)) + + def f(): + with tm.assert_produces_warning(FutureWarning): + Categorical([], categories=nulls) + self.assertRaises(ValueError, f) def test_isnull(self): @@ -880,14 +900,16 @@ def test_isnull(self): res = c.isnull() 
self.assert_numpy_array_equal(res, exp) - c = Categorical(["a","b",np.nan], categories=["a","b",np.nan]) + with tm.assert_produces_warning(FutureWarning): + c = Categorical(["a","b",np.nan], categories=["a","b",np.nan]) res = c.isnull() self.assert_numpy_array_equal(res, exp) # test both nan in categories and as -1 exp = np.array([True, False, True]) c = Categorical(["a","b",np.nan]) - c.set_categories(["a","b",np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + c.set_categories(["a","b",np.nan], rename=True, inplace=True) c[0] = np.nan res = c.isnull() self.assert_numpy_array_equal(res, exp) @@ -1087,31 +1109,36 @@ def test_set_item_nan(self): # if nan in categories, the proper code should be set! cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) - cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1] = np.nan exp = np.array([0,3,2,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) - cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1:3] = np.nan exp = np.array([0,3,3,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) - cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1:3] = [np.nan, 1] exp = np.array([0,3,0,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) - cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1:3] = [np.nan, np.nan] 
exp = np.array([0,3,3,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3]) - cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[pd.isnull(cat)] = np.nan exp = np.array([0,1,3,2]) self.assert_numpy_array_equal(cat.codes, exp) @@ -1555,14 +1582,16 @@ def test_nan_handling(self): self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0])) # If categories have nan included, the label should point to that instead - s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])) + with tm.assert_produces_warning(FutureWarning): + s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])) self.assert_numpy_array_equal(s2.cat.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0])) # Changing categories should also make the replaced category np.nan s3 = Series(Categorical(["a","b","c","a"])) - s3.cat.categories = ["a","b",np.nan] + with tm.assert_produces_warning(FutureWarning): + s3.cat.categories = ["a","b",np.nan] self.assert_numpy_array_equal(s3.cat.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0])) @@ -2415,28 +2444,32 @@ def test_value_counts_with_nan(self): s.value_counts(dropna=False, sort=False), pd.Series([2, 1, 3], index=["a", "b", np.nan])) - s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan])) - tm.assert_series_equal( - s.value_counts(dropna=True), - pd.Series([2, 1], index=["a", "b"])) - tm.assert_series_equal( - s.value_counts(dropna=False), - pd.Series([2, 1, 0], index=["a", "b", np.nan])) + with tm.assert_produces_warning(FutureWarning): + s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan])) + tm.assert_series_equal( + 
s.value_counts(dropna=True), + pd.Series([2, 1], index=["a", "b"])) + tm.assert_series_equal( + s.value_counts(dropna=False), + pd.Series([2, 1, 0], index=["a", "b", np.nan])) - s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None], categories=["a", "b", np.nan])) - tm.assert_series_equal( - s.value_counts(dropna=True), - pd.Series([2, 1], index=["a", "b"])) - tm.assert_series_equal( - s.value_counts(dropna=False), - pd.Series([3, 2, 1], index=[np.nan, "a", "b"])) + with tm.assert_produces_warning(FutureWarning): + s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None], + categories=["a", "b", np.nan])) + tm.assert_series_equal( + s.value_counts(dropna=True), + pd.Series([2, 1], index=["a", "b"])) + tm.assert_series_equal( + s.value_counts(dropna=False), + pd.Series([3, 2, 1], index=[np.nan, "a", "b"])) def test_groupby(self): cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a","b","c","d"], ordered=True) data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats}) - expected = DataFrame({ 'a' : Series([1,2,4,np.nan],index=Index(['a','b','c','d'],name='b')) }) + expected = DataFrame({'a': Series([1, 2, 4, np.nan], + index=Index(['a', 'b', 'c', 'd'], name='b'))}) result = data.groupby("b").mean() tm.assert_frame_equal(result, expected) @@ -3454,10 +3487,12 @@ def f(): # make sure that fillna takes both missing values and NA categories into account c = Categorical(["a","b",np.nan]) - c.set_categories(["a","b",np.nan], rename=True, inplace=True) + with tm.assert_produces_warning(FutureWarning): + c.set_categories(["a","b",np.nan], rename=True, inplace=True) c[0] = np.nan df = pd.DataFrame({"cats":c, "vals":[1,2,3]}) df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]}) + res = df.fillna("a") tm.assert_frame_equal(res, df_exp)
WIP still Closes https://github.com/pydata/pandas/issues/10748 I have to run for now, but will pick this up later today. I think I'm missing a few in the tests, since the warning is showing up in a bunch of places (there's a way to convert those to errors for testing right?) I had to refactor a couple function that were setting `._categories` directly instead of using `Categorical._set_categories`. I kept that in a separate commit. Could do a bit more refactoring with the `validate_categories` stuff, but that can be separate. And I need to figure out the proper `stacklevel` for this warning. I think I used 3 for now.
https://api.github.com/repos/pandas-dev/pandas/pulls/10929
2015-08-29T13:22:00Z
2015-09-01T19:18:15Z
2015-09-01T19:18:14Z
2017-04-05T02:06:34Z
Updates for asv suite
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 760db2086b125..239f9aa19f769 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -18,7 +18,7 @@ // If missing or the empty string, the tool will be automatically // determined by looking for tools on the PATH environment // variable. - "environment_type": "conda", + "environment_type": "", // the base URL to show a commit for the project. "show_commit_url": "https://github.com/pydata/pandas/commit/", @@ -26,7 +26,7 @@ // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. // "pythons": ["2.7", "3.4"], - "pythons": ["2.7"], + "pythons": ["2.7", "3.4"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty @@ -41,7 +41,10 @@ "sqlalchemy": [], "scipy": [], "numexpr": [], - "pytables": [], + "tables": [], + "openpyxl": [], + "xlrd": [], + "xlwt": [] }, // The directory (relative to the current directory) that benchmarks are diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py index ecb91923dc663..2b10cb88a3134 100644 --- a/asv_bench/benchmarks/attrs_caching.py +++ b/asv_bench/benchmarks/attrs_caching.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class getattr_dataframe_index(object): diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py index 13976014ec6f1..187101b1f392b 100644 --- a/asv_bench/benchmarks/binary_ops.py +++ b/asv_bench/benchmarks/binary_ops.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * import pandas.computation.expressions as expr diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 80b277336df7a..a449639f1560e 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -1,4 +1,4 @@ -from pandas_vb_common 
import * +from .pandas_vb_common import * class concat_categorical(object): diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index b48211b3db83e..265ffbc7261ca 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class frame_constructor_ndarray(object): diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py index 397312355aa47..719d92567a7be 100644 --- a/asv_bench/benchmarks/eval.py +++ b/asv_bench/benchmarks/eval.py @@ -1,6 +1,6 @@ -from pandas_vb_common import * -import pandas.computation.expressions as expr +from .pandas_vb_common import * import pandas as pd +import pandas.computation.expressions as expr class eval_frame_add_all_threads(object): diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index 2cb337e0e6b9d..85f3c1628bd8b 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * try: from pandas.tseries.offsets import * except: @@ -9,1611 +9,1611 @@ class frame_ctor_dtindex_BDayx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BDay(1, **{})) + self.idx = self.get_index_for_offset(BDay(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BDayx1(self): DataFrame(self.d) + 
def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BDayx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BDay(2, **{})) + self.idx = self.get_index_for_offset(BDay(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BDayx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BMonthBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - 
return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BMonthBegin(1, **{})) + self.idx = self.get_index_for_offset(BMonthBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BMonthBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BMonthBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BMonthBegin(2, **{})) + self.idx = self.get_index_for_offset(BMonthBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BMonthBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + 
self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BMonthEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BMonthEnd(1, **{})) + self.idx = self.get_index_for_offset(BMonthEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BMonthEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BMonthEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return 
min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BMonthEnd(2, **{})) + self.idx = self.get_index_for_offset(BMonthEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BMonthEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BQuarterBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BQuarterBegin(1, **{})) + self.idx = self.get_index_for_offset(BQuarterBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BQuarterBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + 
(off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BQuarterBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BQuarterBegin(2, **{})) + self.idx = self.get_index_for_offset(BQuarterBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BQuarterBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BQuarterEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - 
start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BQuarterEnd(1, **{})) + self.idx = self.get_index_for_offset(BQuarterEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BQuarterEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BQuarterEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BQuarterEnd(2, **{})) + self.idx = self.get_index_for_offset(BQuarterEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BQuarterEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days 
+ if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BYearBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BYearBegin(1, **{})) + self.idx = self.get_index_for_offset(BYearBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BYearBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BYearBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // 
self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BYearBegin(2, **{})) + self.idx = self.get_index_for_offset(BYearBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BYearBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BYearEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BYearEnd(1, **{})) + self.idx = self.get_index_for_offset(BYearEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BYearEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 
0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BYearEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BYearEnd(2, **{})) + self.idx = self.get_index_for_offset(BYearEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BYearEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BusinessDayx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def 
get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BusinessDay(1, **{})) + self.idx = self.get_index_for_offset(BusinessDay(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BusinessDayx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BusinessDayx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BusinessDay(2, **{})) + self.idx = self.get_index_for_offset(BusinessDay(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BusinessDayx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + 
return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BusinessHourx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BusinessHour(1, **{})) + self.idx = self.get_index_for_offset(BusinessHour(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BusinessHourx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_BusinessHourx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - 
self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(BusinessHour(2, **{})) + self.idx = self.get_index_for_offset(BusinessHour(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_BusinessHourx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CBMonthBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CBMonthBegin(1, **{})) + self.idx = self.get_index_for_offset(CBMonthBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CBMonthBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * 
((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CBMonthBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CBMonthBegin(2, **{})) + self.idx = self.get_index_for_offset(CBMonthBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CBMonthBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CBMonthEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = 
Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CBMonthEnd(1, **{})) + self.idx = self.get_index_for_offset(CBMonthEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CBMonthEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CBMonthEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CBMonthEnd(2, **{})) + self.idx = self.get_index_for_offset(CBMonthEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CBMonthEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // 
self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CDayx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CDay(1, **{})) + self.idx = self.get_index_for_offset(CDay(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CDayx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CDayx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, 
get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CDay(2, **{})) + self.idx = self.get_index_for_offset(CDay(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CDayx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CustomBusinessDayx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CustomBusinessDay(1, **{})) + self.idx = self.get_index_for_offset(CustomBusinessDay(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CustomBusinessDayx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def 
get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_CustomBusinessDayx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(CustomBusinessDay(2, **{})) + self.idx = self.get_index_for_offset(CustomBusinessDay(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_CustomBusinessDayx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_DateOffsetx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, 
periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(DateOffset(1, **{})) + self.idx = self.get_index_for_offset(DateOffset(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_DateOffsetx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_DateOffsetx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(DateOffset(2, **{})) + self.idx = self.get_index_for_offset(DateOffset(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_DateOffsetx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def 
get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Dayx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Day(1, **{})) + self.idx = self.get_index_for_offset(Day(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Dayx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Dayx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) 
- self.idx = get_index_for_offset(Day(2, **{})) + self.idx = self.get_index_for_offset(Day(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Dayx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Easterx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Easter(1, **{})) + self.idx = self.get_index_for_offset(Easter(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Easterx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, 
periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Easterx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Easter(2, **{})) + self.idx = self.get_index_for_offset(Easter(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Easterx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253Quarterx1__variation_last(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 
'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', })) + self.idx = self.get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253Quarterx1__variation_last(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253Quarterx1__variation_nearest(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', })) + self.idx = self.get_index_for_offset(FY5253Quarter(1, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253Quarterx1__variation_nearest(self): DataFrame(self.d) + def get_period_count(self, 
start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253Quarterx2__variation_last(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', })) + self.idx = self.get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'last', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253Quarterx2__variation_last(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class 
frame_ctor_dtindex_FY5253Quarterx2__variation_nearest(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', })) + self.idx = self.get_index_for_offset(FY5253Quarter(2, **{'startingMonth': 1, 'qtr_with_extra_week': 1, 'weekday': 1, 'variation': 'nearest', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253Quarterx2__variation_nearest(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253x1__variation_last(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return 
date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', })) + self.idx = self.get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253x1__variation_last(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253x1__variation_nearest(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', })) + self.idx = self.get_index_for_offset(FY5253(1, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253x1__variation_nearest(self): DataFrame(self.d) + def 
get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253x2__variation_last(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', })) + self.idx = self.get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'last', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253x2__variation_last(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_FY5253x2__variation_nearest(object): goal_time = 0.2 def setup(self): 
- - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', })) + self.idx = self.get_index_for_offset(FY5253(2, **{'startingMonth': 1, 'weekday': 1, 'variation': 'nearest', })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_FY5253x2__variation_nearest(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Hourx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Hour(1, **{})) + self.idx = self.get_index_for_offset(Hour(1, **{})) self.df = 
DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Hourx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Hourx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Hour(2, **{})) + self.idx = self.get_index_for_offset(Hour(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Hourx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_LastWeekOfMonthx1(object): 
goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(LastWeekOfMonth(1, **{'week': 1, 'weekday': 1, })) + self.idx = self.get_index_for_offset(LastWeekOfMonth(1, **{'week': 1, 'weekday': 1, })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_LastWeekOfMonthx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_LastWeekOfMonthx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(LastWeekOfMonth(2, **{'week': 1, 'weekday': 1, })) + self.idx = 
self.get_index_for_offset(LastWeekOfMonth(2, **{'week': 1, 'weekday': 1, })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_LastWeekOfMonthx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Microx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Micro(1, **{})) + self.idx = self.get_index_for_offset(Micro(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Microx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, 
self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Microx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Micro(2, **{})) + self.idx = self.get_index_for_offset(Micro(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Microx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Millix1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Milli(1, **{})) + self.idx = self.get_index_for_offset(Milli(1, **{})) self.df = 
DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Millix1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Millix2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Milli(2, **{})) + self.idx = self.get_index_for_offset(Milli(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Millix2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Minutex1(object): 
goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Minute(1, **{})) + self.idx = self.get_index_for_offset(Minute(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Minutex1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Minutex2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Minute(2, **{})) + self.idx = self.get_index_for_offset(Minute(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) 
for col in self.df.columns]) def time_frame_ctor_dtindex_Minutex2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_MonthBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(MonthBegin(1, **{})) + self.idx = self.get_index_for_offset(MonthBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_MonthBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_MonthBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - 
self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(MonthBegin(2, **{})) + self.idx = self.get_index_for_offset(MonthBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_MonthBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_MonthEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(MonthEnd(1, **{})) + self.idx = self.get_index_for_offset(MonthEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def 
time_frame_ctor_dtindex_MonthEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_MonthEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(MonthEnd(2, **{})) + self.idx = self.get_index_for_offset(MonthEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_MonthEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Nanox1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + 
(off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Nano(1, **{})) + self.idx = self.get_index_for_offset(Nano(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Nanox1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Nanox2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Nano(2, **{})) + self.idx = self.get_index_for_offset(Nano(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Nanox2(self): DataFrame(self.d) + def get_period_count(self, start_date, 
off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_QuarterBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(QuarterBegin(1, **{})) + self.idx = self.get_index_for_offset(QuarterBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_QuarterBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_QuarterBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 
1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(QuarterBegin(2, **{})) + self.idx = self.get_index_for_offset(QuarterBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_QuarterBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_QuarterEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(QuarterEnd(1, **{})) + self.idx = self.get_index_for_offset(QuarterEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_QuarterEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days 
= ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_QuarterEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(QuarterEnd(2, **{})) + self.idx = self.get_index_for_offset(QuarterEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_QuarterEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Secondx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - 
start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Second(1, **{})) + self.idx = self.get_index_for_offset(Second(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Secondx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Secondx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Second(2, **{})) + self.idx = self.get_index_for_offset(Second(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Secondx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 
0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_WeekOfMonthx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(WeekOfMonth(1, **{'week': 1, 'weekday': 1, })) + self.idx = self.get_index_for_offset(WeekOfMonth(1, **{'week': 1, 'weekday': 1, })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_WeekOfMonthx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_WeekOfMonthx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // 
self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(WeekOfMonth(2, **{'week': 1, 'weekday': 1, })) + self.idx = self.get_index_for_offset(WeekOfMonth(2, **{'week': 1, 'weekday': 1, })) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_WeekOfMonthx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Weekx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Week(1, **{})) + self.idx = self.get_index_for_offset(Week(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Weekx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - 
start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_Weekx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(Week(2, **{})) + self.idx = self.get_index_for_offset(Week(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_Weekx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_YearBeginx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 
1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(YearBegin(1, **{})) + self.idx = self.get_index_for_offset(YearBegin(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_YearBeginx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_YearBeginx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(YearBegin(2, **{})) + self.idx = self.get_index_for_offset(YearBegin(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_YearBeginx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + 
return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_YearEndx1(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(YearEnd(1, **{})) + self.idx = self.get_index_for_offset(YearEnd(1, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_YearEndx1(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_dtindex_YearEndx2(object): goal_time = 0.2 def setup(self): - - def get_period_count(start_date, off): - self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days - if (self.ten_offsets_in_days == 0): - return 1000 - else: - return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) - - def get_index_for_offset(off): - self.start_date = 
Timestamp('1/1/1900') - return date_range(self.start_date, periods=min(1000, get_period_count(self.start_date, off)), freq=off) - self.idx = get_index_for_offset(YearEnd(2, **{})) + self.idx = self.get_index_for_offset(YearEnd(2, **{})) self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx) self.d = dict([(col, self.df[col]) for col in self.df.columns]) def time_frame_ctor_dtindex_YearEndx2(self): DataFrame(self.d) + def get_period_count(self, start_date, off): + self.ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days + if (self.ten_offsets_in_days == 0): + return 1000 + else: + return min((9 * ((Timestamp.max - start_date).days // self.ten_offsets_in_days)), 1000) + + def get_index_for_offset(self, off): + self.start_date = Timestamp('1/1/1900') + return date_range(self.start_date, periods=min(1000, self.get_period_count(self.start_date, off)), freq=off) + class frame_ctor_list_of_dict(object): goal_time = 0.2 @@ -1657,7 +1657,7 @@ class frame_ctor_nested_dict_int64(object): goal_time = 0.2 def setup(self): - self.data = dict(((i, dict(((j, float(j)) for j in xrange(100)))) for i in xrange(2000))) + self.data = dict(((i, dict(((j, float(j)) for j in range(100)))) for i in range(2000))) def time_frame_ctor_nested_dict_int64(self): DataFrame(self.data) diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 2bd51201b45ca..98b0ec73fb23c 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class frame_apply_axis_1(object): @@ -69,12 +69,12 @@ def setup(self): self.idx = date_range('1/1/2000', periods=100000, freq='D') self.df = DataFrame(randn(100000, 1), columns=['A'], index=self.idx) - def f(x): - self.x = self.x.copy() - self.x['date'] = self.x.index - def time_frame_assign_timeseries_index(self): - f(self.df) + self.f(self.df) + + def f(self, df): + self.x = 
self.df.copy() + self.x['date'] = self.x.index class frame_boolean_row_select(object): @@ -339,80 +339,76 @@ class frame_float_equal(object): goal_time = 0.2 def setup(self): - - def make_pair(frame): - self.df = frame - self.df2 = self.df.copy() - self.df2.ix[((-1), (-1))] = np.nan - return (self.df, self.df2) - - def test_equal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df) - - def test_unequal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df2) self.float_df = DataFrame(np.random.randn(1000, 1000)) self.object_df = DataFrame(([(['foo'] * 1000)] * 1000)) self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns)) - self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) + self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) def time_frame_float_equal(self): - test_equal('float_df') + self.test_equal('float_df') + def make_pair(self, frame): + self.df = frame + self.df2 = self.df.copy() + self.df2.ix[((-1), (-1))] = np.nan + return (self.df, self.df2) -class frame_float_unequal(object): - goal_time = 0.2 + def test_equal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df) - def setup(self): + def test_unequal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df2) - def make_pair(frame): - self.df = frame - self.df2 = self.df.copy() - self.df2.ix[((-1), (-1))] = np.nan - return (self.df, self.df2) - def test_equal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df) +class frame_float_unequal(object): + goal_time = 0.2 - def test_unequal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df2) + def setup(self): 
self.float_df = DataFrame(np.random.randn(1000, 1000)) self.object_df = DataFrame(([(['foo'] * 1000)] * 1000)) self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns)) - self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) + self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) def time_frame_float_unequal(self): - test_unequal('float_df') + self.test_unequal('float_df') + def make_pair(self, frame): + self.df = frame + self.df2 = self.df.copy() + self.df2.ix[((-1), (-1))] = np.nan + return (self.df, self.df2) -class frame_from_records_generator(object): - goal_time = 0.2 + def test_equal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df) - def setup(self): + def test_unequal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df2) - def get_data(n=100000): - return ((x, (x * 20), (x * 100)) for x in xrange(n)) + +class frame_from_records_generator(object): + goal_time = 0.2 def time_frame_from_records_generator(self): - self.df = DataFrame.from_records(get_data()) + self.df = DataFrame.from_records(self.get_data()) + + def get_data(self, n=100000): + return ((x, (x * 20), (x * 100)) for x in range(n)) class frame_from_records_generator_nrows(object): goal_time = 0.2 - def setup(self): - - def get_data(n=100000): - return ((x, (x * 20), (x * 100)) for x in xrange(n)) - def time_frame_from_records_generator_nrows(self): - self.df = DataFrame.from_records(get_data(), nrows=1000) + self.df = DataFrame.from_records(self.get_data(), nrows=1000) + + def get_data(self, n=100000): + return ((x, (x * 20), (x * 100)) for x in range(n)) class frame_get_dtype_counts(object): @@ -433,26 +429,26 @@ def setup(self): 
self.df2 = DataFrame(randn(3000, 1), columns=['A']) self.df3 = DataFrame(randn(3000, 1)) - def f(): - if hasattr(self.df, '_item_cache'): - self.df._item_cache.clear() - for (name, col) in self.df.iteritems(): - pass + def time_frame_getitem_single_column(self): + self.h() - def g(): - for (name, col) in self.df.iteritems(): - pass + def f(self): + if hasattr(self.df, '_item_cache'): + self.df._item_cache.clear() + for (name, col) in self.df.iteritems(): + pass - def h(): - for i in xrange(10000): - self.df2['A'] + def g(self): + for (name, col) in self.df.iteritems(): + pass - def j(): - for i in xrange(10000): - self.df3[0] + def h(self): + for i in range(10000): + self.df2['A'] - def time_frame_getitem_single_column(self): - h() + def j(self): + for i in range(10000): + self.df3[0] class frame_getitem_single_column2(object): @@ -463,26 +459,26 @@ def setup(self): self.df2 = DataFrame(randn(3000, 1), columns=['A']) self.df3 = DataFrame(randn(3000, 1)) - def f(): - if hasattr(self.df, '_item_cache'): - self.df._item_cache.clear() - for (name, col) in self.df.iteritems(): - pass + def time_frame_getitem_single_column2(self): + self.j() - def g(): - for (name, col) in self.df.iteritems(): - pass + def f(self): + if hasattr(self.df, '_item_cache'): + self.df._item_cache.clear() + for (name, col) in self.df.iteritems(): + pass - def h(): - for i in xrange(10000): - self.df2['A'] + def g(self): + for (name, col) in self.df.iteritems(): + pass - def j(): - for i in xrange(10000): - self.df3[0] + def h(self): + for i in range(10000): + self.df2['A'] - def time_frame_getitem_single_column2(self): - j() + def j(self): + for i in range(10000): + self.df3[0] class frame_html_repr_trunc_mi(object): @@ -517,14 +513,14 @@ class frame_insert_100_columns_begin(object): def setup(self): self.N = 1000 - def f(K=100): - self.df = DataFrame(index=range(self.N)) - self.new_col = np.random.randn(self.N) - for i in range(K): - self.df.insert(0, i, self.new_col) - def 
time_frame_insert_100_columns_begin(self): - f() + self.f() + + def f(self, K=100): + self.df = DataFrame(index=range(self.N)) + self.new_col = np.random.randn(self.N) + for i in range(K): + self.df.insert(0, i, self.new_col) class frame_insert_500_columns_end(object): @@ -533,14 +529,14 @@ class frame_insert_500_columns_end(object): def setup(self): self.N = 1000 - def f(K=500): - self.df = DataFrame(index=range(self.N)) - self.new_col = np.random.randn(self.N) - for i in range(K): - self.df[i] = self.new_col - def time_frame_insert_500_columns_end(self): - f() + self.f() + + def f(self, K=500): + self.df = DataFrame(index=range(self.N)) + self.new_col = np.random.randn(self.N) + for i in range(K): + self.df[i] = self.new_col class frame_interpolate(object): @@ -597,26 +593,26 @@ def setup(self): self.df2 = DataFrame(randn(3000, 1), columns=['A']) self.df3 = DataFrame(randn(3000, 1)) - def f(): - if hasattr(self.df, '_item_cache'): - self.df._item_cache.clear() - for (name, col) in self.df.iteritems(): - pass + def time_frame_iteritems(self): + self.f() - def g(): - for (name, col) in self.df.iteritems(): - pass + def f(self): + if hasattr(self.df, '_item_cache'): + self.df._item_cache.clear() + for (name, col) in self.df.iteritems(): + pass - def h(): - for i in xrange(10000): - self.df2['A'] + def g(self): + for (name, col) in self.df.iteritems(): + pass - def j(): - for i in xrange(10000): - self.df3[0] + def h(self): + for i in range(10000): + self.df2['A'] - def time_frame_iteritems(self): - f() + def j(self): + for i in range(10000): + self.df3[0] class frame_iteritems_cached(object): @@ -627,26 +623,26 @@ def setup(self): self.df2 = DataFrame(randn(3000, 1), columns=['A']) self.df3 = DataFrame(randn(3000, 1)) - def f(): - if hasattr(self.df, '_item_cache'): - self.df._item_cache.clear() - for (name, col) in self.df.iteritems(): - pass + def time_frame_iteritems_cached(self): + self.g() - def g(): - for (name, col) in self.df.iteritems(): - pass + def 
f(self): + if hasattr(self.df, '_item_cache'): + self.df._item_cache.clear() + for (name, col) in self.df.iteritems(): + pass - def h(): - for i in xrange(10000): - self.df2['A'] + def g(self): + for (name, col) in self.df.iteritems(): + pass - def j(): - for i in xrange(10000): - self.df3[0] + def h(self): + for i in range(10000): + self.df2['A'] - def time_frame_iteritems_cached(self): - g() + def j(self): + for i in range(10000): + self.df3[0] class frame_mask_bools(object): @@ -681,112 +677,112 @@ class frame_nonunique_equal(object): goal_time = 0.2 def setup(self): - - def make_pair(frame): - self.df = frame - self.df2 = self.df.copy() - self.df2.ix[((-1), (-1))] = np.nan - return (self.df, self.df2) - - def test_equal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df) - - def test_unequal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df2) self.float_df = DataFrame(np.random.randn(1000, 1000)) self.object_df = DataFrame(([(['foo'] * 1000)] * 1000)) self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns)) - self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) + self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) def time_frame_nonunique_equal(self): - test_equal('nonunique_cols') + self.test_equal('nonunique_cols') + def make_pair(self, frame): + self.df = frame + self.df2 = self.df.copy() + self.df2.ix[((-1), (-1))] = np.nan + return (self.df, self.df2) -class frame_nonunique_unequal(object): - goal_time = 0.2 + def test_equal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df) - def setup(self): + def test_unequal(self, name): + (self.df, self.df2) = self.pairs[name] + return 
self.df.equals(self.df2) - def make_pair(frame): - self.df = frame - self.df2 = self.df.copy() - self.df2.ix[((-1), (-1))] = np.nan - return (self.df, self.df2) - def test_equal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df) +class frame_nonunique_unequal(object): + goal_time = 0.2 - def test_unequal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df2) + def setup(self): self.float_df = DataFrame(np.random.randn(1000, 1000)) self.object_df = DataFrame(([(['foo'] * 1000)] * 1000)) self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns)) - self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) + self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) def time_frame_nonunique_unequal(self): - test_unequal('nonunique_cols') + self.test_unequal('nonunique_cols') + def make_pair(self, frame): + self.df = frame + self.df2 = self.df.copy() + self.df2.ix[((-1), (-1))] = np.nan + return (self.df, self.df2) -class frame_object_equal(object): - goal_time = 0.2 + def test_equal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df) - def setup(self): + def test_unequal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df2) - def make_pair(frame): - self.df = frame - self.df2 = self.df.copy() - self.df2.ix[((-1), (-1))] = np.nan - return (self.df, self.df2) - def test_equal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df) +class frame_object_equal(object): + goal_time = 0.2 - def test_unequal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df2) + def setup(self): self.float_df = DataFrame(np.random.randn(1000, 1000)) 
self.object_df = DataFrame(([(['foo'] * 1000)] * 1000)) self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns)) - self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) + self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) def time_frame_object_equal(self): - test_equal('object_df') + self.test_equal('object_df') + def make_pair(self, frame): + self.df = frame + self.df2 = self.df.copy() + self.df2.ix[((-1), (-1))] = np.nan + return (self.df, self.df2) -class frame_object_unequal(object): - goal_time = 0.2 + def test_equal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df) - def setup(self): + def test_unequal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df2) - def make_pair(frame): - self.df = frame - self.df2 = self.df.copy() - self.df2.ix[((-1), (-1))] = np.nan - return (self.df, self.df2) - def test_equal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df) +class frame_object_unequal(object): + goal_time = 0.2 - def test_unequal(name): - (self.df, self.df2) = pairs[name] - return self.df.equals(self.df2) + def setup(self): self.float_df = DataFrame(np.random.randn(1000, 1000)) self.object_df = DataFrame(([(['foo'] * 1000)] * 1000)) self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns)) - self.pairs = dict([(name, make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', self.nonunique_cols))]) + self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (('float_df', self.float_df), ('object_df', self.object_df), ('nonunique_cols', 
self.nonunique_cols))]) def time_frame_object_unequal(self): - test_unequal('object_df') + self.test_unequal('object_df') + + def make_pair(self, frame): + self.df = frame + self.df2 = self.df.copy() + self.df2.ix[((-1), (-1))] = np.nan + return (self.df, self.df2) + + def test_equal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df) + + def test_unequal(self, name): + (self.df, self.df2) = self.pairs[name] + return self.df.equals(self.df2) class frame_reindex_axis0(object): diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index b0486617a52af..556dd2c364cdf 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -1,6 +1,16 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from pandas.core import common as com -from pandas.util.testing import test_parallel +try: + from pandas.util.testing import test_parallel + have_real_test_parallel = True +except ImportError: + have_real_test_parallel = False + + def test_parallel(num_threads=1): + + def wrapper(fname): + return fname + return wrapper class nogil_groupby_count_2(object): @@ -11,13 +21,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].count() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_count_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].count() class nogil_groupby_last_2(object): @@ -28,13 +40,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].last() + if (not have_real_test_parallel): + raise 
NotImplementedError def time_nogil_groupby_last_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].last() class nogil_groupby_max_2(object): @@ -45,13 +59,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].max() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_max_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].max() class nogil_groupby_mean_2(object): @@ -62,13 +78,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].mean() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_mean_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].mean() class nogil_groupby_min_2(object): @@ -79,13 +97,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].min() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_min_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].min() class nogil_groupby_prod_2(object): @@ -96,13 +116,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - 
@test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].prod() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_prod_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].prod() class nogil_groupby_sum_2(object): @@ -113,13 +135,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].sum() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_sum_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].sum() class nogil_groupby_sum_4(object): @@ -130,36 +154,38 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) + if (not have_real_test_parallel): + raise NotImplementedError - def f(): - self.df.groupby('key')['data'].sum() + def time_nogil_groupby_sum_4(self): + self.pg4() - def g2(): - for i in range(2): - f() + def f(self): + self.df.groupby('key')['data'].sum() - def g4(): - for i in range(4): - f() + def g2(self): + for i in range(2): + self.f() - def g8(): - for i in range(8): - f() + def g4(self): + for i in range(4): + self.f() - @test_parallel(num_threads=2) - def pg2(): - f() + def g8(self): + for i in range(8): + self.f() - @test_parallel(num_threads=4) - def pg4(): - f() + @test_parallel(num_threads=2) + def pg2(self): + self.f() - @test_parallel(num_threads=8) - def pg8(): - f() + @test_parallel(num_threads=4) + def pg4(self): + self.f() - def time_nogil_groupby_sum_4(self): - pg4() + @test_parallel(num_threads=8) + def pg8(self): + self.f() class nogil_groupby_sum_8(object): @@ -170,36 +196,38 @@ def setup(self): self.ngroups = 
1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) + if (not have_real_test_parallel): + raise NotImplementedError - def f(): - self.df.groupby('key')['data'].sum() + def time_nogil_groupby_sum_8(self): + self.pg8() - def g2(): - for i in range(2): - f() + def f(self): + self.df.groupby('key')['data'].sum() - def g4(): - for i in range(4): - f() + def g2(self): + for i in range(2): + self.f() - def g8(): - for i in range(8): - f() + def g4(self): + for i in range(4): + self.f() - @test_parallel(num_threads=2) - def pg2(): - f() + def g8(self): + for i in range(8): + self.f() - @test_parallel(num_threads=4) - def pg4(): - f() + @test_parallel(num_threads=2) + def pg2(self): + self.f() - @test_parallel(num_threads=8) - def pg8(): - f() + @test_parallel(num_threads=4) + def pg4(self): + self.f() - def time_nogil_groupby_sum_8(self): - pg8() + @test_parallel(num_threads=8) + def pg8(self): + self.f() class nogil_groupby_var_2(object): @@ -210,13 +238,15 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - - @test_parallel(num_threads=2) - def pg2(): - self.df.groupby('key')['data'].var() + if (not have_real_test_parallel): + raise NotImplementedError def time_nogil_groupby_var_2(self): - pg2() + self.pg2() + + @test_parallel(num_threads=2) + def pg2(self): + self.df.groupby('key')['data'].var() class nogil_take1d_float64(object): @@ -227,20 +257,22 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) + if (not have_real_test_parallel): + raise NotImplementedError self.N = 10000000.0 self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), }) self.indexer = np.arange(100, (len(self.df) - 
100)) - @test_parallel(num_threads=2) - def take_1d_pg2_int64(): - com.take_1d(self.df.int64.values, self.indexer) + def time_nogil_take1d_float64(self): + self.take_1d_pg2_int64() - @test_parallel(num_threads=2) - def take_1d_pg2_float64(): - com.take_1d(self.df.float64.values, self.indexer) + @test_parallel(num_threads=2) + def take_1d_pg2_int64(self): + com.take_1d(self.df.int64.values, self.indexer) - def time_nogil_take1d_float64(self): - take_1d_pg2_int64() + @test_parallel(num_threads=2) + def take_1d_pg2_float64(self): + com.take_1d(self.df.float64.values, self.indexer) class nogil_take1d_int64(object): @@ -251,17 +283,19 @@ def setup(self): self.ngroups = 1000 np.random.seed(1234) self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) + if (not have_real_test_parallel): + raise NotImplementedError self.N = 10000000.0 self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), }) self.indexer = np.arange(100, (len(self.df) - 100)) - @test_parallel(num_threads=2) - def take_1d_pg2_int64(): - com.take_1d(self.df.int64.values, self.indexer) + def time_nogil_take1d_int64(self): + self.take_1d_pg2_float64() - @test_parallel(num_threads=2) - def take_1d_pg2_float64(): - com.take_1d(self.df.float64.values, self.indexer) + @test_parallel(num_threads=2) + def take_1d_pg2_int64(self): + com.take_1d(self.df.int64.values, self.indexer) - def time_nogil_take1d_int64(self): - take_1d_pg2_float64() \ No newline at end of file + @test_parallel(num_threads=2) + def take_1d_pg2_float64(self): + com.take_1d(self.df.float64.values, self.indexer) \ No newline at end of file diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 4f1f4e46b4a31..a84a5373651bb 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -1,6 +1,6 @@ -from pandas_vb_common import * -from itertools import product +from .pandas_vb_common 
import * from string import ascii_letters, digits +from itertools import product class groupby_agg_builtins1(object): @@ -128,11 +128,11 @@ def setup(self): self.labels2 = np.random.randint(0, 3, size=self.N) self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), }) - def f(g): - return 1 - def time_groupby_frame_apply(self): - self.df.groupby(['key', 'key2']).apply(f) + self.df.groupby(['key', 'key2']).apply(self.f) + + def f(self, g): + return 1 class groupby_frame_apply_overhead(object): @@ -144,11 +144,11 @@ def setup(self): self.labels2 = np.random.randint(0, 3, size=self.N) self.df = DataFrame({'key': self.labels, 'key2': self.labels2, 'value1': randn(self.N), 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N / 4)), }) - def f(g): - return 1 - def time_groupby_frame_apply_overhead(self): - self.df.groupby('key').apply(f) + self.df.groupby('key').apply(self.f) + + def f(self, g): + return 1 class groupby_frame_cython_many_columns(object): @@ -330,24 +330,24 @@ class groupby_multi_cython(object): def setup(self): self.N = 100000 self.ngroups = 100 - - def get_test_data(ngroups=100, n=self.N): - self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr - self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - - def f(): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) self.simple_series = 
Series(np.random.randn(self.N)) self.key1 = self.df['key1'] def time_groupby_multi_cython(self): self.df.groupby(['key1', 'key2']).sum() + def get_test_data(self, ngroups=100, n=100000): + self.unique_groups = range(self.ngroups) + self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) + if (len(self.arr) < n): + self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) + random.shuffle(self.arr) + return self.arr + + def f(self): + self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + class groupby_multi_different_functions(object): goal_time = 0.2 @@ -395,24 +395,24 @@ class groupby_multi_python(object): def setup(self): self.N = 100000 self.ngroups = 100 - - def get_test_data(ngroups=100, n=self.N): - self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr - self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - - def f(): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) self.simple_series = Series(np.random.randn(self.N)) self.key1 = self.df['key1'] def time_groupby_multi_python(self): self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum())) + def get_test_data(self, ngroups=100, n=100000): + self.unique_groups = range(self.ngroups) + self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) + if (len(self.arr) < n): + self.arr = np.asarray((list(self.arr) + 
self.unique_groups[:(n - len(self.arr))]), dtype=object) + random.shuffle(self.arr) + return self.arr + + def f(self): + self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + class groupby_multi_series_op(object): goal_time = 0.2 @@ -420,24 +420,24 @@ class groupby_multi_series_op(object): def setup(self): self.N = 100000 self.ngroups = 100 - - def get_test_data(ngroups=100, n=self.N): - self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr - self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - - def f(): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) self.simple_series = Series(np.random.randn(self.N)) self.key1 = self.df['key1'] def time_groupby_multi_series_op(self): self.df.groupby(['key1', 'key2'])['data1'].agg(np.std) + def get_test_data(self, ngroups=100, n=100000): + self.unique_groups = range(self.ngroups) + self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) + if (len(self.arr) < n): + self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) + random.shuffle(self.arr) + return self.arr + + def f(self): + self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + class groupby_multi_size(object): goal_time = 0.2 @@ -1468,24 +1468,24 @@ class groupby_series_simple_cython(object): def setup(self): self.N = 100000 self.ngroups = 100 - - def get_test_data(ngroups=100, n=self.N): - 
self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) - if (len(self.arr) < n): - self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) - random.shuffle(self.arr) - return self.arr - self.df = DataFrame({'key1': get_test_data(ngroups=self.ngroups), 'key2': get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) - - def f(): - self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), }) self.simple_series = Series(np.random.randn(self.N)) self.key1 = self.df['key1'] def time_groupby_series_simple_cython(self): self.df.groupby('key1').rank(pct=True) + def get_test_data(self, ngroups=100, n=100000): + self.unique_groups = range(self.ngroups) + self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) + if (len(self.arr) < n): + self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) + random.shuffle(self.arr) + return self.arr + + def f(self): + self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum())) + class groupby_simple_compress_timing(object): goal_time = 0.2 @@ -1535,12 +1535,12 @@ def setup(self): self.secid_max = int('F0000000', 16) self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1)) self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step)) - self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in xrange(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id']) + self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], 
labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id']) self.n_data = len(self.data_index) - self.columns = Index(['factor{}'.format(i) for i in xrange(1, (self.n_columns + 1))]) + self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))]) self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns) self.step = int((self.n_data * self.share_na)) - for column_index in xrange(self.n_columns): + for column_index in range(self.n_columns): self.index = column_index while (self.index < self.n_data): self.data.set_value(self.data_index[self.index], self.columns[column_index], np.nan) @@ -1644,12 +1644,12 @@ def setup(self): self.secid_max = int('F0000000', 16) self.step = ((self.secid_max - self.secid_min) // (self.n_securities - 1)) self.security_ids = map((lambda x: hex(x)[2:10].upper()), range(self.secid_min, (self.secid_max + 1), self.step)) - self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in xrange(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id']) + self.data_index = MultiIndex(levels=[self.dates.values, self.security_ids], labels=[[i for i in range(self.n_dates) for _ in xrange(self.n_securities)], (range(self.n_securities) * self.n_dates)], names=['date', 'security_id']) self.n_data = len(self.data_index) - self.columns = Index(['factor{}'.format(i) for i in xrange(1, (self.n_columns + 1))]) + self.columns = Index(['factor{}'.format(i) for i in range(1, (self.n_columns + 1))]) self.data = DataFrame(np.random.randn(self.n_data, self.n_columns), index=self.data_index, columns=self.columns) self.step = int((self.n_data * self.share_na)) - for column_index in xrange(self.n_columns): + for column_index in range(self.n_columns): self.index = column_index while (self.index < 
self.n_data): self.data.set_value(self.data_index[self.index], self.columns[column_index], np.nan) @@ -1660,6 +1660,16 @@ def time_groupby_transform_ufunc(self): self.data.groupby(level='date').transform(np.max) +class series_value_counts_float64(object): + goal_time = 0.2 + + def setup(self): + self.s = Series(np.random.randint(0, 1000, size=100000)).astype(float) + + def time_series_value_counts_float64(self): + self.s.value_counts() + + class series_value_counts_int64(object): goal_time = 0.2 diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/hdfstore_bench.py index 9e36f735f8608..7638cc2a0f8df 100644 --- a/asv_bench/benchmarks/hdfstore_bench.py +++ b/asv_bench/benchmarks/hdfstore_bench.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * import os @@ -7,15 +7,9 @@ class query_store_table(object): def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = date_range('1/1/2000', periods=25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.append('df12', self.df) @@ -25,21 +19,21 @@ def time_query_store_table(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class query_store_table_wide(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = date_range('1/1/2000', periods=25000) self.df = DataFrame(np.random.randn(25000, 100), index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.append('df11', self.df) @@ -49,21 +43,21 @@ def time_query_store_table_wide(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class read_store(object): goal_time = 0.2 def setup(self): self.f = 
'__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.put('df1', self.df) @@ -73,21 +67,21 @@ def time_read_store(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class read_store_mixed(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), 'string1': (['foo'] * 25000), 'bool1': ([True] * 25000), 'int1': np.random.randint(0, 250000, size=25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.put('df3', self.df) @@ -97,21 +91,21 @@ def time_read_store_mixed(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class read_store_table(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.append('df7', self.df) @@ -121,22 +115,22 @@ def time_read_store_table(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class read_store_table_mixed(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 10000 self.index = tm.makeStringIndex(self.N) self.df = DataFrame({'float1': randn(self.N), 'float2': randn(self.N), 'string1': (['foo'] * self.N), 'bool1': ([True] * self.N), 
'int1': np.random.randint(0, self.N, size=self.N), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.append('df5', self.df) @@ -146,20 +140,20 @@ def time_read_store_table_mixed(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class read_store_table_panel(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass - self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in xrange(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in xrange(25)]) - remove(self.f) + self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in range(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in range(25)]) + self.remove(self.f) self.store = HDFStore(self.f) self.store.append('p1', self.p) @@ -169,20 +163,20 @@ def time_read_store_table_panel(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class read_store_table_wide(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.df = DataFrame(np.random.randn(25000, 100)) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) self.store.append('df9', self.df) @@ -192,21 +186,21 @@ def time_read_store_table_wide(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store(self): @@ -215,21 +209,21 
@@ def time_write_store(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store_mixed(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), 'string1': (['foo'] * 25000), 'bool1': ([True] * 25000), 'int1': np.random.randint(0, 250000, size=25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store_mixed(self): @@ -238,21 +232,21 @@ def time_write_store_mixed(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store_table(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store_table(self): @@ -261,20 +255,20 @@ def time_write_store_table(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store_table_dc(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass - self.df = DataFrame(np.random.randn(10000, 10), columns=[('C%03d' % i) for i in xrange(10)]) - remove(self.f) + self.df = DataFrame(np.random.randn(10000, 10), columns=[('C%03d' % i) for i in range(10)]) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store_table_dc(self): @@ -283,21 +277,21 @@ def time_write_store_table_dc(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store_table_mixed(object): 
goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.index = tm.makeStringIndex(25000) self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000), 'string1': (['foo'] * 25000), 'bool1': ([True] * 25000), 'int1': np.random.randint(0, 25000, size=25000), }, index=self.index) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store_table_mixed(self): @@ -306,20 +300,20 @@ def time_write_store_table_mixed(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store_table_panel(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass - self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in xrange(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in xrange(25)]) - remove(self.f) + self.p = Panel(randn(20, 1000, 25), items=[('Item%03d' % i) for i in range(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=[('E%03d' % i) for i in range(25)]) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store_table_panel(self): @@ -328,24 +322,30 @@ def time_write_store_table_panel(self): def teardown(self): self.store.close() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class write_store_table_wide(object): goal_time = 0.2 def setup(self): self.f = '__test__.h5' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.df = DataFrame(np.random.randn(25000, 100)) - remove(self.f) + self.remove(self.f) self.store = HDFStore(self.f) def time_write_store_table_wide(self): self.store.append('df10', self.df) def teardown(self): - self.store.close() \ No newline at end of file + self.store.close() + + def remove(self, f): + try: + os.remove(self.f) + except: + pass \ No newline at end of file diff --git 
a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index 9c181c92195ea..8c65f09937df4 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class datetime_index_intersection(object): @@ -248,7 +248,7 @@ class multiindex_from_product(object): goal_time = 0.2 def setup(self): - self.iterables = [tm.makeStringIndex(10000), xrange(20)] + self.iterables = [tm.makeStringIndex(10000), range(20)] def time_multiindex_from_product(self): MultiIndex.from_product(self.iterables) diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index e76a87ab881c9..32d80a7913234 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,5 +1,8 @@ -from pandas_vb_common import * -import pandas.computation.expressions as expr +from .pandas_vb_common import * +try: + import pandas.computation.expressions as expr +except: + expr = None class dataframe_getitem_scalar(object): @@ -121,6 +124,8 @@ class indexing_dataframe_boolean_no_ne(object): goal_time = 0.2 def setup(self): + if (expr is None): + raise NotImplementedError self.df = DataFrame(np.random.randn(50000, 100)) self.df2 = DataFrame(np.random.randn(50000, 100)) expr.set_use_numexpr(False) @@ -160,6 +165,8 @@ class indexing_dataframe_boolean_st(object): goal_time = 0.2 def setup(self): + if (expr is None): + raise NotImplementedError self.df = DataFrame(np.random.randn(50000, 100)) self.df2 = DataFrame(np.random.randn(50000, 100)) expr.set_numexpr_threads(1) @@ -421,6 +428,30 @@ def time_series_loc_slice(self): self.s.loc[:800000] +class series_take_dtindex(object): + goal_time = 0.2 + + def setup(self): + self.s = Series(np.random.rand(100000)) + self.ts = Series(np.random.rand(100000), index=date_range('2011-01-01', freq='S', periods=100000)) + self.indexer = ([True, False, True, True, False] * 20000) + + def 
time_series_take_dtindex(self): + self.ts.take(self.indexer) + + +class series_take_intindex(object): + goal_time = 0.2 + + def setup(self): + self.s = Series(np.random.rand(100000)) + self.ts = Series(np.random.rand(100000), index=date_range('2011-01-01', freq='S', periods=100000)) + self.indexer = ([True, False, True, True, False] * 20000) + + def time_series_take_intindex(self): + self.s.take(self.indexer) + + class series_xs_mi_ix(object): goal_time = 0.2 diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py index 2addc810a218f..3fceed087facb 100644 --- a/asv_bench/benchmarks/inference.py +++ b/asv_bench/benchmarks/inference.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * import pandas as pd diff --git a/asv_bench/benchmarks/io_bench.py b/asv_bench/benchmarks/io_bench.py index 9eee932de8b7c..a171641502d3c 100644 --- a/asv_bench/benchmarks/io_bench.py +++ b/asv_bench/benchmarks/io_bench.py @@ -1,6 +1,9 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from pandas import concat, Timestamp -from StringIO import StringIO +try: + from StringIO import StringIO +except ImportError: + from io import StringIO class frame_to_csv(object): @@ -41,20 +44,20 @@ class frame_to_csv_mixed(object): goal_time = 0.2 def setup(self): - - def create_cols(name): - return [('%s%03d' % (name, i)) for i in xrange(5)] - self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=create_cols('float')) - self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=create_cols('int')) - self.df_bool = DataFrame(True, index=self.df_float.index, columns=create_cols('bool')) - self.df_object = DataFrame('foo', index=self.df_float.index, columns=create_cols('object')) - self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=create_cols('date')) + self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=self.create_cols('float')) + 
self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=self.create_cols('int')) + self.df_bool = DataFrame(True, index=self.df_float.index, columns=self.create_cols('bool')) + self.df_object = DataFrame('foo', index=self.df_float.index, columns=self.create_cols('object')) + self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=self.create_cols('date')) self.df_float.ix[30:500, 1:3] = np.nan self.df = concat([self.df_float, self.df_int, self.df_bool, self.df_object, self.df_dt], axis=1) def time_frame_to_csv_mixed(self): self.df.to_csv('__test__.csv') + def create_cols(self, name): + return [('%s%03d' % (name, i)) for i in range(5)] + class read_csv_infer_datetime_format_custom(object): goal_time = 0.2 diff --git a/asv_bench/benchmarks/io_sql.py b/asv_bench/benchmarks/io_sql.py index e75e691b61c96..9a6b21f9e067a 100644 --- a/asv_bench/benchmarks/io_sql.py +++ b/asv_bench/benchmarks/io_sql.py @@ -1,7 +1,7 @@ -from pandas_vb_common import * -from sqlalchemy import create_engine -import sqlite3 import sqlalchemy +from .pandas_vb_common import * +import sqlite3 +from sqlalchemy import create_engine class sql_datetime_read_and_parse_sqlalchemy(object): diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 08ae439e8fd5d..1da0d37d4a8dd 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class append_frame_single_homogenous(object): @@ -322,38 +322,38 @@ class series_align_int64_index(object): def setup(self): self.n = 1000000 - - def sample(values, k): - self.sampler = np.random.permutation(len(values)) - return values.take(self.sampler[:k]) self.sz = 500000 self.rng = np.arange(0, 10000000000000, 10000000) self.stamps = (np.datetime64(datetime.now()).view('i8') + self.rng) - self.idx1 = np.sort(sample(self.stamps, self.sz)) - self.idx2 = np.sort(sample(self.stamps, self.sz)) 
+ self.idx1 = np.sort(self.sample(self.stamps, self.sz)) + self.idx2 = np.sort(self.sample(self.stamps, self.sz)) self.ts1 = Series(np.random.randn(self.sz), self.idx1) self.ts2 = Series(np.random.randn(self.sz), self.idx2) def time_series_align_int64_index(self): (self.ts1 + self.ts2) + def sample(self, values, k): + self.sampler = np.random.permutation(len(values)) + return values.take(self.sampler[:k]) + class series_align_left_monotonic(object): goal_time = 0.2 def setup(self): self.n = 1000000 - - def sample(values, k): - self.sampler = np.random.permutation(len(values)) - return values.take(self.sampler[:k]) self.sz = 500000 self.rng = np.arange(0, 10000000000000, 10000000) self.stamps = (np.datetime64(datetime.now()).view('i8') + self.rng) - self.idx1 = np.sort(sample(self.stamps, self.sz)) - self.idx2 = np.sort(sample(self.stamps, self.sz)) + self.idx1 = np.sort(self.sample(self.stamps, self.sz)) + self.idx2 = np.sort(self.sample(self.stamps, self.sz)) self.ts1 = Series(np.random.randn(self.sz), self.idx1) self.ts2 = Series(np.random.randn(self.sz), self.idx2) def time_series_align_left_monotonic(self): - self.ts1.align(self.ts2, join='left') \ No newline at end of file + self.ts1.align(self.ts2, join='left') + + def sample(self, values, k): + self.sampler = np.random.permutation(len(values)) + return values.take(self.sampler[:k]) \ No newline at end of file diff --git a/asv_bench/benchmarks/miscellaneous.py b/asv_bench/benchmarks/miscellaneous.py index b9c02c85fb096..fe610ef4cb376 100644 --- a/asv_bench/benchmarks/miscellaneous.py +++ b/asv_bench/benchmarks/miscellaneous.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from pandas.util.decorators import cache_readonly diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py index 81fa7c2238d16..12e48295d8d05 100644 --- a/asv_bench/benchmarks/packers.py +++ b/asv_bench/benchmarks/packers.py @@ -1,9 +1,9 @@ +from .pandas_vb_common import * from 
numpy.random import randint import pandas as pd from collections import OrderedDict from pandas.compat import BytesIO import sqlite3 -from pandas_vb_common import * import os from sqlalchemy import create_engine import numpy as np @@ -16,12 +16,6 @@ class packers_read_csv(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -31,24 +25,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.to_csv(self.f) def time_packers_read_csv(self): pd.read_csv(self.f) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_excel(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -58,7 +52,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.bio = BytesIO() self.writer = pd.io.excel.ExcelWriter(self.bio, engine='xlsxwriter') self.df[:2000].to_excel(self.writer) @@ -68,18 +62,18 @@ def time_packers_read_excel(self): self.bio.seek(0) pd.read_excel(self.bio) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_hdf_store(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = 
date_range('20000101', periods=self.N, freq='H') @@ -89,24 +83,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df2.to_hdf(self.f, 'df') def time_packers_read_hdf_store(self): pd.read_hdf(self.f, 'df') + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_hdf_table(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -116,24 +110,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df2.to_hdf(self.f, 'df', format='table') def time_packers_read_hdf_table(self): pd.read_hdf(self.f, 'df') + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_json(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -143,25 +137,25 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.to_json(self.f, orient='split') self.df.index = np.arange(self.N) def time_packers_read_json(self): pd.read_json(self.f, orient='split') 
+ def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_json_date_index(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -171,24 +165,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.to_json(self.f, orient='split') def time_packers_read_json_date_index(self): pd.read_json(self.f, orient='split') + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_pack(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -198,24 +192,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df2.to_msgpack(self.f) def time_packers_read_pack(self): pd.read_msgpack(self.f) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_pickle(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -225,24 +219,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), 
index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df2.to_pickle(self.f) def time_packers_read_pickle(self): pd.read_pickle(self.f) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_sql(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -252,25 +246,25 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.engine = create_engine('sqlite:///:memory:') self.df2.to_sql('table', self.engine, if_exists='replace') def time_packers_read_sql(self): pd.read_sql_table('table', self.engine) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_stata(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -280,24 +274,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.to_stata(self.f, {'index': 'tc', }) def time_packers_read_stata(self): pd.read_stata(self.f) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_read_stata_with_validation(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - 
except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -307,7 +301,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df['int8_'] = [randint(np.iinfo(np.int8).min, (np.iinfo(np.int8).max - 27)) for _ in range(self.N)] self.df['int16_'] = [randint(np.iinfo(np.int16).min, (np.iinfo(np.int16).max - 27)) for _ in range(self.N)] self.df['int32_'] = [randint(np.iinfo(np.int32).min, (np.iinfo(np.int32).max - 27)) for _ in range(self.N)] @@ -317,18 +311,18 @@ def remove(f): def time_packers_read_stata_with_validation(self): pd.read_stata(self.f) + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_write_csv(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -338,13 +332,19 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) def time_packers_write_csv(self): self.df.to_csv(self.f) def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_excel_openpyxl(object): @@ -352,12 +352,6 @@ class packers_write_excel_openpyxl(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -367,7 +361,7 
@@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.bio = BytesIO() def time_packers_write_excel_openpyxl(self): @@ -376,18 +370,18 @@ def time_packers_write_excel_openpyxl(self): self.df[:2000].to_excel(self.writer) self.writer.save() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_write_excel_xlsxwriter(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -397,7 +391,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.bio = BytesIO() def time_packers_write_excel_xlsxwriter(self): @@ -406,18 +400,18 @@ def time_packers_write_excel_xlsxwriter(self): self.df[:2000].to_excel(self.writer) self.writer.save() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_write_excel_xlwt(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -427,7 +421,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.bio = BytesIO() 
def time_packers_write_excel_xlwt(self): @@ -436,18 +430,18 @@ def time_packers_write_excel_xlwt(self): self.df[:2000].to_excel(self.writer) self.writer.save() + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_write_hdf_store(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -457,13 +451,19 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) def time_packers_write_hdf_store(self): self.df2.to_hdf(self.f, 'df') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_hdf_table(object): @@ -471,12 +471,6 @@ class packers_write_hdf_table(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -486,13 +480,19 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) def time_packers_write_hdf_table(self): self.df2.to_hdf(self.f, 'df', table=True) def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_json(object): @@ -500,12 +500,6 @@ class packers_write_json(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - 
os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -515,14 +509,20 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.index = np.arange(self.N) def time_packers_write_json(self): self.df.to_json(self.f, orient='split') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_json_T(object): @@ -530,12 +530,6 @@ class packers_write_json_T(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -545,14 +539,20 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.index = np.arange(self.N) def time_packers_write_json_T(self): self.df.to_json(self.f, orient='columns') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_json_date_index(object): @@ -560,12 +560,6 @@ class packers_write_json_date_index(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -575,13 +569,19 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), 
randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) def time_packers_write_json_date_index(self): self.df.to_json(self.f, orient='split') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_json_mixed_delta_int_tstamp(object): @@ -589,12 +589,6 @@ class packers_write_json_mixed_delta_int_tstamp(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -604,7 +598,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.cols = [(lambda i: ('{0}_timedelta'.format(i), [pd.Timedelta(('%d seconds' % randrange(1000000.0))) for _ in range(self.N)])), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N))), (lambda i: ('{0}_timestamp'.format(i), [pd.Timestamp((1418842918083256000 + randrange(1000000000.0, 1e+18, 200))) for _ in range(self.N)]))] self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) @@ -612,7 +606,13 @@ def time_packers_write_json_mixed_delta_int_tstamp(self): self.df_mixed.to_json(self.f, orient='split') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_json_mixed_float_int(object): @@ -620,12 +620,6 @@ class packers_write_json_mixed_float_int(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 
self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -635,7 +629,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N)))] self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) @@ -643,7 +637,13 @@ def time_packers_write_json_mixed_float_int(self): self.df_mixed.to_json(self.f, orient='index') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_json_mixed_float_int_T(object): @@ -651,12 +651,6 @@ class packers_write_json_mixed_float_int_T(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -666,7 +660,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N)))] self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) @@ -674,7 +668,13 @@ def time_packers_write_json_mixed_float_int_T(self): self.df_mixed.to_json(self.f, orient='columns') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + 
try: + os.remove(self.f) + except: + pass class packers_write_json_mixed_float_int_str(object): @@ -682,12 +682,6 @@ class packers_write_json_mixed_float_int_str(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -697,7 +691,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N))), (lambda i: ('{0}_str'.format(i), [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]))] self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) @@ -705,7 +699,13 @@ def time_packers_write_json_mixed_float_int_str(self): self.df_mixed.to_json(self.f, orient='split') def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_pack(object): @@ -713,12 +713,6 @@ class packers_write_pack(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -728,13 +722,19 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) def time_packers_write_pack(self): self.df2.to_msgpack(self.f) def teardown(self): - remove(self.f) + 
self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_pickle(object): @@ -742,12 +742,6 @@ class packers_write_pickle(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -757,13 +751,19 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) def time_packers_write_pickle(self): self.df2.to_pickle(self.f) def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_sql(object): @@ -771,12 +771,6 @@ class packers_write_sql(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -786,24 +780,24 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.engine = create_engine('sqlite:///:memory:') def time_packers_write_sql(self): self.df2.to_sql('table', self.engine, if_exists='replace') + def remove(self, f): + try: + os.remove(self.f) + except: + pass + class packers_write_stata(object): goal_time = 0.2 def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -813,14 +807,20 @@ def remove(f): 
self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df.to_stata(self.f, {'index': 'tc', }) def time_packers_write_stata(self): self.df.to_stata(self.f, {'index': 'tc', }) def teardown(self): - remove(self.f) + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass class packers_write_stata_with_validation(object): @@ -828,12 +828,6 @@ class packers_write_stata_with_validation(object): def setup(self): self.f = '__test__.msg' - - def remove(f): - try: - os.remove(self.f) - except: - pass self.N = 100000 self.C = 5 self.index = date_range('20000101', periods=self.N, freq='H') @@ -843,7 +837,7 @@ def remove(f): self.index = date_range('20000101', periods=self.N, freq='H') self.df2 = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)]), index=self.index) self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - remove(self.f) + self.remove(self.f) self.df['int8_'] = [randint(np.iinfo(np.int8).min, (np.iinfo(np.int8).max - 27)) for _ in range(self.N)] self.df['int16_'] = [randint(np.iinfo(np.int16).min, (np.iinfo(np.int16).max - 27)) for _ in range(self.N)] self.df['int32_'] = [randint(np.iinfo(np.int32).min, (np.iinfo(np.int32).max - 27)) for _ in range(self.N)] @@ -854,4 +848,10 @@ def time_packers_write_stata_with_validation(self): self.df.to_stata(self.f, {'index': 'tc', }) def teardown(self): - remove(self.f) \ No newline at end of file + self.remove(self.f) + + def remove(self, f): + try: + os.remove(self.f) + except: + pass \ No newline at end of file diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py deleted file mode 120000 index 6e2e449a4c00a..0000000000000 --- 
a/asv_bench/benchmarks/pandas_vb_common.py +++ /dev/null @@ -1 +0,0 @@ -../../vb_suite/pandas_vb_common.py \ No newline at end of file diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py new file mode 100644 index 0000000000000..a1326d63a112a --- /dev/null +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -0,0 +1,30 @@ +from pandas import * +import pandas as pd +from datetime import timedelta +from numpy.random import randn +from numpy.random import randint +from numpy.random import permutation +import pandas.util.testing as tm +import random +import numpy as np +try: + from pandas.compat import range +except ImportError: + pass + +np.random.seed(1234) +try: + import pandas._tseries as lib +except: + import pandas.lib as lib + +try: + Panel = WidePanel +except Exception: + pass + +# didn't add to namespace until later +try: + from pandas.core.index import MultiIndex +except ImportError: + pass diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py index c755cb122a0bf..0b0e73847aa96 100644 --- a/asv_bench/benchmarks/panel_ctor.py +++ b/asv_bench/benchmarks/panel_ctor.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class panel_from_dict_all_different_indexes(object): @@ -8,7 +8,7 @@ def setup(self): self.data_frames = {} self.start = datetime(1990, 1, 1) self.end = datetime(2012, 1, 1) - for x in xrange(100): + for x in range(100): self.end += timedelta(days=1) self.dr = np.asarray(date_range(self.start, self.end)) self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) @@ -23,7 +23,7 @@ class panel_from_dict_equiv_indexes(object): def setup(self): self.data_frames = {} - for x in xrange(100): + for x in range(100): self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1))) self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * 
len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) self.data_frames[x] = self.df @@ -38,7 +38,7 @@ class panel_from_dict_same_index(object): def setup(self): self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1))) self.data_frames = {} - for x in xrange(100): + for x in range(100): self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) self.data_frames[x] = self.df @@ -53,7 +53,7 @@ def setup(self): self.data_frames = {} self.start = datetime(1990, 1, 1) self.end = datetime(2012, 1, 1) - for x in xrange(100): + for x in range(100): if (x == 50): self.end += timedelta(days=1) self.dr = np.asarray(date_range(self.start, self.end)) diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py index 4145b68dca997..90118eaf6e407 100644 --- a/asv_bench/benchmarks/panel_methods.py +++ b/asv_bench/benchmarks/panel_methods.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class panel_pct_change_items(object): diff --git a/asv_bench/benchmarks/parser_vb.py b/asv_bench/benchmarks/parser_vb.py index 46167dc2bb33c..18cd4de6cc9c5 100644 --- a/asv_bench/benchmarks/parser_vb.py +++ b/asv_bench/benchmarks/parser_vb.py @@ -1,7 +1,10 @@ -from cStringIO import StringIO -from pandas_vb_common import * +from .pandas_vb_common import * import os from pandas import read_csv, read_table +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO class read_csv_comment2(object): diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index d1df1b429c656..f46082ac6f288 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * try: from pandas import date_range except ImportError: diff --git a/asv_bench/benchmarks/reindex.py 
b/asv_bench/benchmarks/reindex.py index 03e654b4886cc..b1c039058ff8f 100644 --- a/asv_bench/benchmarks/reindex.py +++ b/asv_bench/benchmarks/reindex.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from random import shuffle @@ -168,20 +168,20 @@ def setup(self): self.ts3 = self.ts2.reindex(self.ts.index) self.ts4 = self.ts3.astype('float32') - def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') + def time_reindex_daterange_backfill(self): + self.backfill(self.ts2, self.ts.index) - def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') + def pad(self, source_series, target_index): + try: + source_series.reindex(target_index, method='pad') + except: + source_series.reindex(target_index, fillMethod='pad') - def time_reindex_daterange_backfill(self): - backfill(self.ts2, self.ts.index) + def backfill(self, source_series, target_index): + try: + source_series.reindex(target_index, method='backfill') + except: + source_series.reindex(target_index, fillMethod='backfill') class reindex_daterange_pad(object): @@ -194,20 +194,20 @@ def setup(self): self.ts3 = self.ts2.reindex(self.ts.index) self.ts4 = self.ts3.astype('float32') - def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') + def time_reindex_daterange_pad(self): + self.pad(self.ts2, self.ts.index) - def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') + def pad(self, source_series, target_index): + try: + source_series.reindex(target_index, method='pad') + except: + source_series.reindex(target_index, fillMethod='pad') - 
def time_reindex_daterange_pad(self): - pad(self.ts2, self.ts.index) + def backfill(self, source_series, target_index): + try: + source_series.reindex(target_index, method='backfill') + except: + source_series.reindex(target_index, fillMethod='backfill') class reindex_fillna_backfill(object): @@ -220,21 +220,21 @@ def setup(self): self.ts3 = self.ts2.reindex(self.ts.index) self.ts4 = self.ts3.astype('float32') - def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') - - def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') - def time_reindex_fillna_backfill(self): self.ts3.fillna(method='backfill') + def pad(self, source_series, target_index): + try: + source_series.reindex(target_index, method='pad') + except: + source_series.reindex(target_index, fillMethod='pad') + + def backfill(self, source_series, target_index): + try: + source_series.reindex(target_index, method='backfill') + except: + source_series.reindex(target_index, fillMethod='backfill') + class reindex_fillna_backfill_float32(object): goal_time = 0.2 @@ -246,21 +246,21 @@ def setup(self): self.ts3 = self.ts2.reindex(self.ts.index) self.ts4 = self.ts3.astype('float32') - def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') - - def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') - def time_reindex_fillna_backfill_float32(self): self.ts4.fillna(method='backfill') + def pad(self, source_series, target_index): + try: + source_series.reindex(target_index, method='pad') + except: + source_series.reindex(target_index, fillMethod='pad') + + def 
backfill(self, source_series, target_index): + try: + source_series.reindex(target_index, method='backfill') + except: + source_series.reindex(target_index, fillMethod='backfill') + class reindex_fillna_pad(object): goal_time = 0.2 @@ -272,21 +272,21 @@ def setup(self): self.ts3 = self.ts2.reindex(self.ts.index) self.ts4 = self.ts3.astype('float32') - def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') - - def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') - def time_reindex_fillna_pad(self): self.ts3.fillna(method='pad') + def pad(self, source_series, target_index): + try: + source_series.reindex(target_index, method='pad') + except: + source_series.reindex(target_index, fillMethod='pad') + + def backfill(self, source_series, target_index): + try: + source_series.reindex(target_index, method='backfill') + except: + source_series.reindex(target_index, fillMethod='backfill') + class reindex_fillna_pad_float32(object): goal_time = 0.2 @@ -298,21 +298,21 @@ def setup(self): self.ts3 = self.ts2.reindex(self.ts.index) self.ts4 = self.ts3.astype('float32') - def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') - - def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') - def time_reindex_fillna_pad_float32(self): self.ts4.fillna(method='pad') + def pad(self, source_series, target_index): + try: + source_series.reindex(target_index, method='pad') + except: + source_series.reindex(target_index, fillMethod='pad') + + def backfill(self, source_series, target_index): + try: + source_series.reindex(target_index, 
method='backfill') + except: + source_series.reindex(target_index, fillMethod='backfill') + class reindex_frame_level_align(object): goal_time = 0.2 @@ -362,18 +362,18 @@ class series_align_irregular_string(object): def setup(self): self.n = 50000 self.indices = tm.makeStringIndex(self.n) - - def sample(values, k): - self.sampler = np.arange(len(values)) - shuffle(self.sampler) - return values.take(self.sampler[:k]) self.subsample_size = 40000 self.x = Series(np.random.randn(50000), self.indices) - self.y = Series(np.random.randn(self.subsample_size), index=sample(self.indices, self.subsample_size)) + self.y = Series(np.random.randn(self.subsample_size), index=self.sample(self.indices, self.subsample_size)) def time_series_align_irregular_string(self): (self.x + self.y) + def sample(self, values, k): + self.sampler = np.arange(len(values)) + shuffle(self.sampler) + return values.take(self.sampler[:k]) + class series_drop_duplicates_int(object): goal_time = 0.2 diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py index 9b78c287c5ad4..e9f33ebfce0bd 100644 --- a/asv_bench/benchmarks/replace.py +++ b/asv_bench/benchmarks/replace.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from pandas.compat import range from datetime import timedelta diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index b4081957af97b..604fa5092a231 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from pandas.core.reshape import melt @@ -22,19 +22,19 @@ class reshape_pivot_time_series(object): def setup(self): self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]) self.df = DataFrame(np.random.randn(10000, 4), index=self.index) - - def unpivot(frame): - (N, K) = frame.shape - self.data = {'value': frame.values.ravel('F'), 'variable': 
np.asarray(frame.columns).repeat(N), 'date': np.tile(np.asarray(frame.index), K), } - return DataFrame(self.data, columns=['date', 'variable', 'value']) self.index = date_range('1/1/2000', periods=10000, freq='h') self.df = DataFrame(randn(10000, 50), index=self.index, columns=range(50)) - self.pdf = unpivot(self.df) + self.pdf = self.unpivot(self.df) self.f = (lambda : self.pdf.pivot('date', 'variable', 'value')) def time_reshape_pivot_time_series(self): self.f() + def unpivot(self, frame): + (N, K) = frame.shape + self.data = {'value': frame.values.ravel('F'), 'variable': np.asarray(frame.columns).repeat(N), 'date': np.tile(np.asarray(frame.index), K), } + return DataFrame(self.data, columns=['date', 'variable', 'value']) + class reshape_stack_simple(object): goal_time = 0.2 diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 9cd61c741dae1..d2167a8b6e9e1 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class series_isin_int64(object): diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index dbf35f5e40f55..d7ee58fc978ea 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -1,6 +1,6 @@ -from pandas_vb_common import * -import scipy.sparse +from .pandas_vb_common import * import pandas.sparse.series +import scipy.sparse from pandas.core.sparse import SparseSeries, SparseDataFrame from pandas.core.sparse import SparseDataFrame diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py index 98e2bbfce1a44..4125357455d2e 100644 --- a/asv_bench/benchmarks/stat_ops.py +++ b/asv_bench/benchmarks/stat_ops.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * class stat_ops_frame_mean_float_axis_0(object): diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 
5adfbf4c2557d..e4f91b1b9c0c6 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * import string import itertools as IT import pandas.util.testing as testing @@ -8,99 +8,99 @@ class strings_cat(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_cat(self): self.many.str.cat(sep=',') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_center(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_center(self): self.many.str.center(100) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_contains_few(object): goal_time = 0.2 def setup(self): - - 
def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_contains_few(self): self.few.str.contains('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_contains_few_noregex(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_contains_few_noregex(self): self.few.str.contains('matchthis', regex=False) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_contains_many(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + 
(string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_contains_many(self): self.many.str.contains('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_contains_many_noregex(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_contains_many_noregex(self): self.many.str.contains('matchthis', regex=False) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_count(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_count(self): 
self.many.str.count('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_encode_decode(object): goal_time = 0.2 @@ -116,278 +116,278 @@ class strings_endswith(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_endswith(self): self.many.str.endswith('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_extract(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_extract(self): self.many.str.extract('(\\w*)matchthis(\\w*)') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class 
strings_findall(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_findall(self): self.many.str.findall('[A-Z]+') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_get(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_get(self): self.many.str.get(0) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_get_dummies(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase 
* 42)), strlen=19, size=10000) - self.s = make_series(string.uppercase, strlen=10, size=10000).str.join('|') + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) + self.s = self.make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|') def time_strings_get_dummies(self): self.s.str.get_dummies('|') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_join_split(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_join_split(self): self.many.str.join('--').str.split('--') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_join_split_expand(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = 
self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_join_split_expand(self): self.many.str.join('--').str.split('--', expand=True) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_len(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_len(self): self.many.str.len() + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_lower(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_lower(self): self.many.str.lower() + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class 
strings_lstrip(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_lstrip(self): self.many.str.lstrip('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_match(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_match(self): self.many.str.match('mat..this') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_pad(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + 
(string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_pad(self): self.many.str.pad(100, side='both') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_repeat(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_repeat(self): self.many.str.repeat(list(IT.islice(IT.cycle(range(1, 4)), len(self.many)))) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_replace(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_replace(self): 
self.many.str.replace('(matchthis)', '\x01\x01') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_rstrip(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_rstrip(self): self.many.str.rstrip('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_slice(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_slice(self): self.many.str.slice(5, 15, 2) + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_startswith(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - 
return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_startswith(self): self.many.str.startswith('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_strip(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_strip(self): self.many.str.strip('matchthis') + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_title(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + 
string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_title(self): self.many.str.title() + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + class strings_upper(object): goal_time = 0.2 def setup(self): - - def make_series(letters, strlen, size): - return Series(np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))) - self.many = make_series(('matchthis' + string.uppercase), strlen=19, size=10000) - self.few = make_series(('matchthis' + (string.uppercase * 42)), strlen=19, size=10000) + self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) + self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) def time_strings_upper(self): - self.many.str.upper() \ No newline at end of file + self.many.str.upper() + + def make_series(self, letters, strlen, size): + return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) \ No newline at end of file diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index 36a0f98e3f5ef..2f252a4d3e1dc 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -1,4 +1,4 @@ -from pandas_vb_common import * +from .pandas_vb_common import * from pandas import to_timedelta diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 266c198de1455..db0c526f25c7b 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -1,10 +1,13 @@ from pandas.tseries.converter import DatetimeConverter +from .pandas_vb_common import * import pandas as pd from datetime import timedelta import datetime as dt -from 
pandas_vb_common import * +try: + import pandas.tseries.holiday +except ImportError: + pass from pandas.tseries.frequencies import infer_freq -import pandas.tseries.holiday import numpy as np @@ -631,6 +634,63 @@ def time_timeseries_custom_bmonthend_incr_n(self): (self.date + (10 * self.cme)) +class timeseries_datetimeindex_offset_delta(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + if hasattr(Series, 'convert'): + Series.resample = Series.convert + self.ts = Series(np.random.randn(self.N), index=self.rng) + self.N = 100000 + self.idx1 = date_range(start='20140101', freq='T', periods=self.N) + self.delta_offset = pd.offsets.Day() + self.fast_offset = pd.offsets.DateOffset(months=2, days=2) + self.slow_offset = pd.offsets.BusinessDay() + + def time_timeseries_datetimeindex_offset_delta(self): + (self.idx1 + self.delta_offset) + + +class timeseries_datetimeindex_offset_fast(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + if hasattr(Series, 'convert'): + Series.resample = Series.convert + self.ts = Series(np.random.randn(self.N), index=self.rng) + self.N = 100000 + self.idx1 = date_range(start='20140101', freq='T', periods=self.N) + self.delta_offset = pd.offsets.Day() + self.fast_offset = pd.offsets.DateOffset(months=2, days=2) + self.slow_offset = pd.offsets.BusinessDay() + + def time_timeseries_datetimeindex_offset_fast(self): + (self.idx1 + self.fast_offset) + + +class timeseries_datetimeindex_offset_slow(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + if hasattr(Series, 'convert'): + Series.resample = Series.convert + self.ts = Series(np.random.randn(self.N), index=self.rng) + self.N = 100000 + self.idx1 = date_range(start='20140101', freq='T', periods=self.N) + self.delta_offset = pd.offsets.Day() + 
self.fast_offset = pd.offsets.DateOffset(months=2, days=2) + self.slow_offset = pd.offsets.BusinessDay() + + def time_timeseries_datetimeindex_offset_slow(self): + (self.idx1 + self.slow_offset) + + class timeseries_day_apply(object): goal_time = 0.2 @@ -723,15 +783,15 @@ def setup(self): self.idx1 = date_range(start='20140101', freq='T', periods=self.N) self.idx2 = period_range(start='20140101', freq='T', periods=self.N) - def iter_n(iterable, n=None): - self.i = 0 - for _ in iterable: - self.i += 1 - if ((n is not None) and (self.i > n)): - break - def time_timeseries_iter_datetimeindex(self): - iter_n(self.idx1) + self.iter_n(self.idx1) + + def iter_n(self, iterable, n=None): + self.i = 0 + for _ in iterable: + self.i += 1 + if ((n is not None) and (self.i > n)): + break class timeseries_iter_datetimeindex_preexit(object): @@ -748,15 +808,15 @@ def setup(self): self.idx1 = date_range(start='20140101', freq='T', periods=self.N) self.idx2 = period_range(start='20140101', freq='T', periods=self.N) - def iter_n(iterable, n=None): - self.i = 0 - for _ in iterable: - self.i += 1 - if ((n is not None) and (self.i > n)): - break - def time_timeseries_iter_datetimeindex_preexit(self): - iter_n(self.idx1, self.M) + self.iter_n(self.idx1, self.M) + + def iter_n(self, iterable, n=None): + self.i = 0 + for _ in iterable: + self.i += 1 + if ((n is not None) and (self.i > n)): + break class timeseries_iter_periodindex(object): @@ -773,15 +833,15 @@ def setup(self): self.idx1 = date_range(start='20140101', freq='T', periods=self.N) self.idx2 = period_range(start='20140101', freq='T', periods=self.N) - def iter_n(iterable, n=None): - self.i = 0 - for _ in iterable: - self.i += 1 - if ((n is not None) and (self.i > n)): - break - def time_timeseries_iter_periodindex(self): - iter_n(self.idx2) + self.iter_n(self.idx2) + + def iter_n(self, iterable, n=None): + self.i = 0 + for _ in iterable: + self.i += 1 + if ((n is not None) and (self.i > n)): + break class 
timeseries_iter_periodindex_preexit(object): @@ -798,15 +858,15 @@ def setup(self): self.idx1 = date_range(start='20140101', freq='T', periods=self.N) self.idx2 = period_range(start='20140101', freq='T', periods=self.N) - def iter_n(iterable, n=None): - self.i = 0 - for _ in iterable: - self.i += 1 - if ((n is not None) and (self.i > n)): - break - def time_timeseries_iter_periodindex_preexit(self): - iter_n(self.idx2, self.M) + self.iter_n(self.idx2, self.M) + + def iter_n(self, iterable, n=None): + self.i = 0 + for _ in iterable: + self.i += 1 + if ((n is not None) and (self.i > n)): + break class timeseries_large_lookup_value(object): @@ -859,6 +919,63 @@ def time_timeseries_resample_datetime64(self): self.ts.resample('1S', how='last') +class timeseries_series_offset_delta(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + if hasattr(Series, 'convert'): + Series.resample = Series.convert + self.ts = Series(np.random.randn(self.N), index=self.rng) + self.N = 100000 + self.s = Series(date_range(start='20140101', freq='T', periods=self.N)) + self.delta_offset = pd.offsets.Day() + self.fast_offset = pd.offsets.DateOffset(months=2, days=2) + self.slow_offset = pd.offsets.BusinessDay() + + def time_timeseries_series_offset_delta(self): + (self.s + self.delta_offset) + + +class timeseries_series_offset_fast(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + if hasattr(Series, 'convert'): + Series.resample = Series.convert + self.ts = Series(np.random.randn(self.N), index=self.rng) + self.N = 100000 + self.s = Series(date_range(start='20140101', freq='T', periods=self.N)) + self.delta_offset = pd.offsets.Day() + self.fast_offset = pd.offsets.DateOffset(months=2, days=2) + self.slow_offset = pd.offsets.BusinessDay() + + def time_timeseries_series_offset_fast(self): + (self.s + self.fast_offset) + + 
+class timeseries_series_offset_slow(object): + goal_time = 0.2 + + def setup(self): + self.N = 100000 + self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + if hasattr(Series, 'convert'): + Series.resample = Series.convert + self.ts = Series(np.random.randn(self.N), index=self.rng) + self.N = 100000 + self.s = Series(date_range(start='20140101', freq='T', periods=self.N)) + self.delta_offset = pd.offsets.Day() + self.fast_offset = pd.offsets.DateOffset(months=2, days=2) + self.slow_offset = pd.offsets.BusinessDay() + + def time_timeseries_series_offset_slow(self): + (self.s + self.slow_offset) + + class timeseries_slice_minutely(object): goal_time = 0.2 diff --git a/asv_bench/vbench_to_asv.py b/asv_bench/vbench_to_asv.py index b3980ffed1a57..c3041ec2b1ba1 100644 --- a/asv_bench/vbench_to_asv.py +++ b/asv_bench/vbench_to_asv.py @@ -43,7 +43,29 @@ def __init__(self): def visit_ClassDef(self, node): self.transforms = {} self.in_class_define = True + + functions_to_promote = [] + setup_func = None + + for class_func in ast.iter_child_nodes(node): + if isinstance(class_func, ast.FunctionDef): + if class_func.name == 'setup': + setup_func = class_func + for anon_func in ast.iter_child_nodes(class_func): + if isinstance(anon_func, ast.FunctionDef): + functions_to_promote.append(anon_func) + + if setup_func: + for func in functions_to_promote: + setup_func.body.remove(func) + func.args.args.insert(0, ast.Name(id='self', ctx=ast.Load())) + node.body.append(func) + self.transforms[func.name] = 'self.' 
+ func.name + + ast.fix_missing_locations(node) + self.generic_visit(node) + return node def visit_TryExcept(self, node): @@ -81,18 +103,8 @@ def visit_FunctionDef(self, node): """Delete functions that are empty due to imports being moved""" self.in_class_define = False - if self.in_setup: - node.col_offset -= 4 - ast.increment_lineno(node, -1) - - if node.name == 'setup': - self.in_setup = True - self.generic_visit(node) - if node.name == 'setup': - self.in_setup = False - if node.body: return node diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 4ec2258df56f2..2c9b6a0a889f4 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -512,9 +512,49 @@ entire suite. This is done using one of the following constructs: Running the performance test suite ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Performance matters and it is worth considering that your code has not introduced +performance regressions. *pandas* is in the process of migrating to the +`asv library <https://github.com/spacetelescope/asv>`__ +to enable easy monitoring of the performance of critical *pandas* operations. +These benchmarks are all found in the ``pandas/asv_bench`` directory. *asv* +supports both python2 and python3. + +.. note:: + + The *asv* benchmark suite was translated from the previous framework, vbench, + so many stylistic issues are likely a result of automated transformation of the + code. + +To install asv:: + + pip install git+https://github.com/spacetelescope/asv + +If you need to run a benchmark, change your directory to asv_bench/ and run +the following if you have been developing on master:: + + asv continuous master + +Otherwise, if you are working on another branch, either of the following can be used:: + + asv continuous master HEAD + asv continuous master your_branch + +This will checkout the master revision and run the suite on both master and +your commit. Running the full test suite can take up to one hour and use up +to 3GB of RAM. 
Usually it is sufficient to paste a subset of the results in +to the Pull Request to show that the committed changes do not cause unexpected +performance regressions. + +You can run specific benchmarks using the *-b* flag which takes a regular expression. + +Information on how to write a benchmark can be found in +`*asv*'s documentation http://asv.readthedocs.org/en/latest/writing_benchmarks.html`. + +Running the vbench performance test suite (phasing out) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Performance matters and it is worth considering that your code has not introduced -performance regressions. Currently *pandas* uses the `vbench library <https://github.com/pydata/vbench>`__ +performance regressions. Historically, *pandas* used `vbench library <https://github.com/pydata/vbench>`__ to enable easy monitoring of the performance of critical *pandas* operations. These benchmarks are all found in the ``pandas/vb_suite`` directory. vbench currently only works on python2. @@ -530,7 +570,7 @@ using pip. If you need to run a benchmark, change your directory to the *pandas This will checkout the master revision and run the suite on both master and your commit. Running the full test suite can take up to one hour and use up -to 3GB of RAM. Usually it is sufficient to past a subset of the results in +to 3GB of RAM. Usually it is sufficient to paste a subset of the results in to the Pull Request to show that the committed changes do not cause unexpected performance regressions. 
diff --git a/vb_suite/attrs_caching.py b/vb_suite/attrs_caching.py index e196546e632fe..a7e3ed7094ed6 100644 --- a/vb_suite/attrs_caching.py +++ b/vb_suite/attrs_caching.py @@ -1,6 +1,6 @@ from vbench.benchmark import Benchmark -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py index cd8d1ad93b6e1..4c74688ce660e 100644 --- a/vb_suite/binary_ops.py +++ b/vb_suite/binary_ops.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ SECTION = 'Binary ops' diff --git a/vb_suite/categoricals.py b/vb_suite/categoricals.py index cb33f1bb6c0b1..a08d479df20cb 100644 --- a/vb_suite/categoricals.py +++ b/vb_suite/categoricals.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/ctors.py b/vb_suite/ctors.py index 6af8e65b8f57d..8123322383f0a 100644 --- a/vb_suite/ctors.py +++ b/vb_suite/ctors.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/eval.py b/vb_suite/eval.py index a350cdc54cd17..bf80aad956184 100644 --- a/vb_suite/eval.py +++ b/vb_suite/eval.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * import pandas as pd df = DataFrame(np.random.randn(20000, 100)) 
df2 = DataFrame(np.random.randn(20000, 100)) @@ -112,7 +112,7 @@ start_date=datetime(2013, 7, 26)) -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ setup = common_setup + """ diff --git a/vb_suite/frame_ctor.py b/vb_suite/frame_ctor.py index 8ad63fc556c2e..0d57da7b88d3b 100644 --- a/vb_suite/frame_ctor.py +++ b/vb_suite/frame_ctor.py @@ -5,7 +5,7 @@ except: import pandas.core.datetools as offsets -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * try: from pandas.tseries.offsets import * except: @@ -40,7 +40,7 @@ # nested dict, integer indexes, regression described in #621 setup = common_setup + """ -data = dict((i,dict((j,float(j)) for j in xrange(100))) for i in xrange(2000)) +data = dict((i,dict((j,float(j)) for j in range(100))) for i in xrange(2000)) """ frame_ctor_nested_dict_int64 = Benchmark("DataFrame(data)", setup) diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index ce5109efe8f6d..46343e9c607fd 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- @@ -98,11 +98,11 @@ def g(): pass def h(): - for i in xrange(10000): + for i in range(10000): df2['A'] def j(): - for i in xrange(10000): + for i in range(10000): df3[0] """ @@ -126,8 +126,8 @@ def j(): setup = common_setup + """ idx = date_range('1/1/2000', periods=100000, freq='D') df = DataFrame(randn(100000, 1),columns=['A'],index=idx) -def f(x): - x = x.copy() +def f(df): + x = df.copy() x['date'] = x.index """ @@ -494,7 +494,7 @@ def test_unequal(name): setup = common_setup + """ def get_data(n=100000): - return ((x, x*20, x*100) for x in xrange(n)) + return ((x, x*20, x*100) for x in range(n)) """ 
frame_from_records_generator = Benchmark('df = DataFrame.from_records(get_data())', diff --git a/vb_suite/gil.py b/vb_suite/gil.py index d5aec7c3e2917..df2bd2dcd8db4 100644 --- a/vb_suite/gil.py +++ b/vb_suite/gil.py @@ -1,11 +1,20 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ basic = common_setup + """ -from pandas.util.testing import test_parallel +try: + from pandas.util.testing import test_parallel + have_real_test_parallel = True +except ImportError: + have_real_test_parallel = False + def test_parallel(num_threads=1): + def wrapper(fname): + return fname + + return wrapper N = 1000000 ngroups = 1000 @@ -13,6 +22,9 @@ df = DataFrame({'key' : np.random.randint(0,ngroups,size=N), 'data' : np.random.randn(N) }) + +if not have_real_test_parallel: + raise NotImplementedError """ setup = basic + """ diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 3e3b0241545e5..bc21372225322 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -1,14 +1,14 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ setup = common_setup + """ N = 100000 ngroups = 100 -def get_test_data(ngroups=100, n=N): +def get_test_data(ngroups=100, n=100000): unique_groups = range(ngroups) arr = np.asarray(np.tile(unique_groups, n / ngroups), dtype=object) @@ -429,16 +429,16 @@ def f(g): security_ids = map(lambda x: hex(x)[2:10].upper(), range(secid_min, secid_max + 1, step)) data_index = MultiIndex(levels=[dates.values, security_ids], - labels=[[i for i in xrange(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates], + labels=[[i for i in range(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates], names=['date', 'security_id']) n_data = len(data_index) -columns = Index(['factor{}'.format(i) for i in 
xrange(1, n_columns + 1)]) +columns = Index(['factor{}'.format(i) for i in range(1, n_columns + 1)]) data = DataFrame(np.random.randn(n_data, n_columns), index=data_index, columns=columns) step = int(n_data * share_na) -for column_index in xrange(n_columns): +for column_index in range(n_columns): index = column_index while index < n_data: data.set_value(data_index[index], columns[column_index], np.nan) diff --git a/vb_suite/hdfstore_bench.py b/vb_suite/hdfstore_bench.py index a822ad1c614be..393fd4cc77e66 100644 --- a/vb_suite/hdfstore_bench.py +++ b/vb_suite/hdfstore_bench.py @@ -3,7 +3,7 @@ start_date = datetime(2012, 7, 1) -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * import os f = '__test__.h5' @@ -234,8 +234,8 @@ def remove(f): # select from a panel table setup13 = common_setup + """ -p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in xrange(20) ], - major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in xrange(25) ]) +p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in range(20) ], + major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in range(25) ]) remove(f) store = HDFStore(f) @@ -251,8 +251,8 @@ def remove(f): # write to a panel table setup14 = common_setup + """ -p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in xrange(20) ], - major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in xrange(25) ]) +p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in range(20) ], + major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in range(25) ]) remove(f) store = HDFStore(f) @@ -266,7 +266,7 @@ def remove(f): # write to a table (data_columns) setup15 = common_setup + """ -df = DataFrame(np.random.randn(10000,10),columns = [ 'C%03d' % i for i in xrange(10) ]) +df = DataFrame(np.random.randn(10000,10),columns = [ 'C%03d' % i for i in range(10) ]) 
remove(f) store = HDFStore(f) diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py index 768eb2658af8f..2ab2bc15f3853 100644 --- a/vb_suite/index_object.py +++ b/vb_suite/index_object.py @@ -4,7 +4,7 @@ SECTION = "Index / MultiIndex objects" -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- @@ -111,7 +111,7 @@ # setup = common_setup + """ -iterables = [tm.makeStringIndex(10000), xrange(20)] +iterables = [tm.makeStringIndex(10000), range(20)] """ multiindex_from_product = Benchmark('MultiIndex.from_product(iterables)', diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py index f2236c48fb002..3d95d52dccd71 100644 --- a/vb_suite/indexing.py +++ b/vb_suite/indexing.py @@ -3,7 +3,7 @@ SECTION = 'Indexing and scalar value access' -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- @@ -140,7 +140,13 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.computation.expressions as expr +try: + import pandas.computation.expressions as expr +except: + expr = None + +if expr is None: + raise NotImplementedError df = DataFrame(np.random.randn(50000, 100)) df2 = DataFrame(np.random.randn(50000, 100)) expr.set_numexpr_threads(1) @@ -152,7 +158,13 @@ setup = common_setup + """ -import pandas.computation.expressions as expr +try: + import pandas.computation.expressions as expr +except: + expr = None + +if expr is None: + raise NotImplementedError df = DataFrame(np.random.randn(50000, 100)) df2 = DataFrame(np.random.randn(50000, 100)) expr.set_use_numexpr(False) diff --git a/vb_suite/inference.py b/vb_suite/inference.py index 8855f7e654bb1..aaa51aa5163ce 100644 --- a/vb_suite/inference.py +++ b/vb_suite/inference.py @@ -4,7 +4,7 @@ # from GH 7332 -setup = """from pandas_vb_common import * +setup 
= """from .pandas_vb_common import * import pandas as pd N = 500000 df_int64 = DataFrame(dict(A = np.arange(N,dtype='int64'), B = np.arange(N,dtype='int64'))) diff --git a/vb_suite/io_bench.py b/vb_suite/io_bench.py index 483d61387898d..af5f6076515cc 100644 --- a/vb_suite/io_bench.py +++ b/vb_suite/io_bench.py @@ -1,8 +1,8 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * -from StringIO import StringIO +common_setup = """from .pandas_vb_common import * +from io import StringIO """ #---------------------------------------------------------------------- @@ -77,7 +77,7 @@ from pandas import concat, Timestamp def create_cols(name): - return [ "%s%03d" % (name,i) for i in xrange(5) ] + return [ "%s%03d" % (name,i) for i in range(5) ] df_float = DataFrame(np.random.randn(5000, 5),dtype='float64',columns=create_cols('float')) df_int = DataFrame(np.random.randn(5000, 5),dtype='int64',columns=create_cols('int')) df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool')) diff --git a/vb_suite/io_sql.py b/vb_suite/io_sql.py index 7f580165939bb..ba8367e7e356b 100644 --- a/vb_suite/io_sql.py +++ b/vb_suite/io_sql.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * import sqlite3 import sqlalchemy from sqlalchemy import create_engine diff --git a/vb_suite/join_merge.py b/vb_suite/join_merge.py index 244c6abe71b05..238a129552e90 100644 --- a/vb_suite/join_merge.py +++ b/vb_suite/join_merge.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ setup = common_setup + """ diff --git a/vb_suite/miscellaneous.py b/vb_suite/miscellaneous.py index 27efadc7acfe0..da2c736e79ea7 100644 --- a/vb_suite/miscellaneous.py +++ 
b/vb_suite/miscellaneous.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/packers.py b/vb_suite/packers.py index 60738a62bd287..69ec10822b392 100644 --- a/vb_suite/packers.py +++ b/vb_suite/packers.py @@ -3,7 +3,7 @@ start_date = datetime(2013, 5, 1) -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * import os import pandas as pd from pandas.core import common as com diff --git a/vb_suite/pandas_vb_common.py b/vb_suite/pandas_vb_common.py index 128e262d45d66..a1326d63a112a 100644 --- a/vb_suite/pandas_vb_common.py +++ b/vb_suite/pandas_vb_common.py @@ -7,6 +7,10 @@ import pandas.util.testing as tm import random import numpy as np +try: + from pandas.compat import range +except ImportError: + pass np.random.seed(1234) try: diff --git a/vb_suite/panel_ctor.py b/vb_suite/panel_ctor.py index b6637bb1e61ec..9f497e7357a61 100644 --- a/vb_suite/panel_ctor.py +++ b/vb_suite/panel_ctor.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- @@ -14,7 +14,7 @@ dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1), freq=datetools.Day(1))) data_frames = {} -for x in xrange(100): +for x in range(100): df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), "c": [2]*len(dr)}, index=dr) data_frames[x] = df @@ -27,7 +27,7 @@ setup_equiv_indexes = common_setup + """ data_frames = {} -for x in xrange(100): +for x in range(100): dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1), freq=datetools.Day(1))) df = DataFrame({"a": [0]*len(dr), "b": 
[1]*len(dr), @@ -44,7 +44,7 @@ data_frames = {} start = datetime(1990,1,1) end = datetime(2012,1,1) -for x in xrange(100): +for x in range(100): end += timedelta(days=1) dr = np.asarray(date_range(start, end)) df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), @@ -61,7 +61,7 @@ data_frames = {} start = datetime(1990,1,1) end = datetime(2012,1,1) -for x in xrange(100): +for x in range(100): if x == 50: end += timedelta(days=1) dr = np.asarray(date_range(start, end)) diff --git a/vb_suite/panel_methods.py b/vb_suite/panel_methods.py index 5e88671a23707..28586422a66e3 100644 --- a/vb_suite/panel_methods.py +++ b/vb_suite/panel_methods.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/parser_vb.py b/vb_suite/parser_vb.py index 96da3fac2de5e..bb9ccbdb5e854 100644 --- a/vb_suite/parser_vb.py +++ b/vb_suite/parser_vb.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * from pandas import read_csv, read_table """ @@ -44,7 +44,11 @@ start_date=datetime(2011, 11, 1)) setup = common_setup + """ -from cStringIO import StringIO +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + import os N = 10000 K = 8 @@ -63,7 +67,11 @@ read_table_multiple_date = Benchmark(cmd, setup, start_date=sdate) setup = common_setup + """ -from cStringIO import StringIO +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + import os N = 10000 K = 8 @@ -81,7 +89,11 @@ read_table_multiple_date_baseline = Benchmark(cmd, setup, start_date=sdate) setup = common_setup + """ -from cStringIO import StringIO +try: + from cStringIO import StringIO +except ImportError: + from io import 
StringIO + data = '''\ 0.1213700904466425978256438611,0.0525708283766902484401839501,0.4174092731488769913994474336 0.4096341697147408700274695547,0.1587830198973579909349496119,0.1292545832485494372576795285 diff --git a/vb_suite/plotting.py b/vb_suite/plotting.py index 88d272e7be4b3..79e81e9eea8f4 100644 --- a/vb_suite/plotting.py +++ b/vb_suite/plotting.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * try: from pandas import date_range diff --git a/vb_suite/reindex.py b/vb_suite/reindex.py index 07f0e0f7e1bff..443eb43835745 100644 --- a/vb_suite/reindex.py +++ b/vb_suite/reindex.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/replace.py b/vb_suite/replace.py index 23d41e7c8e632..9326aa5becca9 100644 --- a/vb_suite/replace.py +++ b/vb_suite/replace.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * from datetime import timedelta N = 1000000 @@ -15,7 +15,7 @@ ts = Series(np.random.randn(N), index=rng) """ -large_dict_setup = """from pandas_vb_common import * +large_dict_setup = """from .pandas_vb_common import * from pandas.compat import range n = 10 ** 6 start_value = 10 ** 5 diff --git a/vb_suite/reshape.py b/vb_suite/reshape.py index f6eaeb353acb5..daab96103f2c5 100644 --- a/vb_suite/reshape.py +++ b/vb_suite/reshape.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * index = MultiIndex.from_arrays([np.arange(100).repeat(100), 
np.roll(np.tile(np.arange(100), 100), 25)]) df = DataFrame(np.random.randn(10000, 4), index=index) diff --git a/vb_suite/series_methods.py b/vb_suite/series_methods.py index d0c31cb04ca6a..cd8688495fa09 100644 --- a/vb_suite/series_methods.py +++ b/vb_suite/series_methods.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ setup = common_setup + """ diff --git a/vb_suite/sparse.py b/vb_suite/sparse.py index 5da06451fe2d1..53e2778ee0865 100644 --- a/vb_suite/sparse.py +++ b/vb_suite/sparse.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/stat_ops.py b/vb_suite/stat_ops.py index 544ad6d00ed37..8d7c30dc9fdcf 100644 --- a/vb_suite/stat_ops.py +++ b/vb_suite/stat_ops.py @@ -1,7 +1,7 @@ from vbench.benchmark import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ #---------------------------------------------------------------------- diff --git a/vb_suite/strings.py b/vb_suite/strings.py index f229e0ddedbae..0948df5673a0d 100644 --- a/vb_suite/strings.py +++ b/vb_suite/strings.py @@ -1,6 +1,6 @@ from vbench.api import Benchmark -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * """ setup = common_setup + """ @@ -9,11 +9,11 @@ def make_series(letters, strlen, size): return Series( - np.fromiter(IT.cycle(letters), count=size*strlen, dtype='|S1') - .view('|S{}'.format(strlen))) + [str(x) for x in np.fromiter(IT.cycle(letters), count=size*strlen, dtype='|S1') + .view('|S{}'.format(strlen))]) -many = make_series('matchthis'+string.uppercase, strlen=19, size=10000) # 
31% matches -few = make_series('matchthis'+string.uppercase*42, strlen=19, size=10000) # 1% matches +many = make_series('matchthis'+string.ascii_uppercase, strlen=19, size=10000) # 31% matches +few = make_series('matchthis'+string.ascii_uppercase*42, strlen=19, size=10000) # 1% matches """ strings_cat = Benchmark("many.str.cat(sep=',')", setup) @@ -47,7 +47,7 @@ def make_series(letters, strlen, size): strings_get = Benchmark("many.str.get(0)", setup) setup = setup + """ -s = make_series(string.uppercase, strlen=10, size=10000).str.join('|') +s = make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|') """ strings_get_dummies = Benchmark("s.str.get_dummies('|')", setup) diff --git a/vb_suite/suite.py b/vb_suite/suite.py index ca7a4a9b70836..70a6278c0852d 100644 --- a/vb_suite/suite.py +++ b/vb_suite/suite.py @@ -136,7 +136,7 @@ def generate_rst_files(benchmarks): These historical benchmark graphs were produced with `vbench <http://github.com/pydata/vbench>`__. -The ``pandas_vb_common`` setup script can be found here_ +The ``.pandas_vb_common`` setup script can be found here_ .. 
_here: https://github.com/pydata/pandas/tree/master/vb_suite diff --git a/vb_suite/timedelta.py b/vb_suite/timedelta.py index febd70739b2c9..378968ea1379a 100644 --- a/vb_suite/timedelta.py +++ b/vb_suite/timedelta.py @@ -1,7 +1,7 @@ from vbench.api import Benchmark from datetime import datetime -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * from pandas import to_timedelta """ diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index 6a99bd0dfdc65..7e10b333d5c56 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -11,7 +11,7 @@ def date_range(start=None, end=None, periods=None, freq=None): return DatetimeIndex(start=start, end=end, periods=periods, offset=freq) -common_setup = """from pandas_vb_common import * +common_setup = """from .pandas_vb_common import * from datetime import timedelta N = 100000 @@ -312,7 +312,10 @@ def date_range(start=None, end=None, periods=None, freq=None): setup = common_setup + """ import datetime as dt import pandas as pd -import pandas.tseries.holiday +try: + import pandas.tseries.holiday +except ImportError: + pass import numpy as np date = dt.datetime(2011,1,1) @@ -417,9 +420,9 @@ def iter_n(iterable, n=None): setup = common_setup + """ N = 100000 idx1 = date_range(start='20140101', freq='T', periods=N) -delta_offset = Day() -fast_offset = DateOffset(months=2, days=2) -slow_offset = offsets.BusinessDay() +delta_offset = pd.offsets.Day() +fast_offset = pd.offsets.DateOffset(months=2, days=2) +slow_offset = pd.offsets.BusinessDay() """ @@ -431,9 +434,9 @@ def iter_n(iterable, n=None): setup = common_setup + """ N = 100000 s = Series(date_range(start='20140101', freq='T', periods=N)) -delta_offset = Day() -fast_offset = DateOffset(months=2, days=2) -slow_offset = offsets.BusinessDay() +delta_offset = pd.offsets.Day() +fast_offset = pd.offsets.DateOffset(months=2, days=2) +slow_offset = pd.offsets.BusinessDay() """
@jorisvandenbossche As you requested in #10849, here's the current state of affairs. The changes are: - First patch: - Fix the translation script so that functions defined in the vbench `setup` string get defined as classmethods and called appropriately. This fixes most failing tests. - Some backwards-compatibility fixes around imports. Raising `NotImplementedError` inside `setup()` is equivalent to `SkipTest`, so I've tried to do that in a couple necessary cases (such as things using `test_parallel`) - Speaking of `test_parallel`, this required a bit of hackery to support not-existing. I implemented a no-op decorator by that name so that the actual definition of test cases wouldn't fail. We simply raise `NotImplementedError` rather than test without it, however. - Second patch: - Support Python 3, the changes are essentially what I mentioned elsewhere. - `from pandas_vb_common import *` -> `from .pandas_vb_common import *` - Replace `xrange` with `range` and add `from pandas.compat import range` in `pandas_vb_common` - Replace `string.uppercase` with `string.ascii_uppercase` - Third patch - `asv` instructions in `contributing.rst`, largely lifted from the vbench description. The current examples are minimalist, but most comparable to what vbench outputs. - Fourth patch: - Re-run the translation script. I think some other additions may be included in the first patch due to having run it a few times while making changes. cc @jreback @TomAugspurger
https://api.github.com/repos/pandas-dev/pandas/pulls/10928
2015-08-29T08:40:42Z
2015-08-29T22:42:36Z
2015-08-29T22:42:36Z
2015-09-01T19:25:40Z
PERF: GH10213 kth_smallest GIL release
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 556dd2c364cdf..4b82781fc39d9 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -298,4 +298,25 @@ def take_1d_pg2_int64(self): @test_parallel(num_threads=2) def take_1d_pg2_float64(self): - com.take_1d(self.df.float64.values, self.indexer) \ No newline at end of file + com.take_1d(self.df.float64.values, self.indexer) + + +class nogil_kth_smallest(object): + number = 1 + repeat = 5 + + def setup(self): + if (not have_real_test_parallel): + raise NotImplementedError + np.random.seed(1234) + self.N = 10000000 + self.k = 500000 + self.a = np.random.randn(self.N) + self.b = self.a.copy() + self.kwargs_list = [{'arr': self.a}, {'arr': self.b}] + + def time_nogil_kth_smallest(self): + @test_parallel(num_threads=2, kwargs_list=self.kwargs_list) + def run(arr): + algos.kth_smallest(arr, self.k) + run() diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index a1326d63a112a..3370131929c22 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -7,6 +7,7 @@ import pandas.util.testing as tm import random import numpy as np +import threading try: from pandas.compat import range except ImportError: diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index e9d39e0441055..b9909c14b592f 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -69,14 +69,15 @@ Releasing the GIL We are releasing the global-interpreter-lock (GIL) on some cython operations. This will allow other threads to run simultaneously during computation, potentially allowing performance improvements -from multi-threading. Notably ``groupby`` and some indexing operations are a benefit from this. (:issue:`8882`) +from multi-threading. Notably ``groupby``, ``nsmallest`` and some indexing operations benefit from this. 
(:issue:`8882`) For example the groupby expression in the following code will have the GIL released during the factorization step, e.g. ``df.groupby('key')`` as well as the ``.sum()`` operation. .. code-block:: python - N = 1e6 + N = 1000000 + ngroups = 10 df = DataFrame({'key' : np.random.randint(0,ngroups,size=N), 'data' : np.random.randn(N) }) df.groupby('key')['data'].sum() diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 9b6bdf57d4509..44b1996272356 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -740,7 +740,7 @@ ctypedef fused numeric: float64_t -cdef inline Py_ssize_t swap(numeric *a, numeric *b) except -1: +cdef inline Py_ssize_t swap(numeric *a, numeric *b) nogil except -1: cdef numeric t # cython doesn't allow pointer dereference so use array syntax @@ -756,27 +756,27 @@ cpdef numeric kth_smallest(numeric[:] a, Py_ssize_t k): cdef: Py_ssize_t i, j, l, m, n = a.size numeric x - - l = 0 - m = n - 1 - - while l < m: - x = a[k] - i = l - j = m - - while 1: - while a[i] < x: i += 1 - while x < a[j]: j -= 1 - if i <= j: - swap(&a[i], &a[j]) - i += 1; j -= 1 - - if i > j: break - - if j < k: l = i - if k < i: m = j - return a[k] + with nogil: + l = 0 + m = n - 1 + + while l < m: + x = a[k] + i = l + j = m + + while 1: + while a[i] < x: i += 1 + while x < a[j]: j -= 1 + if i <= j: + swap(&a[i], &a[j]) + i += 1; j -= 1 + + if i > j: break + + if j < k: l = i + if k < i: m = j + return a[k] cdef inline kth_smallest_c(float64_t* a, Py_ssize_t k, Py_ssize_t n): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index e3633a1ec4360..aaa83da036c2f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2044,14 +2044,16 @@ def use_numexpr(use, min_elements=expr._MIN_ELEMENTS): if inspect.isfunction(obj) and name.startswith('assert'): setattr(TestCase, name, staticmethod(obj)) -def test_parallel(num_threads=2): + +def test_parallel(num_threads=2, kwargs_list=None): """Decorator to run the same function multiple times in parallel. 
Parameters ---------- num_threads : int, optional The number of times the function is run in parallel. - + kwargs_list : list of dicts, optional + The list of kwargs to update original function kwargs on different threads. Notes ----- This decorator does not pass the return value of the decorated function. @@ -2061,14 +2063,23 @@ def test_parallel(num_threads=2): """ assert num_threads > 0 + has_kwargs_list = kwargs_list is not None + if has_kwargs_list: + assert len(kwargs_list) == num_threads import threading def wrapper(func): @wraps(func) def inner(*args, **kwargs): + if has_kwargs_list: + update_kwargs = lambda i: dict(kwargs, **kwargs_list[i]) + else: + update_kwargs = lambda i: kwargs threads = [] for i in range(num_threads): - thread = threading.Thread(target=func, args=args, kwargs=kwargs) + updated_kwargs = update_kwargs(i) + thread = threading.Thread(target=func, args=args, + kwargs=updated_kwargs) threads.append(thread) for thread in threads: thread.start()
One tiny part of #10213 ``` import timeit setup_seq = ''' import pandas import numpy numpy.random.seed(1234) x = numpy.random.randn(10000000) a = x.copy() b = x.copy() def f(s): pandas.algos.kth_smallest(s, 500000) def seq(): f(a) f(b) ''' setup_parallel = ''' import pandas import numpy import threading numpy.random.seed(1234) x = numpy.random.randn(10000000) a = x.copy() b = x.copy() def f(s): pandas.algos.kth_smallest(s, 500000) def parallel(): thread1 = threading.Thread(target=f, args=(a,)) thread2 = threading.Thread(target=f, args=(b,)) thread1.start() thread2.start() thread1.join() thread2.join() ''' print(min(timeit.repeat(stmt='seq()', setup=setup_seq, repeat=100, number=1))) print(min(timeit.repeat(stmt='parallel()', setup=setup_parallel, repeat=100, number=1))) ``` On master ``` 0.15268295999976544 0.15424678300041705 ``` On branch ``` 0.1544521670002723 0.08813380599985976 ``` Testing of `nsmallest`/`nlargest`/`median` (I don't think `median` calls `kth_smallest` though) ``` import pandas, numpy from pandas.util.testing import test_parallel n = 1000000 k = 50000 numpy.random.seed(1234) s = pandas.Series(numpy.random.randn(n)) def f(): s.nlargest(k) def seq(): f() f() @test_parallel(num_threads=2) def g(): f() ``` Master `nsmallest` ``` In [2]: %timeit f() 10 loops, best of 3: 42.4 ms per loop In [3]: %timeit g() 10 loops, best of 3: 79.9 ms per loop In [4]: %timeit seq() 10 loops, best of 3: 84.9 ms per loop ``` Branch `nsmallest` ``` In [10]: %timeit f() 10 loops, best of 3: 42.8 ms per loop In [11]: %timeit g() 10 loops, best of 3: 68.6 ms per loop In [12]: %timeit seq() 10 loops, best of 3: 91.2 ms per loop ``` Master `nlargest` ``` In [2]: %timeit f() 10 loops, best of 3: 47.5 ms per loop In [3]: %timeit g() 10 loops, best of 3: 86 ms per loop In [4]: %timeit seq() ``` Branch `nlargest` ``` In [10]: %timeit f() 10 loops, best of 3: 48.1 ms per loop In [11]: %timeit g() 10 loops, best of 3: 71 ms per loop In [12]: %timeit seq() 10 loops, best of 3: 
95.7 ms per loop ``` Master `median` ``` In [2]: %timeit f() 100 loops, best of 3: 15.4 ms per loop In [3]: %timeit g() 10 loops, best of 3: 20.7 ms per loop In [4]: %timeit seq() 10 loops, best of 3: 30.7 ms per loop ``` Branch `median` ``` In [12]: %timeit f() 100 loops, best of 3: 15 ms per loop In [13]: %timeit g() 10 loops, best of 3: 21.5 ms per loop In [14]: %timeit seq() 10 loops, best of 3: 30 ms per loop ``` Results are pretty in line with expectations -- `nsmallest` does quite a bit more than `kth_smallest`, such as copying and indexing.
https://api.github.com/repos/pandas-dev/pandas/pulls/10927
2015-08-29T06:37:53Z
2015-09-02T11:49:29Z
2015-09-02T11:49:29Z
2015-09-03T19:14:47Z
BUG: Bug in incorrection computation of .mean() on timedelta64[ns] because of overflow #9442
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 1607d81543946..3e81a923a114c 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -725,6 +725,7 @@ Performance Improvements Bug Fixes ~~~~~~~~~ +- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`) - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 245535e47abd8..72ea6d14456b0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -63,6 +63,7 @@ def __str__(self): _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max +_int64_max = np.iinfo(np.int64).max # define abstract base classes to enable isinstance type checking on our # objects diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index c70fb6339517d..447a273a1e171 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -21,7 +21,8 @@ is_bool_dtype, is_object_dtype, is_datetime64_dtype, is_timedelta64_dtype, is_datetime_or_timedelta_dtype, _get_dtype, - is_int_or_datetime_dtype, is_any_int_dtype) + is_int_or_datetime_dtype, is_any_int_dtype, + _int64_max) class disallow(object): @@ -145,7 +146,7 @@ def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): else: if fill_value_typ == '+inf': # need the max int here - return np.iinfo(np.int64).max + return _int64_max else: return tslib.iNaT @@ -223,7 +224,12 @@ def _wrap_results(result, dtype): result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): - result = lib.Timedelta(result) + + # raise if we have a timedelta64[ns] which is too large + if np.fabs(result) > _int64_max: + raise 
ValueError("overflow in timedelta operation") + + result = lib.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) @@ -247,6 +253,8 @@ def nansum(values, axis=None, skipna=True): dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype + elif is_timedelta64_dtype(dtype): + dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask) @@ -260,7 +268,7 @@ def nanmean(values, axis=None, skipna=True): dtype_sum = dtype_max dtype_count = np.float64 - if is_integer_dtype(dtype): + if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype): dtype_sum = np.float64 elif is_float_dtype(dtype): dtype_sum = dtype diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index 753e76fd1faea..4870fbd55f33e 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -686,6 +686,25 @@ def test_timedelta_ops(self): s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), Timestamp('2015-02-15')]) self.assertEqual(s.diff().median(), timedelta(days=6)) + def test_overflow(self): + # GH 9442 + s = Series(pd.date_range('20130101',periods=100000,freq='H')) + s[0] += pd.Timedelta('1s 1ms') + + # mean + result = (s-s.min()).mean() + expected = pd.Timedelta((pd.DatetimeIndex((s-s.min())).asi8/len(s)).sum()) + + # the computation is converted to float so might be some loss of precision + self.assertTrue(np.allclose(result.value/1000, expected.value/1000)) + + # sum + self.assertRaises(ValueError, lambda : (s-s.min()).sum()) + s1 = s[0:10000] + self.assertRaises(ValueError, lambda : (s1-s1.min()).sum()) + s2 = s[0:1000] + result = (s2-s2.min()).sum() + def test_timedelta_ops_scalar(self): # GH 6808 base = pd.to_datetime('20130101 09:01:12.123456')
closes #9442
https://api.github.com/repos/pandas-dev/pandas/pulls/10926
2015-08-29T02:03:26Z
2015-08-29T13:12:39Z
2015-08-29T13:12:39Z
2015-08-29T13:12:39Z
Revert "Revert "Merge pull request #10727 from jorisvandenbossche/sph…
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 850f59c2713eb..1888345e1055c 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -661,18 +661,14 @@ values NOT in the categories, similarly to how you can reindex ANY pandas index. Reshaping and Comparision operations on a ``CategoricalIndex`` must have the same categories or a ``TypeError`` will be raised. - .. code-block:: python - - In [9]: df3 = pd.DataFrame({'A' : np.arange(6), - 'B' : pd.Series(list('aabbca')).astype('category')}) - - In [11]: df3 = df3.set_index('B') - - In [11]: df3.index - Out[11]: CategoricalIndex([u'a', u'a', u'b', u'b', u'c', u'a'], categories=[u'a', u'b', u'c'], ordered=False, name=u'B', dtype='category') + .. ipython:: python + :okexcept: - In [12]: pd.concat([df2, df3] - TypeError: categories must match existing categories when appending + df3 = pd.DataFrame({'A' : np.arange(6), + 'B' : pd.Series(list('aabbca')).astype('category')}) + df3 = df3.set_index('B') + df3.index + pd.concat([df2, df3] .. _indexing.float64index: @@ -738,20 +734,18 @@ In float indexes, slicing using floats is allowed In non-float indexes, slicing using floats will raise a ``TypeError`` -.. code-block:: python - - In [1]: pd.Series(range(5))[3.5] - TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index) +.. ipython:: python + :okexcept: - In [1]: pd.Series(range(5))[3.5:4.5] - TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index) + pd.Series(range(5))[3.5] + pd.Series(range(5))[3.5:4.5] Using a scalar float indexer will be deprecated in a future version, but is allowed for now. -.. code-block:: python +.. ipython:: python + :okwarning: - In [3]: pd.Series(range(5))[3.0] - Out[3]: 3 + pd.Series(range(5))[3.0] Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat irregular timedelta-like indexing scheme, but the data is recorded as floats. 
This could for diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 956c90ae63034..6bfbfb87f2c55 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -352,13 +352,11 @@ objects of the same length: Trying to compare ``Index`` or ``Series`` objects of different lengths will raise a ValueError: -.. code-block:: python - - In [55]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar']) - ValueError: Series lengths must match to compare +.. ipython:: python + :okexcept: - In [56]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo']) - ValueError: Series lengths must match to compare + pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar']) + pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo']) Note that this is different from the numpy behavior where a comparison can be broadcast: diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 5a62e7dccea34..847044c4745f9 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -143,10 +143,10 @@ label: If a label is not contained, an exception is raised: -.. code-block:: python +.. ipython:: python + :okexcept: - >>> s['f'] - KeyError: 'f' + s['f'] Using the ``get`` method, a missing label will return None or specified default: diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 855a459f48cf4..c53252bd7193f 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -633,11 +633,14 @@ With :func:`pandas.eval` you cannot use the ``@`` prefix *at all*, because it isn't defined in that context. ``pandas`` will let you know this if you try to use ``@`` in a top-level call to :func:`pandas.eval`. For example, -.. ipython:: python - :okexcept: +.. 
ipython:: + + In [41]: a, b = 1, 2 + + In [42]: pd.eval('@a + b') + SyntaxError: The '@' prefix is not allowed in top-level eval calls, + please refer to your variables by name without the '@' prefix - a, b = 1, 2 - pd.eval('@a + b') In this case, you should simply refer to the variables like you would in standard Python. diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 38629ee7baaea..a49a4745f7200 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -293,10 +293,10 @@ Selection By Label dfl = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('20130101',periods=5)) dfl - .. code-block:: python + .. ipython:: python + :okexcept: - In [4]: dfl.loc[2:3] - TypeError: cannot do slice indexing on <class 'pandas.tseries.index.DatetimeIndex'> with these indexers [2] of <type 'int'> + dfl.loc[2:3] String likes in slicing *can* be convertible to the type of the index and lead to natural slicing. @@ -475,13 +475,11 @@ A single indexer that is out of bounds will raise an ``IndexError``. A list of indexers where any element is out of bounds will raise an ``IndexError`` -.. code-block:: python +.. ipython:: python + :okexcept: dfl.iloc[[4,5,6]] - IndexError: positional indexers are out-of-bounds - dfl.iloc[:,4] - IndexError: single positional indexer is out-of-bounds .. _indexing.basics.partial_setting: diff --git a/doc/source/options.rst b/doc/source/options.rst index 26871a11473de..834b4b642c393 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -57,11 +57,7 @@ The following will **not work** because it matches multiple option names, e.g. .. ipython:: python :okexcept: - try: - pd.get_option("column") - except KeyError as e: - print(e) - + pd.get_option("column") **Note:** Using this form of shorthand may cause your code to break if new options with similar names are added in future versions. 
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 6f30ff3f51ad5..a2067b9a37d55 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -205,9 +205,9 @@ Invalid Data Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time): .. ipython:: python - :okexcept: # this is the default, raise when unparseable + @okexcept to_datetime(['2009/07/31', 'asd'], errors='raise') # return the original input when unparseable @@ -656,7 +656,7 @@ apply the offset to each element. rng + DateOffset(months=2) s + DateOffset(months=2) s - DateOffset(months=2) - + If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``, ``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be used exactly like a ``Timedelta`` - see the @@ -670,7 +670,7 @@ used exactly like a ``Timedelta`` - see the td + Minute(15) Note that some offsets (such as ``BQuarterEnd``) do not have a -vectorized implementation. They can still be used but may +vectorized implementation. They can still be used but may calculate signficantly slower and will raise a ``PerformanceWarning`` .. ipython:: python @@ -1702,13 +1702,13 @@ the top example will fail as it contains ambiguous times and the bottom will infer the right offset. .. 
ipython:: python - :okexcept: rng_hourly = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', '11/06/2011 02:00', '11/06/2011 03:00']) # This will fail as there are ambiguous times + @okexcept rng_hourly.tz_localize('US/Eastern') rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer') rng_hourly_eastern.tolist() diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py index ad7ada8e4eea3..04a9e804f9af2 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py @@ -465,10 +465,6 @@ def process_input(self, data, input_prompt, lineno): self.cout.seek(0) output = self.cout.read() - if not is_suppress and not is_semicolon: - ret.append(output) - elif is_semicolon: # get spacing right - ret.append('') # context information filename = self.state.document.current_source @@ -498,6 +494,16 @@ def process_input(self, data, input_prompt, lineno): sys.stdout.write(s) sys.stdout.write('<<<' + ('-' * 73) + '\n') + # if :okexcept: has been specified, display shorter traceback + if is_okexcept and "Traceback" in output: + traceback = output.split('\n\n') + output = traceback[-1] + + if not is_suppress and not is_semicolon: + ret.append(output) + elif is_semicolon: # get spacing right + ret.append('') + self.cout.truncate(0) return (ret, input_lines, output, is_doctest, decorator, image_file, image_directive)
Reverts pydata/pandas#10916
https://api.github.com/repos/pandas-dev/pandas/pulls/10925
2015-08-29T00:18:09Z
2016-01-07T01:35:44Z
null
2022-10-13T00:16:46Z
BUG: Bug in clearing the cache on DataFrame.pop and a subsequent inplace op #10912
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 71aac42b17810..0ccfa06fc8844 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -750,7 +750,7 @@ Bug Fixes - Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`) - Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`) - Bug in ``BinGrouper.group_info`` where returned values are not compatible with base class (:issue:`10914`) - +- Bug in clearing the cache on ``DataFrame.pop`` and a subsequent inplace op (:issue:`10912`) - Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b127fb220569d..fe09e03281b4f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -501,6 +501,11 @@ def pop(self, item): """ result = self[item] del self[item] + try: + result._reset_cacher() + except AttributeError: + pass + return result def squeeze(self): @@ -1094,6 +1099,11 @@ def _set_as_cached(self, item, cacher): a weakref to cacher """ self._cacher = (item, weakref.ref(cacher)) + def _reset_cacher(self): + """ reset the cacher """ + if hasattr(self,'_cacher'): + del self._cacher + def _iget_item_cache(self, item): """ return the cached item, item represents a positional indexer """ ax = self._info_axis @@ -1330,6 +1340,7 @@ def __delitem__(self, key): # exception: self._data.delete(key) + # delete from the caches try: del self._item_cache[key] except KeyError: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index fe78ff0f79ff3..58c6d15f8ada5 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5177,6 +5177,20 @@ def test_pop(self): self.assertNotIn('foo', self.frame) # TODO self.assertEqual(self.frame.columns.name, 'baz') + # 10912 + # inplace ops cause caching issue + a = DataFrame([[1,2,3],[4,5,6]], columns=['A','B','C'], index=['X','Y']) + 
b = a.pop('B') + b += 1 + + # original frame + expected = DataFrame([[1,3],[4,6]], columns=['A','C'], index=['X','Y']) + assert_frame_equal(a, expected) + + # result + expected = Series([2,5],index=['X','Y'],name='B')+1 + assert_series_equal(b, expected) + def test_pop_non_unique_cols(self): df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]}) df.columns = ["a", "b", "a"]
closes #10912
https://api.github.com/repos/pandas-dev/pandas/pulls/10922
2015-08-28T18:36:05Z
2015-08-28T23:28:06Z
2015-08-28T23:28:06Z
2015-08-28T23:28:06Z
DEPR: Series.nlargest/nsmallest take_last.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index c18bedd0cf6eb..8fb738ff7d76d 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -660,6 +660,7 @@ Deprecations - ``Categorical.name`` was deprecated to make ``Categorical`` more ``numpy.ndarray`` like. Use ``Series(cat, name="whatever")`` instead (:issue:`10482`). - ``drop_duplicates`` and ``duplicated``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`6511`, :issue:`8505`) +- ``Series.nsmallest`` and ``nlargest``'s ``take_last`` keyword was deprecated in favor of ``keep``. (:issue:`10792`) - ``DataFrame.combineAdd`` and ``DataFrame.combineMult`` are deprecated. They can easily be replaced by using the ``add`` and ``mul`` methods: ``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)`` diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 250b4b3e562b8..36d31d493b10d 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -453,24 +453,24 @@ def group_position(*args): _dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'} -def _finalize_nsmallest(arr, kth_val, n, take_last, narr): +def _finalize_nsmallest(arr, kth_val, n, keep, narr): ns, = np.nonzero(arr <= kth_val) inds = ns[arr[ns].argsort(kind='mergesort')][:n] - - if take_last: + if keep == 'last': # reverse indices return narr - 1 - inds - return inds + else: + return inds -def nsmallest(arr, n, take_last=False): +def nsmallest(arr, n, keep='first'): ''' Find the indices of the n smallest values of a numpy array. Note: Fails silently with NaN. 
''' - if take_last: + if keep == 'last': arr = arr[::-1] narr = len(arr) @@ -480,10 +480,10 @@ def nsmallest(arr, n, take_last=False): arr = arr.view(_dtype_map.get(sdtype, sdtype)) kth_val = algos.kth_smallest(arr.copy(), n - 1) - return _finalize_nsmallest(arr, kth_val, n, take_last, narr) + return _finalize_nsmallest(arr, kth_val, n, keep, narr) -def nlargest(arr, n, take_last=False): +def nlargest(arr, n, keep='first'): """ Find the indices of the n largest values of a numpy array. @@ -491,11 +491,11 @@ def nlargest(arr, n, take_last=False): """ sdtype = str(arr.dtype) arr = arr.view(_dtype_map.get(sdtype, sdtype)) - return nsmallest(-arr, n, take_last=take_last) + return nsmallest(-arr, n, keep=keep) -def select_n_slow(dropped, n, take_last, method): - reverse_it = take_last or method == 'nlargest' +def select_n_slow(dropped, n, keep, method): + reverse_it = (keep == 'last' or method == 'nlargest') ascending = method == 'nsmallest' slc = np.s_[::-1] if reverse_it else np.s_[:] return dropped[slc].sort_values(ascending=ascending).head(n) @@ -504,13 +504,13 @@ def select_n_slow(dropped, n, take_last, method): _select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest} -def select_n(series, n, take_last, method): +def select_n(series, n, keep, method): """Implement n largest/smallest. 
Parameters ---------- n : int - take_last : bool + keep : {'first', 'last'}, default 'first' method : str, {'nlargest', 'nsmallest'} Returns @@ -522,15 +522,18 @@ def select_n(series, n, take_last, method): np.timedelta64)): raise TypeError("Cannot use method %r with dtype %s" % (method, dtype)) + if keep not in ('first', 'last'): + raise ValueError('keep must be either "first", "last"') + if n <= 0: return series[[]] dropped = series.dropna() if n >= len(series): - return select_n_slow(dropped, n, take_last, method) + return select_n_slow(dropped, n, keep, method) - inds = _select_methods[method](dropped.values, n, take_last) + inds = _select_methods[method](dropped.values, n, keep) return dropped.iloc[inds] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3e908bf9d579b..3abf7c4458854 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3163,16 +3163,16 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=inplace, sort_remaining=sort_remaining) - def _nsorted(self, columns, n, method, take_last): + def _nsorted(self, columns, n, method, keep): if not com.is_list_like(columns): columns = [columns] columns = list(columns) - ser = getattr(self[columns[0]], method)(n, take_last=take_last) + ser = getattr(self[columns[0]], method)(n, keep=keep) ascending = dict(nlargest=False, nsmallest=True)[method] return self.loc[ser.index].sort_values(columns, ascending=ascending, kind='mergesort') - def nlargest(self, n, columns, take_last=False): + def nlargest(self, n, columns, keep='first'): """Get the rows of a DataFrame sorted by the `n` largest values of `columns`. @@ -3184,8 +3184,10 @@ def nlargest(self, n, columns, take_last=False): Number of items to retrieve columns : list or str Column name or names to order by - take_last : bool, optional - Where there are duplicate values, take the last duplicate + keep : {'first', 'last', False}, default 'first' + Where there are duplicate values: + - ``first`` : take the first occurrence. 
+ - ``last`` : take the last occurrence. Returns ------- @@ -3202,9 +3204,9 @@ def nlargest(self, n, columns, take_last=False): 1 10 b 2 2 8 d NaN """ - return self._nsorted(columns, n, 'nlargest', take_last) + return self._nsorted(columns, n, 'nlargest', keep) - def nsmallest(self, n, columns, take_last=False): + def nsmallest(self, n, columns, keep='first'): """Get the rows of a DataFrame sorted by the `n` smallest values of `columns`. @@ -3216,8 +3218,10 @@ def nsmallest(self, n, columns, take_last=False): Number of items to retrieve columns : list or str Column name or names to order by - take_last : bool, optional - Where there are duplicate values, take the last duplicate + keep : {'first', 'last', False}, default 'first' + Where there are duplicate values: + - ``first`` : take the first occurrence. + - ``last`` : take the last occurrence. Returns ------- @@ -3234,7 +3238,7 @@ def nsmallest(self, n, columns, take_last=False): 0 1 a 1 2 8 d NaN """ - return self._nsorted(columns, n, 'nsmallest', take_last) + return self._nsorted(columns, n, 'nsmallest', keep) def swaplevel(self, i, j, axis=0): """ diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 444f149e70e34..8adaf1437c1de 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -19,7 +19,8 @@ from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series from pandas.core.panel import Panel -from pandas.util.decorators import cache_readonly, Appender, make_signature +from pandas.util.decorators import (cache_readonly, Appender, make_signature, + deprecate_kwarg) import pandas.core.algorithms as algos import pandas.core.common as com from pandas.core.common import(_possibly_downcast_to_dtype, isnull, @@ -82,7 +83,7 @@ _series_apply_whitelist = \ (_common_apply_whitelist - set(['boxplot'])) | \ - frozenset(['dtype', 'unique', 'nlargest', 'nsmallest']) + frozenset(['dtype', 'unique']) _dataframe_apply_whitelist = \ _common_apply_whitelist | 
frozenset(['dtypes', 'corrwith']) @@ -2583,6 +2584,19 @@ def nunique(self, dropna=True): index=self.grouper.result_index, name=self.name) + @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) + @Appender(Series.nlargest.__doc__) + def nlargest(self, n=5, keep='first'): + # ToDo: When we remove deprecate_kwargs, we can remote these methods + # and inlucde nlargest and nsmallest to _series_apply_whitelist + return self.apply(lambda x: x.nlargest(n=n, keep=keep)) + + + @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) + @Appender(Series.nsmallest.__doc__) + def nsmallest(self, n=5, keep='first'): + return self.apply(lambda x: x.nsmallest(n=n, keep=keep)) + def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): diff --git a/pandas/core/series.py b/pandas/core/series.py index b4fc1c9c48f27..2890730956c75 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1817,15 +1817,19 @@ def rank(self, method='average', na_option='keep', ascending=True, ascending=ascending, pct=pct) return self._constructor(ranks, index=self.index).__finalize__(self) - def nlargest(self, n=5, take_last=False): + @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) + def nlargest(self, n=5, keep='first'): """Return the largest `n` elements. Parameters ---------- n : int Return this many descending sorted values - take_last : bool - Where there are duplicate values, take the last duplicate + keep : {'first', 'last', False}, default 'first' + Where there are duplicate values: + - ``first`` : take the first occurrence. + - ``last`` : take the last occurrence. 
+ take_last : deprecated Returns ------- @@ -1848,17 +1852,21 @@ def nlargest(self, n=5, take_last=False): >>> s = pd.Series(np.random.randn(1e6)) >>> s.nlargest(10) # only sorts up to the N requested """ - return select_n(self, n=n, take_last=take_last, method='nlargest') + return select_n(self, n=n, keep=keep, method='nlargest') - def nsmallest(self, n=5, take_last=False): + @deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'}) + def nsmallest(self, n=5, keep='first'): """Return the smallest `n` elements. Parameters ---------- n : int Return this many ascending sorted values - take_last : bool - Where there are duplicate values, take the last duplicate + keep : {'first', 'last', False}, default 'first' + Where there are duplicate values: + - ``first`` : take the first occurrence. + - ``last`` : take the last occurrence. + take_last : deprecated Returns ------- @@ -1881,7 +1889,7 @@ def nsmallest(self, n=5, take_last=False): >>> s = pd.Series(np.random.randn(1e6)) >>> s.nsmallest(10) # only sorts up to the N requested """ - return select_n(self, n=n, take_last=take_last, method='nsmallest') + return select_n(self, n=n, keep=keep, method='nsmallest') def sortlevel(self, level=0, ascending=True, sort_remaining=True): """ diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index a8bbc372ebe25..41703b3b5a3b7 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4997,7 +4997,7 @@ def test_groupby_whitelist(self): 'corr', 'cov', 'diff', 'unique', - 'nlargest', 'nsmallest', + # 'nlargest', 'nsmallest', ]) for obj, whitelist in zip((df, s), @@ -5316,6 +5316,16 @@ def test_nlargest(self): [3, 2, 1, 9, 5, 8]])) tm.assert_series_equal(r, e) + + a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) + gb = a.groupby(b) + e = Series([3, 2, 1, 3, 3, 2], + index=MultiIndex.from_arrays([list('aaabbb'), + [2, 3, 1, 6, 5, 7]])) + assert_series_equal(gb.nlargest(3, keep='last'), e) + with 
tm.assert_produces_warning(FutureWarning): + assert_series_equal(gb.nlargest(3, take_last=True), e) + def test_nsmallest(self): a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) b = Series(list('a' * 5 + 'b' * 5)) @@ -5326,6 +5336,15 @@ def test_nsmallest(self): [0, 4, 1, 6, 7, 8]])) tm.assert_series_equal(r, e) + a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) + gb = a.groupby(b) + e = Series([0, 1, 1, 0, 1, 2], + index=MultiIndex.from_arrays([list('aaabbb'), + [4, 1, 0, 9, 8, 7]])) + assert_series_equal(gb.nsmallest(3, keep='last'), e) + with tm.assert_produces_warning(FutureWarning): + assert_series_equal(gb.nsmallest(3, take_last=True), e) + def test_transform_doesnt_clobber_ints(self): # GH 7972 n = 6 diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index a429059c761d6..34ea674fe10c0 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5040,11 +5040,16 @@ def test_nsmallest_nlargest(self): for s in s_list: assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]]) - assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]]) + + assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]]) + with tm.assert_produces_warning(FutureWarning): + assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]]) assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]]) - assert_series_equal(s.nlargest(3, take_last=True), - s.iloc[[4, 0, 3]]) + + assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]]) + with tm.assert_produces_warning(FutureWarning): + assert_series_equal(s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]]) empty = s.iloc[0:0] assert_series_equal(s.nsmallest(0), empty) @@ -5062,6 +5067,12 @@ def test_nsmallest_nlargest(self): assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]]) assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]]) + msg = 'keep must be either "first", "last"' + with tm.assertRaisesRegexp(ValueError, msg): + s.nsmallest(keep='invalid') + with tm.assertRaisesRegexp(ValueError, msg): + 
s.nlargest(keep='invalid') + def test_rank(self): tm._skip_if_no_scipy() from scipy.stats import rankdata
Closes #10792. Because using `deprecate_kwargs` hides original impl, used ugly workaround for `GroupBy`...
https://api.github.com/repos/pandas-dev/pandas/pulls/10920
2015-08-28T13:34:45Z
2015-08-29T00:03:43Z
2015-08-29T00:03:43Z
2015-08-29T00:10:30Z
BUG: closes bug in BinGrouper.group_info where returned values are not compatible with base class
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index c18bedd0cf6eb..d3d7fe1637900 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -747,6 +747,7 @@ Bug Fixes - Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`) - Bug in ``DataFrame.reset_index`` when index contains `NaT`. (:issue:`10388`) - Bug in ``ExcelReader`` when worksheet is empty (:issue:`6403`) +- Bug in ``BinGrouper.group_info`` where returned values are not compatible with base class (:issue:`10914`) - Bug causing ``DataFrame.where`` to not respect the ``axis`` parameter when the frame has a symmetric shape. (:issue:`9736`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 444f149e70e34..fae54fa298e85 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1790,8 +1790,10 @@ def indices(self): @cache_readonly def group_info(self): - # for compat - return self.bins, self.binlabels, self.ngroups + ngroups = self.ngroups + obs_group_ids = np.arange(ngroups) + comp_ids = np.repeat(np.arange(ngroups), np.diff(np.r_[0, self.bins])) + return comp_ids, obs_group_ids, ngroups @cache_readonly def ngroups(self): diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 7dafc88bf9239..0bee6f514cad0 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -916,6 +916,31 @@ def test_resample_timegrouper(self): result = df.groupby(pd.Grouper(freq='M', key='A')).count() assert_frame_equal(result, expected) + def test_resample_group_info(self): # GH10914 + for n, k in product((10000, 100000), (10, 100, 1000)): + dr = date_range(start='2015-08-27', periods=n // 10, freq='T') + ts = Series(np.random.randint(0, n // k, n), + index=np.random.choice(dr, n)) + + left = ts.resample('30T', how='nunique') + ix = date_range(start=ts.index.min(), + end=ts.index.max(), + freq='30T') + + vals = ts.values + bins = 
np.searchsorted(ix.values, ts.index, side='right') + + sorter = np.lexsort((vals, bins)) + vals, bins = vals[sorter], bins[sorter] + + mask = np.r_[True, vals[1:] != vals[:-1]] + mask |= np.r_[True, bins[1:] != bins[:-1]] + + arr = np.bincount(bins[mask] - 1, minlength=len(ix)) + right = Series(arr, index=ix) + + assert_series_equal(left, right) + def test_resmaple_dst_anchor(self): # 5172 dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
closes https://github.com/pydata/pandas/issues/10914
https://api.github.com/repos/pandas-dev/pandas/pulls/10918
2015-08-28T03:24:08Z
2015-08-28T12:22:23Z
2015-08-28T12:22:23Z
2015-08-29T12:39:08Z
PERF: perf improvements in drop_duplicates for integer dtyped arrays
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py index d6fbd0d31c389..03e654b4886cc 100644 --- a/asv_bench/benchmarks/reindex.py +++ b/asv_bench/benchmarks/reindex.py @@ -61,6 +61,19 @@ def time_frame_drop_duplicates(self): self.df.drop_duplicates(['key1', 'key2']) +class frame_drop_duplicates_int(object): + + def setup(self): + np.random.seed(1234) + self.N = 1000000 + self.K = 10000 + self.key1 = np.random.randint(0,self.K,size=self.N) + self.df = DataFrame({'key1': self.key1}) + + def time_frame_drop_duplicates_int(self): + self.df.drop_duplicates() + + class frame_drop_duplicates_na(object): goal_time = 0.2 @@ -381,4 +394,4 @@ def setup(self): self.s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10)) def time_series_drop_duplicates_string(self): - self.s2.drop_duplicates() \ No newline at end of file + self.s2.drop_duplicates() diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index d3d7fe1637900..33abc62b3f973 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -722,7 +722,7 @@ Performance Improvements - Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`) - Performance improvements in ``Categorical.value_counts`` (:issue:`10804`) - Performance improvements in ``SeriesGroupBy.nunique`` and ``SeriesGroupBy.value_counts`` (:issue:`10820`) - +- Performance improvements in ``DataFrame.drop_duplicates`` with integer dtypes (:issue:`10917`) - 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`) - 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`) - Significantly improved performance of indexing ``MultiIndex`` with slicers (:issue:`10287`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3e908bf9d579b..af2959e86274f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2962,7 +2962,13 @@ def duplicated(self, subset=None, keep='first'): from 
pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT def f(vals): - labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) + + # if we have integers we can directly index with these + if com.is_integer_dtype(vals): + from pandas.core.nanops import unique1d + labels, shape = vals, unique1d(vals) + else: + labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8',copy=False), len(shape) if subset is None: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 16143fa612c48..693b761ae7b4b 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7975,6 +7975,22 @@ def test_drop_duplicates(self): expected = df2.drop_duplicates(['AAA', 'B'], take_last=True) assert_frame_equal(result, expected) + # integers + result = df.drop_duplicates('C') + expected = df.iloc[[0,2]] + assert_frame_equal(result, expected) + result = df.drop_duplicates('C',keep='last') + expected = df.iloc[[-2,-1]] + assert_frame_equal(result, expected) + + df['E'] = df['C'].astype('int8') + result = df.drop_duplicates('E') + expected = df.iloc[[0,2]] + assert_frame_equal(result, expected) + result = df.drop_duplicates('E',keep='last') + expected = df.iloc[[-2,-1]] + assert_frame_equal(result, expected) + def test_drop_duplicates_for_take_all(self): df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar', 'foo', 'bar', 'qux', 'foo'],
no need to factorize integers when dropping duplicates. master ``` In [1]: np.random.seed(1234) In [2]: df = DataFrame({'A' : np.random.randint(0,10000,size=1000000)}) In [3]: %timeit df.drop_duplicates() 10 loops, best of 3: 36.9 ms per loop ``` PR ``` In [2]: df = DataFrame({'A' : np.random.randint(0,10000,size=1000000)}) In [3]: %timeit df.drop_duplicates() 10 loops, best of 3: 21.6 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10917
2015-08-28T03:14:24Z
2015-08-28T18:18:58Z
2015-08-28T18:18:58Z
2015-08-28T18:18:58Z
Revert "Merge pull request #10727 from jorisvandenbossche/sphinx-traceback
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 1888345e1055c..850f59c2713eb 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -661,14 +661,18 @@ values NOT in the categories, similarly to how you can reindex ANY pandas index. Reshaping and Comparision operations on a ``CategoricalIndex`` must have the same categories or a ``TypeError`` will be raised. - .. ipython:: python - :okexcept: + .. code-block:: python + + In [9]: df3 = pd.DataFrame({'A' : np.arange(6), + 'B' : pd.Series(list('aabbca')).astype('category')}) + + In [11]: df3 = df3.set_index('B') + + In [11]: df3.index + Out[11]: CategoricalIndex([u'a', u'a', u'b', u'b', u'c', u'a'], categories=[u'a', u'b', u'c'], ordered=False, name=u'B', dtype='category') - df3 = pd.DataFrame({'A' : np.arange(6), - 'B' : pd.Series(list('aabbca')).astype('category')}) - df3 = df3.set_index('B') - df3.index - pd.concat([df2, df3] + In [12]: pd.concat([df2, df3] + TypeError: categories must match existing categories when appending .. _indexing.float64index: @@ -734,18 +738,20 @@ In float indexes, slicing using floats is allowed In non-float indexes, slicing using floats will raise a ``TypeError`` -.. ipython:: python - :okexcept: +.. code-block:: python + + In [1]: pd.Series(range(5))[3.5] + TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index) - pd.Series(range(5))[3.5] - pd.Series(range(5))[3.5:4.5] + In [1]: pd.Series(range(5))[3.5:4.5] + TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index) Using a scalar float indexer will be deprecated in a future version, but is allowed for now. -.. ipython:: python - :okwarning: +.. code-block:: python - pd.Series(range(5))[3.0] + In [3]: pd.Series(range(5))[3.0] + Out[3]: 3 Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat irregular timedelta-like indexing scheme, but the data is recorded as floats. 
This could for diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 6bfbfb87f2c55..956c90ae63034 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -352,11 +352,13 @@ objects of the same length: Trying to compare ``Index`` or ``Series`` objects of different lengths will raise a ValueError: -.. ipython:: python - :okexcept: +.. code-block:: python + + In [55]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar']) + ValueError: Series lengths must match to compare - pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo', 'bar']) - pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo']) + In [56]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo']) + ValueError: Series lengths must match to compare Note that this is different from the numpy behavior where a comparison can be broadcast: diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 847044c4745f9..5a62e7dccea34 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -143,10 +143,10 @@ label: If a label is not contained, an exception is raised: -.. ipython:: python - :okexcept: +.. code-block:: python - s['f'] + >>> s['f'] + KeyError: 'f' Using the ``get`` method, a missing label will return None or specified default: diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index a49a4745f7200..38629ee7baaea 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -293,10 +293,10 @@ Selection By Label dfl = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('20130101',periods=5)) dfl - .. ipython:: python - :okexcept: + .. code-block:: python - dfl.loc[2:3] + In [4]: dfl.loc[2:3] + TypeError: cannot do slice indexing on <class 'pandas.tseries.index.DatetimeIndex'> with these indexers [2] of <type 'int'> String likes in slicing *can* be convertible to the type of the index and lead to natural slicing. @@ -475,11 +475,13 @@ A single indexer that is out of bounds will raise an ``IndexError``. 
A list of indexers where any element is out of bounds will raise an ``IndexError`` -.. ipython:: python - :okexcept: +.. code-block:: python dfl.iloc[[4,5,6]] + IndexError: positional indexers are out-of-bounds + dfl.iloc[:,4] + IndexError: single positional indexer is out-of-bounds .. _indexing.basics.partial_setting: diff --git a/doc/source/options.rst b/doc/source/options.rst index 834b4b642c393..26871a11473de 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -57,7 +57,11 @@ The following will **not work** because it matches multiple option names, e.g. .. ipython:: python :okexcept: - pd.get_option("column") + try: + pd.get_option("column") + except KeyError as e: + print(e) + **Note:** Using this form of shorthand may cause your code to break if new options with similar names are added in future versions. diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index a2067b9a37d55..6f30ff3f51ad5 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -205,9 +205,9 @@ Invalid Data Pass ``errors='coerce'`` to convert invalid data to ``NaT`` (not a time): .. ipython:: python + :okexcept: # this is the default, raise when unparseable - @okexcept to_datetime(['2009/07/31', 'asd'], errors='raise') # return the original input when unparseable @@ -656,7 +656,7 @@ apply the offset to each element. rng + DateOffset(months=2) s + DateOffset(months=2) s - DateOffset(months=2) - + If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``, ``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be used exactly like a ``Timedelta`` - see the @@ -670,7 +670,7 @@ used exactly like a ``Timedelta`` - see the td + Minute(15) Note that some offsets (such as ``BQuarterEnd``) do not have a -vectorized implementation. They can still be used but may +vectorized implementation. They can still be used but may calculate signficantly slower and will raise a ``PerformanceWarning`` .. 
ipython:: python @@ -1702,13 +1702,13 @@ the top example will fail as it contains ambiguous times and the bottom will infer the right offset. .. ipython:: python + :okexcept: rng_hourly = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00', '11/06/2011 02:00', '11/06/2011 03:00']) # This will fail as there are ambiguous times - @okexcept rng_hourly.tz_localize('US/Eastern') rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer') rng_hourly_eastern.tolist() diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py index 04a9e804f9af2..ad7ada8e4eea3 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py @@ -465,6 +465,10 @@ def process_input(self, data, input_prompt, lineno): self.cout.seek(0) output = self.cout.read() + if not is_suppress and not is_semicolon: + ret.append(output) + elif is_semicolon: # get spacing right + ret.append('') # context information filename = self.state.document.current_source @@ -494,16 +498,6 @@ def process_input(self, data, input_prompt, lineno): sys.stdout.write(s) sys.stdout.write('<<<' + ('-' * 73) + '\n') - # if :okexcept: has been specified, display shorter traceback - if is_okexcept and "Traceback" in output: - traceback = output.split('\n\n') - output = traceback[-1] - - if not is_suppress and not is_semicolon: - ret.append(output) - elif is_semicolon: # get spacing right - ret.append('') - self.cout.truncate(0) return (ret, input_lines, output, is_doctest, decorator, image_file, image_directive)
This reverts commit d40627398cf78347f5a49fa060bad1c40514908a, reversing changes made to bd804aa75ac9b7a55c00101dbea01571cbdfd068. @jorisvandenbossche reverting so that the docs build pls rebsumit when you can
https://api.github.com/repos/pandas-dev/pandas/pulls/10916
2015-08-28T03:06:21Z
2015-08-28T03:08:45Z
2015-08-28T03:08:45Z
2015-08-29T00:17:38Z
ERR between_time and empty pandas.Series
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bc49e9dd79e6a..e5aab2cb97060 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3339,7 +3339,10 @@ def between_time(self, start_time, end_time, include_start=True, include_end=include_end) return self.take(indexer, convert=False) except AttributeError: - raise TypeError('Index must be DatetimeIndex') + if self.empty: + return self + else: + raise TypeError('Index must be DatetimeIndex') def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start',
Pull request so solve issue #10897
https://api.github.com/repos/pandas-dev/pandas/pulls/10913
2015-08-27T19:09:15Z
2015-09-01T14:40:55Z
null
2015-09-01T14:40:55Z
explain how to skip rows between header & data
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 9e7b9ad0b7582..9760f18dd837a 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -971,6 +971,57 @@ Parsing date components in multi-columns is faster with a format In [36]: %timeit pd.to_datetime(ds) 1 loops, best of 3: 488 ms per loop +Skip row between header and data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. ipython:: python + + from io import StringIO + import pandas as pd + + data = """;;;; + ;;;; + ;;;; + ;;;; + ;;;; + ;;;; + ;;;; + ;;;; + ;;;; + ;;;; + date;Param1;Param2;Param4;Param5 + ;m²;°C;m²;m + ;;;; + 01.01.1990 00:00;1;1;2;3 + 01.01.1990 01:00;5;3;4;5 + 01.01.1990 02:00;9;5;6;7 + 01.01.1990 03:00;13;7;8;9 + 01.01.1990 04:00;17;9;10;11 + 01.01.1990 05:00;21;11;12;13 + """ + +Option 1: pass rows explicitly to skiprows +"""""""""""""""""""""""""""""""""""""""""" + +.. ipython:: python + + pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', skiprows=[11,12], + index_col=0, parse_dates=True, header=10) + +Option 2: read column names and then data +""""""""""""""""""""""""""""""""""""""""" + +.. ipython:: python + + pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', + header=10, parse_dates=True, nrows=10).columns + columns = pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', + header=10, parse_dates=True, nrows=10).columns + pd.read_csv(StringIO(data.decode('UTF-8')), sep=';', + header=12, parse_dates=True, names=columns) + + + .. _cookbook.sql: SQL
This is the result from https://github.com/pydata/pandas/issues/10898
https://api.github.com/repos/pandas-dev/pandas/pulls/10910
2015-08-27T09:50:17Z
2015-11-10T15:29:21Z
2015-11-10T15:29:21Z
2015-11-10T15:29:29Z
PERF: imporves performance in SeriesGroupBy.value_counts
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 61500bedcdcd4..9049d8de550d0 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -721,7 +721,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`) - Performance improvements in ``Categorical.value_counts`` (:issue:`10804`) -- Performance improvements in ``SeriesGroupBy.nunique`` (:issue:`10820`) +- Performance improvements in ``SeriesGroupBy.nunique`` and ``SeriesGroupBy.value_counts`` (:issue:`10820`) - 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`) - 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 220e67c43e4be..444f149e70e34 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -82,7 +82,7 @@ _series_apply_whitelist = \ (_common_apply_whitelist - set(['boxplot'])) | \ - frozenset(['dtype', 'value_counts', 'unique', 'nlargest', 'nsmallest']) + frozenset(['dtype', 'unique', 'nlargest', 'nsmallest']) _dataframe_apply_whitelist = \ _common_apply_whitelist | frozenset(['dtypes', 'corrwith']) @@ -2583,6 +2583,108 @@ def nunique(self, dropna=True): index=self.grouper.result_index, name=self.name) + def value_counts(self, normalize=False, sort=True, ascending=False, + bins=None, dropna=True): + + from functools import partial + from pandas.tools.tile import cut + from pandas.tools.merge import _get_join_indexers + + if bins is not None and not np.iterable(bins): + # scalar bins cannot be done at top level + # in a backward compatible way + return self.apply(Series.value_counts, + normalize=normalize, + sort=sort, + ascending=ascending, + bins=bins) + + ids, _, _ = self.grouper.group_info + val = self.obj.get_values() + + # groupby removes null keys from groupings + mask = ids != -1 + ids, val = ids[mask], val[mask] + 
+ if bins is None: + lab, lev = algos.factorize(val, sort=True) + else: + cat, bins = cut(val, bins, retbins=True) + # bins[:-1] for backward compat; + # o.w. cat.categories could be better + lab, lev, dropna = cat.codes, bins[:-1], False + + sorter = np.lexsort((lab, ids)) + ids, lab = ids[sorter], lab[sorter] + + # group boundries are where group ids change + idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] + + # new values are where sorted labels change + inc = np.r_[True, lab[1:] != lab[:-1]] + inc[idx] = True # group boundries are also new values + out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts + + # num. of times each group should be repeated + rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) + + # multi-index components + labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]] + levels = [ping.group_index for ping in self.grouper.groupings] + [lev] + names = self.grouper.names + [self.name] + + if dropna: + mask = labels[-1] != -1 + if mask.all(): + dropna = False + else: + out, labels = out[mask], [label[mask] for label in labels] + + if normalize: + out = out.astype('float') + acc = rep(np.diff(np.r_[idx, len(ids)])) + out /= acc[mask] if dropna else acc + + if sort and bins is None: + cat = ids[inc][mask] if dropna else ids[inc] + sorter = np.lexsort((out if ascending else -out, cat)) + out, labels[-1] = out[sorter], labels[-1][sorter] + + if bins is None: + mi = MultiIndex(levels=levels, labels=labels, names=names, + verify_integrity=False) + + return Series(out, index=mi) + + # for compat. 
with algos.value_counts need to ensure every + # bin is present at every index level, null filled with zeros + diff = np.zeros(len(out), dtype='bool') + for lab in labels[:-1]: + diff |= np.r_[True, lab[1:] != lab[:-1]] + + ncat, nbin = diff.sum(), len(levels[-1]) + + left = [np.repeat(np.arange(ncat), nbin), + np.tile(np.arange(nbin), ncat)] + + right = [diff.cumsum() - 1, labels[-1]] + + _, idx = _get_join_indexers(left, right, sort=False, how='left') + out = np.where(idx != -1, out[idx], 0) + + if sort: + sorter = np.lexsort((out if ascending else -out, left[0])) + out, left[-1] = out[sorter], left[-1][sorter] + + # build the multi-index w/ full levels + labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1])) + labels.append(left[-1]) + + mi = MultiIndex(levels=levels, labels=labels, names=names, + verify_integrity=False) + + return Series(out, index=mi) + def _apply_to_column_groupbys(self, func): """ return a pass thru """ return func(self) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index e51a13d3a296f..a8bbc372ebe25 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1650,6 +1650,57 @@ def check_nunique(df, keys): check_nunique(frame, ['jim']) check_nunique(frame, ['jim', 'joe']) + def test_series_groupby_value_counts(self): + from itertools import product + + def rebuild_index(df): + arr = list(map(df.index.get_level_values, range(df.index.nlevels))) + df.index = MultiIndex.from_arrays(arr, names=df.index.names) + return df + + def check_value_counts(df, keys, bins): + for isort, normalize, sort, ascending, dropna \ + in product((False, True), repeat=5): + + kwargs = dict(normalize=normalize, sort=sort, + ascending=ascending, dropna=dropna, + bins=bins) + + gr = df.groupby(keys, sort=isort) + left = gr['3rd'].value_counts(**kwargs) + + gr = df.groupby(keys, sort=isort) + right = gr['3rd'].apply(Series.value_counts, **kwargs) + right.index.names = right.index.names[:-1] + ['3rd'] + + # 
have to sort on index because of unstable sort on values + left, right = map(rebuild_index, (left, right)) # xref GH9212 + assert_series_equal(left.sort_index(), right.sort_index()) + + def loop(df): + bins = None, np.arange(0, max(5, df['3rd'].max()) + 1, 2) + keys = '1st', '2nd', ('1st', '2nd') + for k, b in product(keys, bins): + check_value_counts(df, k, b) + + days = date_range('2015-08-24', periods=10) + + for n, m in product((100, 10000), (5, 20)): + frame = DataFrame({ + '1st':np.random.choice(list('abcd'), n), + '2nd':np.random.choice(days, n), + '3rd':np.random.randint(1, m + 1, n)}) + + loop(frame) + + frame.loc[1::11, '1st'] = nan + frame.loc[3::17, '2nd'] = nan + frame.loc[7::19, '3rd'] = nan + frame.loc[8::19, '3rd'] = nan + frame.loc[9::19, '3rd'] = nan + + loop(frame) + def test_mulitindex_passthru(self): # GH 7997 @@ -4944,7 +4995,6 @@ def test_groupby_whitelist(self): 'plot', 'hist', 'median', 'dtype', 'corr', 'cov', - 'value_counts', 'diff', 'unique', 'nlargest', 'nsmallest',
``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_ngroups_10000_value_counts | 11.4880 | 6698.5627 | 0.0017 | groupby_ngroups_100_value_counts | 1.3397 | 69.8457 | 0.0192 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [d587be8] : PERF: imporves performance in SeriesGroupBy.value_counts Base [a3c4b59] : TST: pythonxs link seems to have changed in test_html.py, skip tests ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10909
2015-08-27T02:26:36Z
2015-08-27T03:16:19Z
2015-08-27T03:16:19Z
2015-08-27T11:19:39Z
API: Closes #7879: (drops not nan in panel.to_frame() by default)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 6e74e2c68e44e..06f6b64c93061 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -63,6 +63,7 @@ Highlights include: - Development support for benchmarking with the `Air Speed Velocity library <https://github.com/spacetelescope/asv/>`_ (:issue:`8316`) - Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>` - Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>` +- Deprecated ``filter_observations`` by ``dropna`` in ``Panel.to_frame`` and changed default to ``True`` (:issue:`7879`) See the :ref:`v0.17.0 Whatsnew <whatsnew_0170>` overview for an extensive list of all enhancements and bugs that have been fixed in 0.17.0. diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1293b4034b84e..5e24d765ca5c5 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -862,7 +862,8 @@ def groupby(self, function, axis='major'): axis = self._get_axis_number(axis) return PanelGroupBy(self, function, axis=axis) - def to_frame(self, filter_observations=True): + @deprecate_kwarg(old_arg_name='filter_observations', new_arg_name='dropna') + def to_frame(self, dropna=False): """ Transform wide format into long (stacked) format as DataFrame whose columns are the Panel's items and whose index is a MultiIndex formed @@ -870,7 +871,11 @@ def to_frame(self, filter_observations=True): Parameters ---------- - filter_observations : boolean, default True + dropna : boolean, default False + Drop (major, minor) pairs without a complete set of observations + across all the items + + filter_observations : boolean, default False, [deprecated] Drop (major, minor) pairs without a complete set of observations across all the items @@ -880,7 +885,7 @@ def to_frame(self, filter_observations=True): """ _, N, K = self.shape - if filter_observations: + if dropna is True: # shaped like the return DataFrame 
mask = com.notnull(self.values).all(axis=0) # size = mask.sum() diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 7c67ded16139c..50ed5f092c979 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1525,19 +1525,19 @@ def test_transpose_copy(self): def test_to_frame(self): # filtered - filtered = self.panel.to_frame() + filtered = self.panel.to_frame(dropna=True) expected = self.panel.to_frame().dropna(how='any') assert_frame_equal(filtered, expected) # unfiltered - unfiltered = self.panel.to_frame(filter_observations=False) + unfiltered = self.panel.to_frame(dropna=False) assert_panel_equal(unfiltered.to_panel(), self.panel) # names self.assertEqual(unfiltered.index.names, ('major', 'minor')) # unsorted, round trip - df = self.panel.to_frame(filter_observations=False) + df = self.panel.to_frame(dropna=False) unsorted = df.take(np.random.permutation(len(df))) pan = unsorted.to_panel() assert_panel_equal(pan, self.panel) @@ -1554,6 +1554,10 @@ def test_to_frame(self): self.assertEqual(rdf.index.names, df.index.names) self.assertEqual(rdf.columns.names, df.columns.names) + # test kw filter_observations deprecation + with tm.assert_produces_warning(Warning): + filtered = self.panel.to_frame(filter_observations=True) + def test_to_frame_mixed(self): panel = self.panel.fillna(0) panel['str'] = 'foo' @@ -1597,7 +1601,7 @@ def test_to_frame_multi_major(self): assert_frame_equal(result, expected) wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. 
GH #5773 - result = wp.to_frame() + result = wp.to_frame(dropna=True) assert_frame_equal(result, expected[1:]) idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), @@ -1651,7 +1655,7 @@ def test_to_frame_multi_drop_level(self): idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')]) df = DataFrame({'A': [np.nan, 1, 2]}, index=idx) wp = Panel({'i1': df, 'i2': df}) - result = wp.to_frame() + result = wp.to_frame(dropna=True) exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')], names=[None, None, 'minor']) expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx) @@ -2210,7 +2214,7 @@ def setUp(self): tm.add_nans(panel) self.panel = panel.to_frame() - self.unfiltered_panel = panel.to_frame(filter_observations=False) + self.unfiltered_panel = panel.to_frame(dropna=False) def test_ops_differently_indexed(self): # trying to set non-identically indexed panel
closes #7879 by changing API (filter_observations=True now deprecated and replaced with dropna=False)
https://api.github.com/repos/pandas-dev/pandas/pulls/10908
2015-08-26T20:10:34Z
2015-11-18T20:17:47Z
null
2015-11-18T20:17:47Z
PERF: Cythonize groupby transforms #4095
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 138977a29463e..48480041ed1bd 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -772,3 +772,19 @@ def setup(self): def time_groupby_transform_series2(self): self.df.groupby('id')['val'].transform(np.mean) + +class groupby_transform_cythonized(object): + goal_time = 0.2 + + def setup(self): + np.random.seed(0) + self.df = DataFrame({'id': (np.arange(100000) / 3), 'val': np.random.randn(100000), }) + + def time_groupby_transform_cumprod(self): + self.df.groupby('id').cumprod() + + def time_groupby_transform_cumsum(self): + self.df.groupby('id').cumsum() + + def time_groupby_transform_shift(self): + self.df.groupby('id').shift() diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt index 114665e28203f..b44c3f4f4673c 100755 --- a/doc/source/whatsnew/v0.17.1.txt +++ b/doc/source/whatsnew/v0.17.1.txt @@ -93,6 +93,8 @@ Performance Improvements - Performance bug in repr of ``Categorical`` categories, which was rendering the strings before chopping them for display (:issue:`11305`) - Improved performance of ``Series`` constructor with no data and ``DatetimeIndex`` (:issue:`11433`) +- Improved performance ``shift``, ``cumprod``, and ``cumsum`` with groupby (:issue:`4095`) + .. 
_whatsnew_0171.bug_fixes: diff --git a/pandas/algos.pyx b/pandas/algos.pyx index 8569209f2e946..62ee6ced84882 100644 --- a/pandas/algos.pyx +++ b/pandas/algos.pyx @@ -50,7 +50,6 @@ cdef np.float64_t MAXfloat64 = np.inf cdef double NaN = <double> np.NaN cdef double nan = NaN - cdef inline int int_max(int a, int b): return a if a >= b else b cdef inline int int_min(int a, int b): return a if a <= b else b @@ -2266,43 +2265,6 @@ def group_last_bin_object(ndarray[object, ndim=2] out, else: out[i, j] = resx[i, j] - - -#---------------------------------------------------------------------- -# median - -def group_median(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, size - ndarray[int64_t] _counts - ndarray data - float64_t* ptr - ngroups = len(counts) - N, K = (<object> values).shape - - indexer, _counts = groupsort_indexer(labels, ngroups) - counts[:] = _counts[1:] - - data = np.empty((K, N), dtype=np.float64) - ptr = <float64_t*> data.data - - take_2d_axis1_float64_float64(values.T, indexer, out=data) - - for i in range(K): - # exclude NA group - ptr += _counts[0] - for j in range(ngroups): - size = _counts[j + 1] - out[j, i] = _median_linear(ptr, size) - ptr += size - - cdef inline float64_t _median_linear(float64_t* a, int n): cdef int i, j, na_count = 0 cdef float64_t result diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 9c5a40f6e34d6..e9aa9066b75a5 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -4,6 +4,7 @@ import datetime import collections import warnings +import copy from pandas.compat import( zip, builtins, range, long, lzip, @@ -88,6 +89,7 @@ _dataframe_apply_whitelist = \ _common_apply_whitelist | frozenset(['dtypes', 'corrwith']) +_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift']) class GroupByError(Exception): pass @@ -1021,6 +1023,45 @@ def 
cumcount(self, ascending=True): cumcounts = self._cumcount_array(ascending=ascending) return Series(cumcounts, index) + def cumprod(self, axis=0): + """ + Cumulative product for each group + + """ + if axis != 0: + return self.apply(lambda x: x.cumprod(axis=axis)) + + return self._cython_transform('cumprod') + + def cumsum(self, axis=0): + """ + Cumulative sum for each group + + """ + if axis != 0: + return self.apply(lambda x: x.cumprod(axis=axis)) + + return self._cython_transform('cumsum') + + def shift(self, periods=1, freq=None, axis=0): + """ + Shift each group by periods observations + """ + + if freq is not None or axis != 0: + return self.apply(lambda x: x.shift(periods, freq, axis)) + + labels, _, ngroups = self.grouper.group_info + # filled in by Cython + indexer = np.zeros_like(labels) + _algos.group_shift_indexer(indexer, labels, ngroups, periods) + + output = {} + for name, obj in self._iterate_slices(): + output[name] = com.take_nd(obj.values, indexer) + + return self._wrap_transformed_output(output) + def head(self, n=5): """ Returns first n rows of each group. 
@@ -1139,6 +1180,24 @@ def _try_cast(self, result, obj): return result + def _cython_transform(self, how, numeric_only=True): + output = {} + for name, obj in self._iterate_slices(): + is_numeric = is_numeric_dtype(obj.dtype) + if numeric_only and not is_numeric: + continue + + try: + result, names = self.grouper.transform(obj.values, how) + except AssertionError as e: + raise GroupByError(str(e)) + output[name] = self._try_cast(result, obj) + + if len(output) == 0: + raise DataError('No numeric types to aggregate') + + return self._wrap_transformed_output(output, names) + def _cython_agg_general(self, how, numeric_only=True): output = {} for name, obj in self._iterate_slices(): @@ -1468,20 +1527,27 @@ def get_group_levels(self): # Aggregation functions _cython_functions = { - 'add': 'group_add', - 'prod': 'group_prod', - 'min': 'group_min', - 'max': 'group_max', - 'mean': 'group_mean', - 'median': { - 'name': 'group_median' - }, - 'var': 'group_var', - 'first': { - 'name': 'group_nth', - 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) - }, - 'last': 'group_last', + 'aggregate': { + 'add': 'group_add', + 'prod': 'group_prod', + 'min': 'group_min', + 'max': 'group_max', + 'mean': 'group_mean', + 'median': { + 'name': 'group_median' + }, + 'var': 'group_var', + 'first': { + 'name': 'group_nth', + 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) + }, + 'last': 'group_last', + }, + + 'transform': { + 'cumprod' : 'group_cumprod', + 'cumsum' : 'group_cumsum', + } } _cython_arity = { @@ -1490,22 +1556,24 @@ def get_group_levels(self): _name_functions = {} - def _get_aggregate_function(self, how, values): + def _get_cython_function(self, kind, how, values, is_numeric): dtype_str = values.dtype.name def get_func(fname): - # find the function, or use the object function, or return a - # generic + # see if there is a fused-type version of function + # only valid for numeric + f = getattr(_algos, fname, None) + if f is not None and is_numeric: + return f + + # otherwise 
find dtype-specific version, falling back to object for dt in [dtype_str, 'object']: f = getattr(_algos, "%s_%s" % (fname, dtype_str), None) if f is not None: return f - if dtype_str == 'float64': - return getattr(_algos, fname, None) - - ftype = self._cython_functions[how] + ftype = self._cython_functions[kind][how] if isinstance(ftype, dict): func = afunc = get_func(ftype['name']) @@ -1529,7 +1597,9 @@ def wrapper(*args, **kwargs): (how, dtype_str)) return func, dtype_str - def aggregate(self, values, how, axis=0): + def _cython_operation(self, kind, values, how, axis): + assert kind in ['transform', 'aggregate'] + arity = self._cython_arity.get(how, 1) vdim = values.ndim @@ -1561,11 +1631,11 @@ def aggregate(self, values, how, axis=0): values = values.astype(object) try: - agg_func, dtype_str = self._get_aggregate_function(how, values) + func, dtype_str = self._get_cython_function(kind, how, values, is_numeric) except NotImplementedError: if is_numeric: values = _algos.ensure_float64(values) - agg_func, dtype_str = self._get_aggregate_function(how, values) + func, dtype_str = self._get_cython_function(kind, how, values, is_numeric) else: raise @@ -1574,19 +1644,26 @@ def aggregate(self, values, how, axis=0): else: out_dtype = 'object' - # will be filled in Cython function - result = np.empty(out_shape, dtype=out_dtype) - result.fill(np.nan) - counts = np.zeros(self.ngroups, dtype=np.int64) + labels, _, _ = self.group_info - result = self._aggregate(result, counts, values, agg_func, is_numeric) + if kind == 'aggregate': + result = np.empty(out_shape, dtype=out_dtype) + result.fill(np.nan) + counts = np.zeros(self.ngroups, dtype=np.int64) + result = self._aggregate(result, counts, values, labels, func, is_numeric) + elif kind == 'transform': + result = np.empty_like(values, dtype=out_dtype) + result.fill(np.nan) + # temporary storange for running-total type tranforms + accum = np.empty(out_shape, dtype=out_dtype) + result = self._transform(result, accum, values, 
labels, func, is_numeric) if com.is_integer_dtype(result): if len(result[result == tslib.iNaT]) > 0: result = result.astype('float64') result[result == tslib.iNaT] = np.nan - if self._filter_empty_groups and not counts.all(): + if kind == 'aggregate' and self._filter_empty_groups and not counts.all(): if result.ndim == 2: try: result = lib.row_bool_subset( @@ -1612,8 +1689,13 @@ def aggregate(self, values, how, axis=0): return result, names - def _aggregate(self, result, counts, values, agg_func, is_numeric): - comp_ids, _, ngroups = self.group_info + def aggregate(self, values, how, axis=0): + return self._cython_operation('aggregate', values, how, axis) + + def transform(self, values, how, axis=0): + return self._cython_operation('transform', values, how, axis) + + def _aggregate(self, result, counts, values, comp_ids, agg_func, is_numeric): if values.ndim > 3: # punting for now raise NotImplementedError("number of dimensions is currently " @@ -1628,6 +1710,22 @@ def _aggregate(self, result, counts, values, agg_func, is_numeric): return result + def _transform(self, result, accum, values, comp_ids, transform_func, is_numeric): + comp_ids, _, ngroups = self.group_info + if values.ndim > 3: + # punting for now + raise NotImplementedError("number of dimensions is currently " + "limited to 3") + elif values.ndim > 2: + for i, chunk in enumerate(values.transpose(2, 0, 1)): + + chunk = chunk.squeeze() + agg_func(result[:, :, i], values, comp_ids, accum) + else: + transform_func(result, values, comp_ids, accum) + + return result + def agg_series(self, obj, func): try: return self._aggregate_series_fast(obj, func) @@ -1848,9 +1946,9 @@ def groupings(self): #---------------------------------------------------------------------- # cython aggregation - _cython_functions = {'ohlc': 'group_ohlc'} - _cython_functions.update(BaseGrouper._cython_functions) - _cython_functions.pop('median') + _cython_functions = copy.deepcopy(BaseGrouper._cython_functions) + 
_cython_functions['aggregate']['ohlc'] = 'group_ohlc' + _cython_functions['aggregate'].pop('median') _name_functions = { 'ohlc': lambda *args: ['open', 'high', 'low', 'close'] @@ -2380,10 +2478,9 @@ def _aggregate_multiple_funcs(self, arg): return DataFrame(results, columns=columns) - def _wrap_aggregated_output(self, output, names=None): - # sort of a kludge + def _wrap_output(self, output, index, names=None): + """ common agg/transform wrapping logic """ output = output[self.name] - index = self.grouper.result_index if names is not None: return DataFrame(output, index=index, columns=names) @@ -2393,6 +2490,16 @@ def _wrap_aggregated_output(self, output, names=None): name = self._selected_obj.name return Series(output, index=index, name=name) + def _wrap_aggregated_output(self, output, names=None): + return self._wrap_output(output=output, + index=self.grouper.result_index, + names=names) + + def _wrap_transformed_output(self, output, names=None): + return self._wrap_output(output=output, + index=self.obj.index, + names=names) + def _wrap_applied_output(self, keys, values, not_indexed_same=False): if len(keys) == 0: # GH #6265 @@ -2452,14 +2559,16 @@ def transform(self, func, *args, **kwargs): transformed : Series """ + func = _intercept_cython(func) or func + # if string function if isinstance(func, compat.string_types): - return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs)) - - # do we have a cython function - cyfunc = _intercept_cython(func) - if cyfunc and not args and not kwargs: - return self._transform_fast(cyfunc) + if func in _cython_transforms: + # cythonized transform + return getattr(self, func)(*args, **kwargs) + else: + # cythonized aggregation and merge + return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs)) # reg transform dtype = self._selected_obj.dtype @@ -3208,24 +3317,23 @@ def transform(self, func, *args, **kwargs): >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) """ - # try to do a fast 
transform via merge if possible - try: - obj = self._obj_with_exclusions - if isinstance(func, compat.string_types): - result = getattr(self, func)(*args, **kwargs) + # optimized transforms + func = _intercept_cython(func) or func + if isinstance(func, compat.string_types): + if func in _cython_transforms: + # cythonized transform + return getattr(self, func)(*args, **kwargs) else: - cyfunc = _intercept_cython(func) - if cyfunc and not args and not kwargs: - result = getattr(self, cyfunc)() - else: - return self._transform_general(func, *args, **kwargs) - except: + # cythonized aggregation and merge + result = getattr(self, func)(*args, **kwargs) + else: return self._transform_general(func, *args, **kwargs) # a reduction transform if not isinstance(result, DataFrame): return self._transform_general(func, *args, **kwargs) + obj = self._obj_with_exclusions # nuiscance columns if not result.columns.equals(obj.columns): return self._transform_general(func, *args, **kwargs) @@ -3437,6 +3545,9 @@ def _wrap_aggregated_output(self, output, names=None): return self._reindex_output(result)._convert(datetime=True) + def _wrap_transformed_output(self, output, names=None): + return DataFrame(output, index=self.obj.index) + def _wrap_agged_blocks(self, items, blocks): if not self.as_index: index = np.arange(blocks[0].values.shape[1]) @@ -4069,7 +4180,9 @@ def _reorder_by_uniques(uniques, labels): np.var: 'var', np.median: 'median', np.max: 'max', - np.min: 'min' + np.min: 'min', + np.cumprod: 'cumprod', + np.cumsum: 'cumsum' } diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index 8c5c7d709e5f1..d137ce732e005 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -1749,6 +1749,144 @@ def put2d_%(name)s_%(dest_type)s(ndarray[%(c_type)s, ndim=2, cast=True] values, out[i] = values[j, loc] """ +#---------------------------------------------------------------------- +# other grouping functions not needing a template +grouping_no_template = 
""" +def group_median_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, ngroups, size + ndarray[int64_t] _counts + ndarray data + float64_t* ptr + ngroups = len(counts) + N, K = (<object> values).shape + + indexer, _counts = groupsort_indexer(labels, ngroups) + counts[:] = _counts[1:] + + data = np.empty((K, N), dtype=np.float64) + ptr = <float64_t*> data.data + + take_2d_axis1_float64_float64(values.T, indexer, out=data) + + for i in range(K): + # exclude NA group + ptr += _counts[0] + for j in range(ngroups): + size = _counts[j + 1] + out[j, i] = _median_linear(ptr, size) + ptr += size + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cumprod_float64(float64_t[:,:] out, + float64_t[:,:] values, + int64_t[:] labels, + float64_t[:,:] accum): + ''' + Only transforms on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, size + float64_t val + int64_t lab + + N, K = (<object> values).shape + accum = np.ones_like(accum) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i, j] + if val == val: + accum[lab, j] *= val + out[i, j] = accum[lab, j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cumsum(numeric[:,:] out, + numeric[:,:] values, + int64_t[:] labels, + numeric[:,:] accum): + ''' + Only transforms on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, size + numeric val + int64_t lab + + N, K = (<object> values).shape + accum = np.zeros_like(accum) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i,j] + if val == val: + accum[lab,j] += val + out[i,j] = accum[lab,j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_shift_indexer(int64_t[:] out, int64_t[:] labels, + int ngroups, int periods): + cdef: + Py_ssize_t N, i, j, ii + int 
offset, sign + int64_t lab, idxer, idxer_slot + int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64) + int64_t[:,:] label_indexer + + N, = (<object> labels).shape + + if periods < 0: + periods = -periods + offset = N - 1 + sign = -1 + elif periods > 0: + offset = 0 + sign = 1 + + if periods == 0: + with nogil: + for i in range(N): + out[i] = i + else: + # array of each previous indexer seen + label_indexer = np.zeros((ngroups, periods), dtype=np.int64) + with nogil: + for i in range(N): + ## reverse iterator if shifting backwards + ii = offset + sign * i + lab = labels[ii] + label_seen[lab] += 1 + + idxer_slot = label_seen[lab] % periods + idxer = label_indexer[lab, idxer_slot] + + if label_seen[lab] > periods: + out[ii] = idxer + else: + out[ii] = -1 + + label_indexer[lab, idxer_slot] = ii +""" + #------------------------------------------------------------------------- # Generators @@ -2012,6 +2150,8 @@ def generate_take_cython_file(): print(generate_put_min_max_template(template, use_ints=True), file=f) + print(grouping_no_template, file=f) + for template in nobool_1d_templates: print(generate_from_template(template, exclude=['bool']), file=f) diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index 767e7d6292b6d..738f695a6ce9f 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -7933,6 +7933,141 @@ def group_max_int64(ndarray[int64_t, ndim=2] out, out[i, j] = maxx[i, j] + +def group_median_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, ngroups, size + ndarray[int64_t] _counts + ndarray data + float64_t* ptr + ngroups = len(counts) + N, K = (<object> values).shape + + indexer, _counts = groupsort_indexer(labels, ngroups) + counts[:] = _counts[1:] + + data = np.empty((K, N), dtype=np.float64) + ptr = <float64_t*> data.data + + take_2d_axis1_float64_float64(values.T, 
indexer, out=data) + + for i in range(K): + # exclude NA group + ptr += _counts[0] + for j in range(ngroups): + size = _counts[j + 1] + out[j, i] = _median_linear(ptr, size) + ptr += size + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cumprod_float64(float64_t[:,:] out, + float64_t[:,:] values, + int64_t[:] labels, + float64_t[:,:] accum): + ''' + Only transforms on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, size + float64_t val + int64_t lab + + N, K = (<object> values).shape + accum = np.ones_like(accum) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i, j] + if val == val: + accum[lab, j] *= val + out[i, j] = accum[lab, j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cumsum(numeric[:,:] out, + numeric[:,:] values, + int64_t[:] labels, + numeric[:,:] accum): + ''' + Only transforms on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, size + numeric val + int64_t lab + + N, K = (<object> values).shape + accum = np.zeros_like(accum) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i,j] + if val == val: + accum[lab,j] += val + out[i,j] = accum[lab,j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_shift_indexer(int64_t[:] out, int64_t[:] labels, + int ngroups, int periods): + cdef: + Py_ssize_t N, i, j, ii + int offset, sign + int64_t lab, idxer, idxer_slot + int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64) + int64_t[:,:] label_indexer + + N, = (<object> labels).shape + + if periods < 0: + periods = -periods + offset = N - 1 + sign = -1 + elif periods > 0: + offset = 0 + sign = 1 + + if periods == 0: + with nogil: + for i in range(N): + out[i] = i + else: + # array of each previous indexer seen + label_indexer = np.zeros((ngroups, periods), dtype=np.int64) + with nogil: + for i in range(N): + ## reverse iterator if shifting backwards + ii = offset + sign * i 
+ lab = labels[ii] + label_seen[lab] += 1 + + idxer_slot = label_seen[lab] % periods + idxer = label_indexer[lab, idxer_slot] + + if label_seen[lab] > periods: + out[ii] = idxer + else: + out[ii] = -1 + + label_indexer[lab, idxer_slot] = ii + @cython.wraparound(False) @cython.boundscheck(False) def left_join_indexer_unique_float64(ndarray[float64_t] left, diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 9649288ab5b6d..6a2408cbe5ce0 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -2636,7 +2636,7 @@ def test_cython_api2(self): result = df.groupby('A').cumsum() assert_frame_equal(result,expected) - expected = DataFrame([[1, 2, np.nan], [2, np.nan, 9], [3, 4, 9]], columns=['A', 'B', 'C']).astype('float64') + # GH 5755 - cumsum is a transformer and should ignore as_index result = df.groupby('A', as_index=False).cumsum() assert_frame_equal(result,expected) @@ -5360,6 +5360,157 @@ def test__cython_agg_general(self): exc.args += ('operation: %s' % op,) raise + def test_cython_group_transform_algos(self): + #GH 4095 + dtypes = [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint32, np.uint64, + np.float32, np.float64] + + ops = [(pd.algos.group_cumprod_float64, np.cumproduct, [np.float64]), + (pd.algos.group_cumsum, np.cumsum, dtypes)] + + for pd_op, np_op, dtypes in ops: + for dtype in dtypes: + data = np.array([[1],[2],[3],[4]], dtype=dtype) + ans = np.zeros_like(data) + accum = np.array([[0]], dtype=dtype) + labels = np.array([0,0,0,0], dtype=np.int64) + pd_op(ans, data, labels, accum) + self.assert_numpy_array_equal(np_op(data), ans[:,0]) + + + + # with nans + labels = np.array([0,0,0,0,0], dtype=np.int64) + + data = np.array([[1],[2],[3],[np.nan],[4]], dtype='float64') + accum = np.array([[0.0]]) + actual = np.zeros_like(data) + actual.fill(np.nan) + pd.algos.group_cumprod_float64(actual, data, labels, accum) + expected = np.array([1, 2, 6, np.nan, 24], dtype='float64') + 
self.assert_numpy_array_equal(actual[:, 0], expected) + + accum = np.array([[0.0]]) + actual = np.zeros_like(data) + actual.fill(np.nan) + pd.algos.group_cumsum(actual, data, labels, accum) + expected = np.array([1, 3, 6, np.nan, 10], dtype='float64') + self.assert_numpy_array_equal(actual[:, 0], expected) + + # timedelta + data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None] + accum = np.array([[0]], dtype='int64') + actual = np.zeros_like(data, dtype='int64') + actual.fill(np.nan) + pd.algos.group_cumsum(actual, data.view('int64'), labels, accum) + expected = np.array( + [np.timedelta64(1, 'ns'), np.timedelta64(2, 'ns'), + np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'), + np.timedelta64(5, 'ns')]) + self.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected) + + + + def test_cython_transform(self): + # GH 4095 + ops = [(('cumprod', ()), lambda x: x.cumprod()), + (('cumsum', ()), lambda x: x.cumsum()), + (('shift', (-1,)), lambda x: x.shift(-1)), + (('shift', (1,)), lambda x: x.shift())] + + s = Series(np.random.randn(1000)) + s_missing = s.copy() + s_missing.iloc[2:10] = np.nan + labels = np.random.randint(0, 50, size=1000).astype(float) + + #series + for (op, args), targop in ops: + for data in [s, s_missing]: + # print(data.head()) + expected = data.groupby(labels).transform(targop) + + tm.assert_series_equal(expected, + data.groupby(labels).transform(op, *args)) + tm.assert_series_equal(expected, + getattr(data.groupby(labels), op)(*args)) + + strings = list('qwertyuiopasdfghjklz') + strings_missing = strings[:] + strings_missing[5] = np.nan + df = DataFrame({'float': s, + 'float_missing': s_missing, + 'int': [1,1,1,1,2] * 200, + 'datetime': pd.date_range('1990-1-1', periods=1000), + 'timedelta': pd.timedelta_range(1, freq='s', periods=1000), + 'string': strings * 50, + 'string_missing': strings_missing * 50}) + df['cat'] = df['string'].astype('category') + + df2 = df.copy() + df2.index = pd.MultiIndex.from_product([range(100), 
range(10)]) + + #DataFrame - Single and MultiIndex, + #group by values, index level, columns + for df in [df, df2]: + for gb_target in [dict(by=labels), dict(level=0), + dict(by='string')]: # dict(by='string_missing')]: + # dict(by=['int','string'])]: + + gb = df.groupby(**gb_target) + # whitelisted methods set the selection before applying + # bit a of hack to make sure the cythonized shift + # is equivalent to pre 0.17.1 behavior + if op == 'shift': + gb._set_selection_from_grouper() + + for (op, args), targop in ops: + print(op) + if op != 'shift' and 'int' not in gb_target: + # numeric apply fastpath promotes dtype so have + # to apply seperately and concat + i = gb[['int']].apply(targop) + f = gb[['float','float_missing']].apply(targop) + expected = pd.concat([f,i], axis=1) + else: + expected = gb.apply(targop) + + expected = expected.sort_index(axis=1) + tm.assert_frame_equal(expected, + gb.transform(op, *args).sort_index(axis=1)) + tm.assert_frame_equal(expected, + getattr(gb, op)(*args)) + # individual columns + for c in df: + if c not in ['float', 'int', 'float_missing'] and op != 'shift': + self.assertRaises(DataError, gb[c].transform, op) + self.assertRaises(DataError, getattr(gb[c], op)) + else: + expected = gb[c].apply(targop) + expected.name = c + tm.assert_series_equal(expected, + gb[c].transform(op, *args)) + tm.assert_series_equal(expected, + getattr(gb[c], op)(*args)) + def test_groupby_cumprod(self): + # GH 4095 + df = pd.DataFrame({'key': ['b'] * 10, 'value': 2}) + + actual = df.groupby('key')['value'].cumprod() + expected = df.groupby('key')['value'].apply(lambda x: x.cumprod()) + expected.name = 'value' + tm.assert_series_equal(actual, expected) + + df = pd.DataFrame({'key': ['b'] * 100, 'value': 2}) + actual = df.groupby('key')['value'].cumprod() + # if overflows, groupby product casts to float + # while numpy passes back invalid values + df['value'] = df['value'].astype(float) + expected = df.groupby('key')['value'].apply(lambda x: 
x.cumprod()) + expected.name = 'value' + tm.assert_series_equal(actual, expected) + + def test_ops_general(self): ops = [('mean', np.mean), ('median', np.median),
closes - #4095 - adding Cythonized versions of `shift`, `cumprod` and `cumsum`, along with a helper `GroupBy._cython_transform` that other cythonized transform-ops could be added to. I'm using fused types here instead of adding to `generated.pyx` - as far as I can tell that is working well, looking back through the old issues (#255) it looks like that wasn't done originally because of older versions of Cython? ### 0.16.2 ``` In [17]: df = pd.DataFrame({'i':range(1000) * 1000, 'f':np.linspace(0, 2000, 1000000), 'f2':np.linspace(1, 2, 1000000)}) In [18]: %timeit df.groupby('i').cumsum() 1 loops, best of 3: 614 ms per loop In [19]: %timeit df.groupby('i').shift() 1 loops, best of 3: 499 ms per loop ``` ### PR ``` In [2]: %timeit df.groupby('i').cumsum() 10 loops, best of 3: 58.6 ms per loop In [3]: %timeit df.groupby('i').shift() 10 loops, best of 3: 73.4 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10901
2015-08-25T03:46:14Z
2015-11-16T12:37:55Z
null
2015-12-04T02:32:26Z
updated link to online documentation
diff --git a/pandas/info.py b/pandas/info.py index 754741c117289..57ecd91739eab 100644 --- a/pandas/info.py +++ b/pandas/info.py @@ -2,7 +2,7 @@ pandas - a powerful data analysis and manipulation library for Python ===================================================================== -See http://pandas.sourceforge.net for full documentation. Otherwise, see the +See http://pandas.pydata.org/ for full documentation. Otherwise, see the docstrings of the various objects in the pandas namespace: Series
https://api.github.com/repos/pandas-dev/pandas/pulls/10896
2015-08-24T17:13:34Z
2015-08-24T17:19:01Z
2015-08-24T17:19:01Z
2015-08-24T17:20:36Z
PERF: improves SeriesGroupBy.nunique performance
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index d30b7875e44b7..26fcbdca28ce7 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -679,6 +679,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`) - Performance improvements in ``Categorical.value_counts`` (:issue:`10804`) +- Performance improvements in ``SeriesGroupBy.nunique`` (:issue:`10820`) - 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`) - 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index d23cb39c15548..220e67c43e4be 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -82,8 +82,7 @@ _series_apply_whitelist = \ (_common_apply_whitelist - set(['boxplot'])) | \ - frozenset(['dtype', 'value_counts', 'unique', 'nunique', - 'nlargest', 'nsmallest']) + frozenset(['dtype', 'value_counts', 'unique', 'nlargest', 'nsmallest']) _dataframe_apply_whitelist = \ _common_apply_whitelist | frozenset(['dtypes', 'corrwith']) @@ -2558,6 +2557,32 @@ def true_and_notnull(x, *args, **kwargs): filtered = self._apply_filter(indices, dropna) return filtered + def nunique(self, dropna=True): + ids, _, _ = self.grouper.group_info + val = self.obj.get_values() + + sorter = np.lexsort((val, ids)) + ids, val = ids[sorter], val[sorter] + + # group boundries are where group ids change + # unique observations are where sorted values change + idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] + inc = np.r_[1, val[1:] != val[:-1]] + + # 1st item of each group is a new unique observation + mask = isnull(val) + if dropna: + inc[idx] = 1 + inc[mask] = 0 + else: + inc[mask & np.r_[False, mask[:-1]]] = 0 + inc[idx] = 1 + + out = np.add.reduceat(inc, idx) + return Series(out if ids[0] != -1 else out[1:], + index=self.grouper.result_index, + 
name=self.name) + def _apply_to_column_groupbys(self, func): """ return a pass thru """ return func(self) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index fa2e6e911ab5e..afce4e682c0f9 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -1617,6 +1617,40 @@ def test_groupby_as_index_agg(self): assert_frame_equal(left, right) + def test_series_groupby_nunique(self): + from itertools import product + from string import ascii_lowercase + + def check_nunique(df, keys): + for sort, dropna in product((False, True), repeat=2): + gr = df.groupby(keys, sort=sort) + left = gr['julie'].nunique(dropna=dropna) + + gr = df.groupby(keys, sort=sort) + right = gr['julie'].apply(Series.nunique, dropna=dropna) + + assert_series_equal(left, right) + + days = date_range('2015-08-23', periods=10) + + for n, m in product(10**np.arange(2, 6), (10, 100, 1000)): + frame = DataFrame({ + 'jim':np.random.choice(list(ascii_lowercase), n), + 'joe':np.random.choice(days, n), + 'julie':np.random.randint(0, m, n)}) + + check_nunique(frame, ['jim']) + check_nunique(frame, ['jim', 'joe']) + + frame.loc[1::17, 'jim'] = None + frame.loc[3::37, 'joe'] = None + frame.loc[7::19, 'julie'] = None + frame.loc[8::19, 'julie'] = None + frame.loc[9::19, 'julie'] = None + + check_nunique(frame, ['jim']) + check_nunique(frame, ['jim', 'joe']) + def test_mulitindex_passthru(self): # GH 7997 @@ -4913,7 +4947,7 @@ def test_groupby_whitelist(self): 'corr', 'cov', 'value_counts', 'diff', - 'unique', 'nunique', + 'unique', 'nlargest', 'nsmallest', ])
closes https://github.com/pydata/pandas/issues/10820 on master: ``` ipython In [2]: df = pd.DataFrame({'a': np.random.randint(10000, size=100000), ...: 'b': np.random.randint(10, size=100000)}) In [3]: %timeit df.groupby('a')['b'].nunique() 1 loops, best of 3: 1.66 s per loop In [4]: %timeit df.groupby(['a', 'b'])['b'].first().groupby(level=0).size() 10 loops, best of 3: 36.3 ms per loop ``` on branch: ``` ipython In [2]: %timeit df.groupby('a')['b'].nunique() 10 loops, best of 3: 29.2 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10894
2015-08-24T00:42:38Z
2015-08-24T18:33:56Z
2015-08-24T18:33:56Z
2015-11-11T16:27:48Z
DEPR: Bunch o deprecation removals part 2
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 19fe4e73f1f4d..fdbb5771aff3f 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -665,6 +665,7 @@ Deprecations ``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)`` (:issue:`10735`). - ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`) +- ``WidePanel`` deprecated in favor of ``Panel``, ``LongPanel`` in favor of ``DataFrame`` (note these have been aliases since < 0.11.0), (:issue:`10892`) .. _whatsnew_0170.prior_deprecations: @@ -705,6 +706,15 @@ Removal of prior version deprecations/changes df.add(df.A,axis='index') + + + +- Remove ``table`` keyword in ``HDFStore.put/append``, in favor of using ``format=`` (:issue:`4645`) +- Remove ``kind`` in ``read_excel/ExcelFile`` as its unused (:issue:`4712`) +- Remove ``infer_type`` keyword from ``pd.read_html`` as its unused (:issue:`4770`, :issue:`7032`) +- Remove ``offset`` and ``timeRule`` keywords from ``Series.tshift/shift``, in favor of ``freq`` (:issue:`4853`, :issue:`4864`) +- Remove ``pd.load/pd.save`` aliases in favor of ``pd.to_pickle/pd.read_pickle`` (:issue:`3787`) + .. 
_whatsnew_0170.performance: Performance Improvements diff --git a/pandas/core/api.py b/pandas/core/api.py index fde9bc77c4bd9..e2ac57e37cba6 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -12,14 +12,12 @@ from pandas.core.series import Series, TimeSeries from pandas.core.frame import DataFrame -from pandas.core.panel import Panel +from pandas.core.panel import Panel, WidePanel from pandas.core.panel4d import Panel4D from pandas.core.groupby import groupby from pandas.core.reshape import (pivot_simple as pivot, get_dummies, lreshape, wide_to_long) -WidePanel = Panel - from pandas.core.indexing import IndexSlice from pandas.tseries.offsets import DateOffset from pandas.tseries.tools import to_datetime @@ -29,7 +27,6 @@ from pandas.tseries.period import Period, PeriodIndex # legacy -from pandas.core.common import save, load # deprecated, remove in 0.13 import pandas.core.datetools as datetools from pandas.core.config import (get_option, set_option, reset_option, diff --git a/pandas/core/common.py b/pandas/core/common.py index 0d74a4449a5f5..245535e47abd8 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -3313,46 +3313,6 @@ def console_encode(object, **kwds): return pprint_thing_encoded(object, get_option("display.encoding")) - -def load(path): # TODO remove in 0.13 - """ - Load pickled pandas object (or any other pickled object) from the specified - file path - - Warning: Loading pickled data received from untrusted sources can be - unsafe. 
See: http://docs.python.org/2.7/library/pickle.html - - Parameters - ---------- - path : string - File path - - Returns - ------- - unpickled : type of object stored in file - """ - import warnings - warnings.warn("load is deprecated, use read_pickle", FutureWarning) - from pandas.io.pickle import read_pickle - return read_pickle(path) - - -def save(obj, path): # TODO remove in 0.13 - """ - Pickle (serialize) object to input file path - - Parameters - ---------- - obj : any object - path : string - File path - """ - import warnings - warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning) - from pandas.io.pickle import to_pickle - return to_pickle(obj, path) - - def _maybe_match_name(a, b): a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py index 6678baac1dae5..28cd97f437f29 100644 --- a/pandas/core/datetools.py +++ b/pandas/core/datetools.py @@ -41,23 +41,3 @@ isBusinessDay = BDay().onOffset isMonthEnd = MonthEnd().onOffset isBMonthEnd = BMonthEnd().onOffset - - -def _resolve_offset(freq, kwds): - if 'timeRule' in kwds or 'offset' in kwds: - offset = kwds.get('offset', None) - offset = kwds.get('timeRule', offset) - if isinstance(offset, compat.string_types): - offset = getOffset(offset) - warn = True - else: - offset = freq - warn = False - - if warn: - import warnings - warnings.warn("'timeRule' and 'offset' parameters are deprecated," - " please use 'freq' instead", - FutureWarning) - - return offset diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a9979b4eb3810..0321082669a05 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2618,9 +2618,9 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, **kwargs) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0, **kwargs): + def shift(self, periods=1, freq=None, axis=0): return super(DataFrame, self).shift(periods=periods, freq=freq, - 
axis=axis, **kwargs) + axis=axis) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bc49e9dd79e6a..e54a6a6f11148 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1006,20 +1006,6 @@ def to_pickle(self, path): from pandas.io.pickle import to_pickle return to_pickle(self, path) - def save(self, path): # TODO remove in 0.14 - "Deprecated. Use to_pickle instead" - import warnings - from pandas.io.pickle import to_pickle - warnings.warn("save is deprecated, use to_pickle", FutureWarning) - return to_pickle(self, path) - - def load(self, path): # TODO remove in 0.14 - "Deprecated. Use read_pickle instead." - import warnings - from pandas.io.pickle import read_pickle - warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning) - return read_pickle(path) - def to_clipboard(self, excel=None, sep=None, **kwargs): """ Attempt to write text representation of object to the system clipboard @@ -3806,15 +3792,15 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, shifted : %(klass)s """) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0, **kwargs): + def shift(self, periods=1, freq=None, axis=0): if periods == 0: return self block_axis = self._get_block_manager_axis(axis) - if freq is None and not len(kwargs): + if freq is None: new_data = self._data.shift(periods=periods, axis=block_axis) else: - return self.tshift(periods, freq, **kwargs) + return self.tshift(periods, freq) return self._constructor(new_data).__finalize__(self) @@ -3854,7 +3840,7 @@ def slice_shift(self, periods=1, axis=0): return new_obj.__finalize__(self) - def tshift(self, periods=1, freq=None, axis=0, **kwargs): + def tshift(self, periods=1, freq=None, axis=0): """ Shift the time index, using the index's frequency if available @@ -3877,7 +3863,6 @@ def tshift(self, periods=1, freq=None, axis=0, 
**kwargs): ------- shifted : NDFrame """ - from pandas.core.datetools import _resolve_offset index = self._get_axis(axis) if freq is None: @@ -3893,24 +3878,22 @@ def tshift(self, periods=1, freq=None, axis=0, **kwargs): if periods == 0: return self - offset = _resolve_offset(freq, kwargs) - - if isinstance(offset, string_types): - offset = datetools.to_offset(offset) + if isinstance(freq, string_types): + freq = datetools.to_offset(freq) block_axis = self._get_block_manager_axis(axis) if isinstance(index, PeriodIndex): - orig_offset = datetools.to_offset(index.freq) - if offset == orig_offset: + orig_freq = datetools.to_offset(index.freq) + if freq == orig_freq: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods) else: msg = ('Given freq %s does not match PeriodIndex freq %s' % - (offset.rule_code, orig_offset.rule_code)) + (freq.rule_code, orig_freq.rule_code)) raise ValueError(msg) else: new_data = self._data.copy() - new_data.axes[block_axis] = index.shift(periods, offset) + new_data.axes[block_axis] = index.shift(periods, freq) return self._constructor(new_data).__finalize__(self) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index bc342d5919bb8..d45422ecfa81d 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1210,8 +1210,8 @@ def shift(self, periods=1, freq=None, axis='major'): return super(Panel, self).slice_shift(periods, axis=axis) - def tshift(self, periods=1, freq=None, axis='major', **kwds): - return super(Panel, self).tshift(periods, freq, axis, **kwds) + def tshift(self, periods=1, freq=None, axis='major'): + return super(Panel, self).tshift(periods, freq, axis) def join(self, other, how='left', lsuffix='', rsuffix=''): """ @@ -1509,5 +1509,23 @@ def f(self, other, axis=0): Panel._add_aggregate_operations() Panel._add_numeric_operations() -WidePanel = Panel -LongPanel = DataFrame +# legacy +class WidePanel(Panel): + + def __init__(self, *args, **kwargs): + + # deprecation, #10892 + 
warnings.warn("WidePanel is deprecated. Please use Panel", + FutureWarning, stacklevel=2) + + super(WidePanel, self).__init__(*args, **kwargs) + +class LongPanel(DataFrame): + + def __init__(self, *args, **kwargs): + + # deprecation, #10892 + warnings.warn("LongPanel is deprecated. Please use DataFrame", + FutureWarning, stacklevel=2) + + super(LongPanel, self).__init__(*args, **kwargs) diff --git a/pandas/core/series.py b/pandas/core/series.py index 0c17104bb701e..185b6d2b74801 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2182,9 +2182,9 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, **kwargs) @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) - def shift(self, periods=1, freq=None, axis=0, **kwargs): + def shift(self, periods=1, freq=None, axis=0): return super(Series, self).shift(periods=periods, freq=freq, - axis=axis, **kwargs) + axis=axis) def reindex_axis(self, labels, axis=0, **kwargs): """ for compatibility with higher dims """ diff --git a/pandas/io/excel.py b/pandas/io/excel.py index d58d6590b96c0..d5258cb32e6e0 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -78,17 +78,17 @@ def read_excel(io, sheetname=0, **kwds): and file. For file URLs, a host is expected. For instance, a local file could be file://localhost/path/to/workbook.xlsx sheetname : string, int, mixed list of strings/ints, or None, default 0 - - Strings are used for sheet names, Integers are used in zero-indexed sheet - positions. - + + Strings are used for sheet names, Integers are used in zero-indexed sheet + positions. + Lists of strings/integers are used to request multiple sheets. - + Specify None to get all sheets. - + str|int -> DataFrame is returned. list|None -> Dict of DataFrames is returned, with keys representing sheets. - + Available Cases * Defaults to 0 -> 1st sheet as a DataFrame @@ -143,11 +143,6 @@ def read_excel(io, sheetname=0, **kwds): for more information on when a Dict of Dataframes is returned. 
""" - if 'kind' in kwds: - kwds.pop('kind') - warn("kind keyword is no longer supported in read_excel and may be " - "removed in a future version", FutureWarning) - engine = kwds.pop('engine', None) return ExcelFile(io, engine=engine).parse(sheetname=sheetname, **kwds) @@ -207,19 +202,19 @@ def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0, Parameters ---------- sheetname : string, int, mixed list of strings/ints, or None, default 0 - - Strings are used for sheet names, Integers are used in zero-indexed sheet - positions. - + + Strings are used for sheet names, Integers are used in zero-indexed sheet + positions. + Lists of strings/integers are used to request multiple sheets. - + Specify None to get all sheets. - + str|int -> DataFrame is returned. list|None -> Dict of DataFrames is returned, with keys representing sheets. - + Available Cases - + * Defaults to 0 -> 1st sheet as a DataFrame * 1 -> 2nd sheet as a DataFrame * "Sheet1" -> 1st sheet as a DataFrame @@ -336,7 +331,7 @@ def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0, def _parse_cell(cell_contents,cell_typ): """converts the contents of the cell into a pandas appropriate object""" - + if cell_typ == XL_CELL_DATE: if xlrd_0_9_3: # Use the newer xlrd datetime handling. @@ -379,9 +374,9 @@ def _parse_cell(cell_contents,cell_typ): xlrd_0_9_3 = True else: xlrd_0_9_3 = False - + ret_dict = False - + #Keep sheetname to maintain backwards compatibility. if isinstance(sheetname, list): sheets = sheetname @@ -391,31 +386,31 @@ def _parse_cell(cell_contents,cell_typ): ret_dict = True else: sheets = [sheetname] - + #handle same-type duplicates. 
sheets = list(set(sheets)) - + output = {} - + for asheetname in sheets: if verbose: print("Reading sheet %s" % asheetname) - + if isinstance(asheetname, compat.string_types): sheet = self.book.sheet_by_name(asheetname) - else: # assume an integer if not a string - sheet = self.book.sheet_by_index(asheetname) - + else: # assume an integer if not a string + sheet = self.book.sheet_by_index(asheetname) + data = [] should_parse = {} - + for i in range(sheet.nrows): row = [] for j, (value, typ) in enumerate(zip(sheet.row_values(i), sheet.row_types(i))): if parse_cols is not None and j not in should_parse: should_parse[j] = self._should_parse(j, parse_cols) - + if parse_cols is None or should_parse[j]: row.append(_parse_cell(value,typ)) data.append(row) @@ -436,14 +431,14 @@ def _parse_cell(cell_contents,cell_typ): skip_footer=skip_footer, chunksize=chunksize, **kwds) - + output[asheetname] = parser.read() - + if ret_dict: return output else: return output[asheetname] - + @property def sheet_names(self): diff --git a/pandas/io/html.py b/pandas/io/html.py index b806b5147c4a5..cb2ee7b1c1e3f 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -604,7 +604,7 @@ def _expand_elements(body): body[ind] += empty * (lens_max - length) -def _data_to_frame(data, header, index_col, skiprows, infer_types, +def _data_to_frame(data, header, index_col, skiprows, parse_dates, tupleize_cols, thousands): head, body, foot = data @@ -707,7 +707,7 @@ def _validate_flavor(flavor): return flavor -def _parse(flavor, io, match, header, index_col, skiprows, infer_types, +def _parse(flavor, io, match, header, index_col, skiprows, parse_dates, tupleize_cols, thousands, attrs, encoding): flavor = _validate_flavor(flavor) compiled_match = re.compile(match) # you can pass a compiled regex here @@ -730,15 +730,20 @@ def _parse(flavor, io, match, header, index_col, skiprows, infer_types, ret = [] for table in tables: try: - ret.append(_data_to_frame(table, header, index_col, skiprows, - infer_types, 
parse_dates, tupleize_cols, thousands)) + ret.append(_data_to_frame(data=table, + header=header, + index_col=index_col, + skiprows=skiprows, + parse_dates=parse_dates, + tupleize_cols=tupleize_cols, + thousands=thousands)) except StopIteration: # empty table continue return ret def read_html(io, match='.+', flavor=None, header=None, index_col=None, - skiprows=None, infer_types=None, attrs=None, parse_dates=False, + skiprows=None, attrs=None, parse_dates=False, tupleize_cols=False, thousands=',', encoding=None): r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. @@ -776,9 +781,6 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. - infer_types : None, optional - This has no effect since 0.15.0. It is here for backwards compatibility. - attrs : dict or None, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being @@ -853,13 +855,11 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, pandas.read_csv """ _importers() - if infer_types is not None: - warnings.warn("infer_types has no effect since 0.15", FutureWarning) # Type check here. We don't want to parse only to fail because of an # invalid value of an integer skiprows. 
if isinstance(skiprows, numbers.Integral) and skiprows < 0: raise ValueError('cannot skip rows starting from the end of the ' 'data (you passed a negative value)') - return _parse(flavor, io, match, header, index_col, skiprows, infer_types, + return _parse(flavor, io, match, header, index_col, skiprows, parse_dates, tupleize_cols, thousands, attrs, encoding) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b23a183cdc145..dd02157e201d5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1117,17 +1117,6 @@ def _validate_format(self, format, kwargs): """ validate / deprecate formats; return the new kwargs """ kwargs = kwargs.copy() - # table arg - table = kwargs.pop('table', None) - - if table is not None: - warnings.warn(format_deprecate_doc, FutureWarning) - - if table: - format = 'table' - else: - format = 'fixed' - # validate try: kwargs['format'] = _FORMAT_MAP[format.lower()] diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py index 9093df9f0bf62..921107859082d 100644 --- a/pandas/io/tests/test_html.py +++ b/pandas/io/tests/test_html.py @@ -137,12 +137,10 @@ def test_banklist(self): assert_framelist_equal(df1, df2) def test_spam_no_types(self): - with tm.assert_produces_warning(FutureWarning): - df1 = self.read_html(self.spam_data, '.*Water.*', - infer_types=False) - with tm.assert_produces_warning(FutureWarning): - df2 = self.read_html(self.spam_data, 'Unit', infer_types=False) + # infer_types removed in #10892 + df1 = self.read_html(self.spam_data, '.*Water.*') + df2 = self.read_html(self.spam_data, 'Unit') assert_framelist_equal(df1, df2) self.assertEqual(df1[0].ix[0, 0], 'Proximates') @@ -230,12 +228,9 @@ def test_index(self): assert_framelist_equal(df1, df2) def test_header_and_index_no_types(self): - with tm.assert_produces_warning(FutureWarning): - df1 = self.read_html(self.spam_data, '.*Water.*', header=1, - index_col=0, infer_types=False) - with tm.assert_produces_warning(FutureWarning): - df2 = 
self.read_html(self.spam_data, 'Unit', header=1, index_col=0, - infer_types=False) + df1 = self.read_html(self.spam_data, '.*Water.*', header=1, + index_col=0) + df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0) assert_framelist_equal(df1, df2) def test_header_and_index_with_types(self): @@ -245,18 +240,10 @@ def test_header_and_index_with_types(self): assert_framelist_equal(df1, df2) def test_infer_types(self): - with tm.assert_produces_warning(FutureWarning): - df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0, - infer_types=False) - with tm.assert_produces_warning(FutureWarning): - df2 = self.read_html(self.spam_data, 'Unit', index_col=0, - infer_types=False) - assert_framelist_equal(df1, df2) - - with tm.assert_produces_warning(FutureWarning): - df2 = self.read_html(self.spam_data, 'Unit', index_col=0, - infer_types=True) + # 10892 infer_types removed + df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0) + df2 = self.read_html(self.spam_data, 'Unit', index_col=0) assert_framelist_equal(df1, df2) def test_string_io(self): @@ -641,8 +628,7 @@ def test_computer_sales_page(self): with tm.assertRaisesRegexp(CParserError, r"Passed header=\[0,1\] are " "too many rows for this multi_index " "of columns"): - with tm.assert_produces_warning(FutureWarning): - self.read_html(data, infer_types=False, header=[0, 1]) + self.read_html(data, header=[0, 1]) def test_wikipedia_states_table(self): data = os.path.join(DATA_PATH, 'wikipedia_states.html') @@ -751,8 +737,7 @@ def test_parse_dates_combine(self): def test_computer_sales_page(self): data = os.path.join(DATA_PATH, 'computer_sales_page.html') - with tm.assert_produces_warning(FutureWarning): - self.read_html(data, infer_types=False, header=[0, 1]) + self.read_html(data, header=[0, 1]) def test_invalid_flavor(): diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 210852d83094f..3a128fa3f247d 100644 --- a/pandas/io/tests/test_pytables.py +++ 
b/pandas/io/tests/test_pytables.py @@ -1040,7 +1040,7 @@ def test_append_all_nans(self): store.append('df2', df[10:], dropna=False) tm.assert_frame_equal(store['df2'], df) - # Test to make sure defaults are to not drop. + # Test to make sure defaults are to not drop. # Corresponding to Issue 9382 df_with_missing = DataFrame({'col1':[0, np.nan, 2], 'col2':[1, np.nan, np.nan]}) @@ -1059,7 +1059,7 @@ def test_append_all_nans(self): with ensure_clean_path(self.path) as path: panel_with_missing.to_hdf(path, 'panel_with_missing', format='table') - reloaded_panel = read_hdf(path, 'panel_with_missing') + reloaded_panel = read_hdf(path, 'panel_with_missing') tm.assert_panel_equal(panel_with_missing, reloaded_panel) def test_append_frame_column_oriented(self): @@ -2440,9 +2440,9 @@ def test_terms(self): p4d = tm.makePanel4D() wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(), 1: tm.makeDataFrame()}) - store.put('wp', wp, table=True) - store.put('p4d', p4d, table=True) - store.put('wpneg', wpneg, table=True) + store.put('wp', wp, format='table') + store.put('p4d', p4d, format='table') + store.put('wpneg', wpneg, format='table') # panel result = store.select('wp', [Term( @@ -2607,7 +2607,7 @@ def test_same_name_scoping(self): import pandas as pd df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20)) - store.put('df', df, table=True) + store.put('df', df, format='table') expected = df[df.index>pd.Timestamp('20130105')] import datetime @@ -3608,7 +3608,7 @@ def test_frame_select_complex(self): df.loc[df.index[0:4],'string'] = 'bar' with ensure_clean_store(self.path) as store: - store.put('df', df, table=True, data_columns=['string']) + store.put('df', df, format='table', data_columns=['string']) # empty result = store.select('df', 'index>df.index[3] & string="bar"') @@ -3717,7 +3717,7 @@ def test_invalid_filtering(self): df = tm.makeTimeDataFrame() with ensure_clean_store(self.path) as store: - store.put('df', df, table=True) + 
store.put('df', df, format='table') # not implemented self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']") diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 62d6a34655e1d..420cf509395ce 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -604,13 +604,10 @@ def dropna(self, axis=0, inplace=False, **kwargs): dense_valid = dense_valid[dense_valid != self.fill_value] return dense_valid.to_sparse(fill_value=self.fill_value) - def shift(self, periods, freq=None, **kwds): + def shift(self, periods, freq=None): """ Analogous to Series.shift """ - from pandas.core.datetools import _resolve_offset - - offset = _resolve_offset(freq, kwds) # no special handling of fill values yet if not isnull(self.fill_value): @@ -622,10 +619,10 @@ def shift(self, periods, freq=None, **kwds): if periods == 0: return self.copy() - if offset is not None: + if freq is not None: return self._constructor(self.sp_values, sparse_index=self.sp_index, - index=self.index.shift(periods, offset), + index=self.index.shift(periods, freq), fill_value=self.fill_value).__finalize__(self) int_index = self.sp_index.to_int_index() diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6424a190dba9f..a429059c761d6 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5385,10 +5385,10 @@ def test_shift(self): self.assertRaises(ValueError, ps.shift, freq='D') # legacy support - shifted4 = ps.shift(1, timeRule='B') + shifted4 = ps.shift(1, freq='B') assert_series_equal(shifted2, shifted4) - shifted5 = ps.shift(1, offset=datetools.bday) + shifted5 = ps.shift(1, freq=datetools.bday) assert_series_equal(shifted5, shifted4) # 32-bit taking
- Remove the table keyword in HDFStore.put/append, in favor of using format= #4645 - Remove unused keyword `kind` in `read_excel/ExcelFile` #4712 - Remove `infer_type` keyword from `pd.read_html` as its unused, #4770, #7032 - Remove `offset` and `timeRule` keywords from `Series.tshift/shift`, in favor of `freq`, #4853, #4864 - Remove `pd.load/pd.save` aliases in favor of `pd.to_pickle/pd.read_pickle`, #3787 - Deprecate `WidePanel/LongPanel`
https://api.github.com/repos/pandas-dev/pandas/pulls/10892
2015-08-23T12:44:39Z
2015-08-24T18:24:19Z
2015-08-24T18:24:19Z
2015-08-24T18:24:19Z
DEPR: Bunch o deprecation removals
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index d30b7875e44b7..7415ac01ada7a 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -37,6 +37,7 @@ Highlights include: - Support for ``Series.dt.strftime`` to generate formatted strings for datetime-likes, see :ref:`here <whatsnew_0170.strftime>` - Development installed versions of pandas will now have ``PEP440`` compliant version strings (:issue:`9518`) - Support for reading SAS xport files, see :ref:`here <whatsnew_0170.enhancements.sas_xport>` +- Removal of the automatic TimeSeries broadcasting, deprecated since 0.8.0, see :ref:`here <whatsnew_0170.prior_deprecations>` Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating. @@ -663,6 +664,7 @@ Deprecations can easily be replaced by using the ``add`` and ``mul`` methods: ``DataFrame.add(other, fill_value=0)`` and ``DataFrame.mul(other, fill_value=1.)`` (:issue:`10735`). +- ``TimeSeries`` deprecated in favor of ``Series`` (note that this has been alias since 0.13.0), (:issue:`10890`) .. _whatsnew_0170.prior_deprecations: @@ -672,6 +674,36 @@ Removal of prior version deprecations/changes - Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`) - Removal of ``na_last`` parameters from ``Series.order()`` and ``Series.sort()``, in favor of ``na_position``, xref (:issue:`5231`) - Remove of ``percentile_width`` from ``.describe()``, in favor of ``percentiles``. (:issue:`7088`) +- Removal of ``colSpace`` parameter from ``DataFrame.to_string()``, in favor of ``col_space``, circa 0.8.0 version. +- Removal of automatic time-series broadcasting (:issue:`2304`) + + .. ipython :: python + + np.random.seed(1234) + df = DataFrame(np.random.randn(5,2),columns=list('AB'),index=date_range('20130101',periods=5)) + df + + Previously + + .. 
code-block:: python + + In [3]: df + df.A + FutureWarning: TimeSeries broadcasting along DataFrame index by default is deprecated. + Please use DataFrame.<op> to explicitly broadcast arithmetic operations along the index + + Out[3]: + A B + 2013-01-01 0.942870 -0.719541 + 2013-01-02 2.865414 1.120055 + 2013-01-03 -1.441177 0.166574 + 2013-01-04 1.719177 0.223065 + 2013-01-05 0.031393 -2.226989 + + Current + + .. ipython :: python + + df.add(df.A,axis='index') .. _whatsnew_0170.performance: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1f222f9f99cbe..a9979b4eb3810 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1396,7 +1396,7 @@ def to_stata( writer.write_file() @Appender(fmt.docstring_to_string, indents=1) - def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, + def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, line_width=None, max_rows=None, max_cols=None, @@ -1405,11 +1405,6 @@ def to_string(self, buf=None, columns=None, col_space=None, colSpace=None, Render a DataFrame to a console-friendly tabular output. """ - if colSpace is not None: # pragma: no cover - warnings.warn("colSpace is deprecated, use col_space", - FutureWarning) - col_space = colSpace - formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, @@ -3359,16 +3354,7 @@ def _combine_series_infer(self, other, func, level=None, fill_value=None): return self._constructor(data=self._series, index=self.index, columns=self.columns) - # teeny hack because one does DataFrame + TimeSeries all the time - if self.index.is_all_dates and other.index.is_all_dates: - warnings.warn(("TimeSeries broadcasting along DataFrame index " - "by default is deprecated. 
Please use " - "DataFrame.<op> to explicitly broadcast arithmetic " - "operations along the index"), - FutureWarning) - return self._combine_match_index(other, func, level=level, fill_value=fill_value) - else: - return self._combine_match_columns(other, func, level=level, fill_value=fill_value) + return self._combine_match_columns(other, func, level=level, fill_value=fill_value) def _combine_match_index(self, other, func, level=None, fill_value=None): left, right = self.align(other, join='outer', axis=0, level=level, copy=False) diff --git a/pandas/core/series.py b/pandas/core/series.py index 8768d0e139e7b..0c17104bb701e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -261,6 +261,7 @@ def _set_axis(self, axis, labels, fastpath=False): is_all_dates = labels.is_all_dates if is_all_dates: + if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): labels = DatetimeIndex(labels) @@ -2779,7 +2780,14 @@ def _try_cast(arr, take_fast_path): return subarr # backwards compatiblity -TimeSeries = Series +class TimeSeries(Series): + + def __init__(self, *args, **kwargs): + # deprecation TimeSeries, #10890 + warnings.warn("TimeSeries is deprecated. 
Please use Series", + FutureWarning, stacklevel=2) + + super(TimeSeries, self).__init__(*args, **kwargs) #---------------------------------------------------------------------- # Add plotting methods to Series diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 8ef6363f836ae..b23a183cdc145 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -13,7 +13,8 @@ import os import numpy as np -from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index, +import pandas as pd +from pandas import (Series, DataFrame, Panel, Panel4D, Index, MultiIndex, Int64Index, Timestamp) from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel from pandas.sparse.array import BlockIndex, IntIndex @@ -164,7 +165,7 @@ class DuplicateWarning(Warning): Series: u('series'), SparseSeries: u('sparse_series'), - TimeSeries: u('series'), + pd.TimeSeries: u('series'), DataFrame: u('frame'), SparseDataFrame: u('sparse_frame'), Panel: u('wide'), diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py index 86c5a9e0d7f19..0ca5ced1b8d1a 100644 --- a/pandas/io/tests/generate_legacy_storage_files.py +++ b/pandas/io/tests/generate_legacy_storage_files.py @@ -1,8 +1,8 @@ """ self-contained to write legacy storage (pickle/msgpack) files """ from __future__ import print_function from distutils.version import LooseVersion -from pandas import (Series, TimeSeries, DataFrame, Panel, - SparseSeries, SparseTimeSeries, SparseDataFrame, SparsePanel, +from pandas import (Series, DataFrame, Panel, + SparseSeries, SparseDataFrame, SparsePanel, Index, MultiIndex, PeriodIndex, bdate_range, to_msgpack, date_range, period_range, bdate_range, Timestamp, Categorical, Period) @@ -36,7 +36,7 @@ def _create_sp_tsseries(): arr[-1:] = nan date_index = bdate_range('1/1/2011', periods=len(arr)) - bseries = SparseTimeSeries(arr, index=date_index, kind='block') + bseries = SparseSeries(arr, index=date_index, kind='block') 
bseries.name = 'btsseries' return bseries @@ -78,7 +78,7 @@ def create_data(): series = dict(float=Series(data['A']), int=Series(data['B']), mixed=Series(data['E']), - ts=TimeSeries(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)), + ts=Series(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)), mi=Series(np.arange(5).astype(np.float64), index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=['one', 'two'])), diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index a8addfab17c26..62d6a34655e1d 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -7,7 +7,7 @@ from numpy import nan, ndarray import numpy as np - +import warnings import operator from pandas.core.common import isnull, _values_from_object, _maybe_match_name @@ -770,4 +770,11 @@ def from_coo(cls, A, dense_index=False): bool_method=None, use_numexpr=False, force=True) # backwards compatiblity -SparseTimeSeries = SparseSeries +class SparseTimeSeries(SparseSeries): + + def __init__(self, *args, **kwargs): + # deprecation TimeSeries, #10890 + warnings.warn("SparseTimeSeries is deprecated. 
Please use SparseSeries", + FutureWarning, stacklevel=2) + + super(SparseTimeSeries, self).__init__(*args, **kwargs) diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index 103f3992f950a..8d24025f3c3cf 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -30,7 +30,7 @@ import pandas.sparse.frame as spf from pandas._sparse import BlockIndex, IntIndex -from pandas.sparse.api import (SparseSeries, SparseTimeSeries, +from pandas.sparse.api import (SparseSeries, SparseDataFrame, SparsePanel, SparseArray) import pandas.tests.test_frame as test_frame @@ -160,6 +160,12 @@ def test_iteration_and_str(self): [x for x in self.bseries] str(self.bseries) + def test_TimeSeries_deprecation(self): + + # deprecation TimeSeries, #10890 + with tm.assert_produces_warning(FutureWarning): + pd.SparseTimeSeries(1,index=pd.date_range('20130101',periods=3)) + def test_construct_DataFrame_with_sp_series(self): # it works! df = DataFrame({'col': self.bseries}) @@ -258,7 +264,7 @@ def _check_const(sparse, name): # Sparse time series works date_index = bdate_range('1/1/2000', periods=len(self.bseries)) s5 = SparseSeries(self.bseries, index=date_index) - tm.assertIsInstance(s5, SparseTimeSeries) + tm.assertIsInstance(s5, SparseSeries) # pass Series bseries2 = SparseSeries(self.bseries.to_dense()) @@ -1189,14 +1195,19 @@ def _compare_to_dense(a, b, da, db, op): frame['A'].reindex(fidx[::2]), SparseSeries([], index=[])] - for op in ops: + for op in opnames: _compare_to_dense(frame, frame[::2], frame.to_dense(), - frame[::2].to_dense(), op) + frame[::2].to_dense(), getattr(operator, op)) + + # 2304, no auto-broadcasting for i, s in enumerate(series): + f = lambda a, b: getattr(a,op)(b,axis='index') _compare_to_dense(frame, s, frame.to_dense(), - s.to_dense(), op) - _compare_to_dense(s, frame, s.to_dense(), - frame.to_dense(), op) + s.to_dense(), f) + + # rops are not implemented + #_compare_to_dense(s, frame, s.to_dense(), + 
# frame.to_dense(), f) # cross-sectional operations series = [frame.xs(fidx[0]), diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 022594e296c2a..9687d9b742126 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6039,46 +6039,47 @@ def test_combineSeries(self): #added = self.mixed_int + (100*series).astype('int32') #_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64')) - # TimeSeries - buf = StringIO() - tmp = sys.stderr - sys.stderr = buf - try: - ts = self.tsframe['A'] - added = self.tsframe + ts - - for key, col in compat.iteritems(self.tsframe): - result = col + ts - assert_series_equal(added[key], result, check_names=False) - self.assertEqual(added[key].name, key) - if col.name == ts.name: - self.assertEqual(result.name, 'A') - else: - self.assertTrue(result.name is None) + # TimeSeries + ts = self.tsframe['A'] + + # 10890 + # we no longer allow auto timeseries broadcasting + # and require explict broadcasting + added = self.tsframe.add(ts, axis='index') + + for key, col in compat.iteritems(self.tsframe): + result = col + ts + assert_series_equal(added[key], result, check_names=False) + self.assertEqual(added[key].name, key) + if col.name == ts.name: + self.assertEqual(result.name, 'A') + else: + self.assertTrue(result.name is None) - smaller_frame = self.tsframe[:-5] - smaller_added = smaller_frame + ts + smaller_frame = self.tsframe[:-5] + smaller_added = smaller_frame.add(ts, axis='index') - self.assertTrue(smaller_added.index.equals(self.tsframe.index)) + self.assertTrue(smaller_added.index.equals(self.tsframe.index)) - smaller_ts = ts[:-5] - smaller_added2 = self.tsframe + smaller_ts - assert_frame_equal(smaller_added, smaller_added2) + smaller_ts = ts[:-5] + smaller_added2 = self.tsframe.add(smaller_ts, axis='index') + assert_frame_equal(smaller_added, smaller_added2) - # length 0 - result = self.tsframe + ts[:0] + # length 0, result is all-nan + result = 
self.tsframe.add(ts[:0], axis='index') + expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns) + assert_frame_equal(result, expected) - # Frame is length 0 - result = self.tsframe[:0] + ts - self.assertEqual(len(result), 0) + # Frame is all-nan + result = self.tsframe[:0].add(ts, axis='index') + expected = DataFrame(np.nan,index=self.tsframe.index,columns=self.tsframe.columns) + assert_frame_equal(result, expected) - # empty but with non-empty index - frame = self.tsframe[:1].reindex(columns=[]) - result = frame * ts - self.assertEqual(len(result), len(ts)) - finally: - sys.stderr = tmp + # empty but with non-empty index + frame = self.tsframe[:1].reindex(columns=[]) + result = frame.mul(ts,axis='index') + self.assertEqual(len(result), len(ts)) def test_combineFunc(self): result = self.frame * 2 diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index fa2e6e911ab5e..d1073b6c4d7ab 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -502,9 +502,8 @@ def test_groupby_bounds_check(self): self.assertRaises(AssertionError, pd.algos.groupby_object,a, b) def test_groupby_grouper_f_sanity_checked(self): - import pandas as pd dates = date_range('01-Jan-2013', periods=12, freq='MS') - ts = pd.TimeSeries(np.random.randn(12), index=dates) + ts = Series(np.random.randn(12), index=dates) # GH3035 # index.map is used to apply grouper to the index diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 3567c98e71bce..6424a190dba9f 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -666,6 +666,12 @@ def test_astype(self): self.assertEqual(astyped.dtype, dtype) self.assertEqual(astyped.name, s.name) + def test_TimeSeries_deprecation(self): + + # deprecation TimeSeries, #10890 + with tm.assert_produces_warning(FutureWarning): + pd.TimeSeries(1,index=date_range('20130101',periods=3)) + def test_constructor(self): # Recognize TimeSeries 
self.assertTrue(self.ts.is_time_series) @@ -4515,10 +4521,10 @@ def test_operators_frame(self): # rpow does not work with DataFrame df = DataFrame({'A': self.ts}) - tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A']) - tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A']) - tm.assert_almost_equal(self.ts < self.ts, (self.ts < df)['A']) - tm.assert_almost_equal(self.ts / self.ts, (self.ts / df)['A']) + tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A']) + tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A']) + tm.assert_almost_equal(self.ts < self.ts, self.ts < df['A']) + tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A']) def test_operators_combine(self): def _check_fill(meth, op, a, b, fill_value=0): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 6a9ad175f42dd..7886a63c6df46 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -15,7 +15,7 @@ from pandas.compat import range, lrange, u, unichr import pandas.compat as compat -from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull, +from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range, date_range, MultiIndex) import pandas.core.common as com diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index eb5c6759bfa45..e0434bfec3be4 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -24,7 +24,7 @@ from numpy.random import randn from pandas.compat import range, lrange, lmap, zip -from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9 +from pandas import Series, DataFrame, _np_version_under1p9 from pandas import tslib from pandas.util.testing import(assert_series_equal, assert_almost_equal, assertRaisesRegexp) @@ -1191,7 +1191,7 @@ def test_hash_error(self): def test_make_time_series(self): index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') series = Series(1, index=index) - 
tm.assertIsInstance(series, TimeSeries) + tm.assertIsInstance(series, Series) def test_astype(self): idx = period_range('1990', '2009', freq='A') diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index 4b3085dc8259f..7dafc88bf9239 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -980,7 +980,7 @@ def _simple_ts(start, end, freq='D'): def _simple_pts(start, end, freq='D'): rng = period_range(start, end, freq=freq) - return TimeSeries(np.random.randn(len(rng)), index=rng) + return Series(np.random.randn(len(rng)), index=rng) class TestResamplePeriodIndex(tm.TestCase): @@ -1177,7 +1177,7 @@ def test_resample_to_quarterly(self): def test_resample_fill_missing(self): rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A') - s = TimeSeries(np.random.randn(4), index=rng) + s = Series(np.random.randn(4), index=rng) stamps = s.to_timestamp() @@ -1191,12 +1191,12 @@ def test_resample_fill_missing(self): def test_cant_fill_missing_dups(self): rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A') - s = TimeSeries(np.random.randn(5), index=rng) + s = Series(np.random.randn(5), index=rng) self.assertRaises(Exception, s.resample, 'A') def test_resample_5minute(self): rng = period_range('1/1/2000', '1/5/2000', freq='T') - ts = TimeSeries(np.random.randn(len(rng)), index=rng) + ts = Series(np.random.randn(len(rng)), index=rng) result = ts.resample('5min') expected = ts.to_timestamp().resample('5min') @@ -1402,7 +1402,7 @@ def test_evenly_divisible_with_no_extra_bins(self): 'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 + [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10, 'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28, - index=index.append(index)).sort() + index=index.append(index)).sort_index() index = date_range('2001-5-4',periods=4,freq='7D') expected = DataFrame( @@ -1430,7 +1430,7 @@ def test_apply(self): grouped = self.ts.groupby(grouper) - f = lambda x: x.order()[-3:] + 
f = lambda x: x.sort_values()[-3:] applied = grouped.apply(f) expected = self.ts.groupby(lambda x: x.year).apply(f) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index e02973136863d..f416a8939ac82 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -9,7 +9,7 @@ import numpy as np randn = np.random.randn -from pandas import (Index, Series, TimeSeries, DataFrame, +from pandas import (Index, Series, DataFrame, isnull, date_range, Timestamp, Period, DatetimeIndex, Int64Index, to_datetime, bdate_range, Float64Index, TimedeltaIndex, NaT) @@ -60,7 +60,7 @@ def setUp(self): self.dups = Series(np.random.randn(len(dates)), index=dates) def test_constructor(self): - tm.assertIsInstance(self.dups, TimeSeries) + tm.assertIsInstance(self.dups, Series) tm.assertIsInstance(self.dups.index, DatetimeIndex) def test_is_unique_monotonic(self): diff --git a/pandas/tseries/tests/test_timeseries_legacy.py b/pandas/tseries/tests/test_timeseries_legacy.py index 6889f8e2afbb2..4cbc171364ee6 100644 --- a/pandas/tseries/tests/test_timeseries_legacy.py +++ b/pandas/tseries/tests/test_timeseries_legacy.py @@ -8,7 +8,7 @@ import numpy as np randn = np.random.randn -from pandas import (Index, Series, TimeSeries, DataFrame, +from pandas import (Index, Series, DataFrame, isnull, date_range, Timestamp, DatetimeIndex, Int64Index, to_datetime, bdate_range) diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py index 6c534de0a7aaa..4f29b2bf31f83 100644 --- a/pandas/tseries/util.py +++ b/pandas/tseries/util.py @@ -28,7 +28,7 @@ def pivot_annual(series, freq=None): Parameters ---------- - series : TimeSeries + series : Series freq : string or None, default None Returns
xref #6581 remove `colSpace` remove auto time series broadcasting, xref #2304 deprecate `TimeSeries` (forgot was not actually deprecated)
https://api.github.com/repos/pandas-dev/pandas/pulls/10890
2015-08-22T21:50:42Z
2015-08-24T18:22:38Z
2015-08-24T18:22:38Z
2015-08-24T18:22:38Z
BUG: encoding of categoricals in hdf serialization
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 9049d8de550d0..c18bedd0cf6eb 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -818,7 +818,7 @@ Bug Fixes - Bug in ``read_csv`` when using the ``nrows`` or ``chunksize`` parameters if file contains only a header line (:issue:`9535`) - +- Bug in serialization of ``category`` types in HDF5 in presence of alternate encodings. (:issue:`10366`) - Bug in ``pd.DataFrame`` when constructing an empty DataFrame with a string dtype (:issue:`9428`) - Bug in ``pd.unique`` for arrays with the ``datetime64`` or ``timedelta64`` dtype that meant an array with object dtype was returned instead the original dtype (:issue:`9431`) - Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index dd02157e201d5..ea0a59ce2ab31 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3039,7 +3039,8 @@ def write_metadata(self, key, values): """ values = Series(values) - self.parent.put(self._get_metadata_path(key), values, format='table') + self.parent.put(self._get_metadata_path(key), values, format='table', + encoding=self.encoding, nan_rep=self.nan_rep) def read_metadata(self, key): """ return the meta data array for this key """ @@ -4389,11 +4390,23 @@ def _unconvert_index_legacy(data, kind, legacy=False, encoding=None): def _convert_string_array(data, encoding, itemsize=None): + """ + we take a string-like that is object dtype and coerce to a fixed size string type + + Parameters + ---------- + data : a numpy array of object dtype + encoding : None or string-encoding + itemsize : integer, optional, defaults to the max length of the strings + + Returns + ------- + data in a fixed-length string dtype, encoded to bytes if needed + """ # encode if needed if encoding is not None and len(data): - f = np.vectorize(lambda x: 
x.encode(encoding), otypes=[np.object]) - data = f(data) + data = Series(data.ravel()).str.encode(encoding).values.reshape(data.shape) # create the sized dtype if itemsize is None: @@ -4403,7 +4416,20 @@ def _convert_string_array(data, encoding, itemsize=None): return data def _unconvert_string_array(data, nan_rep=None, encoding=None): - """ deserialize a string array, possibly decoding """ + """ + inverse of _convert_string_array + + Parameters + ---------- + data : fixed length string dtyped array + nan_rep : the storage repr of NaN, optional + encoding : the encoding of the data, optional + + Returns + ------- + an object array of the decoded data + + """ shape = data.shape data = np.asarray(data.ravel(), dtype=object) @@ -4412,16 +4438,16 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): encoding = _ensure_encoding(encoding) if encoding is not None and len(data): - try: - itemsize = lib.max_len_string_array(com._ensure_object(data.ravel())) - if compat.PY3: - dtype = "U{0}".format(itemsize) - else: - dtype = "S{0}".format(itemsize) + itemsize = lib.max_len_string_array(com._ensure_object(data)) + if compat.PY3: + dtype = "U{0}".format(itemsize) + else: + dtype = "S{0}".format(itemsize) + + if isinstance(data[0], compat.binary_type): + data = Series(data).str.decode(encoding).values + else: data = data.astype(dtype, copy=False).astype(object, copy=False) - except (Exception) as e: - f = np.vectorize(lambda x: x.decode(encoding), otypes=[np.object]) - data = f(data) if nan_rep is None: nan_rep = 'nan' diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 3a128fa3f247d..b4f1e6a429198 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -930,6 +930,51 @@ def test_encoding(self): result = store.select('df',Term('columns=A',encoding='ascii')) tm.assert_frame_equal(result,expected) + def test_latin_encoding(self): + + if compat.PY2: + self.assertRaisesRegexp(TypeError, '\[unicode\] is 
not implemented as a table column') + return + + values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'], + [b'E\xc9, 17', b'a', b'b', b'c'], + [b'EE, 17', b'', b'a', b'b', b'c'], + [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'], + [b'', b'a', b'b', b'c'], + [b'\xf8\xfc', b'a', b'b', b'c'], + [b'A\xf8\xfc', b'', b'a', b'b', b'c'], + [np.nan, b'', b'b', b'c'], + [b'A\xf8\xfc', np.nan, b'', b'b', b'c']] + + def _try_decode(x, encoding='latin-1'): + try: + return x.decode(encoding) + except AttributeError: + return x + # not sure how to remove latin-1 from code in python 2 and 3 + values = [[_try_decode(x) for x in y] for y in values] + + examples = [] + for dtype in ['category', object]: + for val in values: + examples.append(pandas.Series(val, dtype=dtype)) + + def roundtrip(s, key='data', encoding='latin-1', nan_rep=''): + with ensure_clean_path(self.path) as store: + s.to_hdf(store, key, format='table', encoding=encoding, + nan_rep=nan_rep) + retr = read_hdf(store, key) + s_nan = s.replace(nan_rep, np.nan) + assert_series_equal(s_nan, retr) + + for s in examples: + roundtrip(s) + + # fails: + # for x in examples: + # roundtrip(s, nan_rep=b'\xf8\xfc') + + def test_append_some_nans(self): with ensure_clean_store(self.path) as store:
closes #10366 replaces #10454
https://api.github.com/repos/pandas-dev/pandas/pulls/10889
2015-08-22T20:20:30Z
2015-08-28T02:29:04Z
2015-08-28T02:29:03Z
2015-08-28T02:29:10Z
BUG: GH10885 where an edge case in date_range produces an extra point
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index d30b7875e44b7..cc8f135eb62b0 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -781,6 +781,7 @@ Bug Fixes - Bug in ``DatetimeIndex.take`` and ``TimedeltaIndex.take`` may not raise ``IndexError`` against invalid index (:issue:`10295`) - Bug in ``Series([np.nan]).astype('M8[ms]')``, which now returns ``Series([pd.NaT])`` (:issue:`10747`) - Bug in ``PeriodIndex.order`` reset freq (:issue:`10295`) +- Bug in ``date_range`` when ``freq`` divides ``end`` as nanos (:issue:`10885`) - Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`) - Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`) - Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 0525a29ef3fd0..c6c66a62b86b5 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1,4 +1,5 @@ # pylint: disable=E1101 +from __future__ import division import operator import warnings from datetime import time, datetime @@ -1793,8 +1794,9 @@ def _generate_regular_range(start, end, periods, offset): stride = offset.nanos if periods is None: b = Timestamp(start).value - e = Timestamp(end).value - e += stride - e % stride + # cannot just use e = Timestamp(end) + 1 because arange breaks when + # stride is too large, see GH10887 + e = b + (Timestamp(end).value - b)//stride * stride + stride//2 # end.tz == start.tz by this point due to _generate implementation tz = start.tz elif start is not None: diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 86e0f7162c545..42136c3433977 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -490,6 +490,18 @@ def 
test_years_only(self): self.assertEqual(dr[0], datetime(2014, 1, 31)) self.assertEqual(dr[-1], datetime(2014, 12, 31)) + def test_freq_divides_end_in_nanos(self): + # GH 10885 + result_1 = date_range('2005-01-12 10:00', '2005-01-12 16:00', + freq='345min') + result_2 = date_range('2005-01-13 10:00', '2005-01-13 16:00', + freq='345min') + expected_1 = DatetimeIndex(['2005-01-12 10:00:00', '2005-01-12 15:45:00'], + dtype='datetime64[ns]', freq='345T', tz=None) + expected_2 = DatetimeIndex(['2005-01-13 10:00:00', '2005-01-13 15:45:00'], + dtype='datetime64[ns]', freq='345T', tz=None) + self.assertTrue(result_1.equals(expected_1)) + self.assertTrue(result_2.equals(expected_2)) class TestCustomDateRange(tm.TestCase):
closes #10885
https://api.github.com/repos/pandas-dev/pandas/pulls/10887
2015-08-22T14:48:11Z
2015-08-24T11:37:59Z
2015-08-24T11:37:58Z
2015-08-24T13:59:27Z
BUG: 10720 - Test error message enhancement
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 6b4bde588469e..7379fc7ff9aa1 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -675,11 +675,19 @@ Performance Improvements Bug Fixes ~~~~~~~~~ - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) +======= + + +- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) - Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`) - Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`) +<<<<<<< HEAD +- Bug in ``pd.Series.interpolate`` when setting no order value on ``Series.interpolate`` this needs to be at least 1. (:issue:`10633`) and (:issue:`10800`) +======= - Bug in ``pd.Series.interpolate`` with invalid ``order`` keyword values. (:issue:`10633`) +>>>>>>> upstream/master - Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`) - Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`) - Bug in ``DataFrame.reset_index`` when index contains `NaT`. 
(:issue:`10388`) diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py index 86c5a9e0d7f19..0ec3575be04f6 100644 --- a/pandas/io/tests/generate_legacy_storage_files.py +++ b/pandas/io/tests/generate_legacy_storage_files.py @@ -83,9 +83,20 @@ def create_data(): index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=['one', 'two'])), dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']), +<<<<<<< HEAD +<<<<<<< HEAD cat=Series(Categorical(['foo', 'bar', 'baz']))) if LooseVersion(pandas.__version__) >= '0.17.0': series['period'] = Series([Period('2000Q1')] * 5) +======= + cat=Series(Categorical(['foo', 'bar', 'baz'])), + per=Series([Period('2000Q1')] * 5)) +>>>>>>> 0525684... ENH: pickle support for Period #10439 +======= + cat=Series(Categorical(['foo', 'bar', 'baz']))) + if LooseVersion(pandas.__version__) >= '0.17.0': + series['period'] = Series([Period('2000Q1')] * 5) +>>>>>>> aa04812... 
update legacy_storage for pickles mixed_dup_df = DataFrame(data) mixed_dup_df.columns = list("ABCDA") diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index cfee33da5d913..07894b2c45825 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -799,7 +799,15 @@ def test_nan_interpolate(self): tm._skip_if_no_scipy() result = s.interpolate(method='polynomial', order=1) assert_series_equal(result, expected) - + + # GH #10633 + def test_interpolate_spline(self): + np.random.seed(1) + t = pd.Series(np.arange(10)**2) + t[np.random.randint(0,9,3)] = np.nan + with tm.assertRaises(ValueError): + t.interpolate(method='spline', order=0) + def test_nan_irregular_index(self): s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) result = s.interpolate() diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index eb5c6759bfa45..3a69670f43fb7 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2537,7 +2537,17 @@ def test_searchsorted(self): def test_round_trip(self): +<<<<<<< HEAD +<<<<<<< HEAD p = Period('2000Q1') +======= + import pickle + p = Period('2000Q1') + +>>>>>>> 0525684... ENH: pickle support for Period #10439 +======= + p = Period('2000Q1') +>>>>>>> aa04812... 
update legacy_storage for pickles new_p = self.round_trip_pickle(p) self.assertEqual(new_p, p) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index e02973136863d..1056554dd0623 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -923,8 +923,8 @@ def test_to_datetime_with_apply(self): assert_series_equal(result, expected) td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3]) - self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y', errors='raise')) - self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) + self.assertRaises(ValueError, 'Unknown string format. You can coerce errors to NaT by passing coerce', lambda : pd.to_datetime(td,format='%b %y', errors='raise')) + self.assertRaises(ValueError, 'Unknown string format. You can coerce errors to NaT by passing coerce',lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) expected = pd.to_datetime(td, format='%b %y', errors='coerce') result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
Hi all. This might take a bit more work to get the error messages right, but it's a start. Let me know what you think.
https://api.github.com/repos/pandas-dev/pandas/pulls/10884
2015-08-22T09:43:52Z
2015-08-22T13:00:15Z
null
2015-08-22T13:00:15Z
ERR: better error reporting for failing parsing in timedelta/timeseries #10720
diff --git a/pandas/core/common.py b/pandas/core/common.py index 0d74a4449a5f5..090fcbdd139b5 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1718,7 +1718,8 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': - # GH #10633 + + # GH 10633 if not order: raise ValueError("order needs to be specified and greater than 0") terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index cfee33da5d913..762f0bb2064e1 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -799,6 +799,14 @@ def test_nan_interpolate(self): tm._skip_if_no_scipy() result = s.interpolate(method='polynomial', order=1) assert_series_equal(result, expected) + # GH #10633: first attempt + def test_interpolate_spline(self): + np.random.seed(1) + s = pd.Series(np.arange(10)**2) + s[np.random.randint(0,9,3)] = np.nan + with tm.assertRaises(ValueError): + s.interpolate(method='spline') + def test_nan_irregular_index(self): s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index e02973136863d..28bbb73e1dc03 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -923,8 +923,8 @@ def test_to_datetime_with_apply(self): assert_series_equal(result, expected) td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3]) - self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y', errors='raise')) - self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) + self.assertRaisesRegexp(ValueError, 'Unknown string format. You can coerce errors to NaT by passing coerce', lambda : pd.to_datetime(td,format='%b %y', errors='raise')) + self.assertRaisesRegexp(ValueError, 'Unknown string format. 
You can coerce errors to NaT by passing coerce',lambda : td.apply(pd.to_datetime, format='%b %y', errors='raise')) expected = pd.to_datetime(td, format='%b %y', errors='coerce') result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
closes #10720
https://api.github.com/repos/pandas-dev/pandas/pulls/10883
2015-08-22T09:29:37Z
2015-08-30T09:05:26Z
null
2015-08-30T09:05:33Z
ERR/DEPR: Fix quantile error message / remove percentile_width
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 6b4bde588469e..f6ce0a0c4d975 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -654,6 +654,7 @@ Removal of prior version deprecations/changes - Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`) - Removal of ``na_last`` parameters from ``Series.order()`` and ``Series.sort()``, in favor of ``na_position``, xref (:issue:`5231`) +- Remove of ``percentile_width`` from ``.describe()``, in favor of ``percentiles``. (:issue:`7088`) .. _whatsnew_0170.performance: @@ -678,6 +679,7 @@ Bug Fixes - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) - Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`) +- Bug in ``Series.quantile`` dropping name (:issue:`10881`) - Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`) - Bug in ``pd.Series.interpolate`` with invalid ``order`` keyword values. 
(:issue:`10633`) - Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index da350a726c255..1f222f9f99cbe 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4684,6 +4684,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True): 0.1 1.3 3.7 0.5 2.5 55.0 """ + self._check_percentile(q) per = np.asarray(q) * 100 if not com.is_list_like(per): @@ -4718,7 +4719,9 @@ def f(arr, per): quantiles = [[f(vals, x) for x in per] for (_, vals) in data.iteritems()] - result = DataFrame(quantiles, index=data._info_axis, columns=q).T + + result = self._constructor(quantiles, index=data._info_axis, + columns=q).T if len(is_dt_col) > 0: result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp) if squeeze: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ae4c09fba5469..bc49e9dd79e6a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2989,7 +2989,7 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial' is passed to ``scipy.interpolate.interp1d``. Both 'polynomial' and 'spline' - require that you also specify an `order` (int), + require that you also specify an `order` (int), e.g. df.interpolate(method='polynomial', order=4). These use the actual numerical values of the index. * 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all @@ -4096,11 +4096,6 @@ def abs(self): Parameters ---------- - percentile_width : float, deprecated - The ``percentile_width`` argument will be removed in a future - version. Use ``percentiles`` instead. - width of the desired uncertainty interval, default is 50, - which corresponds to lower=25, upper=75 percentiles : array-like, optional The percentiles to include in the output. Should all be in the interval [0, 1]. 
By default `percentiles` is @@ -4149,36 +4144,17 @@ def abs(self): """ @Appender(_shared_docs['describe'] % _shared_doc_kwargs) - def describe(self, percentile_width=None, percentiles=None, include=None, exclude=None ): + def describe(self, percentiles=None, include=None, exclude=None ): if self.ndim >= 3: msg = "describe is not implemented on on Panel or PanelND objects." raise NotImplementedError(msg) - if percentile_width is not None and percentiles is not None: - msg = "Cannot specify both 'percentile_width' and 'percentiles.'" - raise ValueError(msg) if percentiles is not None: # get them all to be in [0, 1] + self._check_percentile(percentiles) percentiles = np.asarray(percentiles) - if (percentiles > 1).any(): - percentiles = percentiles / 100.0 - msg = ("percentiles should all be in the interval [0, 1]. " - "Try {0} instead.") - raise ValueError(msg.format(list(percentiles))) else: - # only warn if they change the default - if percentile_width is not None: - do_warn = True - else: - do_warn = False - percentile_width = percentile_width or 50 - lb = .5 * (1. - percentile_width / 100.) - ub = 1. - lb - percentiles = np.array([lb, 0.5, ub]) - if do_warn: - msg = ("The `percentile_width` keyword is deprecated. " - "Use percentiles={0} instead".format(list(percentiles))) - warnings.warn(msg, FutureWarning) + percentiles = np.array([0.25, 0.5, 0.75]) # median should always be included if (percentiles != 0.5).all(): # median isn't included @@ -4256,6 +4232,20 @@ def describe_1d(data, percentiles): d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1) return d + def _check_percentile(self, q): + """ Validate percentiles. Used by describe and quantile """ + + msg = ("percentiles should all be in the interval [0, 1]. 
" + "Try {0} instead.") + q = np.asarray(q) + if q.ndim == 0: + if not 0 <= q <= 1: + raise ValueError(msg.format(q / 100.0)) + else: + if not all(0 <= qs <= 1 for qs in q): + raise ValueError(msg.format(q / 100.0)) + return q + _shared_docs['pct_change'] = """ Percent change over given number of periods. diff --git a/pandas/core/series.py b/pandas/core/series.py index c788c15cdc398..8768d0e139e7b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1266,11 +1266,12 @@ def quantile(self, q=0.5): dtype: float64 """ valid = self.dropna() + self._check_percentile(q) def multi(values, qs): if com.is_list_like(qs): - return Series([_quantile(values, x*100) - for x in qs], index=qs) + values = [_quantile(values, x*100) for x in qs] + return self._constructor(values, index=qs, name=self.name) else: return _quantile(values, qs*100) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index c790c92280208..022594e296c2a 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -12837,6 +12837,12 @@ def test_quantile_datetime(self): index=[0.5], columns=[0, 1]) assert_frame_equal(result, expected) + def test_quantile_invalid(self): + msg = 'percentiles should all be in the interval \\[0, 1\\]' + for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: + with tm.assertRaisesRegexp(ValueError, msg): + self.tsframe.quantile(invalid) + def test_cumsum(self): self.tsframe.ix[5:10, 0] = nan self.tsframe.ix[10:15, 1] = nan diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index cfee33da5d913..7ed8799dd6ded 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -909,17 +909,6 @@ def test_describe(self): _ = self.series.describe() _ = self.ts.describe() - def test_describe_percentiles(self): - with tm.assert_produces_warning(FutureWarning): - desc = self.series.describe(percentile_width=50) - assert '75%' in desc.index - assert '25%' in desc.index - - with tm.assert_produces_warning(FutureWarning): - 
desc = self.series.describe(percentile_width=95) - assert '97.5%' in desc.index - assert '2.5%' in desc.index - def test_describe_objects(self): s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a']) result = s.describe() @@ -1181,27 +1170,19 @@ def test_describe(self): desc = tm.makeMixedDataFrame().describe() desc = tm.makeTimeDataFrame().describe() - def test_describe_percentiles(self): - with tm.assert_produces_warning(FutureWarning): - desc = tm.makeDataFrame().describe(percentile_width=50) - assert '75%' in desc.index - assert '25%' in desc.index - - with tm.assert_produces_warning(FutureWarning): - desc = tm.makeDataFrame().describe(percentile_width=95) - assert '97.5%' in desc.index - assert '2.5%' in desc.index - - def test_describe_quantiles_both(self): - with tm.assertRaises(ValueError): - tm.makeDataFrame().describe(percentile_width=50, - percentiles=[25, 75]) - def test_describe_percentiles_percent_or_raw(self): + msg = 'percentiles should all be in the interval \\[0, 1\\]' + df = tm.makeDataFrame() - with tm.assertRaises(ValueError): + with tm.assertRaisesRegexp(ValueError, msg): df.describe(percentiles=[10, 50, 100]) + with tm.assertRaisesRegexp(ValueError, msg): + df.describe(percentiles=[2]) + + with tm.assertRaisesRegexp(ValueError, msg): + df.describe(percentiles=[-2]) + def test_describe_percentiles_equivalence(self): df = tm.makeDataFrame() d1 = df.describe() @@ -1213,16 +1194,29 @@ def test_describe_percentiles_insert_median(self): d1 = df.describe(percentiles=[.25, .75]) d2 = df.describe(percentiles=[.25, .5, .75]) assert_frame_equal(d1, d2) + self.assertTrue('25%' in d1.index) + self.assertTrue('75%' in d2.index) # none above d1 = df.describe(percentiles=[.25, .45]) d2 = df.describe(percentiles=[.25, .45, .5]) assert_frame_equal(d1, d2) + self.assertTrue('25%' in d1.index) + self.assertTrue('45%' in d2.index) # none below d1 = df.describe(percentiles=[.75, 1]) d2 = df.describe(percentiles=[.5, .75, 1]) assert_frame_equal(d1, 
d2) + self.assertTrue('75%' in d1.index) + self.assertTrue('100%' in d2.index) + + # edge + d1 = df.describe(percentiles=[0, 1]) + d2 = df.describe(percentiles=[0, .5, 1]) + assert_frame_equal(d1, d2) + self.assertTrue('0%' in d1.index) + self.assertTrue('100%' in d2.index) def test_describe_no_numeric(self): df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8, diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 4cf52b75fb7fe..3567c98e71bce 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2854,6 +2854,11 @@ def test_quantile(self): result = Series([np.timedelta64('NaT')]).sum() self.assertTrue(result is pd.NaT) + msg = 'percentiles should all be in the interval \\[0, 1\\]' + for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: + with tm.assertRaisesRegexp(ValueError, msg): + self.ts.quantile(invalid) + def test_quantile_multi(self): from numpy import percentile @@ -2861,14 +2866,20 @@ def test_quantile_multi(self): result = self.ts.quantile(qs) expected = pd.Series([percentile(self.ts.valid(), 10), percentile(self.ts.valid(), 90)], - index=qs) + index=qs, name=self.ts.name) assert_series_equal(result, expected) dts = self.ts.index.to_series() + dts.name = 'xxx' result = dts.quantile((.2, .2)) - assert_series_equal(result, Series([Timestamp('2000-01-10 19:12:00'), - Timestamp('2000-01-10 19:12:00')], - index=[.2, .2])) + expected = Series([Timestamp('2000-01-10 19:12:00'), + Timestamp('2000-01-10 19:12:00')], + index=[.2, .2], name='xxx') + assert_series_equal(result, expected) + + result = self.ts.quantile([]) + expected = pd.Series([], name=self.ts.name) + assert_series_equal(result, expected) def test_append(self): appendedSeries = self.series.append(self.objSeries)
Currently, error is raised from `numpy` and incorrect for `pandas` ``` s = pd.Series([1, 2, 3]) s.quantile(2) # File ".../numpy/lib/function_base.py", line 3078, in _percentile # raise ValueError("Percentiles must be in the range [0,100]") # ValueError: Percentiles must be in the range [0,100] ``` `describe` using `percentiles` option outputs better error message, but not check lower limit. ``` # OK s.describe(percentiles=[2]) # Traceback (most recent call last): # File "pandas/core/generic.py", line 4167, in describe # raise ValueError(msg.format(list(percentiles))) # ValueError: percentiles should all be in the interval [0, 1]. Try [0.02] instead. # NG s.describe(percentiles=[-2]) # File ".../numpy/lib/function_base.py", line 3078, in _percentile # raise ValueError("Percentiles must be in the range [0,100]") # ValueError: Percentiles must be in the range [0,100] ``` This PR fix both error messages. Also, remove deprecated `percentile_width` option in `describe`.
https://api.github.com/repos/pandas-dev/pandas/pulls/10881
2015-08-21T23:32:35Z
2015-08-23T13:28:33Z
2015-08-23T13:28:33Z
2015-08-23T21:31:56Z
BUG: pd.Series.interpolate(method='spline') Errort Msg, #10633
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 77130343c9d57..0bb2ba3f9b343 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -674,11 +674,18 @@ Performance Improvements Bug Fixes ~~~~~~~~~ +<<<<<<< HEAD - Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) +======= + + +- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`) +>>>>>>> updating examples and the bug fix - Bug in ``DataFrame.apply`` when function returns categorical series. (:issue:`9573`) - Bug in ``to_datetime`` with invalid dates and formats supplied (:issue:`10154`) - Bug in ``Index.drop_duplicates`` dropping name(s) (:issue:`10115`) - Bug in ``pd.Series`` when setting a value on an empty ``Series`` whose index has a frequency. (:issue:`10193`) +- Bug in ``pd.Series.interpolate`` when setting no order value on ``Series.interpolate`` this needs to be at least 1. (:issue:`10633`) and (:issue:`10800`) - Bug in ``DataFrame.plot`` raises ``ValueError`` when color name is specified by multiple characters (:issue:`10387`) - Bug in ``Index`` construction with a mixed list of tuples (:issue:`10697`) - Bug in ``DataFrame.reset_index`` when index contains `NaT`. 
(:issue:`10388`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 53cd5ca9aa78b..0d74a4449a5f5 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1718,6 +1718,9 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': + # GH #10633 + if not order: + raise ValueError("order needs to be specified and greater than 0") terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index c1f6045c61d54..b001386466213 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -799,6 +799,40 @@ def test_nan_interpolate(self): tm._skip_if_no_scipy() result = s.interpolate(method='polynomial', order=1) assert_series_equal(result, expected) +<<<<<<< HEAD +<<<<<<< HEAD + + # GH #10633 +======= + # GH #10633: first attempt +>>>>>>> d992cd0... Updated test +======= + + # GH #10633 +>>>>>>> 5a5407e... updating examples and the bug fix + def test_interpolate_spline(self): + np.random.seed(1) + s = pd.Series(np.arange(10)**2) + s[np.random.randint(0,9,3)] = np.nan + with tm.assertRaises(ValueError): + s.interpolate(method='spline') +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 027a5e7... Updating based on feedback + + def test_interpolate_spline(self): + np.random.seed(1) + t = pd.Series(np.arange(10)**2) + t[np.random.randint(0,9,3)] = np.nan + with tm.assertRaises(ValueError): + t.interpolate(method='spline', order=0) +<<<<<<< HEAD +======= +>>>>>>> d992cd0... Updated test +======= +>>>>>>> 027a5e7... 
Updating based on feedback + def test_nan_irregular_index(self): s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) @@ -1398,6 +1432,23 @@ def test_no_order(self): s.interpolate(method='polynomial') with tm.assertRaises(ValueError): s.interpolate(method='spline') + +<<<<<<< HEAD +<<<<<<< HEAD +======= + # GH #10633 + def test_order_spline_interpolation(self): + tm._skip_if_no_scipy() + np.random.seed(1) + s = Series(np.arange(10)**2) + s[np.random.randint(0,9,3)] = np.nan + result1 = s.interpolate(method='spline', order=1) + expected1 = s.interpolate(method='spline', order=1) + assert_series_equal(result1, expected1) +>>>>>>> 5a5407e... updating examples and the bug fix +======= +>>>>>>> 027a5e7... Updating based on feedback + def test_spline(self): tm._skip_if_no_scipy()
closed #10633 This might be able to be merged quicker than the other one.
https://api.github.com/repos/pandas-dev/pandas/pulls/10880
2015-08-21T16:18:12Z
2015-08-21T20:27:39Z
null
2015-08-21T20:27:39Z
BUG: DataFrame.plot may raise IndexError / show unnessesary minor ticklabels
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 9f2ec43cb2ae3..95209ceb5f559 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -824,3 +824,5 @@ Bug Fixes - Bug in ``DataFrame.where`` when handling Series slicing (:issue:`10218`, :issue:`9558`) - Bug where ``pd.read_gbq`` throws ``ValueError`` when Bigquery returns zero rows (:issue:`10273`) - Bug in ``to_json`` which was causing segmentation fault when serializing 0-rank ndarray (:issue:`9576`) +- Bug in plotting functions may raise ``IndexError`` when plotted on ``GridSpec`` (:issue:`10819`) +- Bug in plot result may show unnecessary minor ticklabels (:issue:`10657`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 1907cbd78da1d..71fd85bde1235 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -3210,6 +3210,7 @@ def _check_errorbar_color(containers, expected, has_err='has_xerr'): self._check_has_errorbars(ax, xerr=0, yerr=1) _check_errorbar_color(ax.containers, 'green', has_err='has_yerr') + @slow def test_sharex_and_ax(self): # https://github.com/pydata/pandas/issues/9737 # using gridspec, the axis in fig.get_axis() are sorted differently than pandas expected @@ -3218,68 +3219,96 @@ def test_sharex_and_ax(self): plt.close('all') gs, axes = _generate_4_axes_via_gridspec() - df = DataFrame({"a":[1,2,3,4,5,6], "b":[1,2,3,4,5,6]}) + df = DataFrame({"a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6]}) + + def _check(axes): + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[0], axes[2]]: + self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[1], axes[3]]: + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), 
visible=True) for ax in axes: df.plot(x="a", y="b", title="title", ax=ax, sharex=True) - gs.tight_layout(plt.gcf()) - for ax in plt.gcf().get_axes(): - for label in ax.get_xticklabels(): - self.assertEqual(label.get_visible(), ax.is_last_row(), - "x ticklabel has wrong visiblity") - self.assertEqual(ax.xaxis.get_label().get_visible(), ax.is_last_row(), - "x label has wrong visiblity") + _check(axes) + tm.close() + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + _check(axes) + tm.close() - plt.close('all') gs, axes = _generate_4_axes_via_gridspec() # without sharex, no labels should be touched! for ax in axes: df.plot(x="a", y="b", title="title", ax=ax) gs.tight_layout(plt.gcf()) - for ax in plt.gcf().get_axes(): - for label in ax.get_xticklabels(): - self.assertTrue(label.get_visible(), "x ticklabel is invisible but shouldn't") - self.assertTrue(ax.xaxis.get_label().get_visible(), - "x label is invisible but shouldn't") - + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_yticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + @slow def test_sharey_and_ax(self): # https://github.com/pydata/pandas/issues/9737 # using gridspec, the axis in fig.get_axis() are sorted differently than pandas expected # them, so make sure that only the right ones are removed import matplotlib.pyplot as plt - plt.close('all') gs, axes = _generate_4_axes_via_gridspec() - df = DataFrame({"a":[1,2,3,4,5,6], "b":[1,2,3,4,5,6]}) + df = DataFrame({"a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6]}) + + def _check(axes): + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) 
+ for ax in [axes[0], axes[1]]: + self._check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[2], axes[3]]: + self._check_visible(ax.get_yticklabels(), visible=False) for ax in axes: df.plot(x="a", y="b", title="title", ax=ax, sharey=True) - gs.tight_layout(plt.gcf()) - for ax in plt.gcf().get_axes(): - for label in ax.get_yticklabels(): - self.assertEqual(label.get_visible(), ax.is_first_col(), - "y ticklabel has wrong visiblity") - self.assertEqual(ax.yaxis.get_label().get_visible(), ax.is_first_col(), - "y label has wrong visiblity") + _check(axes) + tm.close() - plt.close('all') gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharey=True) + + gs.tight_layout(plt.gcf()) + _check(axes) + tm.close() + gs, axes = _generate_4_axes_via_gridspec() # without sharex, no labels should be touched! for ax in axes: df.plot(x="a", y="b", title="title", ax=ax) gs.tight_layout(plt.gcf()) - for ax in plt.gcf().get_axes(): - for label in ax.get_yticklabels(): - self.assertTrue(label.get_visible(), "y ticklabel is invisible but shouldn't") - self.assertTrue(ax.yaxis.get_label().get_visible(), - "y label is invisible but shouldn't") + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_yticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) def test_memory_leak(self): """ Check that every plot type gets properly collected. 
""" @@ -3311,6 +3340,172 @@ def test_memory_leak(self): # need to actually access something to get an error results[key].lines + @slow + def test_df_subplots_patterns_minorticks(self): + # GH 10657 + import matplotlib.pyplot as plt + + df = DataFrame(np.random.randn(10, 2), + index=date_range('1/1/2000', periods=10), + columns=list('AB')) + + # shared subplots + fig, axes = plt.subplots(2, 1, sharex=True) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + self._check_visible(axes[0].get_xticklabels(), visible=False) + self._check_visible(axes[0].get_xticklabels(minor=True), visible=False) + self._check_visible(axes[1].get_xticklabels(), visible=True) + self._check_visible(axes[1].get_xticklabels(minor=True), visible=True) + tm.close() + + fig, axes = plt.subplots(2, 1) + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + self._check_visible(axes[0].get_xticklabels(), visible=False) + self._check_visible(axes[0].get_xticklabels(minor=True), visible=False) + self._check_visible(axes[1].get_xticklabels(), visible=True) + self._check_visible(axes[1].get_xticklabels(minor=True), visible=True) + tm.close() + + # not shared + fig, axes = plt.subplots(2, 1) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + self.assertEqual(len(ax.lines), 1) + self._check_visible(ax.get_yticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + + @slow + def test_df_gridspec_patterns(self): + # GH 10819 + import matplotlib.pyplot as plt + import matplotlib.gridspec as gridspec + + ts = Series(np.random.randn(10), + 
index=date_range('1/1/2000', periods=10)) + + df = DataFrame(np.random.randn(10, 2), index=ts.index, + columns=list('AB')) + + def _get_vertical_grid(): + gs = gridspec.GridSpec(3, 1) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :]) + ax2 = fig.add_subplot(gs[2, :]) + return ax1, ax2 + + def _get_horizontal_grid(): + gs = gridspec.GridSpec(1, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:, :2]) + ax2 = fig.add_subplot(gs[:, 2]) + return ax1, ax2 + + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + ax1 = ts.plot(ax=ax1) + self.assertEqual(len(ax1.lines), 1) + ax2 = df.plot(ax=ax2) + self.assertEqual(len(ax2.lines), 2) + for ax in [ax1, ax2]: + self._check_visible(ax.get_yticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + + # subplots=True + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + axes = df.plot(subplots=True, ax=[ax1, ax2]) + self.assertEqual(len(ax1.lines), 1) + self.assertEqual(len(ax2.lines), 1) + for ax in axes: + self._check_visible(ax.get_yticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + + # vertical / subplots / sharex=True / sharey=True + ax1, ax2 = _get_vertical_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], + sharex=True, sharey=True) + self.assertEqual(len(axes[0].lines), 1) + self.assertEqual(len(axes[1].lines), 1) + for ax in [ax1, ax2]: + # yaxis are visible because there is only one column + self._check_visible(ax.get_yticklabels(), visible=True) + # xaxis of axes0 (top) are hidden + self._check_visible(axes[0].get_xticklabels(), visible=False) + self._check_visible(axes[0].get_xticklabels(minor=True), visible=False) + self._check_visible(axes[1].get_xticklabels(), visible=True) + 
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True) + tm.close() + + # horizontal / subplots / sharex=True / sharey=True + ax1, ax2 = _get_horizontal_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], + sharex=True, sharey=True) + self.assertEqual(len(axes[0].lines), 1) + self.assertEqual(len(axes[1].lines), 1) + self._check_visible(axes[0].get_yticklabels(), visible=True) + # yaxis of axes1 (right) are hidden + self._check_visible(axes[1].get_yticklabels(), visible=False) + for ax in [ax1, ax2]: + # xaxis are visible because there is only one column + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + + # boxed + def _get_boxed_grid(): + gs = gridspec.GridSpec(3,3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :2]) + ax2 = fig.add_subplot(gs[:2, 2]) + ax3 = fig.add_subplot(gs[2, :2]) + ax4 = fig.add_subplot(gs[2, 2]) + return ax1, ax2, ax3, ax4 + + axes = _get_boxed_grid() + df = DataFrame(np.random.randn(10, 4), + index=ts.index, columns=list('ABCD')) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + self.assertEqual(len(ax.lines), 1) + # axis are visible because these are not shared + self._check_visible(ax.get_yticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + + # subplots / sharex=True / sharey=True + axes = _get_boxed_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True) + for ax in axes: + self.assertEqual(len(ax.lines), 1) + for ax in [axes[0], axes[2]]: # left column + self._check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[1], axes[3]]: # right column + self._check_visible(ax.get_yticklabels(), visible=False) + for ax in [axes[0], axes[1]]: # top row + 
self._check_visible(ax.get_xticklabels(), visible=False) + self._check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[2], axes[3]]: # bottom row + self._check_visible(ax.get_xticklabels(), visible=True) + self._check_visible(ax.get_xticklabels(minor=True), visible=True) + tm.close() + @slow def test_df_grid_settings(self): # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 1f799c23c5396..041c747286c51 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -3278,63 +3278,64 @@ def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True, return fig, axes -def _remove_xlabels_from_axis(ax): - for label in ax.get_xticklabels(): - label.set_visible(False) + +def _remove_labels_from_axis(axis): + for t in axis.get_majorticklabels(): + t.set_visible(False) + try: # set_visible will not be effective if # minor axis has NullLocator and NullFormattor (default) import matplotlib.ticker as ticker - - if isinstance(ax.xaxis.get_minor_locator(), ticker.NullLocator): - ax.xaxis.set_minor_locator(ticker.AutoLocator()) - if isinstance(ax.xaxis.get_minor_formatter(), ticker.NullFormatter): - ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter('')) - for label in ax.get_xticklabels(minor=True): - label.set_visible(False) + if isinstance(axis.get_minor_locator(), ticker.NullLocator): + axis.set_minor_locator(ticker.AutoLocator()) + if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): + axis.set_minor_formatter(ticker.FormatStrFormatter('')) + for t in axis.get_minorticklabels(): + t.set_visible(False) except Exception: # pragma no cover - pass - ax.xaxis.get_label().set_visible(False) + raise + axis.get_label().set_visible(False) -def _remove_ylables_from_axis(ax): - for label in ax.get_yticklabels(): - label.set_visible(False) - try: - import matplotlib.ticker as ticker - if isinstance(ax.yaxis.get_minor_locator(), 
ticker.NullLocator): - ax.yaxis.set_minor_locator(ticker.AutoLocator()) - if isinstance(ax.yaxis.get_minor_formatter(), ticker.NullFormatter): - ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter('')) - for label in ax.get_yticklabels(minor=True): - label.set_visible(False) - except Exception: # pragma no cover - pass - ax.yaxis.get_label().set_visible(False) def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): if nplots > 1: - # first find out the ax layout, so that we can correctly handle 'gaps" - layout = np.zeros((nrows+1,ncols+1), dtype=np.bool) - for ax in axarr: - layout[ax.rowNum, ax.colNum] = ax.get_visible() - - if sharex and nrows > 1: - for ax in axarr: - # only the last row of subplots should get x labels -> all other off - # layout handles the case that the subplot is the last in the column, - # because below is no subplot/gap. - if not layout[ax.rowNum+1, ax.colNum]: - continue - _remove_xlabels_from_axis(ax) - if sharey and ncols > 1: + if nrows > 1: + try: + # first find out the ax layout, + # so that we can correctly handle 'gaps" + layout = np.zeros((nrows+1,ncols+1), dtype=np.bool) + for ax in axarr: + layout[ax.rowNum, ax.colNum] = ax.get_visible() + + for ax in axarr: + # only the last row of subplots should get x labels -> all + # other off layout handles the case that the subplot is + # the last in the column, because below is no subplot/gap. + if not layout[ax.rowNum+1, ax.colNum]: + continue + if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1: + _remove_labels_from_axis(ax.xaxis) + + except IndexError: + # if gridspec is used, ax.rowNum and ax.colNum may different + # from layout shape. 
in this case, use last_row logic + for ax in axarr: + if ax.is_last_row(): + continue + if sharex or len(ax.get_shared_x_axes().get_siblings(ax)) > 1: + _remove_labels_from_axis(ax.xaxis) + + if ncols > 1: for ax in axarr: # only the first column should get y labels -> set all other to off # as we only have labels in teh first column and we always have a subplot there, # we can skip the layout test if ax.is_first_col(): continue - _remove_ylables_from_axis(ax) + if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1: + _remove_labels_from_axis(ax.yaxis)
Closes #10657. Closes #10819. Both are related to the logic how `pandas` detects axes layout. Use current logic as much to support cases like #7457, and added error handing logic for gridspec. Following is an example notebook how the fix works. - https://gist.github.com/sinhrks/2d29e2e8ef26f2757e92 CC @TomAugspurger @JanSchulz @heelancd @williamsmj
https://api.github.com/repos/pandas-dev/pandas/pulls/10879
2015-08-21T15:54:55Z
2015-09-01T12:06:24Z
2015-09-01T12:06:24Z
2015-09-01T12:16:17Z
BUG: GH10747 where 'timestamp' is not inferred to be datetime column
diff --git a/doc/source/io.rst b/doc/source/io.rst index 2f2c4c7566413..70e7154493ccf 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1484,7 +1484,13 @@ be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') .. note:: - Large integer values may be converted to dates if ``convert_dates=True`` and the data and / or column labels appear 'date-like'. The exact threshold depends on the ``date_unit`` specified. + Large integer values may be converted to dates if ``convert_dates=True`` and the data and / or column labels appear 'date-like'. The exact threshold depends on the ``date_unit`` specified. 'date-like' means that the column label meets one of the following criteria: + + * it ends with ``'_at'`` + * it ends with ``'_time'`` + * it begins with ``'timestamp'`` + * it is ``'modified'`` + * it is ``'date'`` .. warning:: diff --git a/pandas/io/json.py b/pandas/io/json.py index 81a916e058b3d..d6310d81ab87f 100644 --- a/pandas/io/json.py +++ b/pandas/io/json.py @@ -150,7 +150,18 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, Try to convert the axes to the proper dtypes. convert_dates : boolean, default True List of columns to parse for dates; If True, then try to parse - datelike columns default is True + datelike columns default is True; a column label is datelike if + + * it ends with ``'_at'``, + + * it ends with ``'_time'``, + + * it begins with ``'timestamp'``, + + * it is ``'modified'``, or + + * it is ``'date'`` + keep_default_dates : boolean, default True. 
If parsing dates, then parse the default datelike columns numpy : boolean, default False @@ -543,11 +554,13 @@ def is_ok(col): if not isinstance(col, compat.string_types): return False - if (col.endswith('_at') or - col.endswith('_time') or - col.lower() == 'modified' or - col.lower() == 'date' or - col.lower() == 'datetime'): + col_lower = col.lower() + if (col_lower.endswith('_at') or + col_lower.endswith('_time') or + col_lower == 'modified' or + col_lower == 'date' or + col_lower == 'datetime' or + col_lower.startswith('timestamp')): return True return False diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index c577286ceca9a..40cdc8fe8478c 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -1,13 +1,12 @@ # pylint: disable-msg=W0612,E1101 from pandas.compat import range, lrange, StringIO, OrderedDict -from pandas import compat import os import numpy as np -from pandas import Series, DataFrame, DatetimeIndex, Timestamp, CategoricalIndex +from pandas import (Series, DataFrame, DatetimeIndex, Timestamp, CategoricalIndex, + read_json, compat) from datetime import timedelta import pandas as pd -read_json = pd.read_json from pandas.util.testing import (assert_almost_equal, assert_frame_equal, assert_series_equal, network, @@ -574,6 +573,16 @@ def test_convert_dates(self): result = read_json(json, typ='series') assert_series_equal(result, ts) + def test_convert_dates_infer(self): + #GH10747 + infer_words = ['trade_time', 'date', 'datetime', 'sold_at', + 'modified', 'timestamp', 'timestamps'] + for infer_word in infer_words: + data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}] + expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]], columns=['id', infer_word]) + result = read_json(pd.json.dumps(data))[['id', infer_word]] + assert_frame_equal(result, expected) + def test_date_format_frame(self): df = self.tsframe.copy()
addresses second point of #10747
https://api.github.com/repos/pandas-dev/pandas/pulls/10876
2015-08-21T08:33:29Z
2015-08-24T11:36:30Z
2015-08-24T11:36:30Z
2015-08-24T11:36:30Z
PERF: uses bincount instead of hash table in categorical value counts
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 34caef221a340..80b277336df7a 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -8,4 +8,20 @@ def setup(self): self.s = pd.Series((list('aabbcd') * 1000000)).astype('category') def time_concat_categorical(self): - concat([self.s, self.s]) \ No newline at end of file + concat([self.s, self.s]) + + +class categorical_value_counts(object): + goal_time = 1 + + def setup(self): + n = 500000 + np.random.seed(2718281) + arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)] + self.ts = Series(arr).astype('category') + + def time_value_counts(self): + self.ts.value_counts(dropna=False) + + def time_value_counts_dropna(self): + self.ts.value_counts(dropna=True) diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 6b4bde588469e..5df49b1457a7e 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -660,6 +660,7 @@ Removal of prior version deprecations/changes Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Added vbench benchmarks for alternative ExcelWriter engines and reading Excel files (:issue:`7171`) +- Performance improvements in ``Categorical.value_counts`` (:issue:`10804`) - 4x improvement in ``timedelta`` string parsing (:issue:`6755`, :issue:`10426`) - 8x improvement in ``timedelta64`` and ``datetime64`` ops (:issue:`6755`) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index ba2c9314322c3..78c9d264c43a5 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1025,25 +1025,28 @@ def value_counts(self, dropna=True): ------- counts : Series """ - import pandas.hashtable as htable + from numpy import bincount + from pandas.core.common import isnull from pandas.core.series import Series from pandas.core.index import CategoricalIndex - cat = self.dropna() if dropna else self - keys, counts = 
htable.value_count_scalar64(com._ensure_int64(cat._codes), dropna) - result = Series(counts, index=keys) + obj = self.remove_categories([np.nan]) \ + if dropna and isnull(self.categories).any() else self + + code, cat = obj._codes, obj.categories + ncat, mask = len(cat), 0 <= code + ix, clean = np.arange(ncat), mask.all() - ix = np.arange(len(cat.categories), dtype='int64') - if not dropna and -1 in keys: + if dropna or clean: + count = bincount(code if clean else code[mask], minlength=ncat) + else: + count = bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) - result = result.reindex(ix, fill_value=0) - index = (np.append(cat.categories, np.nan) - if not dropna and -1 in keys - else cat.categories) - result.index = CategoricalIndex(index, self.categories, self.ordered) + ix = Categorical(ix, categories=cat, + ordered=obj.ordered, fastpath=True) - return result + return Series(count, index=CategoricalIndex(ix)) def get_values(self): """ Return the values.
closes https://github.com/pydata/pandas/issues/10804 ``` ipython In [1]: np.random.seed(2718281) In [2]: n = 500000 In [3]: u = int(0.1*n) In [4]: arr = ["s%04d" % i for i in np.random.randint(0, u, size=n)] In [5]: ts = pd.Series(arr).astype('category') In [6]: %timeit ts.value_counts() 10 loops, best of 3: 82.7 ms per loop ``` on branch: ``` ipython In [6]: %timeit ts.value_counts() 10 loops, best of 3: 31.3 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10874
2015-08-21T00:40:06Z
2015-08-22T20:03:01Z
2015-08-22T20:03:01Z
2015-08-22T20:28:02Z
CLN: Series.asof uses reindex GH10343
diff --git a/pandas/core/index.py b/pandas/core/index.py index 7b5a6b199bc1b..7d6d044b9d5e2 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1316,22 +1316,6 @@ def asof(self, label): loc = loc.indices(len(self))[-1] return self[loc] - def asof_locs(self, where, mask): - """ - where : array of timestamps - mask : array of booleans where data is not NA - - """ - locs = self.values[mask].searchsorted(where.values, side='right') - - locs = np.where(locs > 0, locs - 1, 0) - result = np.arange(len(self))[mask].take(locs) - - first = mask.argmax() - result[(locs == 0) & (where < self.values[first])] = -1 - - return result - def order(self, return_indexer=False, ascending=True): """ Return sorted copy of Index diff --git a/pandas/core/series.py b/pandas/core/series.py index e43721e52f17e..ea68252b76daa 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2461,6 +2461,10 @@ def asof(self, where): If there is no good value, NaN is returned. + Note that this is really just a convenient shorthand for `Series.reindex`, + and is equivalent to `s.dropna().reindex(where, method='ffill')` for + an array of dates. 
+ Parameters ---------- where : date or array of dates @@ -2476,29 +2480,18 @@ def asof(self, where): if isinstance(where, compat.string_types): where = datetools.to_datetime(where) - values = self.values - if not hasattr(where, '__iter__'): - start = self.index[0] - if isinstance(self.index, PeriodIndex): - where = Period(where, freq=self.index.freq).ordinal - start = start.ordinal - - if where < start: - return np.nan - loc = self.index.searchsorted(where, side='right') - if loc > 0: - loc -= 1 - while isnull(values[loc]) and loc > 0: - loc -= 1 - return values[loc] - - if not isinstance(where, Index): - where = Index(where) - - locs = self.index.asof_locs(where, notnull(values)) - new_values = com.take_1d(values, locs) - return self._constructor(new_values, index=where).__finalize__(self) + is_scalar = True + where = [where] + else: + is_scalar = False + + ret = self.dropna().reindex(where, method='ffill') + + if is_scalar: + return ret.iloc[0] + else: + return ret def to_timestamp(self, freq=None, how='start', copy=True): """ diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 56d7d45120fdc..e10332b5483ee 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -318,26 +318,6 @@ def _to_embed(self, keep_tz=False): def _formatter_func(self): return lambda x: "'%s'" % x - def asof_locs(self, where, mask): - """ - where : array of timestamps - mask : array of booleans where data is not NA - - """ - where_idx = where - if isinstance(where_idx, DatetimeIndex): - where_idx = PeriodIndex(where_idx.values, freq=self.freq) - - locs = self.values[mask].searchsorted(where_idx.values, side='right') - - locs = np.where(locs > 0, locs - 1, 0) - result = np.arange(len(self))[mask].take(locs) - - first = mask.argmax() - result[(locs == 0) & (where_idx.values < self.values[first])] = -1 - - return result - def _array_values(self): return self.asobject
closes #10343 as discussed in #10345 , changed `Series.asof` to use `reindex` internally, and remove the unused leftovers from index classes. All existing tests in test_series.py:TestSeries.test_asof pass with the new implementation.
https://api.github.com/repos/pandas-dev/pandas/pulls/10873
2015-08-21T00:34:30Z
2015-10-18T14:04:01Z
null
2023-05-11T01:13:09Z
BUG: import of maybe_convert_indices in pandas.core.index.py, #10610
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index cc8f135eb62b0..4dadc8cd97fa9 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -785,4 +785,5 @@ Bug Fixes - Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`) - Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`) - Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`) +- Bug in ``pd.Index`` when using passing a list of indices to a mixed-integer index (:issue:`10610`) - Bug in ``TimedeltaIndex`` formatter causing error while trying to save ``DataFrame`` with ``TimedeltaIndex`` using ``to_csv`` (:issue:`10833`) diff --git a/pandas/core/index.py b/pandas/core/index.py index f7e06626febd1..a8645519ade1d 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -973,9 +973,12 @@ def _convert_list_indexer(self, keyarr, kind=None): indexer = self.get_indexer(keyarr) if (indexer >= 0).all(): return indexer - - from pandas.core.indexing import _maybe_convert_indices - return _maybe_convert_indices(indexer, len(self)) + # missing values are flagged as -1 by get_indexer and negative indices are already + # converted to positive indices in the above if-statement, so the negative flags are changed to + # values outside the range of indices so as to trigger an IndexError in maybe_convert_indices + indexer[indexer < 0] = len(self) + from pandas.core.indexing import maybe_convert_indices + return maybe_convert_indices(indexer, len(self)) elif not self.inferred_type == 'integer': return keyarr diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 4a679f73d27cf..c8ce3f522693f 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1729,6 +1729,11 @@ def test_equals_op_multiindex(self): df.index == index_a 
tm.assert_numpy_array_equal(index_a == mi3, np.array([False, False, False])) + def test_multitype_list_index_access(self): + df = pd.DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23]) + with self.assertRaises(IndexError): + vals = df[[22, 26, -8]] + self.assertEqual(df[21].shape[0], df.shape[0]) class TestCategoricalIndex(Base, tm.TestCase): _holder = CategoricalIndex
closes #10610 I fixed the import statement and added a test to check for proper behavior when accessing a mixed-integer index with a list of values.
https://api.github.com/repos/pandas-dev/pandas/pulls/10872
2015-08-20T23:35:29Z
2015-09-01T11:49:25Z
null
2015-09-01T11:49:25Z
DEPR: deprecate the data reader package, #10861
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 13764543ec665..fac04552fdb42 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -11,6 +11,10 @@ users upgrade to this version. pandas >= 0.17.0 will no longer support compatibility with Python version 3.2 (:issue:`9118`) +.. warning:: + + The ``pandas.io.data`` package is deprecated and will be replaced by the `pandas-datareader package <https://github.com/pydata/pandas-datareader>`_. This will allow the data modules to be independently updated to your pandas installation. The API for ``pandas-datareader v0.1.1`` is the same as in ``pandas v0.17.0``. (:issue:`8961`, :issue:`10861`) + Highlights include: - Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>` diff --git a/pandas/io/data.py b/pandas/io/data.py index 1556f6b00e981..f24dc134b4cbd 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -23,6 +23,12 @@ from pandas.util.testing import _network_error_classes from pandas.io.html import read_html +warnings.warn("\n" + "The pandas.io.data module is deprecated and will be " + "removed in a future version.\nThis is being replaced by the " + "pandas-datareader package, found here: https://github.com/pydata/pandas-datareader", + FutureWarning) + class SymbolWarning(UserWarning): pass
closed #10861
https://api.github.com/repos/pandas-dev/pandas/pulls/10870
2015-08-20T22:05:05Z
2015-08-23T10:26:43Z
null
2015-08-23T22:15:01Z
DOC: update ipython sphinxext for IPython 4.0
diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py index 3f9be95609874..ad7ada8e4eea3 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py @@ -127,7 +127,11 @@ from sphinx.util.compat import Directive # Our own -from IPython import Config, InteractiveShell +try: + from traitlets.config import Config +except ImportError: + from IPython import Config +from IPython import InteractiveShell from IPython.core.profiledir import ProfileDir from IPython.utils import io from IPython.utils.py3compat import PY3
https://api.github.com/repos/pandas-dev/pandas/pulls/10868
2015-08-20T20:55:58Z
2015-08-23T10:35:40Z
2015-08-23T10:35:40Z
2015-09-07T17:51:10Z
DEPR: iterkv change Deprecation to FutureWarning
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1656d52c35eeb..610c5a0da2cff 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -701,7 +701,7 @@ def iterkv(self, *args, **kwargs): "iteritems alias used to get around 2to3. Deprecated" warnings.warn("iterkv is deprecated and will be removed in a future " "release, use ``iteritems`` instead.", - DeprecationWarning) + FutureWarning) return self.iteritems(*args, **kwargs) def __len__(self): @@ -2004,13 +2004,13 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No Sample with or without replacement. Default = False. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. - If passed a Series, will align with target object on index. Index + If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and - index values in sampled object not in weights will be assigned - weights of zero. + index values in sampled object not in weights will be assigned + weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. - Unless weights are a Series, weights must be same length as axis + Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. 
@@ -2040,8 +2040,8 @@ def sample(self, n=None, frac=None, replace=False, weights=None, random_state=No if weights is not None: # If a series, align with frame - if isinstance(weights, pd.Series): - weights = weights.reindex(self.axes[axis]) + if isinstance(weights, pd.Series): + weights = weights.reindex(self.axes[axis]) # Strings acceptable if a dataframe and axis = 0 if isinstance(weights, string_types): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 465f1da05ebde..d6e825d8fd000 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -14188,7 +14188,7 @@ def test_take(self): assert_frame_equal(result, expected) def test_iterkv_deprecation(self): - with tm.assert_produces_warning(DeprecationWarning): + with tm.assert_produces_warning(FutureWarning): self.mixed_float.iterkv() def test_iterkv_names(self):
So this is visible to the users (already long time deprecated in the docs)
https://api.github.com/repos/pandas-dev/pandas/pulls/10867
2015-08-20T20:18:52Z
2015-08-21T07:31:13Z
2015-08-21T07:31:13Z
2015-08-21T07:31:13Z
ENH: Period pickle
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 039772f68ee85..0c3af5c4bd64f 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -198,6 +198,8 @@ Other enhancements ``tolerance`` is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods. +- Support pickling of ``Period`` objects (:issue:`10439`) + .. _whatsnew_0170.api: .. _whatsnew_0170.api_breaking: diff --git a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack index 6bf1b9b9afaaa..000879f4cb2c2 100644 Binary files a/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack and b/pandas/io/tests/data/legacy_msgpack/0.16.2/0.16.2_x86_64_darwin_2.7.10.msgpack differ diff --git a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle index 60101c2f1e95e..d45936baa1e00 100644 Binary files a/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle and b/pandas/io/tests/data/legacy_pickle/0.16.2/0.16.2_x86_64_darwin_2.7.10.pickle differ diff --git a/pandas/io/tests/generate_legacy_storage_files.py b/pandas/io/tests/generate_legacy_storage_files.py index e7cc89fcc0b61..86c5a9e0d7f19 100644 --- a/pandas/io/tests/generate_legacy_storage_files.py +++ b/pandas/io/tests/generate_legacy_storage_files.py @@ -4,7 +4,8 @@ from pandas import (Series, TimeSeries, DataFrame, Panel, SparseSeries, SparseTimeSeries, SparseDataFrame, SparsePanel, Index, MultiIndex, PeriodIndex, bdate_range, to_msgpack, - date_range, period_range, bdate_range, Timestamp, Categorical) + date_range, period_range, bdate_range, Timestamp, Categorical, + Period) import os import sys import numpy as np @@ -63,6 +64,10 @@ def create_data(): 'E': [0., 1, Timestamp('20100101'), 'foo', 2.] 
} + scalars = dict(timestamp=Timestamp('20130101')) + if LooseVersion(pandas.__version__) >= '0.17.0': + scalars['period'] = Period('2012','M') + index = dict(int=Index(np.arange(10)), date=date_range('20130101', periods=10), period=period_range('2013-01-01', freq='M', periods=10)) @@ -79,6 +84,8 @@ def create_data(): names=['one', 'two'])), dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']), cat=Series(Categorical(['foo', 'bar', 'baz']))) + if LooseVersion(pandas.__version__) >= '0.17.0': + series['period'] = Series([Period('2000Q1')] * 5) mixed_dup_df = DataFrame(data) mixed_dup_df.columns = list("ABCDA") @@ -107,6 +114,7 @@ def create_data(): frame=frame, panel=panel, index=index, + scalars=scalars, mi=mi, sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()), diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index e691fac215002..8f2079722c00e 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -48,7 +48,7 @@ def compare_element(self, typ, result, expected): comparator = getattr(test_sparse,"assert_%s_equal" % typ) comparator(result,expected,exact_indices=False) else: - comparator = getattr(tm,"assert_%s_equal" % typ) + comparator = getattr(tm,"assert_%s_equal" % typ,tm.assert_almost_equal) comparator(result,expected) def compare(self, vf): diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx index b4a4930e09d68..619d1a87a71e0 100644 --- a/pandas/src/period.pyx +++ b/pandas/src/period.pyx @@ -969,6 +969,14 @@ cdef class Period(object): value = ("%s" % formatted) return value + def __setstate__(self, state): + self.freq=state[1] + self.ordinal=state[2] + + def __reduce__(self): + object_state = None, self.freq, self.ordinal + return (Period, object_state) + def strftime(self, fmt): """ Returns the string representation of the :class:`Period`, depending diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 
b9757c9e1b5d7..eb5c6759bfa45 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -2471,7 +2471,6 @@ def test_append_concat(self): def test_pickle_freq(self): # GH2891 - import pickle prng = period_range('1/1/2011', '1/1/2012', freq='M') new_prng = self.round_trip_pickle(prng) self.assertEqual(new_prng.freq,'M') @@ -2536,6 +2535,12 @@ def test_searchsorted(self): ValueError, 'Different period frequency: H', lambda: pidx.searchsorted(pd.Period('2014-01-01', freq='H'))) + def test_round_trip(self): + + p = Period('2000Q1') + new_p = self.round_trip_pickle(p) + self.assertEqual(new_p, p) + def _permute(obj): return obj.take(np.random.permutation(len(obj)))
closes #10439 closes #10441
https://api.github.com/repos/pandas-dev/pandas/pulls/10866
2015-08-20T16:23:48Z
2015-08-20T17:32:21Z
2015-08-20T17:32:21Z
2015-08-20T17:32:21Z
Tests for ABC Types
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 94f151efbe2a6..b234773359f8c 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -6,6 +6,7 @@ import nose from nose.tools import assert_equal, assert_true import numpy as np +import pandas as pd from pandas.tslib import iNaT, NaT from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index from pandas import compat @@ -40,6 +41,7 @@ def __getitem__(self): assert(not is_seq(A())) + def test_get_callable_name(): from functools import partial getname = com._get_callable_name @@ -49,6 +51,7 @@ def fn(x): lambda_ = lambda x: x part1 = partial(fn) part2 = partial(part1) + class somecall(object): def __call__(self): return x @@ -60,6 +63,38 @@ def __call__(self): assert getname(somecall()) == 'somecall' assert getname(1) is None +#Issue 10859 +class TestABCClasses(tm.TestCase): + tuples = [[1, 2, 2], ['red', 'blue', 'red']] + multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color')) + datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1']) + timedelta_index = pd.to_timedelta(np.arange(5), unit='s') + period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M') + categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1]) + categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical) + df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index) + sparse_series = pd.Series([1, 2, 3]).to_sparse() + sparse_array = pd.SparseArray(np.random.randn(10)) + + def test_abc_types(self): + self.assertIsInstance(pd.Index(['a', 'b', 'c']), com.ABCIndex) + self.assertIsInstance(pd.Int64Index([1, 2, 3]), com.ABCInt64Index) + self.assertIsInstance(pd.Float64Index([1, 2, 3]), com.ABCFloat64Index) + self.assertIsInstance(self.multi_index, com.ABCMultiIndex) + self.assertIsInstance(self.datetime_index, com.ABCDatetimeIndex) + self.assertIsInstance(self.timedelta_index, com.ABCTimedeltaIndex) + 
self.assertIsInstance(self.period_index, com.ABCPeriodIndex) + self.assertIsInstance(self.categorical_df.index, com.ABCCategoricalIndex) + self.assertIsInstance(pd.Index(['a', 'b', 'c']), com.ABCIndexClass) + self.assertIsInstance(pd.Int64Index([1, 2, 3]), com.ABCIndexClass) + self.assertIsInstance(pd.Series([1, 2, 3]), com.ABCSeries) + self.assertIsInstance(self.df, com.ABCDataFrame) + self.assertIsInstance(self.df.to_panel(), com.ABCPanel) + self.assertIsInstance(self.sparse_series, com.ABCSparseSeries) + self.assertIsInstance(self.sparse_array, com.ABCSparseArray) + self.assertIsInstance(self.categorical, com.ABCCategorical) + self.assertIsInstance(pd.Period('2012', freq='A-DEC'), com.ABCPeriod) + def test_notnull(): assert notnull(1.) @@ -942,7 +977,7 @@ def test_2d_float32(self): def test_2d_datetime64(self): # 2005/01/01 - 2006/01/01 - arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000 + arr = np.random.randint(long(11045376), long(11360736), (5, 3))*100000000000 arr = arr.view(dtype='datetime64[ns]') indexer = [0, 2, -1, 1, -1] @@ -1026,6 +1061,7 @@ def test_dict_compat(): assert(com._dict_compat(expected) == expected) assert(com._dict_compat(data_unchanged) == data_unchanged) + def test_possibly_convert_objects_copy(): values = np.array([1, 2])
closes #10828 I used self.assertIsInstance(obj, type) and inlined as many of the expressions as I could while staying PEP8 compliant. I added in the two tests that I took out, that was an accident. Let me know if this looks okay!
https://api.github.com/repos/pandas-dev/pandas/pulls/10859
2015-08-20T07:22:33Z
2015-08-20T17:39:19Z
null
2015-08-20T17:39:19Z
removes categories & ordered from CategoricalIndex attributes
diff --git a/pandas/core/index.py b/pandas/core/index.py index 135195d90ffdb..7b5a6b199bc1b 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2910,7 +2910,7 @@ class CategoricalIndex(Index, PandasDelegate): _typ = 'categoricalindex' _engine_type = _index.Int64Engine - _attributes = ['name','categories','ordered'] + _attributes = ['name'] def __new__(cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
`categories` and `ordered` are properties not attributes ([index.py#L3085-L3091](https://github.com/pydata/pandas/blob/3e35c842f37265ac7339908869d4cc7bdef784d9/pandas/core/index.py#L3085-L3091)). The side effect is that the categories are rehashed too often: on master: ``` ipython In [1]: from pandas.core.index import CategoricalIndex In [2]: np.random.seed(2718281) In [3]: n = 100000 In [4]: val = map('{:->4}'.format, np.random.randint(0, 10000, n)) In [5]: idx = CategoricalIndex(list(val)) In [6]: %timeit idx[::-1] 100 loops, best of 3: 14.1 ms per loop In [7]: %timeit idx[idx.argsort()] 10 loops, best of 3: 22 ms per loop In [8]: cProfile.run("idx[::-1]", sort=1) 174 function calls (171 primitive calls) in 0.025 seconds Ordered by: internal time ncalls tottime percall cumtime percall filename:lineno(function) 1 0.014 0.014 0.014 0.014 {method 'lookup' of 'pandas.hashtable.PyObjectHashTable' objects} 1 0.004 0.004 0.004 0.004 {pandas.algos.take_1d_object_object} 1 0.003 0.003 0.003 0.003 {method 'map_locations' of 'pandas.hashtable.PyObjectHashTable' objects} ``` on branch: ``` ipython In [6]: %timeit idx[::-1] The slowest run took 6.78 times longer than the fastest. This could mean that an intermediate result is being cached 10000 loops, best of 3: 29 µs per loop In [7]: %timeit idx[idx.argsort()] 100 loops, best of 3: 9.38 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10858
2015-08-20T00:10:29Z
2015-08-20T12:48:09Z
2015-08-20T12:48:09Z
2015-08-20T23:32:06Z
ENH: #8325 Add ability to create tables using the gbq module.
diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip index cf8e6b8b3d3a6..ff1978a8d45ed 100644 --- a/ci/requirements-2.7.pip +++ b/ci/requirements-2.7.pip @@ -1 +1,3 @@ blosc +httplib2 +google-api-python-client == 1.2 diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index 951c8798bef15..d6a1e2d362330 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -20,6 +20,4 @@ patsy pymysql=0.6.3 html5lib=1.0b2 beautiful-soup=4.2.1 -httplib2=0.8 python-gflags=2.0 -google-api-python-client=1.2 diff --git a/ci/requirements-2.7_SLOW.txt b/ci/requirements-2.7_SLOW.txt index 19686ccb56922..1a56434c62f86 100644 --- a/ci/requirements-2.7_SLOW.txt +++ b/ci/requirements-2.7_SLOW.txt @@ -20,6 +20,4 @@ psycopg2 pymysql html5lib beautiful-soup -httplib2 python-gflags -google-api-python-client diff --git a/ci/requirements-3.4.pip b/ci/requirements-3.4.pip index cf8e6b8b3d3a6..47a049aac7632 100644 --- a/ci/requirements-3.4.pip +++ b/ci/requirements-3.4.pip @@ -1 +1,3 @@ blosc +httplib2 +google-api-python-client diff --git a/doc/source/api.rst b/doc/source/api.rst index 915727817bb7b..01634d7c02481 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -110,6 +110,10 @@ Google BigQuery read_gbq to_gbq + generate_bq_schema + create_table + delete_table + table_exists .. currentmodule:: pandas diff --git a/doc/source/io.rst b/doc/source/io.rst index f95fdd502d306..5ad9af310225d 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3951,14 +3951,35 @@ The :mod:`pandas.io.gbq` module provides a wrapper for Google's BigQuery analytics web service to simplify retrieving results from BigQuery tables using SQL-like queries. Result sets are parsed into a pandas DataFrame with a shape and data types derived from the source table. -Additionally, DataFrames can be appended to existing BigQuery tables if -the destination table is the same shape as the DataFrame. 
+Additionally, DataFrames can be inserted into new BigQuery tables or appended +to existing tables. -For specifics on the service itself, see `here <https://developers.google.com/bigquery/>`__ +.. warning:: + + To use this module, you will need a valid BigQuery account. Refer to the + `BigQuery Documentation <https://developers.google.com/bigquery/>`__ for details on the service itself. + +The key functions are: -As an example, suppose you want to load all data from an existing BigQuery -table : `test_dataset.test_table` into a DataFrame using the :func:`~pandas.io.read_gbq` -function. +.. currentmodule:: pandas.io.gbq + +.. autosummary:: + :toctree: generated/ + + read_gbq + to_gbq + generate_bq_schema + create_table + delete_table + table_exists + +.. currentmodule:: pandas + +Querying +'''''''' + +Suppose you want to load all data from an existing BigQuery table : `test_dataset.test_table` +into a DataFrame using the :func:`~pandas.io.gbq.read_gbq` function. .. code-block:: python @@ -3966,14 +3987,14 @@ function. # Can be found in the Google web console projectid = "xxxxxxxx" - data_frame = pd.read_gbq('SELECT * FROM test_dataset.test_table', project_id = projectid) + data_frame = pd.read_gbq('SELECT * FROM test_dataset.test_table', projectid) You will then be authenticated to the specified BigQuery account via Google's Oauth2 mechanism. In general, this is as simple as following the prompts in a browser window which will be opened for you. Should the browser not be available, or fail to launch, a code will be provided to complete the process manually. Additional information on the authentication mechanism can be found -`here <https://developers.google.com/accounts/docs/OAuth2#clientside/>`__ +`here <https://developers.google.com/accounts/docs/OAuth2#clientside/>`__. 
You can define which column from BigQuery to use as an index in the destination DataFrame as well as a preferred column order as follows: @@ -3982,56 +4003,167 @@ destination DataFrame as well as a preferred column order as follows: data_frame = pd.read_gbq('SELECT * FROM test_dataset.test_table', index_col='index_column_name', - col_order=['col1', 'col2', 'col3'], project_id = projectid) - -Finally, you can append data to a BigQuery table from a pandas DataFrame -using the :func:`~pandas.io.to_gbq` function. This function uses the -Google streaming API which requires that your destination table exists in -BigQuery. Given the BigQuery table already exists, your DataFrame should -match the destination table in column order, structure, and data types. -DataFrame indexes are not supported. By default, rows are streamed to -BigQuery in chunks of 10,000 rows, but you can pass other chuck values -via the ``chunksize`` argument. You can also see the progess of your -post via the ``verbose`` flag which defaults to ``True``. The http -response code of Google BigQuery can be successful (200) even if the -append failed. For this reason, if there is a failure to append to the -table, the complete error response from BigQuery is returned which -can be quite long given it provides a status for each row. You may want -to start with smaller chunks to test that the size and types of your -dataframe match your destination table to make debugging simpler. + col_order=['col1', 'col2', 'col3'], projectid) + +.. note:: + + You can find your project id in the `BigQuery management console <https://code.google.com/apis/console/b/0/?noredirect>`__. + + +.. note:: + + You can toggle the verbose output via the ``verbose`` flag which defaults to ``True``. + +Writing DataFrames +'''''''''''''''''' + +Assume we want to write a DataFrame ``df`` into a BigQuery table using :func:`~pandas.DataFrame.to_gbq`. + +.. 
ipython:: python + + df = pd.DataFrame({'my_string': list('abc'), + 'my_int64': list(range(1, 4)), + 'my_float64': np.arange(4.0, 7.0), + 'my_bool1': [True, False, True], + 'my_bool2': [False, True, False], + 'my_dates': pd.date_range('now', periods=3)}) + + df + df.dtypes .. code-block:: python - df = pandas.DataFrame({'string_col_name' : ['hello'], - 'integer_col_name' : [1], - 'boolean_col_name' : [True]}) - df.to_gbq('my_dataset.my_table', project_id = projectid) + df.to_gbq('my_dataset.my_table', projectid) + +.. note:: + + If the destination table does not exist, a new table will be created. The + destination dataset id must already exist in order for a new table to be created. + +The ``if_exists`` argument can be used to dictate whether to ``'fail'``, ``'replace'`` +or ``'append'`` if the destination table already exists. The default value is ``'fail'``. + +For example, assume that ``if_exists`` is set to ``'fail'``. The following snippet will raise +a ``TableCreationError`` if the destination table already exists. + +.. code-block:: python -The BigQuery SQL query language has some oddities, see `here <https://developers.google.com/bigquery/query-reference>`__ + df.to_gbq('my_dataset.my_table', projectid, if_exists='fail') -While BigQuery uses SQL-like syntax, it has some important differences -from traditional databases both in functionality, API limitations (size and -quantity of queries or uploads), and how Google charges for use of the service. -You should refer to Google documentation often as the service seems to -be changing and evolving. BiqQuery is best for analyzing large sets of -data quickly, but it is not a direct replacement for a transactional database. +.. 
note:: -You can access the management console to determine project id's by: -<https://code.google.com/apis/console/b/0/?noredirect> + If the ``if_exists`` argument is set to ``'append'``, the destination dataframe will + be written to the table using the defined table schema and column types. The + dataframe must match the destination table in column order, structure, and + data types. + If the ``if_exists`` argument is set to ``'replace'``, and the existing table has a + different schema, a delay of 2 minutes will be forced to ensure that the new schema + has propagated in the Google environment. See + `Google BigQuery issue 191 <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__. -As of 0.15.2, the gbq module has a function ``generate_bq_schema`` which -will produce the dictionary representation of the schema. +Writing large DataFrames can result in errors due to size limitations being exceeded. +This can be avoided by setting the ``chunksize`` argument when calling :func:`~pandas.DataFrame.to_gbq`. +For example, the following writes ``df`` to a BigQuery table in batches of 10000 rows at a time: .. code-block:: python - df = pandas.DataFrame({'A': [1.0]}) - gbq.generate_bq_schema(df, default_type='STRING') + df.to_gbq('my_dataset.my_table', projectid, chunksize=10000) -.. warning:: +You can also see the progress of your post via the ``verbose`` flag which defaults to ``True``. +For example: + +.. code-block:: python + + In [8]: df.to_gbq('my_dataset.my_table', projectid, chunksize=10000, verbose=True) + + Streaming Insert is 10% Complete + Streaming Insert is 20% Complete + Streaming Insert is 30% Complete + Streaming Insert is 40% Complete + Streaming Insert is 50% Complete + Streaming Insert is 60% Complete + Streaming Insert is 70% Complete + Streaming Insert is 80% Complete + Streaming Insert is 90% Complete + Streaming Insert is 100% Complete + +.. 
note:: + + If an error occurs while streaming data to BigQuery, see + `Troubleshooting BigQuery Errors <https://cloud.google.com/bigquery/troubleshooting-errors>`__. + +.. note:: + + The BigQuery SQL query language has some oddities, see the + `BigQuery Query Reference Documentation <https://developers.google.com/bigquery/query-reference>`__. + +.. note:: + + While BigQuery uses SQL-like syntax, it has some important differences from traditional + databases both in functionality, API limitations (size and quantity of queries or uploads), + and how Google charges for use of the service. You should refer to `Google BigQuery documentation <https://developers.google.com/bigquery/>`__ + often as the service seems to be changing and evolving. BiqQuery is best for analyzing large + sets of data quickly, but it is not a direct replacement for a transactional database. + + +Creating BigQuery Tables +'''''''''''''''''''''''' + +As of 0.17.0, the gbq module has a function :func:`~pandas.io.gbq.create_table` which allows users +to create a table in BigQuery. The only requirement is that the dataset must already exist. +The schema may be generated from a pandas DataFrame using the :func:`~pandas.io.gbq.generate_bq_schema` function below. + +For example: + +.. code-block:: python + + gbq.create_table('my_dataset.my_table', schema, projectid) + +As of 0.15.2, the gbq module has a function :func:`~pandas.io.gbq.generate_bq_schema` which will +produce the dictionary representation schema of the specified pandas DataFrame. + +.. 
code-block:: python + + In [10]: gbq.generate_bq_schema(df, default_type='STRING') + + Out[10]: {'fields': [{'name': 'my_bool1', 'type': 'BOOLEAN'}, + {'name': 'my_bool2', 'type': 'BOOLEAN'}, + {'name': 'my_dates', 'type': 'TIMESTAMP'}, + {'name': 'my_float64', 'type': 'FLOAT'}, + {'name': 'my_int64', 'type': 'INTEGER'}, + {'name': 'my_string', 'type': 'STRING'}]} + +Deleting BigQuery Tables +'''''''''''''''''''''''' + +As of 0.17.0, the gbq module has a function :func:`~pandas.io.gbq.delete_table` which allows users to delete a table +in Google BigQuery. + +For example: + +.. code-block:: python + + gbq.delete_table('my_dataset.my_table', projectid) + +The following function can be used to check whether a table exists prior to calling ``table_exists``: + +:func:`~pandas.io.gbq.table_exists`. + +The return value will be of type boolean. + +For example: + +.. code-block:: python + + In [12]: gbq.table_exists('my_dataset.my_table', projectid) + Out[12]: True + +.. note:: - To use this module, you will need a valid BigQuery account. See - <https://cloud.google.com/products/big-query> for details on the - service. + If you delete and re-create a BigQuery table with the same name, but different table schema, + you must wait 2 minutes before streaming data into the table. As a workaround, consider creating + the new table with a different name. Refer to + `Google BigQuery issue 191 <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__. .. _io.stata: diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 3b3bf8cffe41b..0a2407ed7b16e 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -319,6 +319,15 @@ has been changed to make this keyword unnecessary - the change is shown below. Excel files saved in version 0.16.2 or prior that had index names will still able to be read in, but the ``has_index_names`` argument must specified to ``True``. +.. 
_whatsnew_0170.gbq: + +Google BigQuery Enhancements +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- Added ability to automatically create a table using the :func:`pandas.io.gbq.to_gbq` function if destination table does not exist. (:issue:`8325`). +- Added ability to replace an existing table and schema when calling the :func:`pandas.io.gbq.to_gbq` function via the ``if_exists`` argument. See the :ref:`docs <io.bigquery>` for more details (:issue:`8325`). +- Added the following functions to the gbq module: :func:`pandas.io.gbq.table_exists`, :func:`pandas.io.gbq.create_table`, and :func:`pandas.io.gbq.delete_table`. See the :ref:`docs <io.bigquery>` for more details (:issue:`8325`). +- ``InvalidColumnOrder`` and ``InvalidPageToken`` in the gbq module will raise ``ValueError`` instead of ``IOError``. + .. _whatsnew_0170.enhancements.other: Other enhancements @@ -1137,3 +1146,4 @@ Bug Fixes - Bug in ``DatetimeIndex`` cannot infer negative freq (:issue:`11018`) - Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`) - Bug in ``Index`` dtype may not applied properly (:issue:`11017`) +- Bug in ``io.gbq`` when testing for minimum google api client version (:issue:`10652`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 77b8c4cf35aad..e58bd1f2fa0ff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -811,20 +811,12 @@ def to_dict(self, orient='dict'): else: raise ValueError("orient '%s' not understood" % orient) - def to_gbq(self, destination_table, project_id=None, chunksize=10000, - verbose=True, reauth=False): + def to_gbq(self, destination_table, project_id, chunksize=10000, + verbose=True, reauth=False, if_exists='fail'): """Write a DataFrame to a Google BigQuery table. THIS IS AN EXPERIMENTAL LIBRARY - If the table exists, the dataframe will be written to the table using - the defined table schema and column types. For simplicity, this method - uses the Google BigQuery streaming API. 
The to_gbq method chunks data - into a default chunk size of 10,000. Failures return the complete error - response which can be quite long depending on the size of the insert. - There are several important limitations of the Google streaming API - which are `here <https://developers.google.com/bigquery/streaming-data-into-bigquery>`__ - Parameters ---------- dataframe : DataFrame @@ -840,13 +832,18 @@ def to_gbq(self, destination_table, project_id=None, chunksize=10000, reauth : boolean (default False) Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + 'fail': If table exists, do nothing. + 'replace': If table exists, drop it, recreate it, and insert data. + 'append': If table exists, insert data. Create if does not exist. + .. versionadded:: 0.17.0 """ from pandas.io import gbq return gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, verbose=verbose, - reauth=reauth) + reauth=reauth, if_exists=if_exists) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 1dff195e4b54f..37e7cb944814a 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -7,7 +7,7 @@ import numpy as np -from distutils.version import LooseVersion +from distutils.version import StrictVersion from pandas import compat from pandas.core.api import DataFrame from pandas.tools.merge import concat @@ -26,76 +26,101 @@ def _check_google_client_version(): _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version - if LooseVersion(_GOOGLE_API_CLIENT_VERSION) < '1.2.0': + if StrictVersion(_GOOGLE_API_CLIENT_VERSION) < StrictVersion('1.2.0'): raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google " "BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION) logger = logging.getLogger('pandas.io.gbq') 
logger.setLevel(logging.ERROR) -class InvalidPageToken(PandasError, IOError): + +class AccessDenied(PandasError, ValueError): """ - Raised when Google BigQuery fails to return, - or returns a duplicate page token. + Raised when invalid credentials are provided, or tokens have expired. """ pass -class InvalidQueryException(PandasError, IOError): + +class GenericGBQException(PandasError, ValueError): """ - Raised when a malformed query is given to read_gbq. + Raised when an unrecognized Google API Error occurs. """ pass -class AccessDeniedException(PandasError, IOError): + +class InvalidColumnOrder(PandasError, ValueError): """ - Raised when invalid credentials are provided, or tokens have expired. + Raised when the provided column order for output + results DataFrame does not match the schema + returned by BigQuery. """ pass -class NotFoundException(PandasError, IOError): + +class InvalidPageToken(PandasError, ValueError): """ - Raised when the project_id/table provided in the query could not be found. + Raised when Google BigQuery fails to return, + or returns a duplicate page token. """ pass -class TermsOfServiceNotAcceptedException(PandasError, IOError): + + +class InvalidSchema(PandasError, ValueError): """ - Raised when the terms of service were not accepted or have been unaccepted. + Raised when the provided DataFrame does + not match the schema of the destination + table in BigQuery. """ pass -class UnknownGBQException(PandasError, IOError): + +class NotFoundException(PandasError, ValueError): """ - Raised when an unrecognized Google API Error occurs. + Raised when the project_id, table or dataset provided in the query could not be found. """ pass -class InvalidColumnOrder(PandasError, IOError): +class StreamingInsertError(PandasError, ValueError): """ - Raised when the provided column order for output - results DataFrame does not match the schema - returned by BigQuery. + Raised when BigQuery reports a streaming insert error. 
+ For more information see `Streaming Data Into BigQuery <https://cloud.google.com/bigquery/streaming-data-into-bigquery>`__ + """ + + +class TableCreationError(PandasError, ValueError): + """ + Raised when the create table method fails """ pass + class GbqConnector(object): def __init__(self, project_id, reauth=False): + self.test_google_api_imports() + self.project_id = project_id + self.reauth = reauth + self.credentials = self.get_credentials() + self.service = self.get_service(self.credentials) - self.project_id = project_id - self.reauth = reauth - self.credentials = self.get_credentials() - self.service = self.get_service(self.credentials) - - def get_credentials(self): + def test_google_api_imports(self): try: + import httplib2 + from apiclient.discovery import build + from apiclient.errors import HttpError + from oauth2client.client import AccessTokenRefreshError from oauth2client.client import OAuth2WebServerFlow from oauth2client.file import Storage from oauth2client.tools import run_flow, argparser + except ImportError as e: + raise ImportError("Missing module required for Google BigQuery support: {0}".format(str(e))) - except ImportError: - raise ImportError('Could not import Google API Client.') + def get_credentials(self): + from oauth2client.client import OAuth2WebServerFlow + from oauth2client.file import Storage + from oauth2client.tools import run_flow, argparser _check_google_client_version() @@ -113,17 +138,8 @@ def get_credentials(self): return credentials def get_service(self, credentials): - try: - import httplib2 - - except ImportError: - raise ImportError("pandas requires httplib2 for Google BigQuery support") - - try: - from apiclient.discovery import build - - except ImportError: - raise ImportError('Could not import Google API Client.') + import httplib2 + from apiclient.discovery import build _check_google_client_version() @@ -133,13 +149,41 @@ def get_service(self, credentials): return bigquery_service - def run_query(self, query): - 
try: - from apiclient.errors import HttpError - from oauth2client.client import AccessTokenRefreshError + def process_http_error(self, ex): + # See `BigQuery Troubleshooting Errors <https://cloud.google.com/bigquery/troubleshooting-errors>`__ + + status = json.loads(ex.content)['error'] + errors = status.get('errors', None) - except ImportError: - raise ImportError('Could not import Google API Client.') + if errors: + for error in errors: + reason = error['reason'] + message = error['message'] + + raise GenericGBQException("Reason: {0}, Message: {1}".format(reason, message)) + + raise GenericGBQException(errors) + + def process_insert_errors(self, insert_errors, verbose): + for insert_error in insert_errors: + row = insert_error['index'] + errors = insert_error.get('errors', None) + for error in errors: + reason = error['reason'] + message = error['message'] + error_message = 'Error at Row: {0}, Reason: {1}, Message: {2}'.format(row, reason, message) + + # Report all error messages if verbose is set + if verbose: + print(error_message) + else: + raise StreamingInsertError(error_message + '\nEnable verbose logging to see all errors') + + raise StreamingInsertError + + def run_query(self, query, verbose=True): + from apiclient.errors import HttpError + from oauth2client.client import AccessTokenRefreshError _check_google_client_version() @@ -148,122 +192,201 @@ def run_query(self, query): 'configuration': { 'query': { 'query': query - #'allowLargeResults', 'createDisposition', 'preserveNulls', destinationTable, useQueryCache + # 'allowLargeResults', 'createDisposition', 'preserveNulls', destinationTable, useQueryCache } } } try: - query_reply = job_collection.insert(projectId=self.project_id, - body=job_data).execute() - status = query_reply['status'] + query_reply = job_collection.insert(projectId=self.project_id, body=job_data).execute() except AccessTokenRefreshError: - raise AccessDeniedException("The credentials have been revoked or expired, please re-run" - 
"the application to re-authorize") + raise AccessDenied("The credentials have been revoked or expired, please re-run the application " + "to re-authorize") except HttpError as ex: - status = json.loads(ex.content)['error'] - - - errors = status.get('errors', None) - - if errors: - reasons = [error['reason'] for error in errors] - if 'accessDenied' in reasons: - raise AccessDeniedException - if 'invalidQuery' in reasons: - raise InvalidQueryException - if 'notFound' in reasons: - raise NotFoundException - if 'termsOfServiceNotAccepted' in reasons: - raise TermsOfServiceNotAcceptedException - else: - raise UnknownGBQException(errors) + self.process_http_error(ex) job_reference = query_reply['jobReference'] - while(not query_reply.get('jobComplete', False)): - print('Job not yet complete...') - query_reply = job_collection.getQueryResults( - projectId=job_reference['projectId'], - jobId=job_reference['jobId']).execute() + while not query_reply.get('jobComplete', False): + if verbose: + print('Waiting for job to complete...') + try: + query_reply = job_collection.getQueryResults(projectId=job_reference['projectId'], + jobId=job_reference['jobId']).execute() + except HttpError as ex: + self.process_http_error(ex) total_rows = int(query_reply['totalRows']) result_pages = list() seen_page_tokens = list() current_row = 0 - #Only read schema on first page + # Only read schema on first page schema = query_reply['schema'] # Loop through each page of data - while('rows' in query_reply and current_row < total_rows): + while 'rows' in query_reply and current_row < total_rows: page = query_reply['rows'] result_pages.append(page) current_row += len(page) page_token = query_reply.get('pageToken', None) if not page_token and current_row < total_rows: - raise InvalidPageToken("Required pageToken was missing. Recieved {0} of {1} rows".format(current_row,total_rows)) + raise InvalidPageToken("Required pageToken was missing. 
Recieved {0} of {1} rows".format(current_row, total_rows)) elif page_token in seen_page_tokens: raise InvalidPageToken("A duplicate pageToken was returned") seen_page_tokens.append(page_token) - query_reply = job_collection.getQueryResults( - projectId = job_reference['projectId'], - jobId = job_reference['jobId'], - pageToken = page_token).execute() - if (current_row < total_rows): + try: + query_reply = job_collection.getQueryResults( + projectId=job_reference['projectId'], + jobId=job_reference['jobId'], + pageToken=page_token).execute() + except HttpError as ex: + self.process_http_error(ex) + + if current_row < total_rows: raise InvalidPageToken() return schema, result_pages def load_data(self, dataframe, dataset_id, table_id, chunksize, verbose): + from apiclient.errors import HttpError + job_id = uuid.uuid4().hex rows = [] remaining_rows = len(dataframe) if verbose: total_rows = remaining_rows - sys.stdout.write("\n\n") - sys.stdout.flush() + print("\n\n") for index, row in dataframe.reset_index(drop=True).iterrows(): row_dict = dict() - row_dict['json'] = json.loads(row.to_json(force_ascii = False, - date_unit = 's', - date_format = 'iso')) + row_dict['json'] = json.loads(row.to_json(force_ascii=False, + date_unit='s', + date_format='iso')) row_dict['insertId'] = job_id + str(index) rows.append(row_dict) remaining_rows -= 1 if (len(rows) % chunksize == 0) or (remaining_rows == 0): if verbose: - sys.stdout.write("\rStreaming Insert is {0}% Complete".format(((total_rows - remaining_rows) * 100) / total_rows)) - sys.stdout.flush() + print("\rStreaming Insert is {0}% Complete".format(((total_rows - remaining_rows) * 100) / total_rows)) body = {'rows': rows} - response = self.service.tabledata().insertAll( - projectId = self.project_id, - datasetId = dataset_id, - tableId = table_id, - body = body).execute() - if 'insertErrors' in response: - raise UnknownGBQException(response) - - sleep(1) # Maintains the inserts "per second" rate per API + + try: + response = 
self.service.tabledata().insertAll( + projectId = self.project_id, + datasetId = dataset_id, + tableId = table_id, + body = body).execute() + except HttpError as ex: + self.process_http_error(ex) + + # For streaming inserts, even if you receive a success HTTP response code, you'll need to check the + # insertErrors property of the response to determine if the row insertions were successful, because + # it's possible that BigQuery was only partially successful at inserting the rows. + # See the `Success HTTP Response Codes <https://cloud.google.com/bigquery/streaming-data-into-bigquery#troubleshooting>`__ + # section + + insert_errors = response.get('insertErrors', None) + if insert_errors: + self.process_insert_errors(insert_errors, verbose) + + sleep(1) # Maintains the inserts "per second" rate per API rows = [] if verbose: - sys.stdout.write("\n") - sys.stdout.flush() + print("\n") + + def table_exists(self, dataset_id, table_id): + from apiclient.errors import HttpError + + try: + self.service.tables().get( + projectId=self.project_id, + datasetId=dataset_id, + tableId=table_id).execute() + return True + except HttpError as ex: + if ex.resp.status == 404: + return False + else: + self.process_http_error(ex) + + def verify_schema(self, dataset_id, table_id, schema): + from apiclient.errors import HttpError + + try: + return (self.service.tables().get( + projectId=self.project_id, + datasetId=dataset_id, + tableId=table_id + ).execute()['schema']) == schema + + except HttpError as ex: + self.process_http_error(ex) + + def create_table(self, dataset_id, table_id, schema): + from apiclient.errors import HttpError + + body = { + 'schema': schema, + 'tableReference': { + 'tableId': table_id, + 'projectId': self.project_id, + 'datasetId': dataset_id + } + } + + try: + self.service.tables().insert( + projectId=self.project_id, + datasetId=dataset_id, + body=body + ).execute() + except HttpError as ex: + self.process_http_error(ex) + + def delete_table(self, dataset_id, 
table_id): + from apiclient.errors import HttpError + + try: + self.service.tables().delete( + datasetId=dataset_id, + projectId=self.project_id, + tableId=table_id + ).execute() + + except HttpError as ex: + self.process_http_error(ex) + + def delete_and_recreate_table(self, dataset_id, table_id, table_schema, verbose): + delay = 0 + + # Changes to table schema may take up to 2 minutes as of May 2015 + # See `Issue 191 <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__ + # Compare previous schema with new schema to determine if there should be a 120 second delay + + if not self.verify_schema(dataset_id, table_id, table_schema): + if verbose: + print('The existing table has a different schema. Please wait 2 minutes. See Google BigQuery issue #191') + delay = 120 + + self.delete_table(dataset_id, table_id) + self.create_table(dataset_id, table_id, table_schema) + + sleep(delay) + def _parse_data(schema, rows): # see: http://pandas.pydata.org/pandas-docs/dev/missing_data.html#missing-data-casting-rules-and-indexing dtype_map = {'INTEGER': np.dtype(float), 'FLOAT': np.dtype(float), - 'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without - # nanosecond indicator + 'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without nanosecond indicator fields = schema['fields'] col_types = [field['type'] for field in fields] @@ -281,6 +404,7 @@ def _parse_data(schema, rows): return DataFrame(page_array, columns=col_names) + def _parse_entry(field_value, field_type): if field_value is None or field_value == 'null': return None @@ -294,7 +418,7 @@ def _parse_entry(field_value, field_type): return field_value -def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False): +def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, verbose=True): """Load data from Google BigQuery. 
THIS IS AN EXPERIMENTAL LIBRARY @@ -319,6 +443,8 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals reauth : boolean (default False) Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. + verbose : boolean (default True) + Verbose output Returns ------- @@ -327,12 +453,11 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals """ - if not project_id: raise TypeError("Missing required parameter: project_id") - connector = GbqConnector(project_id, reauth = reauth) - schema, pages = connector.run_query(query) + connector = GbqConnector(project_id, reauth=reauth) + schema, pages = connector.run_query(query, verbose=verbose) dataframe_list = [] while len(pages) > 0: page = pages.pop() @@ -346,7 +471,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals # Reindex the DataFrame on the provided column if index_col is not None: if index_col in final_df.columns: - final_df.set_index(index_col, inplace = True) + final_df.set_index(index_col, inplace=True) else: raise InvalidColumnOrder( 'Index column "{0}" does not exist in DataFrame.' @@ -368,21 +493,13 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals final_df._data = final_df._data.downcast(dtypes='infer') return final_df -def to_gbq(dataframe, destination_table, project_id=None, chunksize=10000, - verbose=True, reauth=False): + +def to_gbq(dataframe, destination_table, project_id, chunksize=10000, + verbose=True, reauth=False, if_exists='fail'): """Write a DataFrame to a Google BigQuery table. THIS IS AN EXPERIMENTAL LIBRARY - If the table exists, the dataframe will be written to the table using - the defined table schema and column types. For simplicity, this method - uses the Google BigQuery streaming API. The to_gbq method chunks data - into a default chunk size of 10,000. 
Failures return the complete error - response which can be quite long depending on the size of the insert. - There are several important limitations of the Google streaming API - which are detailed at: - https://developers.google.com/bigquery/streaming-data-into-bigquery. - Parameters ---------- dataframe : DataFrame @@ -398,22 +515,41 @@ def to_gbq(dataframe, destination_table, project_id=None, chunksize=10000, reauth : boolean (default False) Force Google BigQuery to reauthenticate the user. This is useful if multiple accounts are used. - + if_exists : {'fail', 'replace', 'append'}, default 'fail' + 'fail': If table exists, do nothing. + 'replace': If table exists, drop it, recreate it, and insert data. + 'append': If table exists, insert data. Create if does not exist. """ - if not project_id: - raise TypeError("Missing required parameter: project_id") + if if_exists not in ('fail', 'replace', 'append'): + raise ValueError("'{0}' is not valid for if_exists".format(if_exists)) - if not '.' in destination_table: + if '.' not in destination_table: raise NotFoundException("Invalid Table Name. Should be of the form 'datasetId.tableId' ") - connector = GbqConnector(project_id, reauth = reauth) - dataset_id, table_id = destination_table.rsplit('.',1) + connector = GbqConnector(project_id, reauth=reauth) + dataset_id, table_id = destination_table.rsplit('.', 1) + + table_schema = generate_bq_schema(dataframe) + + # If table exists, check if_exists parameter + if connector.table_exists(dataset_id, table_id): + if if_exists == 'fail': + raise TableCreationError("Could not create the table because it already exists. 
" + "Change the if_exists parameter to append or replace data.") + elif if_exists == 'replace': + connector.delete_and_recreate_table(dataset_id, table_id, table_schema, verbose) + elif if_exists == 'append': + if not connector.verify_schema(dataset_id, table_id, table_schema): + raise InvalidSchema("The schema of the destination table does not match") + else: + connector.create_table(dataset_id, table_id, table_schema) connector.load_data(dataframe, dataset_id, table_id, chunksize, verbose) + def generate_bq_schema(df, default_type='STRING'): - """ Given a passed df, generate the associated big query schema. + """ Given a passed df, generate the associated Google BigQuery schema. Parameters ---------- @@ -439,3 +575,76 @@ def generate_bq_schema(df, default_type='STRING'): 'type': type_mapping.get(dtype.kind, default_type)}) return {'fields': fields} + + +def table_exists(table, project_id): + """ Check if a table exists in Google BigQuery given a table and project id + + .. versionadded:: 0.17.0 + + Parameters + ---------- + table : str + Name of table to be verified, in the form 'dataset.tablename' + project_id : str + Google BigQuery Account project ID. + + Returns + ------- + boolean + true if table exists, otherwise false + """ + + if '.' not in table: + raise NotFoundException("Invalid Table Name. Should be of the form 'datasetId.tableId' ") + + connector = GbqConnector(project_id) + dataset_id, table_id = table.rsplit('.', 1) + + return connector.table_exists(dataset_id, table_id) + + +def create_table(table, schema, project_id): + """ Create a table in Google BigQuery given a table, schema and project id + + .. versionadded:: 0.17.0 + + Parameters + ---------- + table : str + Name of table to be written, in the form 'dataset.tablename' + schema : str + Use the generate_bq_schema to generate your table schema from a dataframe. + project_id : str + Google BigQuery Account project ID. 
+ """ + + if table_exists(table, project_id): + raise TableCreationError("The table could not be created because it already exists") + + connector = GbqConnector(project_id) + dataset_id, table_id = table.rsplit('.', 1) + + return connector.create_table(dataset_id, table_id, schema) + + +def delete_table(table, project_id): + """ Delete a table in Google BigQuery given a table and project id + + .. versionadded:: 0.17.0 + + Parameters + ---------- + table : str + Name of table to be written, in the form 'dataset.tablename' + project_id : str + Google BigQuery Account project ID. + """ + + if not table_exists(table, project_id): + raise NotFoundException("Table does not exist") + + connector = GbqConnector(project_id) + dataset_id, table_id = table.rsplit('.', 1) + + return connector.delete_table(dataset_id, table_id) diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index f04eeb03f790e..990050b8ac544 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -1,5 +1,5 @@ import ast -import datetime +from datetime import datetime import json import nose import os @@ -12,7 +12,7 @@ import numpy as np -from distutils.version import LooseVersion +from distutils.version import StrictVersion from pandas import compat from pandas import NaT @@ -31,13 +31,15 @@ _HTTPLIB2_INSTALLED = False _SETUPTOOLS_INSTALLED = False + def missing_bq(): try: - subprocess.call('bq') + subprocess.call(['bq', 'ls']) return False except OSError: return True + def _test_imports(): if not compat.PY3: @@ -63,7 +65,7 @@ def _test_imports(): _GOOGLE_API_CLIENT_INSTALLED=True _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version - if LooseVersion(_GOOGLE_API_CLIENT_VERSION) >= '1.2.0': + if StrictVersion(_GOOGLE_API_CLIENT_VERSION) >= StrictVersion('1.2.0'): _GOOGLE_API_CLIENT_VALID_VERSION = True except ImportError: @@ -75,7 +77,6 @@ def _test_imports(): _HTTPLIB2_INSTALLED = True except ImportError: 
_HTTPLIB2_INSTALLED = False - if compat.PY3: raise NotImplementedError("Google's libraries do not support Python 3 yet") @@ -93,12 +94,24 @@ def _test_imports(): if not _HTTPLIB2_INSTALLED: raise ImportError("pandas requires httplib2 for Google BigQuery support") + def test_requirements(): try: _test_imports() except (ImportError, NotImplementedError) as import_exception: raise nose.SkipTest(import_exception) + +def make_mixed_dataframe_v2(test_size): + # create df to test for all BQ datatypes except RECORD + bools = np.random.randint(2, size=(1,test_size)).astype(bool) + flts = np.random.randn(1, test_size) + ints = np.random.randint(1, 10, size=(1,test_size)) + strs = np.random.randint(1, 10, size=(1,test_size)).astype(str) + times = [datetime.now(pytz.timezone('US/Arizona')) for t in xrange(test_size)] + return DataFrame({'bools': bools[0], 'flts': flts[0], 'ints': ints[0], 'strs': strs[0], 'times': times[0]}, index=range(test_size)) + + class TestGBQConnectorIntegration(tm.TestCase): def setUp(self): test_requirements() @@ -128,6 +141,7 @@ def test_should_be_able_to_get_results_from_query(self): schema, pages = self.sut.run_query('SELECT 1') self.assertTrue(pages is not None) + class TestReadGBQUnitTests(tm.TestCase): def setUp(self): test_requirements() @@ -165,90 +179,116 @@ def test_read_gbq_with_no_project_id_given_should_fail(self): gbq.read_gbq('SELECT "1" as NUMBER_1') def test_that_parse_data_works_properly(self): - test_schema = {'fields': [{'mode': 'NULLABLE', - 'name': 'VALID_STRING', - 'type': 'STRING'}]} + test_schema = {'fields': [{'mode': 'NULLABLE', 'name': 'VALID_STRING', 'type': 'STRING'}]} test_page = [{'f': [{'v': 'PI'}]}] test_output = gbq._parse_data(test_schema, test_page) - correct_output = DataFrame({'VALID_STRING' : ['PI']}) + correct_output = DataFrame({'VALID_STRING': ['PI']}) tm.assert_frame_equal(test_output, correct_output) + class TestReadGBQIntegration(tm.TestCase): - def setUp(self): + @classmethod + def setUpClass(cls): + # - 
GLOBAL CLASS FIXTURES - + # put here any instruction you want to execute only *ONCE* *BEFORE* executing *ALL* tests + # described below. + test_requirements() if not PROJECT_ID: raise nose.SkipTest("Cannot run integration tests without a project id") + if missing_bq(): + raise nose.SkipTest("Cannot run read_gbq tests without bq command line client") + + subprocess.call(['bq', 'mk', PROJECT_ID + ':pydata_pandas_bq_testing']) + + def setUp(self): + # - PER-TEST FIXTURES - + # put here any instruction you want to be run *BEFORE* *EVERY* test is executed. + pass + + @classmethod + def tearDownClass(cls): + # - GLOBAL CLASS FIXTURES - + # put here any instruction you want to execute only *ONCE* *AFTER* executing all tests. + subprocess.call(['bq', 'rm', '-f', PROJECT_ID + ':pydata_pandas_bq_testing']) + + def tearDown(self): + # - PER-TEST FIXTURES - + # put here any instructions you want to be run *AFTER* *EVERY* test is executed. + if gbq.table_exists('pydata_pandas_bq_testing.new_test', PROJECT_ID): + subprocess.call(['bq', 'rm', '-f', PROJECT_ID + ':pydata_pandas_bq_testing.new_test']) + def test_should_properly_handle_valid_strings(self): query = 'SELECT "PI" as VALID_STRING' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'VALID_STRING' : ['PI']})) + tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']})) def test_should_properly_handle_empty_strings(self): query = 'SELECT "" as EMPTY_STRING' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING' : [""]})) + tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING': [""]})) def test_should_properly_handle_null_strings(self): query = 'SELECT STRING(NULL) as NULL_STRING' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'NULL_STRING' : [None]})) + tm.assert_frame_equal(df, DataFrame({'NULL_STRING': [None]})) def test_should_properly_handle_valid_integers(self): query = 'SELECT INTEGER(3) as 
VALID_INTEGER' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER' : [3]})) + tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER': [3]})) def test_should_properly_handle_null_integers(self): query = 'SELECT INTEGER(NULL) as NULL_INTEGER' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER' : [np.nan]})) + tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER': [np.nan]})) def test_should_properly_handle_valid_floats(self): query = 'SELECT PI() as VALID_FLOAT' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'VALID_FLOAT' : [3.141592653589793]})) + tm.assert_frame_equal(df, DataFrame({'VALID_FLOAT': [3.141592653589793]})) def test_should_properly_handle_null_floats(self): query = 'SELECT FLOAT(NULL) as NULL_FLOAT' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT' : [np.nan]})) + tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT': [np.nan]})) def test_should_properly_handle_timestamp_unix_epoch(self): query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'UNIX_EPOCH' : [np.datetime64('1970-01-01T00:00:00.000000Z')]})) + tm.assert_frame_equal(df, DataFrame({'UNIX_EPOCH': [np.datetime64('1970-01-01T00:00:00.000000Z')]})) def test_should_properly_handle_arbitrary_timestamp(self): query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'VALID_TIMESTAMP' : [np.datetime64('2004-09-15T05:00:00.000000Z')]})) + tm.assert_frame_equal(df, DataFrame({'VALID_TIMESTAMP': [np.datetime64('2004-09-15T05:00:00.000000Z')]})) def test_should_properly_handle_null_timestamp(self): query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, 
DataFrame({'NULL_TIMESTAMP' :[NaT]})) + tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP': [NaT]})) def test_should_properly_handle_true_boolean(self): query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN' : [True]})) + tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN': [True]})) def test_should_properly_handle_false_boolean(self): query = 'SELECT BOOLEAN(FALSE) as FALSE_BOOLEAN' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN' : [False]})) + tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN': [False]})) def test_should_properly_handle_null_boolean(self): query = 'SELECT BOOLEAN(NULL) as NULL_BOOLEAN' df = gbq.read_gbq(query, project_id=PROJECT_ID) - tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN' : [None]})) + tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN': [None]})) def test_unicode_string_conversion_and_normalization(self): correct_test_datatype = DataFrame( - {'UNICODE_STRING' : [u("\xe9\xfc")]} + {'UNICODE_STRING': [u("\xe9\xfc")]} ) query = 'SELECT "\xc3\xa9\xc3\xbc" as UNICODE_STRING' @@ -259,35 +299,35 @@ def test_unicode_string_conversion_and_normalization(self): def test_index_column(self): query = "SELECT 'a' as STRING_1, 'b' as STRING_2" result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, index_col="STRING_1") - correct_frame = DataFrame({'STRING_1' : ['a'], 'STRING_2' : ['b']}).set_index("STRING_1") + correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': ['b']}).set_index("STRING_1") tm.assert_equal(result_frame.index.name, correct_frame.index.name) def test_column_order(self): query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3" col_order = ['STRING_3', 'STRING_1', 'STRING_2'] result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, col_order=col_order) - correct_frame = DataFrame({'STRING_1' : ['a'], 'STRING_2' : ['b'], 'STRING_3' : ['c']})[col_order] + 
correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': ['b'], 'STRING_3': ['c']})[col_order] tm.assert_frame_equal(result_frame, correct_frame) def test_column_order_plus_index(self): query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3" col_order = ['STRING_3', 'STRING_2'] result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, index_col='STRING_1', col_order=col_order) - correct_frame = DataFrame({'STRING_1' : ['a'], 'STRING_2' : ['b'], 'STRING_3' : ['c']}) + correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': ['b'], 'STRING_3': ['c']}) correct_frame.set_index('STRING_1', inplace=True) correct_frame = correct_frame[col_order] tm.assert_frame_equal(result_frame, correct_frame) def test_malformed_query(self): - with tm.assertRaises(gbq.InvalidQueryException): + with tm.assertRaises(gbq.GenericGBQException): gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]", project_id=PROJECT_ID) def test_bad_project_id(self): - with tm.assertRaises(gbq.NotFoundException): + with tm.assertRaises(gbq.GenericGBQException): gbq.read_gbq("SELECT 1", project_id='001') def test_bad_table_name(self): - with tm.assertRaises(gbq.NotFoundException): + with tm.assertRaises(gbq.GenericGBQException): gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]", project_id=PROJECT_ID) def test_download_dataset_larger_than_200k_rows(self): @@ -304,52 +344,128 @@ def test_zero_rows(self): class TestToGBQIntegration(tm.TestCase): - # This class requires bq.py to be installed for setup/teardown. - # It will also need to be preconfigured with a default dataset, - # so, be sure to `bq init` in terminal before running. + # Changes to BigQuery table schema may take up to 2 minutes as of May 2015 + # As a workaround to this issue, each test should use a unique table name. 
+ # Make sure to modify the for loop range in the tearDownClass when a new test is added + # See `Issue 191 <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__ + + @classmethod + def setUpClass(cls): + # - GLOBAL CLASS FIXTURES - + # put here any instruction you want to execute only *ONCE* *BEFORE* executing *ALL* tests + # described below. - def setUp(self): test_requirements() if not PROJECT_ID: raise nose.SkipTest("Cannot run integration tests without a project id") + if missing_bq(): raise nose.SkipTest("Cannot run to_gbq tests without bq command line client") + subprocess.call(['bq', 'mk', PROJECT_ID + ':pydata_pandas_bq_testing']) + + def setUp(self): + # - PER-TEST FIXTURES - + # put here any instruction you want to be run *BEFORE* *EVERY* test is executed. + pass + @classmethod - def setUpClass(cls): - if PROJECT_ID and not missing_bq(): - subprocess.call(['bq','mk','pydata_pandas_bq_testing']) - subprocess.call(['bq','mk','pydata_pandas_bq_testing.new_test','bools:BOOLEAN,flts:FLOAT,ints:INTEGER,strs:STRING,times:TIMESTAMP']) + def tearDownClass(cls): + # - GLOBAL CLASS FIXTURES - + # put here any instruction you want to execute only *ONCE* *AFTER* executing all tests. + + for i in range(1, 8): + if gbq.table_exists('pydata_pandas_bq_testing.new_test' + str(i), PROJECT_ID): + subprocess.call(['bq', 'rm', '-f', PROJECT_ID + ':pydata_pandas_bq_testing.new_test' + str(i)]) + + subprocess.call(['bq', 'rm', '-f', PROJECT_ID + ':pydata_pandas_bq_testing']) + + def tearDown(self): + # - PER-TEST FIXTURES - + # put here any instructions you want to be run *AFTER* *EVERY* test is executed. 
+ pass def test_upload_data(self): + table_name = 'new_test1' + test_size = 1000001 - #create df to test for all BQ datatypes except RECORD - bools = np.random.randint(2, size=(1,test_size)).astype(bool) - flts = np.random.randn(1,test_size) - ints = np.random.randint(1,10, size=(1,test_size)) - strs = np.random.randint(1,10, size=(1,test_size)).astype(str) - times = [datetime.datetime.now(pytz.timezone('US/Arizona')) for t in xrange(test_size)] - df = DataFrame({'bools':bools[0], 'flts':flts[0], 'ints':ints[0], 'strs':strs[0], 'times':times[0]}, index=range(test_size)) - gbq.to_gbq(df,"pydata_pandas_bq_testing.new_test", project_id=PROJECT_ID, chunksize=10000) - sleep(60) # <- Curses Google!!! - - result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM pydata_pandas_bq_testing.new_test", project_id=PROJECT_ID) + df = make_mixed_dataframe_v2(test_size) + + gbq.to_gbq(df, "pydata_pandas_bq_testing." + table_name, PROJECT_ID, chunksize=10000) + + sleep(60) # <- Curses Google!!! + + result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM pydata_pandas_bq_testing." + table_name, project_id=PROJECT_ID) self.assertEqual(result['NUM_ROWS'][0], test_size) + def test_upload_data_if_table_exists_fail(self): + table_name = 'new_test2' + + test_size = 10 + df = make_mixed_dataframe_v2(test_size) + + gbq.create_table('pydata_pandas_bq_testing.' + table_name, gbq.generate_bq_schema(df), PROJECT_ID) + + # Test the default value of if_exists is 'fail' + with tm.assertRaises(gbq.TableCreationError): + gbq.to_gbq(df, "pydata_pandas_bq_testing." + table_name, PROJECT_ID) + + # Test the if_exists parameter with value 'fail' + with tm.assertRaises(gbq.TableCreationError): + gbq.to_gbq(df, "pydata_pandas_bq_testing." 
+ table_name, PROJECT_ID, if_exists='fail') + + def test_upload_data_if_table_exists_append(self): + table_name = 'new_test3' + + test_size = 10 + df = make_mixed_dataframe_v2(test_size) + df_different_schema = tm.makeMixedDataFrame() + + # Initialize table with sample data + gbq.to_gbq(df, "pydata_pandas_bq_testing." + table_name, PROJECT_ID, chunksize=10000) + + # Test the if_exists parameter with value 'append' + gbq.to_gbq(df, "pydata_pandas_bq_testing." + table_name, PROJECT_ID, if_exists='append') + + sleep(60) # <- Curses Google!!! + + result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM pydata_pandas_bq_testing." + table_name, project_id=PROJECT_ID) + self.assertEqual(result['NUM_ROWS'][0], test_size * 2) + + # Try inserting with a different schema, confirm failure + with tm.assertRaises(gbq.InvalidSchema): + gbq.to_gbq(df_different_schema, "pydata_pandas_bq_testing." + table_name, PROJECT_ID, if_exists='append') + + def test_upload_data_if_table_exists_replace(self): + table_name = 'new_test4' + + test_size = 10 + df = make_mixed_dataframe_v2(test_size) + df_different_schema = tm.makeMixedDataFrame() + + # Initialize table with sample data + gbq.to_gbq(df, "pydata_pandas_bq_testing." + table_name, PROJECT_ID, chunksize=10000) + + # Test the if_exists parameter with the value 'replace'. + gbq.to_gbq(df_different_schema, "pydata_pandas_bq_testing." + table_name, PROJECT_ID, if_exists='replace') + + sleep(60) # <- Curses Google!!! + + result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM pydata_pandas_bq_testing." 
+ table_name, project_id=PROJECT_ID) + self.assertEqual(result['NUM_ROWS'][0], 5) + def test_google_upload_errors_should_raise_exception(self): - test_timestamp = datetime.datetime.now(pytz.timezone('US/Arizona')) - bad_df = DataFrame( {'bools': [False, False], - 'flts': [0.0,1.0], - 'ints': [0,'1'], - 'strs': ['a', 1], - 'times': [test_timestamp, test_timestamp] - }, index=range(2)) - with tm.assertRaises(gbq.UnknownGBQException): - gbq.to_gbq(bad_df, 'pydata_pandas_bq_testing.new_test', project_id = PROJECT_ID) + table_name = 'new_test5' - def test_generate_bq_schema(self): + test_timestamp = datetime.now(pytz.timezone('US/Arizona')) + bad_df = DataFrame({'bools': [False, False], 'flts': [0.0, 1.0], 'ints': [0, '1'], 'strs': ['a', 1], + 'times': [test_timestamp, test_timestamp]}, index=range(2)) + + with tm.assertRaises(gbq.StreamingInsertError): + gbq.to_gbq(bad_df, 'pydata_pandas_bq_testing.' + table_name, PROJECT_ID, verbose=True) + def test_generate_bq_schema(self): df = tm.makeMixedDataFrame() schema = gbq.generate_bq_schema(df) @@ -360,13 +476,41 @@ def test_generate_bq_schema(self): self.assertEqual(schema, test_schema) - @classmethod - def tearDownClass(cls): - if PROJECT_ID and not missing_bq(): - subprocess.call(['bq','rm','-f','pydata_pandas_bq_testing.new_test']) - subprocess.call(['bq','rm','-f','pydata_pandas_bq_testing']) + def test_create_bq_table(self): + table_name = 'new_test6' + + test_schema = {'fields': [{'name': 'A', 'type': 'FLOAT'}, {'name': 'B', 'type': 'FLOAT'}, + {'name': 'C', 'type': 'STRING'}, {'name': 'D', 'type': 'TIMESTAMP'}]} + + gbq.create_table('pydata_pandas_bq_testing.' + table_name, test_schema, PROJECT_ID) + + self.assertTrue(gbq.table_exists('pydata_pandas_bq_testing.' + table_name, PROJECT_ID), 'Expected table to exist') + + def test_table_does_not_exist(self): + table_name = 'new_test7' + self.assertTrue(not gbq.table_exists('pydata_pandas_bq_testing.' 
+ table_name, PROJECT_ID), + 'Expected table not to exist') + + def test_delete_bq_table(self): + table_name = 'new_test8' + + test_schema = {'fields': [{'name': 'A', 'type': 'FLOAT'}, {'name': 'B', 'type': 'FLOAT'}, + {'name': 'C', 'type': 'STRING'}, {'name': 'D', 'type': 'TIMESTAMP'}]} + + gbq.create_table('pydata_pandas_bq_testing.' + table_name, test_schema, PROJECT_ID) + + gbq.delete_table('pydata_pandas_bq_testing.' + table_name, PROJECT_ID) + + self.assertTrue(not gbq.table_exists('pydata_pandas_bq_testing.' + table_name, PROJECT_ID), + 'Expected table not to exist') + + def test_upload_data_dataset_not_found(self): + test_size = 10 + df = make_mixed_dataframe_v2(test_size) + + with tm.assertRaises(gbq.GenericGBQException): + gbq.create_table('pydata_pandas_bq_testing2.new_test', gbq.generate_bq_schema(df), PROJECT_ID) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) -
closes #8325 closes #10652 - Added ability to automatically create a table using the gbq.to_gbq function if destination table does not exist - Added `gbq.gbq_table_exists()` function to the gbq module - Added `gbq.create_gbq_table()` function to the gbq module
https://api.github.com/repos/pandas-dev/pandas/pulls/10857
2015-08-19T21:29:11Z
2015-09-13T20:20:52Z
2015-09-13T20:20:52Z
2015-09-14T03:28:03Z
ABC Tests
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 94f151efbe2a6..df1e30817adbc 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -6,8 +6,9 @@ import nose from nose.tools import assert_equal, assert_true import numpy as np +import pandas as pd from pandas.tslib import iNaT, NaT -from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index +from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp from pandas import compat from pandas.compat import range, long, lrange, lmap, u from pandas.core.common import notnull, isnull, array_equivalent @@ -40,6 +41,7 @@ def __getitem__(self): assert(not is_seq(A())) + def test_get_callable_name(): from functools import partial getname = com._get_callable_name @@ -49,6 +51,7 @@ def fn(x): lambda_ = lambda x: x part1 = partial(fn) part2 = partial(part1) + class somecall(object): def __call__(self): return x @@ -61,6 +64,45 @@ def __call__(self): assert getname(1) is None +def test_abc_types(): + tuples = [('bar', 'one'), ('bar', 'two')] + names = ['first', 'second'] + values = [1, 2, 3, 4] + index = pd.Index(['a', 'b', 'c']) + int64_index = pd.Int64Index([1, 2, 3]) + float64_index = pd.Float64Index([1, 2, 3]) + multi_index = pd.MultiIndex.from_tuples(tuples, names=names) + datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1']) + timedelta_index = pd.to_timedelta(np.arange(5), unit='s') + period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M') + categorical = pd.Categorical([1, 2, 3, 4], categories=[4, 2, 3, 1]) + categorical_df = pd.DataFrame({"values": values}, index=categorical) + categorical_index = categorical_df.index + series = pd.Series(values) + df = pd.DataFrame({"names": names}, index=multi_index) + panel = df.to_panel() + sparse_series = series.to_sparse() + sparse_array = pd.SparseArray(np.random.randn(10)) + period = pd.Period('2012', freq='A-DEC') + assert(isinstance(index, com.ABCIndex)) + 
assert(isinstance(int64_index, com.ABCInt64Index)) + assert(isinstance(float64_index, com.ABCFloat64Index)) + assert(isinstance(multi_index, com.ABCMultiIndex)) + assert(isinstance(datetime_index, com.ABCDatetimeIndex)) + assert(isinstance(timedelta_index, com.ABCTimedeltaIndex)) + assert(isinstance(period_index, com.ABCPeriodIndex)) + assert(isinstance(categorical_index, com.ABCCategoricalIndex)) + assert(isinstance(index, com.ABCIndexClass)) + assert(isinstance(int64_index, com.ABCIndexClass)) + assert(isinstance(series, com.ABCSeries)) + assert(isinstance(df, com.ABCDataFrame)) + assert(isinstance(panel, com.ABCPanel)) + assert(isinstance(sparse_series, com.ABCSparseSeries)) + assert(isinstance(sparse_array, com.ABCSparseArray)) + assert(isinstance(categorical, com.ABCCategorical)) + assert(isinstance(period, com.ABCPeriod)) + + def test_notnull(): assert notnull(1.) assert not notnull(None) @@ -229,8 +271,6 @@ def test_array_equivalent(): assert not array_equivalent(np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan])) assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e'])) - assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan])) - assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan])) assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan])) assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan])) @@ -942,7 +982,7 @@ def test_2d_float32(self): def test_2d_datetime64(self): # 2005/01/01 - 2006/01/01 - arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000 + arr = np.random.randint(long(11045376), long(11360736), (5, 3))*100000000000 arr = arr.view(dtype='datetime64[ns]') indexer = [0, 2, -1, 1, -1] @@ -1026,6 +1066,7 @@ def test_dict_compat(): assert(com._dict_compat(expected) == expected) assert(com._dict_compat(data_unchanged) == data_unchanged) + def test_possibly_convert_objects_copy(): values = 
np.array([1, 2])
closes #10828 I created tests for this issue here: https://github.com/pydata/pandas/issues/10828. This is my first time working on Pandas, so please let me know if there is anything wrong with the way I have coded these tests. I haven't updated the documentation, I was unsure of what version I should update and whether I should update at all (it seems that the documentation is for additional features and bug fixes rather than increasing test coverage). I'd love any guidance as to what I can do to fully resolve this issue and how I can best help in the future!
https://api.github.com/repos/pandas-dev/pandas/pulls/10855
2015-08-19T10:17:06Z
2015-08-20T12:34:11Z
null
2015-08-20T12:34:11Z
DOC: fix Panel4D docstring
diff --git a/pandas/core/panel4d.py b/pandas/core/panel4d.py index 3d480464388c8..7fafbd0eaa2b5 100644 --- a/pandas/core/panel4d.py +++ b/pandas/core/panel4d.py @@ -12,7 +12,9 @@ aliases={'major': 'major_axis', 'minor': 'minor_axis'}, stat_axis=2, ns=dict(__doc__=""" - Represents a 4 dimensional structured + Panel4D is a 4-Dimensional named container very much like a Panel, but + having 4 named dimensions. It is intended as a test bed for more + N-Dimensional named containers. Parameters ----------
Just took this from https://github.com/pydata/pandas/blob/master/doc/source/dsintro.rst#panel4d-experimental
https://api.github.com/repos/pandas-dev/pandas/pulls/10854
2015-08-19T08:55:31Z
2015-08-19T09:21:38Z
2015-08-19T09:21:38Z
2015-08-19T09:47:11Z
TST: GH10837 remove test_ujson.py reliance on dict iteration order
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py index 43e1c5c89dd5e..af0a1da830c83 100644 --- a/pandas/io/tests/test_json/test_ujson.py +++ b/pandas/io/tests/test_json/test_ujson.py @@ -1131,22 +1131,20 @@ def testArrayNumpyLabelled(self): self.assertTrue(output[1] is None) self.assertTrue((np.array([u('a')]) == output[2]).all()) - # py3 is non-determinstic on the ordering...... - if not compat.PY3: - input = [{'a': 42, 'b':31}, {'a': 24, 'c': 99}, {'a': 2.4, 'b': 78}] - output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True) - expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2)) - self.assertTrue((expectedvals == output[0]).all()) - self.assertTrue(output[1] is None) - self.assertTrue((np.array([u('a'), 'b']) == output[2]).all()) - - - input = {1: {'a': 42, 'b':31}, 2: {'a': 24, 'c': 99}, 3: {'a': 2.4, 'b': 78}} - output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True) - expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2)) - self.assertTrue((expectedvals == output[0]).all()) - self.assertTrue((np.array(['1','2','3']) == output[1]).all()) - self.assertTrue((np.array(['a', 'b']) == output[2]).all()) + # Write out the dump explicitly so there is no dependency on iteration order GH10837 + input_dumps = '[{"a": 42, "b":31}, {"a": 24, "c": 99}, {"a": 2.4, "b": 78}]' + output = ujson.loads(input_dumps, numpy=True, labelled=True) + expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2)) + self.assertTrue((expectedvals == output[0]).all()) + self.assertTrue(output[1] is None) + self.assertTrue((np.array([u('a'), 'b']) == output[2]).all()) + + input_dumps = '{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, "3": {"a": 2.4, "b": 78}}' + output = ujson.loads(input_dumps, numpy=True, labelled=True) + expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2)) + self.assertTrue((expectedvals == output[0]).all()) + 
self.assertTrue((np.array(['1', '2', '3']) == output[1]).all()) + self.assertTrue((np.array(['a', 'b']) == output[2]).all()) class PandasJSONTests(TestCase):
closes #10837
https://api.github.com/repos/pandas-dev/pandas/pulls/10853
2015-08-19T03:59:44Z
2015-08-19T10:53:27Z
2015-08-19T10:53:27Z
2015-08-19T10:53:32Z
DOC: Excel formatting and searchsorted cookbook examples
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 49ff987ca3549..9e7b9ad0b7582 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -834,6 +834,9 @@ ignore_index is needed in pandas < v0.13, and depending on df construction `Join with a criteria based on the values <http://stackoverflow.com/questions/15581829/how-to-perform-an-inner-or-outer-join-of-dataframes-with-pandas-on-non-simplisti>`__ +`Using searchsorted to merge based on values inside a range +<http://stackoverflow.com/questions/25125626/pandas-merge-with-logic/2512764>`__ + .. _cookbook.plotting: Plotting @@ -988,8 +991,14 @@ The :ref:`Excel <io.excel>` docs `Reading from a filelike handle <http://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe>`__ +`Modifying formatting in XlsxWriter output +<http://pbpython.com/improve-pandas-excel-output.html>`__ + .. _cookbook.html: +HTML +**** + `Reading HTML tables from a server that cannot handle the default request header <http://stackoverflow.com/a/18939272/564538>`__
Also added the missing `HTML` header for the one link in that section.
https://api.github.com/repos/pandas-dev/pandas/pulls/10852
2015-08-19T02:55:45Z
2015-08-19T09:12:07Z
2015-08-19T09:12:07Z
2015-09-07T17:15:03Z
[DOCS][10MINS] Fix method for the 'where' operation example
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 1714e00030026..472c875a4ee3f 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -289,7 +289,7 @@ A ``where`` operation for getting. .. ipython:: python - df[df > 0] + df.where(df > 0) Using the :func:`~Series.isin` method for filtering:
Fix method of the 'where' operation for getting.
https://api.github.com/repos/pandas-dev/pandas/pulls/10851
2015-08-19T02:52:41Z
2015-08-20T03:15:59Z
null
2023-05-11T01:13:08Z
DOC: Update CONTRIBUTING.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 284ac2fc5b169..86b0e890fb94a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -288,7 +288,7 @@ Some other important things to know about the docs: #### Requirements To build the *pandas* docs there are some extra requirements: you will -need to have `sphinx` and `ipython` installed. +need to have `sphinx`, `ipython` and `ipython-notebook` installed. [numpydoc](https://github.com/numpy/numpydoc) is used to parse the docstrings that follow the Numpy Docstring Standard (see above), but you don't need to install this because a local copy of `numpydoc` is @@ -298,7 +298,7 @@ It is easiest to create a development environment \<contributing-dev\_env\>, then install: - conda install -n pandas_dev sphinx ipython + conda install -n pandas_dev sphinx ipython ipython-notebook Furthermore, it is recommended to have all [optional dependencies](http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies)
Add `ipython-notebook` as dependency. I had to install it otherwise sphinx was failing the build.
https://api.github.com/repos/pandas-dev/pandas/pulls/10847
2015-08-18T22:07:20Z
2015-08-20T22:21:03Z
null
2023-05-11T01:13:08Z
BUG: Error while saving DataFrame with TimedeltaIndex to .csv #10833
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 1079ec52338b9..e41fe592a57ae 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -678,3 +678,4 @@ Bug Fixes - Bug in ``iloc`` allowing memory outside bounds of a Series to be accessed with negative integers (:issue:`10779`) - Bug in ``read_msgpack`` where encoding is not respected (:issue:`10580`) - Bug preventing access to the first index when using ``iloc`` with a list containing the appropriate negative integer (:issue:`10547`, :issue:`10779`) +- Bug in ``TimedeltaIndex`` formatter causing error while trying to save ``DataFrame`` with ``TimedeltaIndex`` using ``to_csv`` (:issue:`10833`) diff --git a/pandas/core/format.py b/pandas/core/format.py index 4ec4375349764..52f3c17ebdd26 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -2174,7 +2174,7 @@ def __init__(self, values, nat_rep='NaT', box=False, **kwargs): def _format_strings(self): formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep, box=self.box) - fmt_values = [formatter(x) for x in self.values] + fmt_values = np.array([formatter(x) for x in self.values]) return fmt_values diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a3a57929ad931..68d6ecf33e619 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6308,7 +6308,6 @@ def test_to_csv_from_csv(self): header=['AA', 'X']) with ensure_clean(pname) as path: - import pandas as pd df1 = DataFrame(np.random.randn(3, 1)) df2 = DataFrame(np.random.randn(3, 1)) @@ -6320,6 +6319,22 @@ def test_to_csv_from_csv(self): xp.columns = lmap(int,xp.columns) assert_frame_equal(xp,rs) + with ensure_clean() as path: + # GH 10833 (TimedeltaIndex formatting) + dt = pd.Timedelta(seconds=1) + df = pd.DataFrame({'dt_data': [i*dt for i in range(3)]}, + index=pd.Index([i*dt for i in range(3)], + name='dt_index')) + df.to_csv(path) + + result = pd.read_csv(path, 
index_col='dt_index') + result.index = pd.to_timedelta(result.index) + # TODO: remove renaming when GH 10875 is solved + result.index = result.index.rename('dt_index') + result['dt_data'] = pd.to_timedelta(result['dt_data']) + + assert_frame_equal(df, result, check_index_type=True) + def test_to_csv_cols_reordering(self): # GH3454 import pandas as pd
Fix in accordance with https://github.com/pydata/pandas/issues/10833
https://api.github.com/repos/pandas-dev/pandas/pulls/10845
2015-08-18T19:55:42Z
2015-08-21T14:09:48Z
2015-08-21T14:09:48Z
2015-08-21T14:10:00Z
DataFrame to dict with index orientation.
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 0c3af5c4bd64f..ea116278e1eb2 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -174,6 +174,8 @@ Other enhancements - ``msgpack`` submodule has been updated to 0.4.6 with backward compatibility (:issue:`10581`) +- ``DataFrame.to_dict`` now accepts the *index* option in ``orient`` keyword argument (:issue:`10844`). + .. ipython :: python s = pd.Series(['A', 'B', 'C', 'A', 'B', 'D']) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ed46dad2846c9..57ffbc8a8ee39 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -748,7 +748,7 @@ def to_dict(self, orient='dict'): Parameters ---------- - orient : str {'dict', 'list', 'series', 'split', 'records'} + orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - dict (default) : dict like {column -> {index -> value}} @@ -758,6 +758,9 @@ def to_dict(self, orient='dict'): {index -> [index], columns -> [columns], data -> [values]} - records : list like [{column -> value}, ... , {column -> value}] + - index : dict like {index -> {column -> value}} + + .. versionadded:: 0.17.0 Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. 
@@ -782,6 +785,8 @@ def to_dict(self, orient='dict'): elif orient.lower().startswith('r'): return [dict((k, v) for k, v in zip(self.columns, row)) for row in self.values] + elif orient.lower().startswith('i'): + return dict((k, v.to_dict()) for k, v in self.iterrows()) else: raise ValueError("orient '%s' not understood" % orient) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 465f1da05ebde..1797129191162 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4474,9 +4474,16 @@ def test_to_dict(self): tm.assert_almost_equal(recons_data, expected_records) + # GH10844 + recons_data = DataFrame(test_data).to_dict("i") + + for k, v in compat.iteritems(test_data): + for k2, v2 in compat.iteritems(v): + self.assertEqual(v2, recons_data[k2][k]) + def test_to_dict_invalid_orient(self): df = DataFrame({'A':[0, 1]}) - self.assertRaises(ValueError, df.to_dict, orient='invalid') + self.assertRaises(ValueError, df.to_dict, orient='xinvalid') def test_to_records_dt64(self): df = DataFrame([["one", "two", "three"],
The method `DataFrame.to_dict` didn't have an option for index orientation unlike its cousin `to_json`, which does have.
https://api.github.com/repos/pandas-dev/pandas/pulls/10844
2015-08-18T18:38:21Z
2015-08-21T14:21:21Z
null
2015-08-21T14:21:21Z
DOC: Fix for #10823, updating min_periods docstring
diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index e1951135914e9..d1d74442d8961 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -614,7 +614,8 @@ class MovingOLS(OLS): size of window (for rolling/expanding OLS) min_periods : int Threshold of non-null data points to require. - If None, defaults to size of window. + If None, defaults to size of window for window_type='rolling' and 1 + otherwise intercept : bool True if you want an intercept. nw_lags : None or int
closes #10823
https://api.github.com/repos/pandas-dev/pandas/pulls/10841
2015-08-18T02:15:46Z
2015-08-18T10:47:43Z
2015-08-18T10:47:43Z
2015-08-18T10:47:48Z
PERF: value_counts_float64 #10821
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 7e69a8044a305..8ae2aa1659077 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -585,7 +585,7 @@ Performance Improvements - Improved performance of ``Series.isin`` for datetimelike/integer Series (:issue:`10287`) - 20x improvement in ``concat`` of Categoricals when categories are identical (:issue:`10587`) - Improved performance of ``to_datetime`` when specified format string is ISO8601 (:issue:`10178`) - +- 2x improvement of ``Series.value_counts`` for float dtype (:issue:`10821`) .. _whatsnew_0170.bug_fixes: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index b0c7ff43bc7d8..0b11a2bae3973 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -232,7 +232,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False, values = PeriodIndex(values, name=name) values = values.view(np.int64) - keys, counts = htable.value_count_int64(values) + keys, counts = htable.value_count_scalar64(values, dropna) if dropna: from pandas.tslib import iNaT @@ -244,7 +244,10 @@ def value_counts(values, sort=True, ascending=False, normalize=False, elif com.is_integer_dtype(dtype): values = com._ensure_int64(values) - keys, counts = htable.value_count_int64(values) + keys, counts = htable.value_count_scalar64(values, dropna) + elif com.is_float_dtype(dtype): + values = com._ensure_float64(values) + keys, counts = htable.value_count_scalar64(values, dropna) else: values = com._ensure_object(values) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index c9e30ea31dab8..b204cba997b98 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1030,7 +1030,7 @@ def value_counts(self, dropna=True): from pandas.core.index import CategoricalIndex cat = self.dropna() if dropna else self - keys, counts = htable.value_count_int64(com._ensure_int64(cat._codes)) + keys, counts = 
htable.value_count_scalar64(com._ensure_int64(cat._codes), dropna) result = Series(counts, index=keys) ix = np.arange(len(cat.categories), dtype='int64') diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx index 7dbd1b45c938f..dfa7930ada62f 100644 --- a/pandas/hashtable.pyx +++ b/pandas/hashtable.pyx @@ -866,51 +866,90 @@ cdef class Int64Factorizer: self.count = len(self.uniques) return labels - +ctypedef fused kh_scalar64: + kh_int64_t + kh_float64_t @cython.boundscheck(False) -cdef build_count_table_int64(int64_t[:] values, kh_int64_t *table): +cdef build_count_table_scalar64(sixty_four_bit_scalar[:] values, + kh_scalar64 *table, bint dropna): cdef: khiter_t k Py_ssize_t i, n = len(values) - int64_t val + sixty_four_bit_scalar val int ret = 0 - with nogil: - kh_resize_int64(table, n) + if sixty_four_bit_scalar is float64_t and kh_scalar64 is kh_float64_t: + with nogil: + kh_resize_float64(table, n) + + for i in range(n): + val = values[i] + if val == val or not dropna: + k = kh_get_float64(table, val) + if k != table.n_buckets: + table.vals[k] += 1 + else: + k = kh_put_float64(table, val, &ret) + table.vals[k] = 1 + elif sixty_four_bit_scalar is int64_t and kh_scalar64 is kh_int64_t: + with nogil: + kh_resize_int64(table, n) + + for i in range(n): + val = values[i] + k = kh_get_int64(table, val) + if k != table.n_buckets: + table.vals[k] += 1 + else: + k = kh_put_int64(table, val, &ret) + table.vals[k] = 1 + else: + raise ValueError("Table type must match scalar type.") - for i in range(n): - val = values[i] - k = kh_get_int64(table, val) - if k != table.n_buckets: - table.vals[k] += 1 - else: - k = kh_put_int64(table, val, &ret) - table.vals[k] = 1 @cython.boundscheck(False) -cpdef value_count_int64(int64_t[:] values): +cpdef value_count_scalar64(sixty_four_bit_scalar[:] values, bint dropna): cdef: Py_ssize_t i - kh_int64_t *table - int64_t[:] result_keys, result_counts + kh_float64_t *ftable + kh_int64_t *itable + sixty_four_bit_scalar[:] result_keys + 
int64_t[:] result_counts int k - table = kh_init_int64() - build_count_table_int64(values, table) - i = 0 - result_keys = np.empty(table.n_occupied, dtype=np.int64) - result_counts = np.zeros(table.n_occupied, dtype=np.int64) - with nogil: - for k in range(table.n_buckets): - if kh_exist_int64(table, k): - result_keys[i] = table.keys[k] - result_counts[i] = table.vals[k] - i += 1 - kh_destroy_int64(table) + if sixty_four_bit_scalar is float64_t: + ftable = kh_init_float64() + build_count_table_scalar64(values, ftable, dropna) + + result_keys = np.empty(ftable.n_occupied, dtype=np.float64) + result_counts = np.zeros(ftable.n_occupied, dtype=np.int64) + + with nogil: + for k in range(ftable.n_buckets): + if kh_exist_float64(ftable, k): + result_keys[i] = ftable.keys[k] + result_counts[i] = ftable.vals[k] + i += 1 + kh_destroy_float64(ftable) + + elif sixty_four_bit_scalar is int64_t: + itable = kh_init_int64() + build_count_table_scalar64(values, itable, dropna) + + result_keys = np.empty(itable.n_occupied, dtype=np.int64) + result_counts = np.zeros(itable.n_occupied, dtype=np.int64) + + with nogil: + for k in range(itable.n_buckets): + if kh_exist_int64(itable, k): + result_keys[i] = itable.keys[k] + result_counts[i] = itable.vals[k] + i += 1 + kh_destroy_int64(itable) return np.asarray(result_keys), np.asarray(result_counts) @@ -1002,7 +1041,7 @@ def mode_int64(int64_t[:] values): table = kh_init_int64() - build_count_table_int64(values, table) + build_count_table_scalar64(values, table, 0) modes = np.empty(table.n_buckets, dtype=np.int64) diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 73f5f19d6a626..bceb78c26e6ac 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -194,6 +194,15 @@ def f(): series_value_counts_strings = Benchmark('s.value_counts()', setup, start_date=datetime(2011, 10, 21)) +#value_counts on float dtype + +setup = common_setup + """ +s = Series(np.random.randint(0, 1000, size=100000)).astype(float) +""" + 
+series_value_counts_float64 = Benchmark('s.value_counts()', setup, + start_date=datetime(2015, 8, 17)) + #---------------------------------------------------------------------- # pivot_table
Adresses #10821 - I get a doubling of performance for `value_counts` for a `Series` with a float dtype. Couldn't think of any new tests needed not covered by existing [here](https://github.com/pydata/pandas/blob/master/pandas/tests/test_algos.py#L416) ``` In [1]: f = np.hstack([np.hstack(np.linspace(0,1,1000) for _ in range(1000)), np.array([np.nan] * 1000)]) ...: i = range(1000) * 1001 ...: df = pd.DataFrame({'f':f, 'i':i}) ``` ### master ``` In [2]: %timeit df['i'].value_counts() 10 loops, best of 3: 21.1 ms per loop In [3]: %timeit df['f'].value_counts() 10 loops, best of 3: 114 ms per loop ``` ### pr ``` In [3]: %timeit df['i'].value_counts() 10 loops, best of 3: 21 ms per loop In [2]: %timeit df['f'].value_counts() 10 loops, best of 3: 56 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/10840
2015-08-18T02:13:15Z
2015-08-18T23:37:09Z
2015-08-18T23:37:09Z
2015-08-19T00:53:03Z
BUG: Panel setitem with a multiindex #10360 (partial)
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 7e69a8044a305..3e611d00a8757 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -606,7 +606,7 @@ Bug Fixes - Bug in ``offsets.generate_range`` where ``start`` and ``end`` have finer precision than ``offset`` (:issue:`9907`) - Bug in ``pd.rolling_*`` where ``Series.name`` would be lost in the output (:issue:`10565`) - Bug in ``stack`` when index or columns are not unique. (:issue:`10417`) - +- Bug in setting a Panel when an axis has a multi-index (:issue:`10360`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 8a8ee00f234fa..b8ee831cdc12c 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -201,6 +201,7 @@ def _setitem_with_indexer(self, indexer, value): # also has the side effect of consolidating in-place from pandas import Panel, DataFrame, Series + info_axis = self.obj._info_axis_number # maybe partial set take_split_path = self.obj._is_mixed_type @@ -213,6 +214,16 @@ def _setitem_with_indexer(self, indexer, value): val = list(value.values()) if isinstance(value,dict) else value take_split_path = not blk._can_hold_element(val) + if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): + + for i, ax in zip(indexer, self.obj.axes): + + # if we have any multi-indexes that have non-trivial slices (not null slices) + # then we must take the split path, xref GH 10360 + if isinstance(ax, MultiIndex) and not (is_integer(i) or is_null_slice(i)): + take_split_path = True + break + if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): @@ -328,14 +339,8 @@ def _setitem_with_indexer(self, indexer, value): return self.obj.__setitem__(indexer, value) # set - info_axis = self.obj._info_axis_number item_labels = self.obj._get_axis(info_axis) - # if we have a complicated setup, take the split path - if (isinstance(indexer, tuple) and - any([isinstance(ax, MultiIndex) for ax in 
self.obj.axes])): - take_split_path = True - # align and set the values if take_split_path: diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 2c0bfcd9b905d..ee16b44f173ec 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -411,7 +411,7 @@ def test_iloc_exceeds_bounds(self): df.iloc[30] self.assertRaises(IndexError, lambda : df.iloc[-30]) - # GH10779 + # GH10779 # single positive/negative indexer exceeding Series bounds should raise an IndexError with tm.assertRaisesRegexp(IndexError, 'single positional indexer is out-of-bounds'): s.iloc[30] @@ -2652,6 +2652,44 @@ def test_panel_setitem(self): tm.assert_panel_equal(p, expected) + def test_panel_setitem_with_multiindex(self): + + # 10360 + # failing with a multi-index + arr = np.array([[[1,2,3],[0,0,0]],[[0,0,0],[0,0,0]]],dtype=np.float64) + + # reg index + axes = dict(items=['A', 'B'], major_axis=[0, 1], minor_axis=['X', 'Y' ,'Z']) + p1 = Panel(0., **axes) + p1.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p1, expected) + + # multi-indexes + axes['items'] = pd.MultiIndex.from_tuples([('A','a'), ('B','b')]) + p2 = Panel(0., **axes) + p2.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p2, expected) + + axes['major_axis']=pd.MultiIndex.from_tuples([('A',1),('A',2)]) + p3 = Panel(0., **axes) + p3.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p3, expected) + + axes['minor_axis']=pd.MultiIndex.from_product([['X'],range(3)]) + p4 = Panel(0., **axes) + p4.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p4, expected) + + arr = np.array([[[1,0,0],[2,0,0]],[[0,0,0],[0,0,0]]],dtype=np.float64) + p5 = Panel(0., **axes) + p5.iloc[0, :, 0] = [1, 2] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p5, expected) + def test_panel_assignment(self): # GH3777
partial on #10360
https://api.github.com/repos/pandas-dev/pandas/pulls/10838
2015-08-18T00:03:35Z
2015-08-18T10:44:03Z
2015-08-18T10:44:03Z
2019-08-10T19:45:05Z
skipped failing test on non-engish locales
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 0bb385b756eb8..e02973136863d 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -4325,6 +4325,8 @@ def test_to_datetime_format_time(self): self.assertEqual(to_datetime(s, format=format), dt) def test_to_datetime_with_non_exact(self): + # GH 10834 + _skip_if_has_locale() # 8904 # exact kw
Should fix https://github.com/pydata/pandas/issues/10834
https://api.github.com/repos/pandas-dev/pandas/pulls/10836
2015-08-17T17:36:22Z
2015-08-17T20:50:10Z
2015-08-17T20:50:10Z
2015-08-17T20:50:15Z
Minor fix to error messages ('See the the caveats in the documentatio…
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2ced2aafe2f1c..2fc288de438b3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1300,7 +1300,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): t = ("\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" - "See the the caveats in the documentation: " + "See the caveats in the documentation: " "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") else: @@ -1308,7 +1308,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value instead\n\n" - "See the the caveats in the documentation: " + "See the caveats in the documentation: " "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy") if value == 'raise':
…n...')
https://api.github.com/repos/pandas-dev/pandas/pulls/10829
2015-08-16T08:37:18Z
2015-08-16T09:54:59Z
2015-08-16T09:54:59Z
2015-08-16T10:46:38Z