title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
Add various display types for pivot table such as Excel
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index ef477582b82f2..c3924c5fd3244 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -16,7 +16,7 @@ @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') @deprecate_kwarg(old_arg_name='rows', new_arg_name='index') def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', - fill_value=None, margins=False, dropna=True): + fill_value=None, margins=False, display_value='normal', dropna=True): """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on @@ -40,6 +40,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', Value to replace missing values with margins : boolean, default False Add all row / columns (e.g. for subtotal / grand totals) + display_value : string, default 'normal' + Type of display. Among 'normal', 'col_ratio', 'row_ratio', 'total_ratio' dropna : boolean, default True Do not include columns whose entries are all NaN rows : kwarg only alias of index [deprecated] @@ -81,7 +83,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', for func in aggfunc: table = pivot_table(data, values=values, index=index, columns=columns, fill_value=fill_value, aggfunc=func, - margins=margins) + margins=margins, display_value=display_value) pieces.append(table) keys.append(func.__name__) return concat(pieces, keys=keys, axis=1) @@ -153,6 +155,13 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', if len(index) == 0 and len(columns) > 0: table = table.T + if display_value == "col_ratio": + table = table.div(table.sum(axis=0), axis=1) + elif display_value == "row_ratio": + table = table.div(table.sum(axis=1), axis=0) + elif display_value == "total_ratio": + table = table.div(table.sum().sum()) + return table
Excel offers several types of values display for a pivot table (ratio of row, ratio of column, ratio of total, difference with, cumulated sum ...) This ticket aims to implement this option in the pivot table API. I implemented the three types cited above for a start. I chose "display_value" for the argument name. One can find too generic, so i'm opened to suggestions.
https://api.github.com/repos/pandas-dev/pandas/pulls/9356
2015-01-26T06:43:31Z
2015-04-04T19:07:54Z
null
2023-05-11T01:12:49Z
Value counts base
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b0c5b11079f31..2073bff13fc46 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -22,6 +22,13 @@ New features .. _whatsnew_0160.api: +- ``value_counts`` accepts a base argument: + + ..ipython::python + + Series(list('aaabbc')).value_counts(base=list('acd')) + + Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 364a3fa13801b..ffc37269ac9b6 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -173,7 +173,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1): def value_counts(values, sort=True, ascending=False, normalize=False, - bins=None, dropna=True): + bins=None, dropna=True, base=None): """ Compute a histogram of the counts of non-null values. @@ -191,6 +191,8 @@ def value_counts(values, sort=True, ascending=False, normalize=False, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN + base : list-like, optional + Unique values to count against Returns ------- @@ -245,6 +247,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False, counts = np.insert(counts, 0, mask.sum()) result = Series(counts, index=com._values_from_object(keys)) + if bins is not None: # TODO: This next line should be more efficient result = result.reindex(np.arange(len(cat.categories)), fill_value=0) @@ -253,10 +256,11 @@ def value_counts(values, sort=True, ascending=False, normalize=False, else: result.index = cat.categories + if base is not None: + result = result.reindex(base, fill_value=0) + if sort: - result.sort() - if not ascending: - result = result[::-1] + result.sort(ascending=ascending) if normalize: result = result / float(values.size) diff --git a/pandas/core/series.py b/pandas/core/series.py index ca401518af66d..18dc9656469d3 100644 --- a/pandas/core/series.py 
+++ b/pandas/core/series.py @@ -1132,6 +1132,37 @@ def count(self, level=None): return notnull(_values_from_object(self)).sum() + def value_counts(self, normalize=False, sort=True, ascending=False, + bins=None, base=None): + """ + Returns Series containing counts of unique values. The resulting Series + will be in descending order so that the first element is the most + frequently-occurring element. Excludes NA values + + Parameters + ---------- + normalize : boolean, default False + If True then the Series returned will contain the relative + frequencies of the unique values. + sort : boolean, default True + Sort by values + ascending : boolean, default False + Sort in ascending order + bins : integer, optional + Rather than count values, group them into half-open bins, + a convenience for pd.cut, only works with numeric data + base : list-like, optional + Unique values to count against + + Returns + ------- + counts : Series + """ + from pandas.core.algorithms import value_counts + return value_counts(self.values, sort=sort, ascending=ascending, + normalize=normalize, bins=bins, base=base) + + def mode(self): """Returns the mode(s) of the dataset. 
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index b145400afe13b..a1bedd3d3d446 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -254,6 +254,21 @@ def test_value_counts_nat(self): tm.assert_series_equal(algos.value_counts(dt), exp_dt) # TODO same for (timedelta) + def test_value_counts_base(self): + x = [1, 3, 3, 3, 2, 2, 7] + y = [3, 2, 5] + res = algos.value_counts(x, base=y, sort=False) + exp = Series([3, 2, 0], y) + tm.assert_series_equal(res, exp) + + res = algos.value_counts(x, base=y, sort=True) + tm.assert_series_equal(res, exp.reindex([3, 2, 5])) + + res = algos.value_counts(x, base=y, sort=False, normalize=True) + exp = Series([0.42857142857142855, 0.2857142857142857, 0.0], [3, 2, 5]) + # Note: this *doesnt* sum to 1 (intentionally) + tm.assert_series_equal(res, exp) + def test_quantile(): s = Series(np.random.randn(100)) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index db8ff37e4e1b4..4d04bdaf0d0dd 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -4312,6 +4312,98 @@ def test_dot(self): self.assertRaises(ValueError, a.dot, b.T) def test_value_counts_nunique(self): + s = Series(['a', 'b', 'b', 'b', 'b', 'a', 'c', 'd', 'd', 'a']) + hist = s.value_counts() + expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c']) + assert_series_equal(hist, expected) + + # don't sort, have to sort after the fact as not sorting is platform-dep + hist = s.value_counts(sort=False) + hist.sort() + expected = Series([3, 1, 4, 2], index=list('acbd')) + expected.sort() + assert_series_equal(hist, expected) + + # sort ascending + hist = s.value_counts(ascending=True) + expected = Series([1, 2, 3, 4], index=list('cdab')) + assert_series_equal(hist, expected) + + # relative histogram. 
+ hist = s.value_counts(normalize=True) + expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c']) + assert_series_equal(hist, expected) + + self.assertEquals(s.nunique(), 4) + + # bins + self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1) + + s1 = Series([1, 1, 2, 3]) + res1 = s1.value_counts(bins=1) + exp1 = Series({0.998: 4}) + assert_series_equal(res1, exp1) + res1n = s1.value_counts(bins=1, normalize=True) + exp1n = Series({0.998: 1.0}) + assert_series_equal(res1n, exp1n) + + res4 = s1.value_counts(bins=4) + exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0]) + assert_series_equal(res4, exp4) + res4n = s1.value_counts(bins=4, normalize=True) + exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0]) + assert_series_equal(res4n, exp4n) + + # base + x = Series([1, 3, 3, 3, 2, 2, 7]) + y = Series([3, 2, 5]) + res = x.value_counts(base=y, sort=False) + exp = Series([3, 2, 0], y) + assert_series_equal(res, exp) + + res = x.value_counts(base=y, sort=True) + assert_series_equal(res, exp.reindex([3, 2, 5])) + + res = x.value_counts(base=y, sort=True, normalize=True) + exp = Series([0.42857142857142855, 0.2857142857142857, 0.0], [3, 2, 5]) + # Note: this *doesn't* sum to 1 (intentionally) + assert_series_equal(res, exp) + + # handle NA's properly + s[5:7] = np.nan + hist = s.value_counts() + expected = s.dropna().value_counts() + assert_series_equal(hist, expected) + + s = Series({}) + hist = s.value_counts() + expected = Series([], dtype=np.int64) + assert_series_equal(hist, expected) + + # GH 3002, datetime64[ns] + import pandas as pd + f = StringIO( + "xxyyzz20100101PIE\nxxyyzz20100101GUM\nxxyyww20090101EGG\nfoofoo20080909PIE") + df = pd.read_fwf(f, widths=[6, 8, 3], names=[ + "person_id", "dt", "food"], parse_dates=["dt"]) + s = df.dt.copy() + result = s.value_counts() + self.assertEqual(result.index.dtype, 'datetime64[ns]') + + # with NaT + s = s.append(Series({4: pd.NaT})) + 
result = s.value_counts() + self.assertEqual(result.index.dtype, 'datetime64[ns]') + + # timedelta64[ns] + from datetime import timedelta + td = df.dt - df.dt + timedelta(1) + td2 = timedelta(1) + (df.dt - df.dt) + result = td.value_counts() + result2 = td2.value_counts() + #self.assertEqual(result.index.dtype, 'timedelta64[ns]') + self.assertEqual(result.index.dtype, 'int64') + self.assertEqual(result2.index.dtype, 'int64') # basics.rst doc example series = Series(np.random.randn(500))
rebase of #6632.
https://api.github.com/repos/pandas-dev/pandas/pulls/9355
2015-01-26T01:23:36Z
2015-05-09T16:06:25Z
null
2022-10-13T00:16:23Z
ENH: StringMethods now supports ljust and rjust
diff --git a/doc/source/api.rst b/doc/source/api.rst index a8097f2648c4b..f3843f26a9505 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -535,12 +535,14 @@ strings and apply several methods to it. These can be acccessed like Series.str.get Series.str.join Series.str.len + Series.str.ljust Series.str.lower Series.str.lstrip Series.str.match Series.str.pad Series.str.repeat Series.str.replace + Series.str.rjust Series.str.rstrip Series.str.slice Series.str.slice_replace diff --git a/doc/source/text.rst b/doc/source/text.rst index eb11cfb1248a9..3ad32c3c41073 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -212,7 +212,9 @@ Method Summary :meth:`~Series.str.replace`,Replace occurrences of pattern/regex with some other string :meth:`~Series.str.repeat`,Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) :meth:`~Series.str.pad`,"Add whitespace to left, right, or both sides of strings" - :meth:`~Series.str.center`,Equivalent to ``pad(side='both')`` + :meth:`~Series.str.center`,Equivalent to ``str.center`` + :meth:`~Series.str.ljust`,Equivalent to ``str.ljust`` + :meth:`~Series.str.rjust`,Equivalent to ``str.rjust`` :meth:`~Series.str.wrap`,Split long strings into lines with length less than a given width :meth:`~Series.str.slice`,Slice each string in the Series :meth:`~Series.str.slice_replace`,Replace slice in each string with passed value diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 6082a58687c2c..25e436f8504d1 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -110,6 +110,11 @@ Enhancements - Added auto-complete for ``Series.str.<tab>``, ``Series.dt.<tab>`` and ``Series.cat.<tab>`` (:issue:`9322`) + + +- Added ``StringMethods.ljust()`` and ``rjust()`` which behave as the same as standard ``str`` (:issue:`9352`) +- ``StringMethods.pad()`` and ``center()`` now accept `fillchar` option to specify filling character (:issue:`9352`) + Performance ~~~~~~~~~~~ diff 
--git a/pandas/core/strings.py b/pandas/core/strings.py index 75d10654977cd..8845944d615e0 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -3,12 +3,16 @@ from pandas.compat import zip from pandas.core.common import isnull, _values_from_object import pandas.compat as compat +from pandas.util.decorators import Appender import re import pandas.lib as lib import warnings import textwrap +_shared_docs = dict() + + def _get_array_list(arr, others): from pandas.core.series import Series @@ -583,9 +587,9 @@ def str_findall(arr, pat, flags=0): return _na_map(regex.findall, arr) -def str_pad(arr, width, side='left'): +def str_pad(arr, width, side='left', fillchar=' '): """ - Pad strings with whitespace + Pad strings with an additional character Parameters ---------- @@ -594,40 +598,33 @@ def str_pad(arr, width, side='left'): Minimum width of resulting string; additional characters will be filled with spaces side : {'left', 'right', 'both'}, default 'left' + fillchar : str + Additional character for filling, default is whitespace Returns ------- padded : array """ + + if not isinstance(fillchar, compat.string_types): + msg = 'fillchar must be a character, not {0}' + raise TypeError(msg.format(type(fillchar).__name__)) + + if len(fillchar) != 1: + raise TypeError('fillchar must be a character, not str') + if side == 'left': - f = lambda x: x.rjust(width) + f = lambda x: x.rjust(width, fillchar) elif side == 'right': - f = lambda x: x.ljust(width) + f = lambda x: x.ljust(width, fillchar) elif side == 'both': - f = lambda x: x.center(width) + f = lambda x: x.center(width, fillchar) else: # pragma: no cover raise ValueError('Invalid side') return _na_map(f, arr) -def str_center(arr, width): - """ - "Center" strings, filling left and right side with additional whitespace - - Parameters - ---------- - width : int - Minimum width of resulting string; additional characters will be filled - with spaces - - Returns - ------- - centered : array - """ - return 
str_pad(arr, width, side='both') - - def str_split(arr, pat=None, n=None, return_type='series'): """ Split each string (a la re.split) in array by given pattern, propagating NA @@ -1016,14 +1013,37 @@ def repeat(self, repeats): return self._wrap_result(result) @copy(str_pad) - def pad(self, width, side='left'): - result = str_pad(self.series, width, side=side) + def pad(self, width, side='left', fillchar=' '): + result = str_pad(self.series, width, side=side, fillchar=fillchar) return self._wrap_result(result) - @copy(str_center) - def center(self, width): - result = str_center(self.series, width) - return self._wrap_result(result) + _shared_docs['str_pad'] = (""" + "Center" strings, filling %s side with an additional character + + Parameters + ---------- + width : int + Minimum width of resulting string; additional characters will be filled + with ``fillchar`` + fillchar : str + Additional character for filling, default is whitespace + + Returns + ------- + centered : array + """) + + @Appender(_shared_docs['str_pad'] % 'left and right') + def center(self, width, fillchar=' '): + return self.pad(width, side='both', fillchar=fillchar) + + @Appender(_shared_docs['str_pad'] % 'right') + def ljust(self, width, fillchar=' '): + return self.pad(width, side='right', fillchar=fillchar) + + @Appender(_shared_docs['str_pad'] % 'left') + def rjust(self, width, fillchar=' '): + return self.pad(width, side='left', fillchar=fillchar) @copy(str_slice) def slice(self, start=None, stop=None, step=None): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index b8f1a6ac342af..8bdc9d4f25ac9 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -770,13 +770,43 @@ def test_pad(self): u('eeeeee')]) tm.assert_almost_equal(result, exp) - def test_center(self): + def test_pad_fillchar(self): + + values = Series(['a', 'b', NA, 'c', NA, 'eeeeee']) + + result = values.str.pad(5, side='left', fillchar='X') + exp = Series(['XXXXa', 'XXXXb', NA, 
'XXXXc', NA, 'eeeeee']) + tm.assert_almost_equal(result, exp) + + result = values.str.pad(5, side='right', fillchar='X') + exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee']) + tm.assert_almost_equal(result, exp) + + result = values.str.pad(5, side='both', fillchar='X') + exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee']) + tm.assert_almost_equal(result, exp) + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"): + result = values.str.pad(5, fillchar='XY') + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"): + result = values.str.pad(5, fillchar=5) + + def test_center_ljust_rjust(self): values = Series(['a', 'b', NA, 'c', NA, 'eeeeee']) result = values.str.center(5) exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee']) tm.assert_almost_equal(result, exp) + result = values.str.ljust(5) + exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee']) + tm.assert_almost_equal(result, exp) + + result = values.str.rjust(5) + exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee']) + tm.assert_almost_equal(result, exp) + # mixed mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None, 1, 2.]) @@ -784,7 +814,18 @@ def test_center(self): rs = Series(mixed).str.center(5) xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA]) + tm.assert_isinstance(rs, Series) + tm.assert_almost_equal(rs, xp) + rs = Series(mixed).str.ljust(5) + xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, + NA]) + tm.assert_isinstance(rs, Series) + tm.assert_almost_equal(rs, xp) + + rs = Series(mixed).str.rjust(5) + xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, + NA]) tm.assert_isinstance(rs, Series) tm.assert_almost_equal(rs, xp) @@ -797,6 +838,58 @@ def test_center(self): u('eeeeee')]) tm.assert_almost_equal(result, exp) + result = values.str.ljust(5) + exp = Series([u('a '), u('b '), NA, u('c '), NA, + u('eeeeee')]) + tm.assert_almost_equal(result, exp) + + result = values.str.rjust(5) + exp = 
Series([u(' a'), u(' b'), NA, u(' c'), NA, + u('eeeeee')]) + tm.assert_almost_equal(result, exp) + + def test_center_ljust_rjust_fillchar(self): + values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee']) + + result = values.str.center(5, fillchar='X') + expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee']) + tm.assert_series_equal(result, expected) + expected = np.array([v.center(5, 'X') for v in values.values]) + tm.assert_numpy_array_equal(result.values, expected) + + result = values.str.ljust(5, fillchar='X') + expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee']) + tm.assert_series_equal(result, expected) + expected = np.array([v.ljust(5, 'X') for v in values.values]) + tm.assert_numpy_array_equal(result.values, expected) + + result = values.str.rjust(5, fillchar='X') + expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee']) + tm.assert_series_equal(result, expected) + expected = np.array([v.rjust(5, 'X') for v in values.values]) + tm.assert_numpy_array_equal(result.values, expected) + + # If fillchar is not a charatter, normal str raises TypeError + # 'aaa'.ljust(5, 'XY') + # TypeError: must be char, not str + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"): + result = values.str.center(5, fillchar='XY') + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"): + result = values.str.ljust(5, fillchar='XY') + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"): + result = values.str.rjust(5, fillchar='XY') + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"): + result = values.str.center(5, fillchar=1) + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"): + result = values.str.ljust(5, fillchar=1) + + with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"): + result = values.str.rjust(5, fillchar=1) + def test_split(self): values = Series(['a_b_c', 'c_d_e', NA, 
'f_g_h'])
Derived from #9111. - Add `StringMethods.ljust` and `StringMethods.rjust` to be consistent with standard `str`. - Add `fillchar` option to `StringMethods.pad` and `StringMethods.center` to be consistent with standard `str.center` - https://docs.python.org/2/library/string.html#string.center
https://api.github.com/repos/pandas-dev/pandas/pulls/9352
2015-01-25T13:12:41Z
2015-01-29T11:12:01Z
2015-01-29T11:12:01Z
2015-01-29T23:28:30Z
Changed uin8 to uint8 in response to issue #9266
diff --git a/.travis.yml b/.travis.yml index 6c4d6897a69de..0d143d7f7133b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,7 @@ matrix: - LOCALE_OVERRIDE="it_IT.UTF-8" - BUILD_TYPE=conda - JOB_NAME: "26_nslow_nnet" + - INSTALL_TEST=true - python: 2.7 env: - NOSE_ARGS="slow and not network and not disabled" @@ -30,6 +31,24 @@ matrix: - JOB_TAG=_LOCALE - BUILD_TYPE=conda - JOB_NAME: "27_slow_nnet_LOCALE" + - python: 2.7 + env: + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD_GUI=gtk2 + - BUILD_TYPE=conda + - JOB_NAME: "27_build_test" + - JOB_TAG=_BUILD_TEST + - BUILD_TEST=true + - python: 2.7 + env: + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD_GUI=gtk2 + - BUILD_TYPE=pydata + - JOB_NAME: "27_build_test" + - JOB_TAG=_BUILD_TEST + - BUILD_TEST=true - python: 2.7 env: - NOSE_ARGS="not slow and not disabled" @@ -115,6 +134,24 @@ matrix: - NUMPY_BUILD=master - BUILD_TYPE=pydata - PANDAS_TESTING_MODE="deprecate" + - python: 2.7 + env: + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD_GUI=gtk2 + - BUILD_TYPE=conda + - JOB_NAME: "27_build_test" + - JOB_TAG=_BUILD_TEST + - BUILD_TEST=true + - python: 2.7 + env: + - NOSE_ARGS="not slow and not disabled" + - FULL_DEPS=true + - CLIPBOARD_GUI=gtk2 + - BUILD_TYPE=pydata + - JOB_NAME: "27_build_test" + - JOB_TAG=_BUILD_TEST + - BUILD_TEST=true before_install: - echo "before_install" @@ -147,6 +184,7 @@ script: # nothing here, or failed tests won't fail travis after_script: + - ci/install_test.sh - if [ -f /tmp/doc.log ]; then cat /tmp/doc.log; fi - source activate pandas && ci/print_versions.py - ci/print_skipped.py /tmp/nosetests.xml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f7041dbabdad5..284ac2fc5b169 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,8 +12,8 @@ navigate to the [GitHub "issues" tab](https://github.com/pydata/pandas/issues) and start looking through interesting issues. 
There are a number of issues listed under [Docs](https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open) -and [Good as first -PR](https://github.com/pydata/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open) +and [Difficulty +Novice](https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22) where you could start out. Or maybe through using *pandas* you have an idea of you own or are @@ -137,6 +137,69 @@ clear what the branch brings to *pandas*. You can have many shiny-new-features and switch in between them using the git checkout command. +### Creating a Development Environment + +An easy way to create a *pandas* development environment is as follows. + +- Install either Install Anaconda \<install-anaconda\> or + Install miniconda \<install-miniconda\> +- Make sure that you have + cloned the repository \<contributing-forking\> +- `cd` to the pandas source directory + +Tell `conda` to create a new environment, named `pandas_dev`, or any +name you would like for this environment by running: + + conda create -n pandas_dev --file ci/requirements_dev.txt + +For a python 3 environment + + conda create -n pandas_dev python=3 --file ci/requirements_dev.txt + +If you are on `windows`, then you will need to install the compiler +linkages: + + conda install -n pandas_dev libpython + +This will create the new environment, and not touch any of your existing +environments, nor any existing python installation. It will install all +of the basic dependencies of *pandas*, as well as the development and +testing tools. 
If you would like to install other dependencies, you can +install them as follows: + + conda install -n pandas_dev -c pandas pytables scipy + +To install *all* pandas dependencies you can do the following: + + conda install -n pandas_dev -c pandas --file ci/requirements_all.txt + +To work in this environment, `activate` it as follows: + + activate pandas_dev + +At which point, the prompt will change to indicate you are in the new +development environment. + +> **note** +> +> The above syntax is for `windows` environments. To work on +> `macosx/linux`, use: +> +> source activate pandas_dev + +To view your environments: + + conda info -e + +To return to you home root environment: + + deactivate + +See the full `conda` docs [here](http://conda.pydata.org/docs). + +At this point you can easily do an *in-place* install, as detailed in +the next section. + ### Making changes Before making your code changes, it is often necessary to build the code @@ -231,13 +294,19 @@ docstrings that follow the Numpy Docstring Standard (see above), but you don't need to install this because a local copy of `numpydoc` is included in the *pandas* source code. +It is easiest to +create a development environment \<contributing-dev\_env\>, then +install: + + conda install -n pandas_dev sphinx ipython + Furthermore, it is recommended to have all [optional dependencies](http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies) -installed. This is not needed, but be aware that you will see some error -messages. Because all the code in the documentation is executed during -the doc build, the examples using this optional dependencies will -generate errors. Run `pd.show_versions()` to get an overview of the -installed version of all dependencies. +installed. This is not strictly necessary, but be aware that you will +see some error messages. 
Because all the code in the documentation is +executed during the doc build, the examples using this optional +dependencies will generate errors. Run `pd.show_versions()` to get an +overview of the installed version of all dependencies. > **warning** > diff --git a/README.md b/README.md index cea7e8c6bfd72..8623ee170d154 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # pandas: powerful Python data analysis toolkit -![Travis-CI Build Status](https://travis-ci.org/pydata/pandas.svg) +[![Build Status](https://travis-ci.org/pydata/pandas.svg?branch=master)](https://travis-ci.org/pydata/pandas) ## What is it @@ -123,7 +123,7 @@ conda install pandas - xlrd >= 0.9.0 - [XlsxWriter](https://pypi.python.org/pypi/XlsxWriter) - Alternative Excel writer. -- [Google bq Command Line Tool](https://developers.google.com/bigquery/bq-command-line-tool/) +- [Google bq Command Line Tool](https://cloud.google.com/bigquery/bq-command-line-tool) - Needed for `pandas.io.gbq` - [boto](https://pypi.python.org/pypi/boto): necessary for Amazon S3 access. 
- One of the following combinations of libraries is needed to use the diff --git a/ci/install_conda.sh b/ci/install_conda.sh index 4c8a62c64979d..01b89807d164c 100755 --- a/ci/install_conda.sh +++ b/ci/install_conda.sh @@ -86,6 +86,9 @@ conda remove -n pandas pandas source activate pandas +pip install -U blosc # See https://github.com/pydata/pandas/pull/9783 +python -c 'import blosc; blosc.print_versions()' + # set the compiler cache to work if [ "$IRON_TOKEN" ]; then export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH @@ -96,7 +99,13 @@ if [ "$IRON_TOKEN" ]; then export CC='ccache gcc' fi -python setup.py build_ext --inplace && python setup.py develop +if [ "$BUILD_TEST" ]; then + pip uninstall --yes cython + pip install cython==0.15.1 + ( python setup.py build_ext --inplace && python setup.py develop ) || true +else + python setup.py build_ext --inplace && python setup.py develop +fi for package in beautifulsoup4; do pip uninstall --yes $package diff --git a/ci/install_pydata.sh b/ci/install_pydata.sh index 33a6d3854da22..f2ab5af34dc64 100755 --- a/ci/install_pydata.sh +++ b/ci/install_pydata.sh @@ -137,8 +137,15 @@ if [ "$IRON_TOKEN" ]; then fi # build pandas -python setup.py build_ext --inplace -python setup.py develop +if [ "$BUILD_TEST" ]; then + pip uninstall --yes cython + pip install cython==0.15.1 + ( python setup.py build_ext --inplace ) || true + ( python setup.py develop ) || true +else + python setup.py build_ext --inplace + python setup.py develop +fi # restore cython (if not numpy building) if [ -z "$NUMPY_BUILD" ]; then diff --git a/ci/install_test.sh b/ci/install_test.sh new file mode 100755 index 0000000000000..e01ad7b94a349 --- /dev/null +++ b/ci/install_test.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +echo "inside $0" + +if [ "$INSTALL_TEST" ]; then + source activate pandas + echo "Starting installation test." 
+ conda uninstall cython || exit 1 + python "$TRAVIS_BUILD_DIR"/setup.py sdist --formats=zip,gztar || exit 1 + pip install "$TRAVIS_BUILD_DIR"/dist/*tar.gz || exit 1 + nosetests --exe -A "$NOSE_ARGS" pandas/tests/test_series.py --with-xunit --xunit-file=/tmp/nosetests_install.xml +else + echo "Skipping installation test." +fi +RET="$?" + +exit "$RET" diff --git a/ci/requirements-2.7_BUILD_TEST.txt b/ci/requirements-2.7_BUILD_TEST.txt new file mode 100644 index 0000000000000..b273ca043c4a2 --- /dev/null +++ b/ci/requirements-2.7_BUILD_TEST.txt @@ -0,0 +1,5 @@ +dateutil +pytz +numpy +cython +nose diff --git a/ci/requirements_all.txt b/ci/requirements_all.txt new file mode 100644 index 0000000000000..c70efed96a8dd --- /dev/null +++ b/ci/requirements_all.txt @@ -0,0 +1,21 @@ +nose +sphinx +ipython +dateutil +pytz +openpyxl +xlsxwriter +xlrd +html5lib +patsy +beautiful-soup +numpy +cython +scipy +numexpr +pytables +matplotlib +lxml +sqlalchemy +bottleneck +pymysql diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt new file mode 100644 index 0000000000000..b273ca043c4a2 --- /dev/null +++ b/ci/requirements_dev.txt @@ -0,0 +1,5 @@ +dateutil +pytz +numpy +cython +nose diff --git a/ci/script.sh b/ci/script.sh index b1ba7ba79c816..fe9db792df5e7 100755 --- a/ci/script.sh +++ b/ci/script.sh @@ -16,9 +16,12 @@ fi "$TRAVIS_BUILD_DIR"/ci/build_docs.sh 2>&1 > /tmp/doc.log & # doc build log will be shown after tests - -echo nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml -nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml +if [ "$BUILD_TEST" ]; then + echo "We are not running nosetests as this is simply a build test." +else + echo nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml + nosetests --exe -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml +fi RET="$?" 
diff --git a/doc/_templates/api_redirect.html b/doc/_templates/api_redirect.html new file mode 100644 index 0000000000000..24bdd8363830f --- /dev/null +++ b/doc/_templates/api_redirect.html @@ -0,0 +1,15 @@ +{% set pgn = pagename.split('.') -%} +{% if pgn[-2][0].isupper() -%} + {% set redirect = ["pandas", pgn[-2], pgn[-1], 'html']|join('.') -%} +{% else -%} + {% set redirect = ["pandas", pgn[-1], 'html']|join('.') -%} +{% endif -%} +<html> + <head> + <meta http-equiv="Refresh" content="0; url={{ redirect }}" /> + <title>This API page has moved</title> + </head> + <body> + <p>This API page has moved <a href="{{ redirect }}">here</a>.</p> + </body> +</html> \ No newline at end of file diff --git a/doc/_templates/autosummary/accessor.rst b/doc/_templates/autosummary/accessor.rst new file mode 100644 index 0000000000000..1401121fb51c6 --- /dev/null +++ b/doc/_templates/autosummary/accessor.rst @@ -0,0 +1,6 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module.split('.')[0] }} + +.. automethod:: {{ [module.split('.')[1], objname]|join('.') }} diff --git a/doc/_templates/autosummary/class_without_autosummary.rst b/doc/_templates/autosummary/class_without_autosummary.rst new file mode 100644 index 0000000000000..6676c672b206d --- /dev/null +++ b/doc/_templates/autosummary/class_without_autosummary.rst @@ -0,0 +1,6 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. 
autoclass:: {{ objname }} diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 1f59c38d75f93..1714e00030026 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -6,18 +6,16 @@ :suppress: import numpy as np - import random + import pandas as pd import os np.random.seed(123456) - from pandas import options - import pandas as pd np.set_printoptions(precision=4, suppress=True) import matplotlib try: matplotlib.style.use('ggplot') except AttributeError: - options.display.mpl_style = 'default' - options.display.max_rows=15 + pd.options.display.mpl_style = 'default' + pd.options.display.max_rows = 15 #### portions of this were borrowed from the #### Pandas cheatsheet @@ -45,21 +43,22 @@ Object Creation See the :ref:`Data Structure Intro section <dsintro>` -Creating a ``Series`` by passing a list of values, letting pandas create a default -integer index +Creating a :class:`Series` by passing a list of values, letting pandas create +a default integer index: .. ipython:: python s = pd.Series([1,3,5,np.nan,6,8]) s -Creating a ``DataFrame`` by passing a numpy array, with a datetime index and labeled columns. +Creating a :class:`DataFrame` by passing a numpy array, with a datetime index +and labeled columns: .. ipython:: python - dates = pd.date_range('20130101',periods=6) + dates = pd.date_range('20130101', periods=6) dates - df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD')) + df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD')) df Creating a ``DataFrame`` by passing a dict of objects that can be converted to series-like. @@ -128,7 +127,7 @@ See the top & bottom rows of the frame df.head() df.tail(3) -Display the index,columns, and the underlying numpy data +Display the index, columns, and the underlying numpy data .. ipython:: python @@ -297,7 +296,7 @@ Using the :func:`~Series.isin` method for filtering: .. 
ipython:: python df2 = df.copy() - df2['E']=['one', 'one','two','three','four','three'] + df2['E'] = ['one', 'one','two','three','four','three'] df2 df2[df2['E'].isin(['two','four'])] @@ -309,7 +308,7 @@ by the indexes .. ipython:: python - s1 = pd.Series([1,2,3,4,5,6],index=pd.date_range('20130102',periods=6)) + s1 = pd.Series([1,2,3,4,5,6], index=pd.date_range('20130102', periods=6)) s1 df['F'] = s1 @@ -358,7 +357,7 @@ returns a copy of the data. .. ipython:: python - df1 = df.reindex(index=dates[0:4],columns=list(df.columns) + ['E']) + df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E']) df1.loc[dates[0]:dates[1],'E'] = 1 df1 @@ -408,9 +407,9 @@ In addition, pandas automatically broadcasts along the specified dimension. .. ipython:: python - s = pd.Series([1,3,5,np.nan,6,8],index=dates).shift(2) + s = pd.Series([1,3,5,np.nan,6,8], index=dates).shift(2) s - df.sub(s,axis='index') + df.sub(s, axis='index') Apply @@ -430,7 +429,7 @@ See more at :ref:`Histogramming and Discretization <basics.discretization>` .. ipython:: python - s = pd.Series(np.random.randint(0,7,size=10)) + s = pd.Series(np.random.randint(0, 7, size=10)) s s.value_counts() @@ -462,7 +461,7 @@ operations. See the :ref:`Merging section <merging>` -Concatenating pandas objects together +Concatenating pandas objects together with :func:`concat`: .. ipython:: python @@ -515,9 +514,9 @@ See the :ref:`Grouping section <groupby>` .. ipython:: python df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], + 'foo', 'bar', 'foo', 'foo'], 'B' : ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], + 'two', 'two', 'one', 'three'], 'C' : np.random.randn(8), 'D' : np.random.randn(8)}) df @@ -555,7 +554,8 @@ Stack df2 = df[:4] df2 -The ``stack`` function "compresses" a level in the DataFrame's columns. +The :meth:`~DataFrame.stack` method "compresses" a level in the DataFrame's +columns. .. 
ipython:: python @@ -563,8 +563,8 @@ The ``stack`` function "compresses" a level in the DataFrame's columns. stacked With a "stacked" DataFrame or Series (having a ``MultiIndex`` as the -``index``), the inverse operation of ``stack`` is ``unstack``, which by default -unstacks the **last level**: +``index``), the inverse operation of :meth:`~DataFrame.stack` is +:meth:`~DataFrame.unstack`, which by default unstacks the **last level**: .. ipython:: python @@ -708,7 +708,8 @@ Plotting @savefig series_plot_basic.png ts.plot() -On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: +On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the +columns with labels: .. ipython:: python diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 1749409c863df..850f59c2713eb 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -6,15 +6,10 @@ :suppress: import numpy as np - import random - np.random.seed(123456) - from pandas import * - options.display.max_rows=15 import pandas as pd - randn = np.random.randn - randint = np.random.randint + np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) - from pandas.compat import range, zip + pd.options.display.max_rows=15 ****************************** MultiIndex / Advanced Indexing @@ -80,10 +75,10 @@ demo different ways to initialize MultiIndexes. tuples = list(zip(*arrays)) tuples - index = MultiIndex.from_tuples(tuples, names=['first', 'second']) + index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) index - s = Series(randn(8), index=index) + s = pd.Series(np.random.randn(8), index=index) s When you want every pairing of the elements in two iterables, it can be easier @@ -92,7 +87,7 @@ to use the ``MultiIndex.from_product`` function: .. 
ipython:: python iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']] - MultiIndex.from_product(iterables, names=['first', 'second']) + pd.MultiIndex.from_product(iterables, names=['first', 'second']) As a convenience, you can pass a list of arrays directly into Series or DataFrame to construct a MultiIndex automatically: @@ -101,9 +96,9 @@ DataFrame to construct a MultiIndex automatically: arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']), np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])] - s = Series(randn(8), index=arrays) + s = pd.Series(np.random.randn(8), index=arrays) s - df = DataFrame(randn(8, 4), index=arrays) + df = pd.DataFrame(np.random.randn(8, 4), index=arrays) df All of the ``MultiIndex`` constructors accept a ``names`` argument which stores @@ -119,9 +114,9 @@ of the index is up to you: .. ipython:: python - df = DataFrame(randn(3, 8), index=['A', 'B', 'C'], columns=index) + df = pd.DataFrame(np.random.randn(3, 8), index=['A', 'B', 'C'], columns=index) df - DataFrame(randn(6, 6), index=index[:6], columns=index[:6]) + pd.DataFrame(np.random.randn(6, 6), index=index[:6], columns=index[:6]) We've "sparsified" the higher levels of the indexes to make the console output a bit easier on the eyes. @@ -131,7 +126,7 @@ tuples as atomic labels on an axis: .. ipython:: python - Series(randn(8), index=tuples) + pd.Series(np.random.randn(8), index=tuples) The reason that the ``MultiIndex`` matters is that it can allow you to do grouping, selection, and reshaping operations as we will describe below and in @@ -282,16 +277,16 @@ As usual, **both sides** of the slicers are included as this is label indexing. 
def mklbl(prefix,n): return ["%s%s" % (prefix,i) for i in range(n)] - miindex = MultiIndex.from_product([mklbl('A',4), - mklbl('B',2), - mklbl('C',4), - mklbl('D',2)]) - micolumns = MultiIndex.from_tuples([('a','foo'),('a','bar'), - ('b','foo'),('b','bah')], - names=['lvl0', 'lvl1']) - dfmi = DataFrame(np.arange(len(miindex)*len(micolumns)).reshape((len(miindex),len(micolumns))), - index=miindex, - columns=micolumns).sortlevel().sortlevel(axis=1) + miindex = pd.MultiIndex.from_product([mklbl('A',4), + mklbl('B',2), + mklbl('C',4), + mklbl('D',2)]) + micolumns = pd.MultiIndex.from_tuples([('a','foo'),('a','bar'), + ('b','foo'),('b','bah')], + names=['lvl0', 'lvl1']) + dfmi = pd.DataFrame(np.arange(len(miindex)*len(micolumns)).reshape((len(miindex),len(micolumns))), + index=miindex, + columns=micolumns).sortlevel().sortlevel(axis=1) dfmi Basic multi-index slicing using slices, lists, and labels. @@ -418,9 +413,9 @@ instance: .. ipython:: python - midx = MultiIndex(levels=[['zero', 'one'], ['x','y']], - labels=[[1,1,0,0],[1,0,1,0]]) - df = DataFrame(randn(4,2), index=midx) + midx = pd.MultiIndex(levels=[['zero', 'one'], ['x','y']], + labels=[[1,1,0,0],[1,0,1,0]]) + df = pd.DataFrame(np.random.randn(4,2), index=midx) df df2 = df.mean(level=0) df2 @@ -471,7 +466,7 @@ labels will be sorted lexicographically! .. ipython:: python import random; random.shuffle(tuples) - s = Series(randn(8), index=MultiIndex.from_tuples(tuples)) + s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples)) s s.sortlevel(0) s.sortlevel(1) @@ -509,13 +504,13 @@ an exception. Here is a concrete example to illustrate this: .. 
ipython:: python tuples = [('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')] - idx = MultiIndex.from_tuples(tuples) + idx = pd.MultiIndex.from_tuples(tuples) idx.lexsort_depth reordered = idx[[1, 0, 3, 2]] reordered.lexsort_depth - s = Series(randn(4), index=reordered) + s = pd.Series(np.random.randn(4), index=reordered) s.ix['a':'a'] However: @@ -540,7 +535,7 @@ index positions. ``take`` will also accept negative integers as relative positio .. ipython:: python - index = Index(randint(0, 1000, 10)) + index = pd.Index(np.random.randint(0, 1000, 10)) index positions = [0, 9, 3] @@ -548,7 +543,7 @@ index positions. ``take`` will also accept negative integers as relative positio index[positions] index.take(positions) - ser = Series(randn(10)) + ser = pd.Series(np.random.randn(10)) ser.iloc[positions] ser.take(positions) @@ -558,7 +553,7 @@ row or column positions. .. ipython:: python - frm = DataFrame(randn(5, 3)) + frm = pd.DataFrame(np.random.randn(5, 3)) frm.take([1, 4, 3]) @@ -569,11 +564,11 @@ intended to work on boolean indices and may return unexpected results. .. ipython:: python - arr = randn(10) + arr = np.random.randn(10) arr.take([False, False, True, True]) arr[[0, 1]] - ser = Series(randn(10)) + ser = pd.Series(np.random.randn(10)) ser.take([False, False, True, True]) ser.ix[[0, 1]] @@ -583,17 +578,102 @@ faster than fancy indexing. .. ipython:: - arr = randn(10000, 5) + arr = np.random.randn(10000, 5) indexer = np.arange(10000) random.shuffle(indexer) timeit arr[indexer] timeit arr.take(indexer, axis=0) - ser = Series(arr[:, 0]) + ser = pd.Series(arr[:, 0]) timeit ser.ix[indexer] timeit ser.take(indexer) +.. _indexing.categoricalindex: + +CategoricalIndex +---------------- + +.. versionadded:: 0.16.1 + +We introduce a ``CategoricalIndex``, a new type of index object that is useful for supporting +indexing with duplicates. 
This is a container around a ``Categorical`` (introduced in v0.15.0) +and allows efficient indexing and storage of an index with a large number of duplicated elements. Prior to 0.16.1, +setting the index of a ``DataFrame/Series`` with a ``category`` dtype would convert this to regular object-based ``Index``. + +.. ipython:: python + + df = pd.DataFrame({'A': np.arange(6), + 'B': list('aabbca')}) + df['B'] = df['B'].astype('category', categories=list('cab')) + df + df.dtypes + df.B.cat.categories + +Setting the index, will create create a ``CategoricalIndex`` + +.. ipython:: python + + df2 = df.set_index('B') + df2.index + +Indexing with ``__getitem__/.iloc/.loc/.ix`` works similarly to an ``Index`` with duplicates. +The indexers MUST be in the category or the operation will raise. + +.. ipython:: python + + df2.loc['a'] + +These PRESERVE the ``CategoricalIndex`` + +.. ipython:: python + + df2.loc['a'].index + +Sorting will order by the order of the categories + +.. ipython:: python + + df2.sort_index() + +Groupby operations on the index will preserve the index nature as well + +.. ipython:: python + + df2.groupby(level=0).sum() + df2.groupby(level=0).sum().index + +Reindexing operations, will return a resulting index based on the type of the passed +indexer, meaning that passing a list will return a plain-old-``Index``; indexing with +a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories +of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with +values NOT in the categories, similarly to how you can reindex ANY pandas index. + +.. ipython :: python + + df2.reindex(['a','e']) + df2.reindex(['a','e']).index + df2.reindex(pd.Categorical(['a','e'],categories=list('abcde'))) + df2.reindex(pd.Categorical(['a','e'],categories=list('abcde'))).index + +.. warning:: + + Reshaping and Comparision operations on a ``CategoricalIndex`` must have the same categories + or a ``TypeError`` will be raised. + + .. 
code-block:: python + + In [9]: df3 = pd.DataFrame({'A' : np.arange(6), + 'B' : pd.Series(list('aabbca')).astype('category')}) + + In [11]: df3 = df3.set_index('B') + + In [11]: df3.index + Out[11]: CategoricalIndex([u'a', u'a', u'b', u'b', u'c', u'a'], categories=[u'a', u'b', u'c'], ordered=False, name=u'B', dtype='category') + + In [12]: pd.concat([df2, df3] + TypeError: categories must match existing categories when appending + .. _indexing.float64index: Float64Index @@ -616,9 +696,9 @@ same. .. ipython:: python - indexf = Index([1.5, 2, 3, 4.5, 5]) + indexf = pd.Index([1.5, 2, 3, 4.5, 5]) indexf - sf = Series(range(5),index=indexf) + sf = pd.Series(range(5), index=indexf) sf Scalar selection for ``[],.ix,.loc`` will always be label based. An integer will match an equal float index (e.g. ``3`` is equivalent to ``3.0``) @@ -660,17 +740,17 @@ In non-float indexes, slicing using floats will raise a ``TypeError`` .. code-block:: python - In [1]: Series(range(5))[3.5] + In [1]: pd.Series(range(5))[3.5] TypeError: the label [3.5] is not a proper indexer for this index type (Int64Index) - In [1]: Series(range(5))[3.5:4.5] + In [1]: pd.Series(range(5))[3.5:4.5] TypeError: the slice start [3.5] is not a proper indexer for this index type (Int64Index) Using a scalar float indexer will be deprecated in a future version, but is allowed for now. .. code-block:: python - In [3]: Series(range(5))[3.0] + In [3]: pd.Series(range(5))[3.0] Out[3]: 3 Here is a typical use-case for using this type of indexing. Imagine that you have a somewhat @@ -679,12 +759,12 @@ example be millisecond offsets. .. 
ipython:: python - dfir = concat([DataFrame(randn(5,2), - index=np.arange(5) * 250.0, - columns=list('AB')), - DataFrame(randn(6,2), - index=np.arange(4,10) * 250.1, - columns=list('AB'))]) + dfir = pd.concat([pd.DataFrame(np.random.randn(5,2), + index=np.arange(5) * 250.0, + columns=list('AB')), + pd.DataFrame(np.random.randn(6,2), + index=np.arange(4,10) * 250.1, + columns=list('AB'))]) dfir Selection operations then will always work on a value basis, for all selection operators. @@ -706,4 +786,3 @@ Of course if you need integer based selection, then use ``iloc`` .. ipython:: python dfir.iloc[0:5] - diff --git a/doc/source/api.rst b/doc/source/api.rst index af9f8c84388bd..f5ba03afc9f19 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -158,6 +158,7 @@ Top-level dealing with datetimelike bdate_range period_range timedelta_range + infer_freq Top-level evaluation ~~~~~~~~~~~~~~~~~~~~ @@ -357,6 +358,8 @@ Computations / Descriptive Stats Series.median Series.min Series.mode + Series.nlargest + Series.nsmallest Series.pct_change Series.prod Series.quantile @@ -390,6 +393,7 @@ Reindexing / Selection / Label manipulation Series.reindex_like Series.rename Series.reset_index + Series.sample Series.select Series.take Series.tail @@ -468,6 +472,7 @@ These can be accessed like ``Series.dt.<property>``. Series.dt.microsecond Series.dt.nanosecond Series.dt.second + Series.dt.week Series.dt.weekofyear Series.dt.dayofweek Series.dt.weekday @@ -479,6 +484,10 @@ These can be accessed like ``Series.dt.<property>``. Series.dt.is_quarter_end Series.dt.is_year_start Series.dt.is_year_end + Series.dt.daysinmonth + Series.dt.days_in_month + Series.dt.tz + Series.dt.freq **Datetime Methods** @@ -490,6 +499,7 @@ These can be accessed like ``Series.dt.<property>``. Series.dt.to_pydatetime Series.dt.tz_localize Series.dt.tz_convert + Series.dt.normalize **Timedelta Properties** @@ -533,17 +543,22 @@ strings and apply several methods to it. 
These can be acccessed like Series.str.find Series.str.findall Series.str.get + Series.str.index Series.str.join Series.str.len Series.str.ljust Series.str.lower Series.str.lstrip Series.str.match + Series.str.normalize Series.str.pad + Series.str.partition Series.str.repeat Series.str.replace Series.str.rfind + Series.str.rindex Series.str.rjust + Series.str.rpartition Series.str.rstrip Series.str.slice Series.str.slice_replace @@ -552,7 +567,9 @@ strings and apply several methods to it. These can be acccessed like Series.str.strip Series.str.swapcase Series.str.title + Series.str.translate Series.str.upper + Series.str.wrap Series.str.zfill Series.str.isalnum Series.str.isalpha @@ -565,6 +582,20 @@ strings and apply several methods to it. These can be acccessed like Series.str.isdecimal Series.str.get_dummies +.. + The following is needed to ensure the generated pages are created with the + correct template (otherwise they would be created in the Series class page) + +.. + .. autosummary:: + :toctree: generated/ + :template: autosummary/accessor.rst + + Series.str + Series.cat + Series.dt + + .. _api.categorical: Categorical @@ -572,22 +603,28 @@ Categorical If the Series is of dtype ``category``, ``Series.cat`` can be used to change the the categorical data. This accessor is similar to the ``Series.dt`` or ``Series.str`` and has the -following usable methods and properties (all available as ``Series.cat.<method_or_property>``). +following usable methods and properties: .. autosummary:: :toctree: generated/ + :template: autosummary/accessor_attribute.rst + + Series.cat.categories + Series.cat.ordered + Series.cat.codes + +.. 
autosummary:: + :toctree: generated/ + :template: autosummary/accessor_method.rst - Categorical.categories - Categorical.ordered - Categorical.rename_categories - Categorical.reorder_categories - Categorical.add_categories - Categorical.remove_categories - Categorical.remove_unused_categories - Categorical.set_categories - Categorical.as_ordered - Categorical.as_unordered - Categorical.codes + Series.cat.rename_categories + Series.cat.reorder_categories + Series.cat.add_categories + Series.cat.remove_categories + Series.cat.remove_unused_categories + Series.cat.set_categories + Series.cat.as_ordered + Series.cat.as_unordered To create a Series of dtype ``category``, use ``cat = s.astype("category")``. @@ -596,8 +633,13 @@ adding ordering information or special categories is need at creation time of th .. autosummary:: :toctree: generated/ + :template: autosummary/class_without_autosummary.rst Categorical + +.. autosummary:: + :toctree: generated/ + Categorical.from_codes ``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts @@ -823,6 +865,7 @@ Reindexing / Selection / Label manipulation DataFrame.reindex_like DataFrame.rename DataFrame.reset_index + DataFrame.sample DataFrame.select DataFrame.set_index DataFrame.tail @@ -1071,6 +1114,7 @@ Reindexing / Selection / Label manipulation Panel.reindex_axis Panel.reindex_like Panel.rename + Panel.sample Panel.select Panel.take Panel.truncate @@ -1220,8 +1264,6 @@ Modifying and Computations Index.argmax Index.copy Index.delete - Index.diff - Index.sym_diff Index.drop Index.drop_duplicates Index.duplicated @@ -1267,15 +1309,17 @@ Time-specific operations Index.shift -Combining / joining / merging -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Combining / joining / set operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: :toctree: generated/ Index.append - Index.intersection Index.join + Index.intersection Index.union + Index.difference + Index.sym_diff Selecting ~~~~~~~~~ @@ -1291,6 +1335,34 @@ Selecting Index.slice_indexer Index.slice_locs +.. _api.categoricalindex: + +CategoricalIndex +---------------- + +.. autosummary:: + :toctree: generated/ + + CategoricalIndex + +Categorical Components +~~~~~~~~~~~~~~~~~~~~~~ + +.. autosummary:: + :toctree: generated/ + + CategoricalIndex.codes + CategoricalIndex.categories + CategoricalIndex.ordered + CategoricalIndex.rename_categories + CategoricalIndex.reorder_categories + CategoricalIndex.add_categories + CategoricalIndex.remove_categories + CategoricalIndex.remove_unused_categories + CategoricalIndex.set_categories + CategoricalIndex.as_ordered + CategoricalIndex.as_unordered + .. _api.datetimeindex: DatetimeIndex @@ -1332,6 +1404,7 @@ Time/Date Components DatetimeIndex.is_quarter_end DatetimeIndex.is_year_start DatetimeIndex.is_year_end + DatetimeIndex.inferred_freq Selecting ~~~~~~~~~ @@ -1382,6 +1455,7 @@ Components TimedeltaIndex.microseconds TimedeltaIndex.nanoseconds TimedeltaIndex.components + TimedeltaIndex.inferred_freq Conversion ~~~~~~~~~~ @@ -1521,230 +1595,3 @@ Working with options get_option set_option option_context - - -.. - HACK - see github issue #4539. To ensure old links remain valid, include - here the autosummaries with previous currentmodules as a comment and add - them to a hidden toctree (to avoid warnings): - -.. 
toctree:: - :hidden: - - generated/pandas.core.common.isnull - generated/pandas.core.common.notnull - generated/pandas.core.reshape.get_dummies - generated/pandas.io.clipboard.read_clipboard - generated/pandas.io.excel.ExcelFile.parse - generated/pandas.io.excel.read_excel - generated/pandas.io.html.read_html - generated/pandas.io.json.read_json - generated/pandas.io.parsers.read_csv - generated/pandas.io.parsers.read_fwf - generated/pandas.io.parsers.read_table - generated/pandas.io.pickle.read_pickle - generated/pandas.io.pytables.HDFStore.append - generated/pandas.io.pytables.HDFStore.get - generated/pandas.io.pytables.HDFStore.put - generated/pandas.io.pytables.HDFStore.select - generated/pandas.io.pytables.read_hdf - generated/pandas.io.sql.read_sql - generated/pandas.io.sql.read_frame - generated/pandas.io.sql.write_frame - generated/pandas.io.stata.read_stata - generated/pandas.stats.moments.ewma - generated/pandas.stats.moments.ewmcorr - generated/pandas.stats.moments.ewmcov - generated/pandas.stats.moments.ewmstd - generated/pandas.stats.moments.ewmvar - generated/pandas.stats.moments.expanding_apply - generated/pandas.stats.moments.expanding_corr - generated/pandas.stats.moments.expanding_count - generated/pandas.stats.moments.expanding_cov - generated/pandas.stats.moments.expanding_kurt - generated/pandas.stats.moments.expanding_mean - generated/pandas.stats.moments.expanding_median - generated/pandas.stats.moments.expanding_quantile - generated/pandas.stats.moments.expanding_skew - generated/pandas.stats.moments.expanding_std - generated/pandas.stats.moments.expanding_sum - generated/pandas.stats.moments.expanding_var - generated/pandas.stats.moments.rolling_apply - generated/pandas.stats.moments.rolling_corr - generated/pandas.stats.moments.rolling_count - generated/pandas.stats.moments.rolling_cov - generated/pandas.stats.moments.rolling_kurt - generated/pandas.stats.moments.rolling_mean - generated/pandas.stats.moments.rolling_median - 
generated/pandas.stats.moments.rolling_quantile - generated/pandas.stats.moments.rolling_skew - generated/pandas.stats.moments.rolling_std - generated/pandas.stats.moments.rolling_sum - generated/pandas.stats.moments.rolling_var - generated/pandas.tools.merge.concat - generated/pandas.tools.merge.merge - generated/pandas.tools.pivot.pivot_table - generated/pandas.tseries.tools.to_datetime - -.. - .. currentmodule:: pandas.io.pickle - - .. autosummary:: - :toctree: generated/ - - read_pickle - - .. currentmodule:: pandas.io.parsers - - .. autosummary:: - :toctree: generated/ - - read_table - read_csv - read_fwf - - .. currentmodule:: pandas.io.clipboard - - .. autosummary:: - :toctree: generated/ - - read_clipboard - - .. currentmodule:: pandas.io.excel - - .. autosummary:: - :toctree: generated/ - - read_excel - ExcelFile.parse - - .. currentmodule:: pandas.io.json - - .. autosummary:: - :toctree: generated/ - - read_json - - .. currentmodule:: pandas.io.html - - .. autosummary:: - :toctree: generated/ - - read_html - - .. currentmodule:: pandas.io.pytables - - .. autosummary:: - :toctree: generated/ - - read_hdf - HDFStore.put - HDFStore.append - HDFStore.get - HDFStore.select - - .. currentmodule:: pandas.io.sql - - .. autosummary:: - :toctree: generated/ - - read_sql - read_frame - write_frame - - .. currentmodule:: pandas.io.stata - - .. autosummary:: - :toctree: generated/ - - read_stata - StataReader.data - StataReader.data_label - StataReader.value_labels - StataReader.variable_labels - StataWriter.write_file - - .. currentmodule:: pandas.tools.pivot - - .. autosummary:: - :toctree: generated/ - - pivot_table - - .. currentmodule:: pandas.tools.merge - - .. autosummary:: - :toctree: generated/ - - merge - concat - - .. currentmodule:: pandas.core.reshape - - .. autosummary:: - :toctree: generated/ - - get_dummies - - .. currentmodule:: pandas.core.common - - .. autosummary:: - :toctree: generated/ - - isnull - notnull - - .. 
currentmodule:: pandas.tseries.tools - - .. autosummary:: - :toctree: generated/ - - to_datetime - - - .. currentmodule:: pandas.stats.moments - - .. autosummary:: - :toctree: generated/ - - rolling_count - rolling_sum - rolling_mean - rolling_median - rolling_var - rolling_std - rolling_corr - rolling_cov - rolling_skew - rolling_kurt - rolling_apply - rolling_quantile - - - .. currentmodule:: pandas.stats.moments - - .. autosummary:: - :toctree: generated/ - - expanding_count - expanding_sum - expanding_mean - expanding_median - expanding_var - expanding_std - expanding_corr - expanding_cov - expanding_skew - expanding_kurt - expanding_apply - expanding_quantile - - - .. autosummary:: - :toctree: generated/ - - ewma - ewmstd - ewmvar - ewmcorr - ewmcov diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 8e78ac597479b..d16feb3a6c448 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1,16 +1,14 @@ .. currentmodule:: pandas -.. _basics: .. ipython:: python :suppress: import numpy as np - from pandas import * - randn = np.random.randn + import pandas as pd np.set_printoptions(precision=4, suppress=True) - from pandas.compat import lrange - options.display.max_rows=15 + pd.options.display.max_rows = 15 +.. _basics: ============================== Essential Basic Functionality @@ -22,26 +20,26 @@ the previous section: .. 
ipython:: python - index = date_range('1/1/2000', periods=8) - s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e']) - df = DataFrame(randn(8, 3), index=index, - columns=['A', 'B', 'C']) - wp = Panel(randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) + index = pd.date_range('1/1/2000', periods=8) + s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) + df = pd.DataFrame(np.random.randn(8, 3), index=index, + columns=['A', 'B', 'C']) + wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], + major_axis=pd.date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) .. _basics.head_tail: Head and Tail ------------- -To view a small sample of a Series or DataFrame object, use the ``head`` and -``tail`` methods. The default number of elements to display is five, but you -may pass a custom number. +To view a small sample of a Series or DataFrame object, use the +:meth:`~DataFrame.head` and :meth:`~DataFrame.tail` methods. The default number +of elements to display is five, but you may pass a custom number. .. ipython:: python - long_series = Series(randn(1000)) + long_series = pd.Series(np.random.randn(1000)) long_series.head() long_series.tail(3) @@ -134,16 +132,18 @@ be handled simultaneously. Matching / broadcasting behavior ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -DataFrame has the methods **add, sub, mul, div** and related functions **radd, -rsub, ...** for carrying out binary operations. For broadcasting behavior, +DataFrame has the methods :meth:`~DataFrame.add`, :meth:`~DataFrame.sub`, +:meth:`~DataFrame.mul`, :meth:`~DataFrame.div` and related functions +:meth:`~DataFrame.radd`, :meth:`~DataFrame.rsub`, ... +for carrying out binary operations. For broadcasting behavior, Series input is of primary interest. Using these functions, you can use to either match on the *index* or *columns* via the **axis** keyword: .. 
ipython:: python - df = DataFrame({'one' : Series(randn(3), index=['a', 'b', 'c']), - 'two' : Series(randn(4), index=['a', 'b', 'c', 'd']), - 'three' : Series(randn(3), index=['b', 'c', 'd'])}) + df = pd.DataFrame({'one' : pd.Series(np.random.randn(3), index=['a', 'b', 'c']), + 'two' : pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']), + 'three' : pd.Series(np.random.randn(3), index=['b', 'c', 'd'])}) df row = df.ix[1] column = df['two'] @@ -164,8 +164,8 @@ Furthermore you can align a level of a multi-indexed DataFrame with a Series. .. ipython:: python dfmi = df.copy() - dfmi.index = MultiIndex.from_tuples([(1,'a'),(1,'b'),(1,'c'),(2,'a')], - names=['first','second']) + dfmi.index = pd.MultiIndex.from_tuples([(1,'a'),(1,'b'),(1,'c'),(2,'a')], + names=['first','second']) dfmi.sub(column, axis=0, level='second') With Panel, describing the matching behavior is a bit more difficult, so @@ -234,7 +234,8 @@ see :ref:`here<indexing.boolean>` Boolean Reductions ~~~~~~~~~~~~~~~~~~ -You can apply the reductions: ``empty``, ``any()``, ``all()``, and ``bool()`` to provide a +You can apply the reductions: :attr:`~DataFrame.empty`, :meth:`~DataFrame.any`, +:meth:`~DataFrame.all`, and :meth:`~DataFrame.bool` to provide a way to summarize a boolean result. .. ipython:: python @@ -248,21 +249,22 @@ You can reduce to a final boolean value. (df>0).any().any() -You can test if a pandas object is empty, via the ``empty`` property. +You can test if a pandas object is empty, via the :attr:`~DataFrame.empty` property. .. ipython:: python df.empty - DataFrame(columns=list('ABC')).empty + pd.DataFrame(columns=list('ABC')).empty -To evaluate single-element pandas objects in a boolean context, use the method ``.bool()``: +To evaluate single-element pandas objects in a boolean context, use the method +:meth:`~DataFrame.bool`: .. 
ipython:: python - Series([True]).bool() - Series([False]).bool() - DataFrame([[True]]).bool() - DataFrame([[False]]).bool() + pd.Series([True]).bool() + pd.Series([False]).bool() + pd.DataFrame([[True]]).bool() + pd.DataFrame([[False]]).bool() .. warning:: @@ -311,8 +313,8 @@ That is because NaNs do not compare as equals: np.nan == np.nan So, as of v0.13.1, NDFrames (such as Series, DataFrames, and Panels) -have an ``equals`` method for testing equality, with NaNs in corresponding -locations treated as equal. +have an :meth:`~DataFrame.equals` method for testing equality, with NaNs in +corresponding locations treated as equal. .. ipython:: python @@ -323,8 +325,8 @@ equality to be True: .. ipython:: python - df1 = DataFrame({'col':['foo', 0, np.nan]}) - df2 = DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0]) + df1 = pd.DataFrame({'col':['foo', 0, np.nan]}) + df2 = pd.DataFrame({'col':[np.nan, 0, 'foo']}, index=[2,1,0]) df1.equals(df2) df1.equals(df2.sort()) @@ -339,14 +341,15 @@ be of "higher quality". However, the lower quality series might extend further back in history or have more complete data coverage. As such, we would like to combine two DataFrame objects where missing values in one DataFrame are conditionally filled with like-labeled values from the other DataFrame. The -function implementing this operation is ``combine_first``, which we illustrate: +function implementing this operation is :meth:`~DataFrame.combine_first`, +which we illustrate: .. 
ipython:: python - df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan], - 'B' : [np.nan, 2., 3., np.nan, 6.]}) - df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.], - 'B' : [np.nan, np.nan, 3., 4., 6., 8.]}) + df1 = pd.DataFrame({'A' : [1., np.nan, 3., 5., np.nan], + 'B' : [np.nan, 2., 3., np.nan, 6.]}) + df2 = pd.DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.], + 'B' : [np.nan, np.nan, 3., 4., 6., 8.]}) df1 df2 df1.combine_first(df2) @@ -354,16 +357,16 @@ function implementing this operation is ``combine_first``, which we illustrate: General DataFrame Combine ~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``combine_first`` method above calls the more general DataFrame method -``combine``. This method takes another DataFrame and a combiner function, -aligns the input DataFrame and then passes the combiner function pairs of -Series (i.e., columns whose names are the same). +The :meth:`~DataFrame.combine_first` method above calls the more general +DataFrame method :meth:`~DataFrame.combine`. This method takes another DataFrame +and a combiner function, aligns the input DataFrame and then passes the combiner +function pairs of Series (i.e., columns whose names are the same). -So, for instance, to reproduce ``combine_first`` as above: +So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above: .. ipython:: python - combiner = lambda x, y: np.where(isnull(x), y, x) + combiner = lambda x, y: np.where(pd.isnull(x), y, x) df1.combine(df2, combiner) .. _basics.stats: @@ -374,8 +377,9 @@ Descriptive statistics A large number of methods for computing descriptive statistics and other related operations on :ref:`Series <api.series.stats>`, :ref:`DataFrame <api.dataframe.stats>`, and :ref:`Panel <api.panel.stats>`. 
Most of these -are aggregations (hence producing a lower-dimensional result) like **sum**, -**mean**, and **quantile**, but some of them, like **cumsum** and **cumprod**, +are aggregations (hence producing a lower-dimensional result) like +:meth:`~DataFrame.sum`, :meth:`~DataFrame.mean`, and :meth:`~DataFrame.quantile`, +but some of them, like :meth:`~DataFrame.cumsum` and :meth:`~DataFrame.cumprod`, produce an object of the same size. Generally speaking, these methods take an **axis** argument, just like *ndarray.{sum, std, ...}*, but the axis can be specified by name or integer: @@ -412,8 +416,8 @@ standard deviation 1), very concisely: xs_stand = df.sub(df.mean(1), axis=0).div(df.std(1), axis=0) xs_stand.std(1) -Note that methods like **cumsum** and **cumprod** preserve the location of NA -values: +Note that methods like :meth:`~DataFrame.cumsum` and :meth:`~DataFrame.cumprod` +preserve the location of NA values: .. ipython:: python @@ -456,12 +460,12 @@ will exclude NAs on Series input by default: np.mean(df['one']) np.mean(df['one'].values) -``Series`` also has a method ``nunique`` which will return the number of unique -non-null values: +``Series`` also has a method :meth:`~Series.nunique` which will return the +number of unique non-null values: .. ipython:: python - series = Series(randn(500)) + series = pd.Series(np.random.randn(500)) series[20:500] = np.nan series[10:20] = 5 series.nunique() @@ -471,16 +475,16 @@ non-null values: Summarizing data: describe ~~~~~~~~~~~~~~~~~~~~~~~~~~ -There is a convenient ``describe`` function which computes a variety of summary +There is a convenient :meth:`~DataFrame.describe` function which computes a variety of summary statistics about a Series or the columns of a DataFrame (excluding NAs of course): .. 
ipython:: python - series = Series(randn(1000)) + series = pd.Series(np.random.randn(1000)) series[::2] = np.nan series.describe() - frame = DataFrame(randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) + frame = pd.DataFrame(np.random.randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) frame.ix[::2] = np.nan frame.describe() @@ -492,21 +496,21 @@ You can select specific percentiles to include in the output: By default, the median is always included. -For a non-numerical Series object, `describe` will give a simple summary of the -number of unique values and most frequently occurring values: - +For a non-numerical Series object, :meth:`~Series.describe` will give a simple +summary of the number of unique values and most frequently occurring values: .. ipython:: python - s = Series(['a', 'a', 'b', 'b', 'a', 'a', np.nan, 'c', 'd', 'a']) + s = pd.Series(['a', 'a', 'b', 'b', 'a', 'a', np.nan, 'c', 'd', 'a']) s.describe() -Note that on a mixed-type DataFrame object, `describe` will restrict the summary to -include only numerical columns or, if none are, only categorical columns: +Note that on a mixed-type DataFrame object, :meth:`~DataFrame.describe` will +restrict the summary to include only numerical columns or, if none are, only +categorical columns: .. ipython:: python - frame = DataFrame({'a': ['Yes', 'Yes', 'No', 'No'], 'b': range(4)}) + frame = pd.DataFrame({'a': ['Yes', 'Yes', 'No', 'No'], 'b': range(4)}) frame.describe() This behaviour can be controlled by providing a list of types as ``include``/``exclude`` @@ -518,33 +522,36 @@ arguments. The special value ``all`` can also be used: frame.describe(include=['number']) frame.describe(include='all') -That feature relies on :ref:`select_dtypes <basics.selectdtypes>`. Refer to there for details about accepted inputs. +That feature relies on :ref:`select_dtypes <basics.selectdtypes>`. Refer to +there for details about accepted inputs. .. 
_basics.idxmin: Index of Min/Max Values ~~~~~~~~~~~~~~~~~~~~~~~ -The ``idxmin`` and ``idxmax`` functions on Series and DataFrame compute the -index labels with the minimum and maximum corresponding values: +The :meth:`~DataFrame.idxmin` and :meth:`~DataFrame.idxmax` functions on Series +and DataFrame compute the index labels with the minimum and maximum +corresponding values: .. ipython:: python - s1 = Series(randn(5)) + s1 = pd.Series(np.random.randn(5)) s1 s1.idxmin(), s1.idxmax() - df1 = DataFrame(randn(5,3), columns=['A','B','C']) + df1 = pd.DataFrame(np.random.randn(5,3), columns=['A','B','C']) df1 df1.idxmin(axis=0) df1.idxmax(axis=1) When there are multiple rows (or columns) matching the minimum or maximum -value, ``idxmin`` and ``idxmax`` return the first matching index: +value, :meth:`~DataFrame.idxmin` and :meth:`~DataFrame.idxmax` return the first +matching index: .. ipython:: python - df3 = DataFrame([2, 1, 1, 3, np.nan], columns=['A'], index=list('edcba')) + df3 = pd.DataFrame([2, 1, 1, 3, np.nan], columns=['A'], index=list('edcba')) df3 df3['A'].idxmin() @@ -557,59 +564,59 @@ value, ``idxmin`` and ``idxmax`` return the first matching index: Value counts (histogramming) / Mode ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``value_counts`` Series method and top-level function computes a histogram +The :meth:`~Series.value_counts` Series method and top-level function computes a histogram of a 1D array of values. It can also be used as a function on regular arrays: .. ipython:: python data = np.random.randint(0, 7, size=50) data - s = Series(data) + s = pd.Series(data) s.value_counts() - value_counts(data) + pd.value_counts(data) Similarly, you can get the most frequently occurring value(s) (the mode) of the values in a Series or DataFrame: .. 
ipython:: python - s5 = Series([1, 1, 3, 3, 3, 5, 5, 7, 7, 7]) + s5 = pd.Series([1, 1, 3, 3, 3, 5, 5, 7, 7, 7]) s5.mode() - df5 = DataFrame({"A": np.random.randint(0, 7, size=50), - "B": np.random.randint(-10, 15, size=50)}) + df5 = pd.DataFrame({"A": np.random.randint(0, 7, size=50), + "B": np.random.randint(-10, 15, size=50)}) df5.mode() Discretization and quantiling ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Continuous values can be discretized using the ``cut`` (bins based on values) -and ``qcut`` (bins based on sample quantiles) functions: +Continuous values can be discretized using the :func:`cut` (bins based on values) +and :func:`qcut` (bins based on sample quantiles) functions: .. ipython:: python arr = np.random.randn(20) - factor = cut(arr, 4) + factor = pd.cut(arr, 4) factor - factor = cut(arr, [-5, -1, 0, 1, 5]) + factor = pd.cut(arr, [-5, -1, 0, 1, 5]) factor -``qcut`` computes sample quantiles. For example, we could slice up some +:func:`qcut` computes sample quantiles. For example, we could slice up some normally distributed data into equal-size quartiles like so: .. ipython:: python arr = np.random.randn(30) - factor = qcut(arr, [0, .25, .5, .75, 1]) + factor = pd.qcut(arr, [0, .25, .5, .75, 1]) factor - value_counts(factor) + pd.value_counts(factor) We can also pass infinite values to define the bins: .. ipython:: python arr = np.random.randn(20) - factor = cut(arr, [-np.inf, 0, np.inf]) + factor = pd.cut(arr, [-np.inf, 0, np.inf]) factor .. _basics.apply: @@ -618,8 +625,8 @@ Function application -------------------- Arbitrary functions can be applied along the axes of a DataFrame or Panel -using the ``apply`` method, which, like the descriptive statistics methods, -take an optional ``axis`` argument: +using the :meth:`~DataFrame.apply` method, which, like the descriptive +statistics methods, take an optional ``axis`` argument: .. 
ipython:: python @@ -629,20 +636,20 @@ take an optional ``axis`` argument: df.apply(np.cumsum) df.apply(np.exp) -Depending on the return type of the function passed to ``apply``, the result -will either be of lower dimension or the same dimension. +Depending on the return type of the function passed to :meth:`~DataFrame.apply`, +the result will either be of lower dimension or the same dimension. -``apply`` combined with some cleverness can be used to answer many questions +:meth:`~DataFrame.apply` combined with some cleverness can be used to answer many questions about a data set. For example, suppose we wanted to extract the date where the maximum value for each column occurred: .. ipython:: python - tsdf = DataFrame(randn(1000, 3), columns=['A', 'B', 'C'], - index=date_range('1/1/2000', periods=1000)) + tsdf = pd.DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=1000)) tsdf.apply(lambda x: x.idxmax()) -You may also pass additional arguments and keyword arguments to the ``apply`` +You may also pass additional arguments and keyword arguments to the :meth:`~DataFrame.apply` method. For instance, consider the following function you would like to apply: .. code-block:: python @@ -662,16 +669,16 @@ Series operation on each column or row: .. ipython:: python :suppress: - tsdf = DataFrame(randn(10, 3), columns=['A', 'B', 'C'], - index=date_range('1/1/2000', periods=10)) + tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) tsdf.values[3:7] = np.nan .. ipython:: python tsdf - tsdf.apply(Series.interpolate) + tsdf.apply(pd.Series.interpolate) -Finally, ``apply`` takes an argument ``raw`` which is False by default, which +Finally, :meth:`~DataFrame.apply` takes an argument ``raw`` which is False by default, which converts each row or column into a Series before applying the function. 
When set to True, the passed function will instead receive an ndarray object, which has positive performance implications if you do not need the indexing @@ -687,9 +694,9 @@ Applying elementwise Python functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since not all functions can be vectorized (accept NumPy arrays and return -another array or value), the methods ``applymap`` on DataFrame and analogously -``map`` on Series accept any Python function taking a single value and -returning a single value. For example: +another array or value), the methods :meth:`~DataFrame.applymap` on DataFrame +and analogously :meth:`~Series.map` on Series accept any Python function taking +a single value and returning a single value. For example: .. ipython:: python :suppress: @@ -703,16 +710,15 @@ returning a single value. For example: df4['one'].map(f) df4.applymap(f) -``Series.map`` has an additional feature which is that it can be used to easily +:meth:`Series.map` has an additional feature which is that it can be used to easily "link" or "map" values defined by a secondary series. This is closely related to :ref:`merging/joining functionality <merging>`: - .. ipython:: python - s = Series(['six', 'seven', 'six', 'seven', 'six'], - index=['a', 'b', 'c', 'd', 'e']) - t = Series({'six' : 6., 'seven' : 7.}) + s = pd.Series(['six', 'seven', 'six', 'seven', 'six'], + index=['a', 'b', 'c', 'd', 'e']) + t = pd.Series({'six' : 6., 'seven' : 7.}) s s.map(t) @@ -789,7 +795,7 @@ This is equivalent to the following .. ipython:: python - result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) + result = pd.Panel(dict([ (ax, f(panel.loc[:,:,ax])) for ax in panel.minor_axis ])) result result.loc[:,:,'ItemA'] @@ -797,12 +803,11 @@ This is equivalent to the following .. _basics.reindexing: - Reindexing and altering labels ------------------------------ -``reindex`` is the fundamental data alignment method in pandas. 
It is used to -implement nearly all other features relying on label-alignment +:meth:`~Series.reindex` is the fundamental data alignment method in pandas. +It is used to implement nearly all other features relying on label-alignment functionality. To *reindex* means to conform the data to match a given set of labels along a particular axis. This accomplishes several things: @@ -816,7 +821,7 @@ Here is a simple example: .. ipython:: python - s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e']) + s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) s s.reindex(['e', 'b', 'f', 'd']) @@ -830,8 +835,8 @@ With a DataFrame, you can simultaneously reindex the index and columns: df df.reindex(index=['c', 'f', 'b'], columns=['three', 'two', 'one']) -For convenience, you may utilize the ``reindex_axis`` method, which takes the -labels and a keyword ``axis`` parameter. +For convenience, you may utilize the :meth:`~Series.reindex_axis` method, which +takes the labels and a keyword ``axis`` parameter. Note that the ``Index`` objects containing the actual axis labels can be **shared** between objects. So if we have a Series and a DataFrame, the @@ -869,8 +874,8 @@ Reindexing to align with another object You may wish to take an object and reindex its axes to be labeled the same as another object. While the syntax for this is straightforward albeit verbose, it -is a common enough operation that the ``reindex_like`` method is available to -make this simpler: +is a common enough operation that the :meth:`~DataFrame.reindex_like` method is +available to make this simpler: .. ipython:: python :suppress: @@ -885,15 +890,12 @@ make this simpler: df3 df.reindex_like(df2) -Reindexing with ``reindex_axis`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - .. _basics.align: Aligning objects with each other with ``align`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``align`` method is the fastest way to simultaneously align two objects. 
It +The :meth:`~Series.align` method is the fastest way to simultaneously align two objects. It supports a ``join`` argument (related to :ref:`joining and merging <merging>`): - ``join='outer'``: take the union of the indexes (default) @@ -905,7 +907,7 @@ It returns a tuple with both of the reindexed Series: .. ipython:: python - s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e']) + s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) s1 = s[:4] s2 = s[1:] s1.align(s2) @@ -929,7 +931,7 @@ You can also pass an ``axis`` option to only align on the specified axis: .. _basics.align.frame.series: -If you pass a Series to ``DataFrame.align``, you can choose to align both +If you pass a Series to :meth:`DataFrame.align`, you can choose to align both objects either on the DataFrame's index or columns using the ``axis`` argument: .. ipython:: python @@ -941,8 +943,8 @@ objects either on the DataFrame's index or columns using the ``axis`` argument: Filling while reindexing ~~~~~~~~~~~~~~~~~~~~~~~~ -``reindex`` takes an optional parameter ``method`` which is a filling method -chosen from the following table: +:meth:`~Series.reindex` takes an optional parameter ``method`` which is a +filling method chosen from the following table: .. csv-table:: :header: "Method", "Action" @@ -956,8 +958,8 @@ We illustrate these fill methods on a simple Series: .. ipython:: python - rng = date_range('1/3/2000', periods=8) - ts = Series(randn(8), index=rng) + rng = pd.date_range('1/3/2000', periods=8) + ts = pd.Series(np.random.randn(8), index=rng) ts2 = ts[[0, 3, 6]] ts ts2 @@ -978,17 +980,17 @@ Note that the same result could have been achieved using ts2.reindex(ts.index).fillna(method='ffill') -``reindex`` will raise a ValueError if the index is not monotonic increasing or -descreasing. ``fillna`` and ``interpolate`` will not make any checks on the -order of the index. +:meth:`~Series.reindex` will raise a ValueError if the index is not monotonic +increasing or decreasing. 
:meth:`~Series.fillna` and :meth:`~Series.interpolate` +will not make any checks on the order of the index. .. _basics.drop: Dropping labels from an axis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A method closely related to ``reindex`` is the ``drop`` function. It removes a -set of labels from an axis: +A method closely related to ``reindex`` is the :meth:`~DataFrame.drop` function. +It removes a set of labels from an axis: .. ipython:: python @@ -1000,15 +1002,15 @@ Note that the following also works, but is a bit less obvious / clean: .. ipython:: python - df.reindex(df.index - ['a', 'd']) + df.reindex(df.index.difference(['a', 'd'])) .. _basics.rename: Renaming / mapping labels ~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``rename`` method allows you to relabel an axis based on some mapping (a -dict or Series) or an arbitrary function. +The :meth:`~DataFrame.rename` method allows you to relabel an axis based on some +mapping (a dict or Series) or an arbitrary function. .. ipython:: python @@ -1024,14 +1026,14 @@ Series, it need only contain a subset of the labels as keys: df.rename(columns={'one' : 'foo', 'two' : 'bar'}, index={'a' : 'apple', 'b' : 'banana', 'd' : 'durian'}) -The ``rename`` method also provides an ``inplace`` named parameter that is by -default ``False`` and copies the underlying data. Pass ``inplace=True`` to -rename the data in place. +The :meth:`~DataFrame.rename` method also provides an ``inplace`` named +parameter that is by default ``False`` and copies the underlying data. Pass +``inplace=True`` to rename the data in place. .. _basics.rename_axis: -The Panel class has a related ``rename_axis`` class which can rename any of -its three axes. +The Panel class has a related :meth:`~Panel.rename_axis` class which can rename +any of its three axes. 
Iteration --------- @@ -1055,8 +1057,8 @@ Thus, for example: iteritems ~~~~~~~~~ -Consistent with the dict-like interface, **iteritems** iterates through -key-value pairs: +Consistent with the dict-like interface, :meth:`~DataFrame.iteritems` iterates +through key-value pairs: * **Series**: (index, scalar value) pairs * **DataFrame**: (column, Series) pairs @@ -1078,8 +1080,8 @@ iterrows ~~~~~~~~ New in v0.7 is the ability to iterate efficiently through rows of a -DataFrame. It returns an iterator yielding each index value along with a Series -containing the data in each row: +DataFrame with :meth:`~DataFrame.iterrows`. It returns an iterator yielding each +index value along with a Series containing the data in each row: .. ipython:: @@ -1091,11 +1093,11 @@ For instance, a contrived way to transpose the DataFrame would be: .. ipython:: python - df2 = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}) + df2 = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}) print(df2) print(df2.T) - df2_t = DataFrame(dict((idx,values) for idx, values in df2.iterrows())) + df2_t = pd.DataFrame(dict((idx,values) for idx, values in df2.iterrows())) print(df2_t) .. note:: @@ -1105,7 +1107,7 @@ For instance, a contrived way to transpose the DataFrame would be: .. ipython:: python - df_iter = DataFrame([[1, 1.0]], columns=['x', 'y']) + df_iter = pd.DataFrame([[1, 1.0]], columns=['x', 'y']) row = next(df_iter.iterrows())[1] print(row['x'].dtype) print(df_iter['x'].dtype) @@ -1113,7 +1115,7 @@ For instance, a contrived way to transpose the DataFrame would be: itertuples ~~~~~~~~~~ -This method will return an iterator yielding a tuple for each row in the +The :meth:`~DataFrame.itertuples` method will return an iterator yielding a tuple for each row in the DataFrame. The first element of the tuple will be the row's corresponding index value, while the remaining values are the row values proper. 
@@ -1129,13 +1131,14 @@ For instance, .dt accessor ~~~~~~~~~~~~ -``Series`` has an accessor to succinctly return datetime like properties for the *values* of the Series, if its a datetime/period like Series. +``Series`` has an accessor to succinctly return datetime like properties for the +*values* of the Series, if its a datetime/period like Series. This will return a Series, indexed like the existing Series. .. ipython:: python # datetime - s = Series(date_range('20130101 09:10:12',periods=4)) + s = pd.Series(pd.date_range('20130101 09:10:12',periods=4)) s s.dt.hour s.dt.second @@ -1166,7 +1169,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. .. ipython:: python # period - s = Series(period_range('20130101',periods=4,freq='D')) + s = pd.Series(pd.period_range('20130101', periods=4,freq='D')) s s.dt.year s.dt.day @@ -1174,7 +1177,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. .. ipython:: python # timedelta - s = Series(timedelta_range('1 day 00:00:05',periods=4,freq='s')) + s = pd.Series(pd.timedelta_range('1 day 00:00:05',periods=4,freq='s')) s s.dt.days s.dt.seconds @@ -1195,7 +1198,7 @@ built-in string methods. For example: .. ipython:: python - s = Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) s.str.lower() Powerful pattern-matching methods are provided as well, but note that @@ -1213,7 +1216,7 @@ Sorting by index and value There are two obvious kinds of sorting that you may be interested in: sorting by label and sorting by actual values. The primary method for sorting axis -labels (indexes) across data structures is the ``sort_index`` method. +labels (indexes) across data structures is the :meth:`~DataFrame.sort_index` method. .. ipython:: python @@ -1223,13 +1226,13 @@ labels (indexes) across data structures is the ``sort_index`` method. 
unsorted_df.sort_index(ascending=False) unsorted_df.sort_index(axis=1) -``DataFrame.sort_index`` can accept an optional ``by`` argument for ``axis=0`` +:meth:`DataFrame.sort_index` can accept an optional ``by`` argument for ``axis=0`` which will use an arbitrary vector or a column name of the DataFrame to determine the sort order: .. ipython:: python - df1 = DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]}) + df1 = pd.DataFrame({'one':[2,1,1,1],'two':[1,3,2,4],'three':[5,4,3,2]}) df1.sort_index(by='two') The ``by`` argument can take a list of column names, e.g.: @@ -1238,7 +1241,7 @@ The ``by`` argument can take a list of column names, e.g.: df1[['one', 'two', 'three']].sort_index(by=['one','two']) -Series has the method ``order`` (analogous to `R's order function +Series has the method :meth:`~Series.order` (analogous to `R's order function <http://stat.ethz.ch/R-manual/R-patched/library/base/html/order.html>`__) which sorts by value, with special treatment of NA values via the ``na_position`` argument: @@ -1251,21 +1254,21 @@ argument: .. note:: - ``Series.sort`` sorts a Series by value in-place. This is to provide + :meth:`Series.sort` sorts a Series by value in-place. This is to provide compatibility with NumPy methods which expect the ``ndarray.sort`` - behavior. ``Series.order`` returns a copy of the sorted data. + behavior. :meth:`Series.order` returns a copy of the sorted data. -Series has the ``searchsorted`` method, which works similar to -``np.ndarray.searchsorted``. +Series has the :meth:`~Series.searchsorted` method, which works similar to +:meth:`numpy.ndarray.searchsorted`. .. ipython:: python - ser = Series([1, 2, 3]) + ser = pd.Series([1, 2, 3]) ser.searchsorted([0, 3]) ser.searchsorted([0, 4]) ser.searchsorted([1, 3], side='right') ser.searchsorted([1, 3], side='left') - ser = Series([3, 1, 2]) + ser = pd.Series([3, 1, 2]) ser.searchsorted([0, 3], sorter=np.argsort(ser)) .. 
_basics.nsorted: smallest / largest values .. versionadded:: 0.14.0 -``Series`` has the ``nsmallest`` and ``nlargest`` methods which return the +``Series`` has the :meth:`~Series.nsmallest` and :meth:`~Series.nlargest` methods which return the smallest or largest :math:`n` values. For a large ``Series`` this can be much faster than sorting the entire Series and calling ``head(n)`` on the result. .. ipython:: python - s = Series(np.random.permutation(10)) + s = pd.Series(np.random.permutation(10)) s s.order() s.nsmallest(3) @@ -1298,14 +1301,14 @@ all levels to ``by``. .. ipython:: python - df1.columns = MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')]) + df1.columns = pd.MultiIndex.from_tuples([('a','one'),('a','two'),('b','three')]) df1.sort_index(by=('a','two')) Copying ------- -The ``copy`` method on pandas objects copies the underlying data (though not +The :meth:`~DataFrame.copy` method on pandas objects copies the underlying data (though not the axis indexes, since they are immutable) and returns a new object. Note that **it is seldom necessary to copy objects**. For example, there are only a handful of ways to alter a DataFrame *in-place*: @@ -1324,23 +1327,24 @@ untouched. If data is modified, it is because you did so explicitly. dtypes ------ -The main types stored in pandas objects are ``float``, ``int``, ``bool``, ``datetime64[ns]``, ``timedelta[ns]``, -and ``object``. In addition these dtypes have item sizes, e.g. ``int64`` and ``int32``. A convenient ``dtypes`` +The main types stored in pandas objects are ``float``, ``int``, ``bool``, +``datetime64[ns]``, ``timedelta[ns]`` and ``object``. In addition these dtypes +have item sizes, e.g. ``int64`` and ``int32``. A convenient :attr:`~DataFrame.dtypes` attribute for DataFrames returns a Series with the data type of each column. .. 
ipython:: python - dft = DataFrame(dict( A = np.random.rand(3), - B = 1, - C = 'foo', - D = Timestamp('20010102'), - E = Series([1.0]*3).astype('float32'), - F = False, - G = Series([1]*3,dtype='int8'))) + dft = pd.DataFrame(dict(A = np.random.rand(3), + B = 1, + C = 'foo', + D = pd.Timestamp('20010102'), + E = pd.Series([1.0]*3).astype('float32'), + F = False, + G = pd.Series([1]*3,dtype='int8'))) dft dft.dtypes -On a ``Series`` use the ``dtype`` method. +On a ``Series`` use the :attr:`~Series.dtype` attribute. .. ipython:: python @@ -1353,12 +1357,12 @@ general). .. ipython:: python # these ints are coerced to floats - Series([1, 2, 3, 4, 5, 6.]) + pd.Series([1, 2, 3, 4, 5, 6.]) # string data forces an ``object`` dtype - Series([1, 2, 3, 6., 'foo']) + pd.Series([1, 2, 3, 6., 'foo']) -The method ``get_dtype_counts`` will return the number of columns of +The method :meth:`~DataFrame.get_dtype_counts` will return the number of columns of each type in a ``DataFrame``: .. ipython:: python @@ -1372,12 +1376,12 @@ different numeric dtypes will **NOT** be combined. The following example will gi .. ipython:: python - df1 = DataFrame(randn(8, 1), columns = ['A'], dtype = 'float32') + df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32') df1 df1.dtypes - df2 = DataFrame(dict( A = Series(randn(8),dtype='float16'), - B = Series(randn(8)), - C = Series(np.array(randn(8),dtype='uint8')) )) + df2 = pd.DataFrame(dict( A = pd.Series(np.random.randn(8), dtype='float16'), + B = pd.Series(np.random.randn(8)), + C = pd.Series(np.array(np.random.randn(8), dtype='uint8')) )) df2 df2.dtypes @@ -1389,16 +1393,16 @@ By default integer types are ``int64`` and float types are ``float64``, .. 
ipython:: python - DataFrame([1, 2], columns=['a']).dtypes - DataFrame({'a': [1, 2]}).dtypes - DataFrame({'a': 1 }, index=list(range(2))).dtypes + pd.DataFrame([1, 2], columns=['a']).dtypes + pd.DataFrame({'a': [1, 2]}).dtypes + pd.DataFrame({'a': 1 }, index=list(range(2))).dtypes Numpy, however will choose *platform-dependent* types when creating arrays. The following **WILL** result in ``int32`` on 32-bit platform. .. ipython:: python - frame = DataFrame(np.array([1, 2])) + frame = pd.DataFrame(np.array([1, 2])) upcasting @@ -1426,7 +1430,7 @@ astype .. _basics.cast: -You can use the ``astype`` method to explicitly convert dtypes from one to another. These will by default return a copy, +You can use the :meth:`~DataFrame.astype` method to explicitly convert dtypes from one to another. These will by default return a copy, even if the dtype was unchanged (pass ``copy=False`` to change this behavior). In addition, they will raise an exception if the astype operation is invalid. @@ -1444,7 +1448,7 @@ then the more *general* one will be used as the result of the operation. object conversion ~~~~~~~~~~~~~~~~~ -``convert_objects`` is a method to try to force conversion of types from the ``object`` dtype to other types. +:meth:`~DataFrame.convert_objects` is a method to try to force conversion of types from the ``object`` dtype to other types. To force conversion of specific types that are *number like*, e.g. could be a string that represents a number, pass ``convert_numeric=True``. This will force strings and numbers alike to be numbers if possible, otherwise they will be set to ``np.nan``. @@ -1467,13 +1471,14 @@ but occasionally has non-dates intermixed and you want to represent as missing. .. 
ipython:: python - s = Series([datetime(2001,1,1,0,0), - 'foo', 1.0, 1, Timestamp('20010104'), - '20010105'],dtype='O') + import datetime + s = pd.Series([datetime.datetime(2001,1,1,0,0), + 'foo', 1.0, 1, pd.Timestamp('20010104'), + '20010105'], dtype='O') s s.convert_objects(convert_dates='coerce') -In addition, ``convert_objects`` will attempt the *soft* conversion of any *object* dtypes, meaning that if all +In addition, :meth:`~DataFrame.convert_objects` will attempt the *soft* conversion of any *object* dtypes, meaning that if all the objects in a Series are of the same type, the Series will have that dtype. gotchas @@ -1513,29 +1518,29 @@ Selecting columns based on ``dtype`` .. versionadded:: 0.14.1 -The :meth:`~pandas.DataFrame.select_dtypes` method implements subsetting of columns +The :meth:`~DataFrame.select_dtypes` method implements subsetting of columns based on their ``dtype``. -First, let's create a :class:`~pandas.DataFrame` with a slew of different +First, let's create a :class:`DataFrame` with a slew of different dtypes: .. 
ipython:: python - df = DataFrame({'string': list('abc'), - 'int64': list(range(1, 4)), - 'uint8': np.arange(3, 6).astype('u1'), - 'float64': np.arange(4.0, 7.0), - 'bool1': [True, False, True], - 'bool2': [False, True, False], - 'dates': pd.date_range('now', periods=3).values, - 'category': pd.Categorical(list("ABC"))}) + df = pd.DataFrame({'string': list('abc'), + 'int64': list(range(1, 4)), + 'uint8': np.arange(3, 6).astype('u1'), + 'float64': np.arange(4.0, 7.0), + 'bool1': [True, False, True], + 'bool2': [False, True, False], + 'dates': pd.date_range('now', periods=3).values, + 'category': pd.Series(list("ABC")).astype('category')}) df['tdeltas'] = df.dates.diff() df['uint64'] = np.arange(3, 6).astype('u8') df['other_dates'] = pd.date_range('20130101', periods=3).values df -``select_dtypes`` has two parameters ``include`` and ``exclude`` that allow you to +:meth:`~DataFrame.select_dtypes` has two parameters ``include`` and ``exclude`` that allow you to say "give me the columns WITH these dtypes" (``include``) and/or "give the columns WITHOUT these dtypes" (``exclude``). diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index d03e0fb117c5c..0c63759201517 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -6,14 +6,10 @@ :suppress: import numpy as np - import random - import os - np.random.seed(123456) - from pandas import options - from pandas import * import pandas as pd + np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) - options.display.max_rows=15 + pd.options.display.max_rows = 15 **************** @@ -23,11 +19,11 @@ Categorical Data .. versionadded:: 0.15 .. note:: - While there was in `pandas.Categorical` in earlier versions, the ability to use + While there was `pandas.Categorical` in earlier versions, the ability to use categorical data in `Series` and `DataFrame` is new. 
-This is a introduction to pandas categorical data type, including a short comparison +This is an introduction to pandas categorical data type, including a short comparison with R's ``factor``. `Categoricals` are a pandas data type, which correspond to categorical variables in @@ -65,14 +61,14 @@ By specifying ``dtype="category"`` when constructing a `Series`: .. ipython:: python - s = Series(["a","b","c","a"], dtype="category") + s = pd.Series(["a","b","c","a"], dtype="category") s By converting an existing `Series` or column to a ``category`` dtype: .. ipython:: python - df = DataFrame({"A":["a","b","c","a"]}) + df = pd.DataFrame({"A":["a","b","c","a"]}) df["B"] = df["A"].astype('category') df @@ -80,7 +76,7 @@ By using some special functions: .. ipython:: python - df = DataFrame({'value': np.random.randint(0, 100, 20)}) + df = pd.DataFrame({'value': np.random.randint(0, 100, 20)}) labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10) ] df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels) @@ -92,11 +88,11 @@ By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to .. ipython:: python - raw_cat = Categorical(["a","b","c","a"], categories=["b","c","d"], + raw_cat = pd.Categorical(["a","b","c","a"], categories=["b","c","d"], ordered=False) - s = Series(raw_cat) + s = pd.Series(raw_cat) s - df = DataFrame({"A":["a","b","c","a"]}) + df = pd.DataFrame({"A":["a","b","c","a"]}) df["B"] = raw_cat df @@ -104,7 +100,7 @@ You can also specify differently ordered categories or make the resulting data o .. ipython:: python - s = Series(["a","b","c","a"]) + s = pd.Series(["a","b","c","a"]) s_cat = s.astype("category", categories=["b","c","d"], ordered=False) s_cat @@ -129,7 +125,7 @@ To get back to the original Series or `numpy` array, use ``Series.astype(origina .. 
ipython:: python - s = Series(["a","b","c","a"]) + s = pd.Series(["a","b","c","a"]) s s2 = s.astype('category') s2 @@ -143,7 +139,7 @@ constructor to save the factorize step during normal constructor mode: .. ipython:: python splitter = np.random.choice([0,1], 5, p=[0.5,0.5]) - s = Series(Categorical.from_codes(splitter, categories=["train", "test"])) + s = pd.Series(pd.Categorical.from_codes(splitter, categories=["train", "test"])) Description ----------- @@ -153,8 +149,8 @@ Using ``.describe()`` on categorical data will produce similar output to a `Seri .. ipython:: python - cat = Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan] ) - df = DataFrame({"cat":cat, "s":["a","c","c",np.nan]}) + cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan] ) + df = pd.DataFrame({"cat":cat, "s":["a","c","c",np.nan]}) df.describe() df["cat"].describe() @@ -168,7 +164,7 @@ passed in values. .. ipython:: python - s = Series(["a","b","c","a"], dtype="category") + s = pd.Series(["a","b","c","a"], dtype="category") s.cat.categories s.cat.ordered @@ -176,7 +172,7 @@ It's also possible to pass in the categories in a specific order: .. ipython:: python - s = Series(Categorical(["a","b","c","a"], categories=["c","b","a"])) + s = pd.Series(pd.Categorical(["a","b","c","a"], categories=["c","b","a"])) s.cat.categories s.cat.ordered @@ -194,7 +190,7 @@ by using the :func:`Categorical.rename_categories` method: .. ipython:: python - s = Series(["a","b","c","a"], dtype="category") + s = pd.Series(["a","b","c","a"], dtype="category") s s.cat.categories = ["Group %s" % g for g in s.cat.categories] s @@ -247,7 +243,7 @@ Removing unused categories can also be done: .. 
ipython:: python - s = Series(Categorical(["a","b","a"], categories=["a","b","c","d"])) + s = pd.Series(pd.Categorical(["a","b","a"], categories=["a","b","c","d"])) s s.cat.remove_unused_categories() @@ -259,7 +255,7 @@ or simply set the categories to a predefined scale, use :func:`Categorical.set_c .. ipython:: python - s = Series(["one","two","four", "-"], dtype="category") + s = pd.Series(["one","two","four", "-"], dtype="category") s s = s.cat.set_categories(["one","two","three","four"]) s @@ -276,16 +272,16 @@ Sorting and Order .. warning:: - The default for construction has change in v0.16.0 to ``ordered=False``, from the prior implicit ``ordered=True`` + The default for construction has changed in v0.16.0 to ``ordered=False``, from the prior implicit ``ordered=True`` If categorical data is ordered (``s.cat.ordered == True``), then the order of the categories has a meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a `TypeError`. .. ipython:: python - s = Series(Categorical(["a","b","c","a"], ordered=False)) + s = pd.Series(pd.Categorical(["a","b","c","a"], ordered=False)) s.sort() - s = Series(["a","b","c","a"]).astype('category', ordered=True) + s = pd.Series(["a","b","c","a"]).astype('category', ordered=True) s.sort() s s.min(), s.max() @@ -303,7 +299,7 @@ This is even true for strings and numeric data: .. ipython:: python - s = Series([1,2,3,1], dtype="category") + s = pd.Series([1,2,3,1], dtype="category") s = s.cat.set_categories([2,3,1], ordered=True) s s.sort() @@ -321,7 +317,7 @@ necessarily make the sort order the same as the categories order. .. ipython:: python - s = Series([1,2,3,1], dtype="category") + s = pd.Series([1,2,3,1], dtype="category") s = s.cat.reorder_categories([2,3,1], ordered=True) s s.sort() @@ -347,15 +343,15 @@ Multi Column Sorting ~~~~~~~~~~~~~~~~~~~~ A categorical dtyped column will partcipate in a multi-column sort in a similar manner to other columns. 
-The ordering of the categorical is determined by the ``categories`` of that columns. +The ordering of the categorical is determined by the ``categories`` of that column. .. ipython:: python - dfs = DataFrame({'A' : Categorical(list('bbeebbaa'),categories=['e','a','b'],ordered=True), - 'B' : [1,2,1,2,2,1,2,1] }) - dfs.sort(['A','B']) + dfs = pd.DataFrame({'A' : pd.Categorical(list('bbeebbaa'), categories=['e','a','b'], ordered=True), + 'B' : [1,2,1,2,2,1,2,1] }) + dfs.sort(['A', 'B']) -Reordering the ``categories``, changes a future sort. +Reordering the ``categories`` changes a future sort. .. ipython:: python @@ -380,14 +376,14 @@ categories or a categorical with any list-like object, will raise a TypeError. Any "non-equality" comparisons of categorical data with a `Series`, `np.array`, `list` or categorical data with different categories or ordering will raise an `TypeError` because custom - categories ordering could be interpreted in two ways: one with taking in account the + categories ordering could be interpreted in two ways: one with taking into account the ordering and one without. .. ipython:: python - cat = Series([1,2,3]).astype("category", categories=[3,2,1], ordered=True) - cat_base = Series([2,2,2]).astype("category", categories=[3,2,1], ordered=True) - cat_base2 = Series([2,2,2]).astype("category", ordered=True) + cat = pd.Series([1,2,3]).astype("category", categories=[3,2,1], ordered=True) + cat_base = pd.Series([2,2,2]).astype("category", categories=[3,2,1], ordered=True) + cat_base2 = pd.Series([2,2,2]).astype("category", ordered=True) cat cat_base @@ -443,19 +439,19 @@ present in the data: .. ipython:: python - s = Series(Categorical(["a","b","c","c"], categories=["c","a","b","d"])) + s = pd.Series(pd.Categorical(["a","b","c","c"], categories=["c","a","b","d"])) s.value_counts() Groupby will also show "unused" categories: .. 
ipython:: python - cats = Categorical(["a","b","b","b","c","c","c"], categories=["a","b","c","d"]) - df = DataFrame({"cats":cats,"values":[1,2,2,2,3,4,5]}) + cats = pd.Categorical(["a","b","b","b","c","c","c"], categories=["a","b","c","d"]) + df = pd.DataFrame({"cats":cats,"values":[1,2,2,2,3,4,5]}) df.groupby("cats").mean() - cats2 = Categorical(["a","a","b","b"], categories=["a","b","c"]) - df2 = DataFrame({"cats":cats2,"B":["c","d","c","d"], "values":[1,2,3,4]}) + cats2 = pd.Categorical(["a","a","b","b"], categories=["a","b","c"]) + df2 = pd.DataFrame({"cats":cats2,"B":["c","d","c","d"], "values":[1,2,3,4]}) df2.groupby(["cats","B"]).mean() @@ -463,15 +459,15 @@ Pivot tables: .. ipython:: python - raw_cat = Categorical(["a","a","b","b"], categories=["a","b","c"]) - df = DataFrame({"A":raw_cat,"B":["c","d","c","d"], "values":[1,2,3,4]}) + raw_cat = pd.Categorical(["a","a","b","b"], categories=["a","b","c"]) + df = pd.DataFrame({"A":raw_cat,"B":["c","d","c","d"], "values":[1,2,3,4]}) pd.pivot_table(df, values='values', index=['A', 'B']) Data munging ------------ The optimized pandas data access methods ``.loc``, ``.iloc``, ``.ix`` ``.at``, and ``.iat``, -work as normal, the only difference is the return type (for getting) and +work as normal. The only difference is the return type (for getting) and that only values already in `categories` can be assigned. Getting @@ -482,10 +478,10 @@ the ``category`` dtype is preserved. .. 
ipython:: python - idx = Index(["h","i","j","k","l","m","n",]) - cats = Series(["a","b","b","b","c","c","c"], dtype="category", index=idx) + idx = pd.Index(["h","i","j","k","l","m","n",]) + cats = pd.Series(["a","b","b","b","c","c","c"], dtype="category", index=idx) values= [1,2,2,2,3,4,5] - df = DataFrame({"cats":cats,"values":values}, index=idx) + df = pd.DataFrame({"cats":cats,"values":values}, index=idx) df.iloc[2:4,:] df.iloc[2:4,:].dtypes df.loc["h":"j","cats"] @@ -527,10 +523,10 @@ Setting values in a categorical column (or `Series`) works as long as the value .. ipython:: python - idx = Index(["h","i","j","k","l","m","n"]) - cats = Categorical(["a","a","a","a","a","a","a"], categories=["a","b"]) + idx = pd.Index(["h","i","j","k","l","m","n"]) + cats = pd.Categorical(["a","a","a","a","a","a","a"], categories=["a","b"]) values = [1,1,1,1,1,1,1] - df = DataFrame({"cats":cats,"values":values}, index=idx) + df = pd.DataFrame({"cats":cats,"values":values}, index=idx) df.iloc[2:4,:] = [["b",2],["b",2]] df @@ -543,10 +539,10 @@ Setting values by assigning categorical data will also check that the `categorie .. ipython:: python - df.loc["j":"k","cats"] = Categorical(["a","a"], categories=["a","b"]) + df.loc["j":"k","cats"] = pd.Categorical(["a","a"], categories=["a","b"]) df try: - df.loc["j":"k","cats"] = Categorical(["b","b"], categories=["a","b","c"]) + df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b","c"]) except ValueError as e: print("ValueError: " + str(e)) @@ -554,9 +550,9 @@ Assigning a `Categorical` to parts of a column of other types will use the value .. 
ipython:: python - df = DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]}) - df.loc[1:2,"a"] = Categorical(["b","b"], categories=["a","b"]) - df.loc[2:3,"b"] = Categorical(["b","b"], categories=["a","b"]) + df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]}) + df.loc[1:2,"a"] = pd.Categorical(["b","b"], categories=["a","b"]) + df.loc[2:3,"b"] = pd.Categorical(["b","b"], categories=["a","b"]) df df.dtypes @@ -569,9 +565,9 @@ but the categories of these categoricals need to be the same: .. ipython:: python - cat = Series(["a","b"], dtype="category") + cat = pd.Series(["a","b"], dtype="category") vals = [1,2] - df = DataFrame({"cats":cat, "vals":vals}) + df = pd.DataFrame({"cats":cat, "vals":vals}) res = pd.concat([df,df]) res res.dtypes @@ -611,12 +607,12 @@ relevant columns back to `category` and assign the right categories and categori .. ipython:: python - s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'd'])) + s = pd.Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'd'])) # rename the categories s.cat.categories = ["very good", "good", "bad"] # reorder the categories and add missing categories s = s.cat.set_categories(["very bad", "bad", "medium", "good", "very good"]) - df = DataFrame({"cats":s, "vals":[1,2,3,4,5,6]}) + df = pd.DataFrame({"cats":s, "vals":[1,2,3,4,5,6]}) csv = StringIO() df.to_csv(csv) df2 = pd.read_csv(StringIO(csv.getvalue())) @@ -643,10 +639,10 @@ available ("missing value") or `np.nan` is a valid category. .. ipython:: python - s = Series(["a","b",np.nan,"a"], dtype="category") + s = pd.Series(["a","b",np.nan,"a"], dtype="category") # only two categories s - s2 = Series(["a","b","c","a"], dtype="category") + s2 = pd.Series(["a","b","c","a"], dtype="category") s2.cat.categories = [1,2,np.nan] # three categories, np.nan included s2 @@ -660,11 +656,11 @@ available ("missing value") or `np.nan` is a valid category. .. 
ipython:: python - c = Series(["a","b",np.nan], dtype="category") + c = pd.Series(["a","b",np.nan], dtype="category") c.cat.set_categories(["a","b",np.nan], inplace=True) # will be inserted as a NA category: c[0] = np.nan - s = Series(c) + s = pd.Series(c) s pd.isnull(s) s.fillna("a") @@ -697,7 +693,7 @@ an ``object`` dtype is a constant times the length of the data. .. ipython:: python - s = Series(['foo','bar']*1000) + s = pd.Series(['foo','bar']*1000) # object dtype s.nbytes @@ -707,12 +703,12 @@ an ``object`` dtype is a constant times the length of the data. .. note:: - If the number of categories approaches the length of the data, the ``Categorical`` will use nearly (or more) memory than an - equivalent ``object`` dtype representation. + If the number of categories approaches the length of the data, the ``Categorical`` will use nearly the same or + more memory than an equivalent ``object`` dtype representation. .. ipython:: python - s = Series(['foo%04d' % i for i in range(2000)]) + s = pd.Series(['foo%04d' % i for i in range(2000)]) # object dtype s.nbytes @@ -734,7 +730,7 @@ will work with the current pandas version, resulting in subtle bugs: .. code-block:: python - >>> cat = Categorical([1,2], [1,2,3]) + >>> cat = pd.Categorical([1,2], [1,2,3]) >>> # old version >>> cat.get_values() array([2, 3], dtype=int64) @@ -762,7 +758,7 @@ object and not as a low-level `numpy` array dtype. This leads to some problems. except TypeError as e: print("TypeError: " + str(e)) - dtype = Categorical(["a"]).dtype + dtype = pd.Categorical(["a"]).dtype try: np.dtype(dtype) except TypeError as e: @@ -780,15 +776,15 @@ To check if a Series contains Categorical data, with pandas 0.16 or later, use .. 
ipython:: python - hasattr(Series(['a'], dtype='category'), 'cat') - hasattr(Series(['a']), 'cat') + hasattr(pd.Series(['a'], dtype='category'), 'cat') + hasattr(pd.Series(['a']), 'cat') Using `numpy` functions on a `Series` of type ``category`` should not work as `Categoricals` are not numeric data (even in the case that ``.categories`` is numeric). .. ipython:: python - s = Series(Categorical([1,2,3,4])) + s = pd.Series(pd.Categorical([1,2,3,4])) try: np.sum(s) #same with np.log(s),.. @@ -807,33 +803,36 @@ basic type) and applying along columns will also convert to object. .. ipython:: python - df = DataFrame({"a":[1,2,3,4], - "b":["a","b","c","d"], - "cats":Categorical([1,2,3,2])}) + df = pd.DataFrame({"a":[1,2,3,4], + "b":["a","b","c","d"], + "cats":pd.Categorical([1,2,3,2])}) df.apply(lambda row: type(row["cats"]), axis=1) df.apply(lambda col: col.dtype, axis=0) -No Categorical Index -~~~~~~~~~~~~~~~~~~~~ +Categorical Index +~~~~~~~~~~~~~~~~~ -There is currently no index of type ``category``, so setting the index to categorical column will -convert the categorical data to a "normal" dtype first and therefore remove any custom -ordering of the categories: +.. versionadded:: 0.16.1 + +A new ``CategoricalIndex`` index type is introduced in version 0.16.1. See the +:ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed +explanation. + +Setting the index will create a ``CategoricalIndex`` .. ipython:: python - cats = Categorical([1,2,3,4], categories=[4,2,3,1]) + cats = pd.Categorical([1,2,3,4], categories=[4,2,3,1]) strings = ["a","b","c","d"] values = [4,2,3,1] - df = DataFrame({"strings":strings, "values":values}, index=cats) + df = pd.DataFrame({"strings":strings, "values":values}, index=cats) df.index - # This should sort by categories but does not as there is no CategoricalIndex! + # This now sorts by the categories order df.sort_index() -.. 
note:: - This could change if a `CategoricalIndex` is implemented (see - https://github.com/pydata/pandas/issues/7629) - +In previous versions (<0.16.1) there is no index of type ``category``, so +setting the index to categorical column will convert the categorical data to a +"normal" dtype first and therefore remove any custom ordering of the categories. Side Effects ~~~~~~~~~~~~ @@ -843,12 +842,12 @@ means that changes to the `Series` will in most cases change the original `Categ .. ipython:: python - cat = Categorical([1,2,3,10], categories=[1,2,3,4,10]) - s = Series(cat, name="cat") + cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10]) + s = pd.Series(cat, name="cat") cat s.iloc[0:2] = 10 cat - df = DataFrame(s) + df = pd.DataFrame(s) df["cat"].cat.categories = [1,2,3,4,5] cat @@ -856,8 +855,8 @@ Use ``copy=True`` to prevent such a behaviour or simply don't reuse `Categorical .. ipython:: python - cat = Categorical([1,2,3,10], categories=[1,2,3,4,10]) - s = Series(cat, name="cat", copy=True) + cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10]) + s = pd.Series(cat, name="cat", copy=True) cat s.iloc[0:2] = 10 cat diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 4b0fe39d929a9..dfb9fab19bf31 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -1,23 +1,22 @@ .. currentmodule:: pandas -.. _computation: .. ipython:: python :suppress: import numpy as np np.random.seed(123456) - from pandas import * - import pandas.util.testing as tm - randn = np.random.randn np.set_printoptions(precision=4, suppress=True) + import pandas as pd import matplotlib try: matplotlib.style.use('ggplot') except AttributeError: - options.display.mpl_style = 'default' + pd.options.display.mpl_style = 'default' import matplotlib.pyplot as plt plt.close('all') - options.display.max_rows=15 + pd.options.display.max_rows=15 + +.. 
_computation: Computational tools =================== @@ -36,13 +35,13 @@ NA/null values *before* computing the percent change). .. ipython:: python - ser = Series(randn(8)) + ser = pd.Series(np.random.randn(8)) ser.pct_change() .. ipython:: python - df = DataFrame(randn(10, 4)) + df = pd.DataFrame(np.random.randn(10, 4)) df.pct_change(periods=3) @@ -56,8 +55,8 @@ The ``Series`` object has a method ``cov`` to compute covariance between series .. ipython:: python - s1 = Series(randn(1000)) - s2 = Series(randn(1000)) + s1 = pd.Series(np.random.randn(1000)) + s2 = pd.Series(np.random.randn(1000)) s1.cov(s2) Analogously, ``DataFrame`` has a method ``cov`` to compute pairwise covariances @@ -78,7 +77,7 @@ among the series in the DataFrame, also excluding NA/null values. .. ipython:: python - frame = DataFrame(randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) + frame = pd.DataFrame(np.random.randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) frame.cov() ``DataFrame.cov`` also supports an optional ``min_periods`` keyword that @@ -87,7 +86,7 @@ in order to have a valid result. .. ipython:: python - frame = DataFrame(randn(20, 3), columns=['a', 'b', 'c']) + frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c']) frame.ix[:5, 'a'] = np.nan frame.ix[5:10, 'b'] = np.nan @@ -123,7 +122,7 @@ All of these are currently computed using pairwise complete observations. .. ipython:: python - frame = DataFrame(randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) + frame = pd.DataFrame(np.random.randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) frame.ix[::2] = np.nan # Series with Series @@ -140,7 +139,7 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword: .. ipython:: python - frame = DataFrame(randn(20, 3), columns=['a', 'b', 'c']) + frame = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c']) frame.ix[:5, 'a'] = np.nan frame.ix[5:10, 'b'] = np.nan @@ -157,8 +156,8 @@ objects. 
index = ['a', 'b', 'c', 'd', 'e'] columns = ['one', 'two', 'three', 'four'] - df1 = DataFrame(randn(5, 4), index=index, columns=columns) - df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns) + df1 = pd.DataFrame(np.random.randn(5, 4), index=index, columns=columns) + df2 = pd.DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns) df1.corrwith(df2) df2.corrwith(df1, axis=1) @@ -172,7 +171,7 @@ of the ranks (by default) for the group: .. ipython:: python - s = Series(np.random.randn(5), index=list('abcde')) + s = pd.Series(np.random.randn(5), index=list('abcde')) s['d'] = s['b'] # so there's a tie s.rank() @@ -181,7 +180,7 @@ or the columns (``axis=1``). ``NaN`` values are excluded from the ranking. .. ipython:: python - df = DataFrame(np.random.randn(10, 6)) + df = pd.DataFrame(np.random.randn(10, 6)) df[4] = df[2][:5] # some ties df df.rank(1) @@ -253,13 +252,13 @@ These functions can be applied to ndarrays or Series objects: .. ipython:: python - ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) + ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = ts.cumsum() ts.plot(style='k--') @savefig rolling_mean_ex.png - rolling_mean(ts, 60).plot(style='k') + pd.rolling_mean(ts, 60).plot(style='k') They can also be applied to DataFrame objects. This is really just syntactic sugar for applying the moving window operator to all of the DataFrame's columns: @@ -271,12 +270,12 @@ sugar for applying the moving window operator to all of the DataFrame's columns: .. ipython:: python - df = DataFrame(randn(1000, 4), index=ts.index, + df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=['A', 'B', 'C', 'D']) df = df.cumsum() @savefig rolling_mean_frame.png - rolling_sum(df, 60).plot(subplots=True) + pd.rolling_sum(df, 60).plot(subplots=True) The ``rolling_apply`` function takes an extra ``func`` argument and performs generic rolling computations. 
The ``func`` argument should be a single function @@ -287,7 +286,7 @@ compute the mean absolute deviation on a rolling basis: mad = lambda x: np.fabs(x - x.mean()).mean() @savefig rolling_apply_ex.png - rolling_apply(ts, 60, mad).plot(style='k') + pd.rolling_apply(ts, 60, mad).plot(style='k') The ``rolling_window`` function performs a generic rolling window computation on the input data. The weights used in the window are specified by the ``win_type`` @@ -310,23 +309,23 @@ keyword. The list of recognized types are: .. ipython:: python - ser = Series(randn(10), index=date_range('1/1/2000', periods=10)) + ser = pd.Series(np.random.randn(10), index=pd.date_range('1/1/2000', periods=10)) - rolling_window(ser, 5, 'triang') + pd.rolling_window(ser, 5, 'triang') Note that the ``boxcar`` window is equivalent to ``rolling_mean``. .. ipython:: python - rolling_window(ser, 5, 'boxcar') + pd.rolling_window(ser, 5, 'boxcar') - rolling_mean(ser, 5) + pd.rolling_mean(ser, 5) For some windowing functions, additional parameters must be specified: .. ipython:: python - rolling_window(ser, 5, 'gaussian', std=0.1) + pd.rolling_window(ser, 5, 'gaussian', std=0.1) By default the labels are set to the right edge of the window, but a ``center`` keyword is available so the labels can be set at the center. @@ -334,11 +333,11 @@ This keyword is available in other rolling functions as well. .. ipython:: python - rolling_window(ser, 5, 'boxcar') + pd.rolling_window(ser, 5, 'boxcar') - rolling_window(ser, 5, 'boxcar', center=True) + pd.rolling_window(ser, 5, 'boxcar', center=True) - rolling_mean(ser, 5, center=True) + pd.rolling_mean(ser, 5, center=True) .. _stats.moments.normalization: @@ -377,7 +376,7 @@ For example: .. ipython:: python df2 = df[:20] - rolling_corr(df2, df2['B'], window=5) + pd.rolling_corr(df2, df2['B'], window=5) .. _stats.moments.corr_pairwise: @@ -402,12 +401,12 @@ can even be omitted: .. 
ipython:: python - covs = rolling_cov(df[['B','C','D']], df[['A','B','C']], 50, pairwise=True) + covs = pd.rolling_cov(df[['B','C','D']], df[['A','B','C']], 50, pairwise=True) covs[df.index[-50]] .. ipython:: python - correls = rolling_corr(df, 50) + correls = pd.rolling_corr(df, 50) correls[df.index[-50]] .. note:: @@ -441,9 +440,9 @@ they are implemented in pandas such that the following two calls are equivalent: .. ipython:: python - rolling_mean(df, window=len(df), min_periods=1)[:5] + pd.rolling_mean(df, window=len(df), min_periods=1)[:5] - expanding_mean(df)[:5] + pd.expanding_mean(df)[:5] Like the ``rolling_`` functions, the following methods are included in the ``pandas`` namespace or can be located in ``pandas.stats.moments``. @@ -502,7 +501,7 @@ relative impact of an individual data point. As an example, here is the ts.plot(style='k--') @savefig expanding_mean_frame.png - expanding_mean(ts).plot(style='k') + pd.expanding_mean(ts).plot(style='k') .. _stats.moments.exponentially_weighted: @@ -584,7 +583,7 @@ Here is an example for a univariate time series: ts.plot(style='k--') @savefig ewma_ex.png - ewma(ts, span=20).plot(style='k') + pd.ewma(ts, span=20).plot(style='k') All the EW functions have a ``min_periods`` argument, which has the same meaning it does for all the ``expanding_`` and ``rolling_`` functions: diff --git a/doc/source/conf.py b/doc/source/conf.py index fcb9c3fdd0016..08fc8483762ab 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -211,7 +211,30 @@ # Additional templates that should be rendered to pages, maps page names to # template names. 
-# html_additional_pages = {} + +# Add redirect for previously existing API pages (which are now included in +# the API pages as top-level functions) based on a template (GH9911) +moved_api_pages = [ + 'pandas.core.common.isnull', 'pandas.core.common.notnull', 'pandas.core.reshape.get_dummies', + 'pandas.tools.merge.concat', 'pandas.tools.merge.merge', 'pandas.tools.pivot.pivot_table', + 'pandas.tseries.tools.to_datetime', 'pandas.io.clipboard.read_clipboard', 'pandas.io.excel.ExcelFile.parse', + 'pandas.io.excel.read_excel', 'pandas.io.html.read_html', 'pandas.io.json.read_json', + 'pandas.io.parsers.read_csv', 'pandas.io.parsers.read_fwf', 'pandas.io.parsers.read_table', + 'pandas.io.pickle.read_pickle', 'pandas.io.pytables.HDFStore.append', 'pandas.io.pytables.HDFStore.get', + 'pandas.io.pytables.HDFStore.put', 'pandas.io.pytables.HDFStore.select', 'pandas.io.pytables.read_hdf', + 'pandas.io.sql.read_sql', 'pandas.io.sql.read_frame', 'pandas.io.sql.write_frame', + 'pandas.io.stata.read_stata', 'pandas.stats.moments.ewma', 'pandas.stats.moments.ewmcorr', + 'pandas.stats.moments.ewmcov', 'pandas.stats.moments.ewmstd', 'pandas.stats.moments.ewmvar', + 'pandas.stats.moments.expanding_apply', 'pandas.stats.moments.expanding_corr', 'pandas.stats.moments.expanding_count', + 'pandas.stats.moments.expanding_cov', 'pandas.stats.moments.expanding_kurt', 'pandas.stats.moments.expanding_mean', + 'pandas.stats.moments.expanding_median', 'pandas.stats.moments.expanding_quantile', 'pandas.stats.moments.expanding_skew', + 'pandas.stats.moments.expanding_std', 'pandas.stats.moments.expanding_sum', 'pandas.stats.moments.expanding_var', + 'pandas.stats.moments.rolling_apply', 'pandas.stats.moments.rolling_corr', 'pandas.stats.moments.rolling_count', + 'pandas.stats.moments.rolling_cov', 'pandas.stats.moments.rolling_kurt', 'pandas.stats.moments.rolling_mean', + 'pandas.stats.moments.rolling_median', 'pandas.stats.moments.rolling_quantile', 'pandas.stats.moments.rolling_skew', + 
'pandas.stats.moments.rolling_std', 'pandas.stats.moments.rolling_sum', 'pandas.stats.moments.rolling_var'] + +html_additional_pages = {'generated/' + page: 'api_redirect.html' for page in moved_api_pages} # If false, no module index is generated. html_use_modindex = True diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index b3b2d272e66c6..1f58992dba017 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -17,8 +17,8 @@ If you are simply looking to start working with the *pandas* codebase, navigate `GitHub "issues" tab <https://github.com/pydata/pandas/issues>`_ and start looking through interesting issues. There are a number of issues listed under `Docs <https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open>`_ -and `Good as first PR -<https://github.com/pydata/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_ +and `Difficulty Novice +<https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_ where you could start out. Or maybe through using *pandas* you have an idea of you own or are looking for something @@ -96,6 +96,8 @@ Getting Started with Git setting up your SSH key, and configuring git. All these steps need to be completed before working seamlessly with your local repository and GitHub. +.. _contributing.forking: + Forking ------- @@ -110,9 +112,11 @@ want to clone your fork to your machine: :: This creates the directory `pandas-yourname` and connects your repository to the upstream (main project) *pandas* repository. -You will also need to hook up Travis-CI to your GitHub repository so the suite -is automatically run when a Pull Request is submitted. Instructions are `here -<http://about.travis-ci.org/docs/user/getting-started/>`_. +The testing suite will run automatically on Travis-CI once your Pull Request is +submitted. 
However, if you wish to run the test suite on a branch prior to +submitting the Pull Request, then Travis-CI needs to be hooked up to your +GitHub repository. Instructions for doing so are `here +<http://about.travis-ci.org/docs/user/getting-started/>`__. Creating a Branch ----------------- @@ -132,6 +136,95 @@ changes in this branch specific to one bug or feature so it is clear what the branch brings to *pandas*. You can have many shiny-new-features and switch in between them using the git checkout command. +To update this branch, you need to retrieve the changes from the master branch:: + + git fetch upstream + git rebase upstream/master + +This will replay your commits on top of the latest pandas git master. If this +leads to merge conflicts, you must resolve these before submitting your Pull +Request. If you have uncommitted changes, you will need to `stash` them prior +to updating. This will effectively store your changes and they can be reapplied +after updating. + +.. _contributing.dev_env: + +Creating a Development Environment +---------------------------------- + +An easy way to create a *pandas* development environment is as follows. + +- Install either :ref:`Install Anaconda <install-anaconda>` or :ref:`Install miniconda <install-miniconda>` +- Make sure that you have :ref:`cloned the repository <contributing-forking>` +- ``cd`` to the pandas source directory + +Tell ``conda`` to create a new environment, named ``pandas_dev``, or any name you would like for this environment by running: + +:: + + conda create -n pandas_dev --file ci/requirements_dev.txt + + +For a python 3 environment + +:: + + conda create -n pandas_dev python=3 --file ci/requirements_dev.txt + + +If you are on ``windows``, then you will need to install the compiler linkages: + +:: + + conda install -n pandas_dev libpython + +This will create the new environment, and not touch any of your existing environments, nor any existing python installation. 
It will install all of the basic dependencies of *pandas*, as well as the development and testing tools. If you would like to install other dependencies, you can install them as follows: + +:: + + conda install -n pandas_dev -c pandas pytables scipy + +To install *all* pandas dependencies you can do the following: + +:: + + conda install -n pandas_dev -c pandas --file ci/requirements_all.txt + +To work in this environment, ``activate`` it as follows: + +:: + + activate pandas_dev + +At which point, the prompt will change to indicate you are in the new development environment. + +.. note:: + + The above syntax is for ``windows`` environments. To work on ``macosx/linux``, use: + + :: + + source activate pandas_dev + +To view your environments: + +:: + + conda info -e + +To return to your home root environment: + +:: + + deactivate + +See the full ``conda`` docs `here +<http://conda.pydata.org/docs>`__. + +At this point you can easily do an *in-place* install, as detailed in the next section. + +.. _contributing.getting_source: + Making changes -------------- @@ -237,9 +330,15 @@ follow the Numpy Docstring Standard (see above), but you don't need to install this because a local copy of ``numpydoc`` is included in the *pandas* source code. +It is easiest to :ref:`create a development environment <contributing-dev_env>`, then install: + +:: + + conda install -n pandas_dev sphinx ipython + Furthermore, it is recommended to have all `optional dependencies <http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies>`_ -installed. This is not strictly necessary, but be aware that you will see some error +installed. This is not strictly necessary, but be aware that you will see some error messages. Because all the code in the documentation is executed during the doc build, the examples using this optional dependencies will generate errors. Run ``pd.show_versions()`` to get an overview of the installed version of all @@ -252,7 +351,7 @@ dependencies. 
Building the documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~ -So how do you build the docs? Navigate to your local the folder +So how do you build the docs? Navigate to your local the folder ``pandas/doc/`` directory in the console and run:: python make.py html @@ -272,8 +371,9 @@ If you want to do a full clean build, do:: Starting with 0.13.1 you can tell ``make.py`` to compile only a single section of the docs, greatly reducing the turn-around time for checking your changes. -You will be prompted to delete `.rst` files that aren't required, since the -last committed version can always be restored from git. +You will be prompted to delete `.rst` files that aren't required. This is okay +since the prior version can be checked out from git, but make sure to +not commit the file deletions. :: @@ -295,6 +395,13 @@ browser to see the full documentation you just built:: And you'll have the satisfaction of seeing your new and improved documentation! +.. _contributing.dev_docs: + +Built Master Branch Documentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When pull-requests are merged into the pandas *master* branch, the main parts of the documentation are +also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__. Contributing to the code base ============================= @@ -324,7 +431,7 @@ deprecation warnings where needed. Test-driven Development/Writing Code ------------------------------------ -*Pandas* is serious about `Test-driven Development (TDD) +*Pandas* is serious about testing and strongly encourages individuals to embrace `Test-driven Development (TDD) <http://en.wikipedia.org/wiki/Test-driven_development>`_. 
This development process "relies on the repetition of a very short development cycle: first the developer writes an (initially failing) automated test case that defines a desired @@ -457,8 +564,8 @@ Doing 'git status' again should give something like :: # modified: /relative/path/to/file-you-added.py # -Finally, commit your changes to your local repository with an explanatory message. An informal -commit message format is in effect for the project. Please try to adhere to it. Here are +Finally, commit your changes to your local repository with an explanatory message. *Pandas* +uses a convention for commit message prefixes and layout. Here are some common prefixes along with general guidelines for when to use them: * ENH: Enhancement, new functionality @@ -572,6 +679,3 @@ branch has not actually been merged. The branch will still exist on GitHub, so to delete it there do :: git push origin --delete shiny-new-feature - - - diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 0e6386955a653..f69f926296020 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -1006,6 +1006,9 @@ The :ref:`HDFStores <io.hdf5>` docs `Merging on-disk tables with millions of rows <http://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925>`__ +`Avoiding inconsistencies when writing to a store from multiple processes/threads +<http://stackoverflow.com/a/29014295/2858145>`__ + De-duplicating a large store by chunks, essentially a recursive reduction operation. Shows a function for taking in data from csv file and creating a store by chunks, with date parsing as well. 
`See here diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index e1c14029f1cf9..9221f2685d79b 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -461,7 +461,7 @@ Inspired by `dplyr's <http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html#mutate>`__ ``mutate`` verb, DataFrame has an :meth:`~pandas.DataFrame.assign` method that allows you to easily create new columns that are potentially -derived from existing columns. +derived from existing columns. .. ipython:: python @@ -511,7 +511,9 @@ DataFrame is returned, with the new values inserted. .. warning:: Since the function signature of ``assign`` is ``**kwargs``, a dictionary, - the order of the new columns in the resulting DataFrame cannot be guaranteed. + the order of the new columns in the resulting DataFrame cannot be guaranteed + to match the order you pass in. To make things predictable, items are inserted + alphabetically (by key) at the end of the DataFrame. All expressions are computed first, and then assigned. So you can't refer to another column being assigned in the same call to ``assign``. For example: @@ -575,10 +577,8 @@ row-wise. For example: df - df.iloc[0] -In the special case of working with time series data, if the Series is a -TimeSeries (which it will be automatically if the index contains datetime -objects), and the DataFrame index also contains dates, the broadcasting will be -column-wise: +In the special case of working with time series data, and the DataFrame index +also contains dates, the broadcasting will be column-wise: .. ipython:: python :okwarning: diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 4a0743b8be3e4..c70b6deade36e 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -57,7 +57,7 @@ large data to thin clients. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hadley Wickham's `ggplot2 <http://ggplot2.org/>`__ is a foundational exploratory visualization package for the R language. 
-Based on `"The Grammer of Graphics" <http://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it +Based on `"The Grammar of Graphics" <http://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it provides a powerful, declarative and extremely general way to generate bespoke plots of any kind of data. It's really quite incredible. Various implementations to other languages are available, but a faithful implementation for python users has long been missing. Although still young @@ -137,6 +137,24 @@ PyDatastream is a Python interface to the SOAP API to return indexed Pandas DataFrames or Panels with financial data. This package requires valid credentials for this API (non free). +`pandaSDMX <http://pandasdmx.readthedocs.org>`_ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +pandaSDMX is an extensible library to retrieve and acquire statistical data +and metadata disseminated in +`SDMX <http://www.sdmx.org>`_ 2.1. This standard is currently supported by +the European statistics office (Eurostat) +and the European Central Bank (ECB). Datasets may be returned as pandas Series +or multi-indexed DataFrames. + +`fredapi <https://github.com/mortada/fredapi>`_ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +fredapi is a Python interface to the `Federal Reserve Economic Data (FRED) <http://research.stlouisfed.org/fred2/>`__ +provided by the Federal Reserve Bank of St. Louis. It works with both the FRED database and ALFRED database that +contains point-in-time data (i.e. historic data revisions). fredapi provides a wrapper in python to the FRED +HTTP API, and also provides several conveninent methods for parsing and analyzing point-in-time data from ALFRED. +fredapi makes use of pandas and returns data in a Series or DataFrame. This module requires a FRED API key that +you can obtain for free on the FRED website. + .. 
_ecosystem.domain: diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index e6b735173110b..d007446a5b922 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -66,7 +66,7 @@ Here's the function in pure python: s += f(a + i * dx) return s * dx -We achieve our result by by using ``apply`` (row-wise): +We achieve our result by using ``apply`` (row-wise): .. ipython:: python @@ -86,7 +86,7 @@ hence we'll concentrate our efforts cythonizing these two functions. .. note:: In python 2 replacing the ``range`` with its generator counterpart (``xrange``) - would mean the ``range`` line would vanish. In python 3 range is already a generator. + would mean the ``range`` line would vanish. In python 3 ``range`` is already a generator. .. _enhancingperf.plain: @@ -248,7 +248,7 @@ efforts here. More advanced techniques ~~~~~~~~~~~~~~~~~~~~~~~~ -There is still scope for improvement, here's an example of using some more +There is still hope for improvement. Here's an example of using some more advanced cython techniques: .. ipython:: @@ -373,7 +373,7 @@ This Python syntax is **not** allowed: :func:`~pandas.eval` Examples ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -:func:`pandas.eval` works well with expressions containing large arrays +:func:`pandas.eval` works well with expressions containing large arrays. First let's create a few decent-sized arrays to play with: diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 467ec02b55f20..1fc8488e92fde 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -207,9 +207,9 @@ properties. Here are the pandas equivalents: Frequency conversion ~~~~~~~~~~~~~~~~~~~~ -Frequency conversion is implemented using the ``resample`` method on TimeSeries -and DataFrame objects (multiple time series). ``resample`` also works on panels -(3D). 
Here is some code that resamples daily data to monthly: +Frequency conversion is implemented using the ``resample`` method on Series
+and DataFrame objects with a DatetimeIndex or PeriodIndex. ``resample`` also
+works on panels (3D). Here is some code that resamples daily data to monthly:

 .. ipython:: python

diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 7ad2641dec52a..c9e18b585c764 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -784,11 +784,11 @@ will be (silently) dropped. Thus, this does not pose any problems:

    df.groupby('A').std()

-NA group handling
-~~~~~~~~~~~~~~~~~
+NA and NaT group handling
+~~~~~~~~~~~~~~~~~~~~~~~~~

-If there are any NaN values in the grouping key, these will be automatically
-excluded. So there will never be an "NA group". This was not the case in older
+If there are any NaN or NaT values in the grouping key, these will be automatically
+excluded. So there will never be an "NA group" or "NaT group". This was not the case in older
versions of pandas, but users were generally discarding the NA group anyway
(and supporting it was an implementation headache).

diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index ee779715bcb95..fb63d0c6d66f1 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -115,6 +115,7 @@ See the package overview for more detail about what's in the library.
 {%if not single -%}
     whatsnew
     install
+    contributing
     faq
     overview
     10min
@@ -149,7 +150,6 @@ See the package overview for more detail about what's in the library.
api {% endif -%} {%if not single -%} - contributing internals release {% endif -%} diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index fc074802353ee..a1912032bc3bf 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -30,9 +30,9 @@ The axis labeling information in pandas objects serves many purposes: In this section, we will focus on the final point: namely, how to slice, dice, and generally get and set subsets of pandas objects. The primary focus will be on Series and DataFrame as they have received more development attention in -this area. Expect more work to be invested higher-dimensional data structures -(including ``Panel``) in the future, especially in label-based advanced -indexing. +this area. Expect more work to be invested in higher-dimensional data +structures (including ``Panel``) in the future, especially in label-based +advanced indexing. .. note:: @@ -54,7 +54,7 @@ indexing. .. warning:: - In 0.15.0 ``Index`` has internally been refactored to no longer sub-class ``ndarray`` + In 0.15.0 ``Index`` has internally been refactored to no longer subclass ``ndarray`` but instead subclass ``PandasObject``, similarly to the rest of the pandas objects. This should be a transparent change with only very limited API implications (See the :ref:`Internal Refactoring <whatsnew_0150.refactoring>`) @@ -225,9 +225,9 @@ new column. sa.a = 5 sa - dfa.A = list(range(len(dfa.index))) # ok if A already exists + dfa.A = list(range(len(dfa.index))) # ok if A already exists dfa - dfa['A'] = list(range(len(dfa.index))) # use this form to create a new column + dfa['A'] = list(range(len(dfa.index))) # use this form to create a new column dfa .. warning:: @@ -249,6 +249,14 @@ new column. If you are using the IPython environment, you may also use tab-completion to see these accessible attributes. +You can also assign a ``dict`` to a row of a ``DataFrame``: + +.. 
ipython:: python + + x = pd.DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]}) + x.iloc[1] = dict(x=9, y=99) + x + Slicing ranges -------------- @@ -314,7 +322,7 @@ Selection By Label dfl.loc['20130102':'20130104'] pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol. -**at least 1** of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! When slicing, the start bound is *included*, **AND** the stop bound is *included*. Integers are valid labels, but they refer to the label **and not the position**. +**At least 1** of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! When slicing, the start bound is *included*, **AND** the stop bound is *included*. Integers are valid labels, but they refer to the label **and not the position**. The ``.loc`` attribute is the primary access method. The following are valid inputs: @@ -500,6 +508,81 @@ A list of indexers where any element is out of bounds will raise an .. _indexing.basics.partial_setting: +Selecting Random Samples +------------------------ +.. versionadded::0.16.1 + +A random selection of rows or columns from a Series, DataFrame, or Panel with the :meth:`~DataFrame.sample` method. The method will sample rows by default, and accepts a specific number of rows/columns to return, or a fraction of rows. + +.. ipython :: python + + s = Series([0,1,2,3,4,5]) + + # When no arguments are passed, returns 1 row. + s.sample() + + # One may specify either a number of rows: + s.sample(n=3) + + # Or a fraction of the rows: + s.sample(frac=0.5) + +By default, ``sample`` will return each row at most once, but one can also sample with replacement +using the ``replace`` option: + +.. 
ipython :: python + + s = Series([0,1,2,3,4,5]) + + # Without replacement (default): + s.sample(n=6, replace=False) + + # With replacement: + s.sample(n=6, replace=True) + + +By default, each row has an equal probability of being selected, but if you want rows +to have different probabilities, you can pass the ``sample`` function sampling weights as +``weights``. These weights can be a list, a numpy array, or a Series, but they must be of the same length as the object you are sampling. Missing values will be treated as a weight of zero, and inf values are not allowed. If weights do not sum to 1, they will be re-normalized by dividing all weights by the sum of the weights. For example: + +.. ipython :: python + + s = Series([0,1,2,3,4,5]) + example_weights = [0, 0, 0.2, 0.2, 0.2, 0.4] + s.sample(n=3, weights=example_weights) + + # Weights will be re-normalized automatically + example_weights2 = [0.5, 0, 0, 0, 0, 0] + s.sample(n=1, weights=example_weights2) + +When applied to a DataFrame, you can use a column of the DataFrame as sampling weights +(provided you are sampling rows and not columns) by simply passing the name of the column +as a string. + +.. ipython :: python + + df2 = DataFrame({'col1':[9,8,7,6], 'weight_column':[0.5, 0.4, 0.1, 0]}) + df2.sample(n = 3, weights = 'weight_column') + +``sample`` also allows users to sample columns instead of rows using the ``axis`` argument. + +.. ipython :: python + + df3 = DataFrame({'col1':[1,2,3], 'col2':[2,3,4]}) + df3.sample(n=1, axis=1) + +Finally, one can also set a seed for ``sample``'s random number generator using the ``random_state`` argument, which will accept either an integer (as a seed) or a numpy RandomState object. + +.. ipython :: python + + df4 = DataFrame({'col1':[1,2,3], 'col2':[2,3,4]}) + + # With a given seed, the sample will always draw the same rows. 
+ df4.sample(n=2, random_state=2) + df4.sample(n=2, random_state=2) + + + Setting With Enlargement ------------------------ @@ -578,9 +661,10 @@ Using a boolean vector to index a Series works exactly as in a numpy ndarray: .. ipython:: python + s = Series(range(-3, 4)) + s s[s > 0] - s[(s < 0) & (s > -0.5)] - s[(s < -1) | (s > 1 )] + s[(s < -1) | (s > 0.5)] s[~(s < 0)] You may select rows from a DataFrame using a boolean vector the same length as diff --git a/doc/source/install.rst b/doc/source/install.rst index dd9021d0439dc..b3f86db5e3e59 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -35,6 +35,8 @@ pandas at all. Simply create an account, and have access to pandas from within your brower via an `IPython Notebook <http://ipython.org/notebook.html>`__ in a few minutes. +.. _install.anaconda: + Installing pandas with Anaconda ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -66,6 +68,8 @@ admin rights to install it, it will install in the user's home directory, and this also makes it trivial to delete Anaconda at a later date (just delete that folder). +.. _install.miniconda: + Installing pandas with Miniconda ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -173,47 +177,8 @@ Installing using your Linux distribution's package manager. Installing from source ~~~~~~~~~~~~~~~~~~~~~~ -.. note:: - - Installing from the git repository requires a recent installation of `Cython - <http://cython.org>`__ as the cythonized C sources are no longer checked - into source control. Released source distributions will contain the built C - files. I recommend installing the latest Cython via ``easy_install -U - Cython`` - -The source code is hosted at http://github.com/pydata/pandas, it can be checked -out using git and compiled / installed like so: - -:: - - git clone git://github.com/pydata/pandas.git - cd pandas - python setup.py install - -Make sure you have Cython installed when installing from the repository, -rather then a tarball or pypi. 
-On Windows, I suggest installing the MinGW compiler suite following the
-directions linked to above. Once configured property, run the following on the
-command line:
-
-::
-
-    python setup.py build --compiler=mingw32
-    python setup.py install
-
-Note that you will not be able to import pandas if you open an interpreter in
-the source directory unless you build the C extensions in place:
-
-::
-
-    python setup.py build_ext --inplace
-
-The most recent version of MinGW (any installer dated after 2011-08-03)
-has removed the '-mno-cygwin' option but Distutils has not yet been updated to
-reflect that. Thus, you may run into an error like "unrecognized command line
-option '-mno-cygwin'". Until the bug is fixed in Distutils, you may need to
-install a slightly older version of MinGW (2011-08-02 installer).
+See the :ref:`contributing documentation <contributing>` for complete instructions on building from the git source tree. Further, see :ref:`creating a development environment <contributing-dev_env>` if you wish to create a *pandas* development environment.

Running the test suite
~~~~~~~~~~~~~~~~~~~~~~
@@ -278,7 +243,7 @@ Optional Dependencies

 * `Cython <http://www.cython.org>`__: Only necessary to build development
   version. Version 0.19.1 or higher.
 * `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions
-* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required.
+* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.0 or higher highly recommended.
 * `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended.
 * `matplotlib <http://matplotlib.sourceforge.net/>`__: for plotting
 * `statsmodels <http://statsmodels.sourceforge.net/>`__
@@ -290,6 +255,7 @@ Optional Dependencies
   * Alternative Excel writer.
 * `boto <https://pypi.python.org/pypi/boto>`__: necessary for Amazon S3
   access.
+* `blosc <https://pypi.python.org/pypi/blosc>`__: for msgpack compression using ``blosc`` * One of `PyQt4 <http://www.riverbankcomputing.com/software/pyqt/download>`__, `PySide <http://qt-project.org/wiki/Category:LanguageBindings::PySide>`__, `pygtk @@ -354,4 +320,3 @@ Optional Dependencies work. Hence, it is highly recommended that you install these. A packaged distribution like `Enthought Canopy <http://enthought.com/products/canopy>`__ may be worth considering. - diff --git a/doc/source/internals.rst b/doc/source/internals.rst index 9418ca5265f1a..17be04cd64d27 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -94,4 +94,155 @@ not check (or care) whether the levels themselves are sorted. Fortunately, the constructors ``from_tuples`` and ``from_arrays`` ensure that this is true, but if you compute the levels and labels yourself, please be careful. +.. _ref-subclassing-pandas: + +Subclassing pandas Data Structures +---------------------------------- + +.. warning:: There are some easier alternatives before considering subclassing ``pandas`` data structures. + + 1. Monkey-patching: See :ref:`Adding Features to your pandas Installation <ref-monkey-patching>`. + + 2. Use *composition*. See `here <http://en.wikipedia.org/wiki/Composition_over_inheritance>`_. + +This section describes how to subclass ``pandas`` data structures to meet more specific needs. There are 2 points which need attention: + +1. Override constructor properties. +2. Define original properties + +.. note:: You can find a nice example in `geopandas <https://github.com/geopandas/geopandas>`_ project. + +Override Constructor Properties +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Each data structure has constructor properties to specifying data constructors. By overriding these properties, you can retain defined-classes through ``pandas`` data manipulations. 
+
+There are 3 constructors to be defined:
+
+- ``_constructor``: Used when a manipulation result has the same dimensions as the original.
+- ``_constructor_sliced``: Used when a manipulation result has one lower dimension(s) than the original, such as ``DataFrame`` single columns slicing.
+- ``_constructor_expanddim``: Used when a manipulation result has one higher dimension than the original, such as ``Series.to_frame()`` and ``DataFrame.to_panel()``.
+
+Following table shows how ``pandas`` data structures define constructor properties by default.
+
+=========================== ======================= =================== =======================
+Property Attributes         ``Series``              ``DataFrame``       ``Panel``
+=========================== ======================= =================== =======================
+``_constructor``            ``Series``              ``DataFrame``       ``Panel``
+``_constructor_sliced``     ``NotImplementedError`` ``Series``          ``DataFrame``
+``_constructor_expanddim``  ``DataFrame``           ``Panel``           ``NotImplementedError``
+=========================== ======================= =================== =======================
+
+Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame`` overriding constructor properties.
+
+.. code-block:: python
+
+   class SubclassedSeries(Series):
+
+       @property
+       def _constructor(self):
+           return SubclassedSeries
+
+       @property
+       def _constructor_expanddim(self):
+           return SubclassedDataFrame
+
+   class SubclassedDataFrame(DataFrame):
+
+       @property
+       def _constructor(self):
+           return SubclassedDataFrame
+
+       @property
+       def _constructor_sliced(self):
+           return SubclassedSeries
+
+.. 
code-block:: python
+
+    >>> s = SubclassedSeries([1, 2, 3])
+    >>> type(s)
+    <class '__main__.SubclassedSeries'>
+
+    >>> to_framed = s.to_frame()
+    >>> type(to_framed)
+    <class '__main__.SubclassedDataFrame'>
+
+    >>> df = SubclassedDataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+    >>> df
+       A  B  C
+    0  1  4  7
+    1  2  5  8
+    2  3  6  9
+
+    >>> type(df)
+    <class '__main__.SubclassedDataFrame'>
+
+    >>> sliced1 = df[['A', 'B']]
+    >>> sliced1
+       A  B
+    0  1  4
+    1  2  5
+    2  3  6
+    >>> type(sliced1)
+    <class '__main__.SubclassedDataFrame'>
+
+    >>> sliced2 = df['A']
+    >>> sliced2
+    0    1
+    1    2
+    2    3
+    Name: A, dtype: int64
+    >>> type(sliced2)
+    <class '__main__.SubclassedSeries'>
+
+Define Original Properties
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To let original data structures have additional properties, you should let ``pandas`` know what properties are added. ``pandas`` maps unknown properties to data names overriding ``__getattribute__``. Defining original properties can be done in one of 2 ways:
+
+1. Define ``_internal_names`` and ``_internal_names_set`` for temporary properties which WILL NOT be passed to manipulation results.
+2. Define ``_metadata`` for normal properties which will be passed to manipulation results.
+
+Below is an example to define 2 original properties, "internal_cache" as a temporary property and "added_property" as a normal property
+
+.. code-block:: python
+
+   class SubclassedDataFrame2(DataFrame):
+
+       # temporary properties
+       _internal_names = DataFrame._internal_names + ['internal_cache']
+       _internal_names_set = set(_internal_names)
+
+       # normal properties
+       _metadata = ['added_property']
+
+       @property
+       def _constructor(self):
+           return SubclassedDataFrame2
+
+.. 
code-block:: python
+
+    >>> df = SubclassedDataFrame2({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+    >>> df
+       A  B  C
+    0  1  4  7
+    1  2  5  8
+    2  3  6  9
+
+    >>> df.internal_cache = 'cached'
+    >>> df.added_property = 'property'
+
+    >>> df.internal_cache
+    cached
+    >>> df.added_property
+    property
+
+    # properties defined in _internal_names are reset after manipulation
+    >>> df[['A', 'B']].internal_cache
+    AttributeError: 'SubclassedDataFrame2' object has no attribute 'internal_cache'
+
+    # properties defined in _metadata are retained
+    >>> df[['A', 'B']].added_property
+    property
+
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 1c8a1159ab162..73a2f2f1d3531 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -89,6 +89,8 @@ They can take a number of arguments:
  - ``delim_whitespace``: Parse whitespace-delimited (spaces or tabs) file
    (much faster than using a regular expression)
  - ``compression``: decompress ``'gzip'`` and ``'bz2'`` formats on the fly.
+   Set to ``'infer'`` (the default) to guess a format based on the file
+   extension.
  - ``dialect``: string or :class:`python:csv.Dialect` instance to expose more
    ways to specify the file format
  - ``dtype``: A data type name or a dict of column name to data type. If not
@@ -2362,6 +2364,10 @@ for some advanced strategies

 As of version 0.15.0, pandas requires ``PyTables`` >= 3.0.0. Stores written with prior versions of pandas / ``PyTables`` >= 2.3 are fully compatible
 (this was the previous minimum ``PyTables`` required version).

+.. warning::
+
+   There is a ``PyTables`` indexing bug which may appear when querying stores using an index. If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2. Stores created previously will need to be rewritten using the updated version.
+
 .. ipython:: python
    :suppress:
    :okexcept:

@@ -3994,6 +4000,24 @@ whether imported ``Categorical`` variables are ordered.
a ``Categorial`` with string categories for the values that are labeled and numeric categories for values with no label. +.. _io.other: + +Other file formats +------------------ + +pandas itself only supports IO with a limited set of file formats that map +cleanly to its tabular data model. For reading and writing other file formats +into and from pandas, we recommend these packages from the broader community. + +netCDF +~~~~~~ + +xray_ provides data structures inspired by the pandas DataFrame for working +with multi-dimensional datasets, with a focus on the netCDF file format and +easy conversion to and from pandas. + +.. _xray: http://xray.readthedocs.org/ + .. _io.perf: Performance Considerations diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 7128e2dd82d6c..d51c2f62b8a0c 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -12,6 +12,12 @@ randn = np.random.randn np.set_printoptions(precision=4, suppress=True) + import matplotlib.pyplot as plt + plt.close('all') + import pandas.util.doctools as doctools + p = doctools.TablePlotter() + + **************************** Merge, join, and concatenate **************************** @@ -37,14 +43,34 @@ a simple example: .. ipython:: python - df = DataFrame(np.random.randn(10, 4)) - df + df1 = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=[0, 1, 2, 3]) + + df2 = DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], + 'B': ['B4', 'B5', 'B6', 'B7'], + 'C': ['C4', 'C5', 'C6', 'C7'], + 'D': ['D4', 'D5', 'D6', 'D7']}, + index=[4, 5, 6, 7]) + + df3 = DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], + 'B': ['B8', 'B9', 'B10', 'B11'], + 'C': ['C8', 'C9', 'C10', 'C11'], + 'D': ['D8', 'D9', 'D10', 'D11']}, + index=[8, 9, 10, 11]) + + frames = [df1, df2, df3] + result = concat(frames) - # break it into pieces - pieces = [df[:3], df[3:7], df[7:]] +.. 
ipython:: python + :suppress: - concatenated = concat(pieces) - concatenated + @savefig merging_concat_basic.png + p.plot(frames, result, + labels=['df1', 'df2', 'df3'], vertical=True); + plt.close('all'); Like its sibling function on ndarrays, ``numpy.concatenate``, ``pandas.concat`` takes a list or dict of homogeneously-typed objects and concatenates them with @@ -86,8 +112,15 @@ this using the ``keys`` argument: .. ipython:: python - concatenated = concat(pieces, keys=['first', 'second', 'third']) - concatenated + result = concat(frames, keys=['x', 'y', 'z']) + +.. ipython:: python + :suppress: + + @savefig merging_concat_keys.png + p.plot(frames, result, + labels=['df1', 'df2', 'df3'], vertical=True) + plt.close('all'); As you can see (if you've read the rest of the documentation), the resulting object's index has a :ref:`hierarchical index <advanced.hierarchical>`. This @@ -95,7 +128,7 @@ means that we can now do stuff like select out each chunk by key: .. ipython:: python - concatenated.ix['second'] + result.ix['y'] It's not a stretch to see how this can be very useful. More detail on this functionality below. @@ -130,29 +163,50 @@ behavior: .. ipython:: python - from pandas.util.testing import rands_array - df = DataFrame(np.random.randn(10, 4), columns=['a', 'b', 'c', 'd'], - index=rands_array(5, 10)) - df + df4 = DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], + 'D': ['D2', 'D3', 'D6', 'D7'], + 'F': ['F2', 'F3', 'F6', 'F7']}, + index=[2, 3, 6, 7]) + result = concat([df1, df4], axis=1) + + +.. ipython:: python + :suppress: - concat([df.ix[:7, ['a', 'b']], df.ix[2:-2, ['c']], - df.ix[-7:, ['d']]], axis=1) + @savefig merging_concat_axis1.png + p.plot([df1, df4], result, + labels=['df1', 'df4'], vertical=False); + plt.close('all'); Note that the row indexes have been unioned and sorted. Here is the same thing with ``join='inner'``: .. 
ipython:: python - concat([df.ix[:7, ['a', 'b']], df.ix[2:-2, ['c']], - df.ix[-7:, ['d']]], axis=1, join='inner') + result = concat([df1, df4], axis=1, join='inner') + +.. ipython:: python + :suppress: + + @savefig merging_concat_axis1_inner.png + p.plot([df1, df4], result, + labels=['df1', 'df4'], vertical=False); + plt.close('all'); Lastly, suppose we just wanted to reuse the *exact index* from the original DataFrame: .. ipython:: python - concat([df.ix[:7, ['a', 'b']], df.ix[2:-2, ['c']], - df.ix[-7:, ['d']]], axis=1, join_axes=[df.index]) + result = concat([df1, df4], axis=1, join_axes=[df1.index]) + +.. ipython:: python + :suppress: + + @savefig merging_concat_axis1_join_axes.png + p.plot([df1, df4], result, + labels=['df1', 'df4'], vertical=False); + plt.close('all'); .. _merging.concatenation: @@ -165,32 +219,44 @@ along ``axis=0``, namely the index: .. ipython:: python - s = Series(randn(10), index=np.arange(10)) - s1 = s[:5] # note we're slicing with labels here, so 5 is included - s2 = s[6:] - s1.append(s2) + result = df1.append(df2) + +.. ipython:: python + :suppress: + + @savefig merging_append1.png + p.plot([df1, df2], result, + labels=['df1', 'df2'], vertical=True); + plt.close('all'); In the case of DataFrame, the indexes must be disjoint but the columns do not need to be: .. ipython:: python - df = DataFrame(randn(6, 4), index=date_range('1/1/2000', periods=6), - columns=['A', 'B', 'C', 'D']) - df1 = df.ix[:3] - df2 = df.ix[3:, :3] - df1 - df2 - df1.append(df2) + result = df1.append(df4) + +.. ipython:: python + :suppress: + + @savefig merging_append2.png + p.plot([df1, df4], result, + labels=['df1', 'df4'], vertical=True); + plt.close('all'); ``append`` may take multiple objects to concatenate: .. ipython:: python - df1 = df.ix[:2] - df2 = df.ix[2:4] - df3 = df.ix[4:] - df1.append([df2,df3]) + result = df1.append([df2, df3]) + +.. 
ipython:: python + :suppress: + + @savefig merging_append3.png + p.plot([df1, df2, df3], result, + labels=['df1', 'df2', 'df3'], vertical=True); + plt.close('all'); .. note:: @@ -205,25 +271,33 @@ Ignoring indexes on the concatenation axis For DataFrames which don't have a meaningful index, you may wish to append them and ignore the fact that they may have overlapping indexes: -.. ipython:: python - - df1 = DataFrame(randn(6, 4), columns=['A', 'B', 'C', 'D']) - df2 = DataFrame(randn(3, 4), columns=['A', 'B', 'C', 'D']) +To do this, use the ``ignore_index`` argument: - df1 - df2 +.. ipython:: python -To do this, use the ``ignore_index`` argument: + result = concat([df1, df4], ignore_index=True) .. ipython:: python + :suppress: - concat([df1, df2], ignore_index=True) + @savefig merging_concat_ignore_index.png + p.plot([df1, df4], result, + labels=['df1', 'df4'], vertical=True); + plt.close('all'); This is also a valid argument to ``DataFrame.append``: .. ipython:: python - df1.append(df2, ignore_index=True) + result = df1.append(df4, ignore_index=True) + +.. ipython:: python + :suppress: + + @savefig merging_append_ignore_index.png + p.plot([df1, df4], result, + labels=['df1', 'df4'], vertical=True); + plt.close('all'); .. _merging.mixed_ndims: @@ -236,22 +310,45 @@ the name of the Series. .. ipython:: python - df1 = DataFrame(randn(6, 4), columns=['A', 'B', 'C', 'D']) - s1 = Series(randn(6), name='foo') - concat([df1, s1],axis=1) + s1 = Series(['X0', 'X1', 'X2', 'X3'], name='X') + result = concat([df1, s1], axis=1) + +.. ipython:: python + :suppress: + + @savefig merging_concat_mixed_ndim.png + p.plot([df1, s1], result, + labels=['df1', 's1'], vertical=False); + plt.close('all'); If unnamed Series are passed they will be numbered consecutively. .. ipython:: python - s2 = Series(randn(6)) - concat([df1, s2, s2, s2],axis=1) + s2 = Series(['_0', '_1', '_2', '_3']) + result = concat([df1, s2, s2, s2], axis=1) + +.. 
ipython:: python + :suppress: + + @savefig merging_concat_unnamed_series.png + p.plot([df1, s2], result, + labels=['df1', 's2'], vertical=False); + plt.close('all'); Passing ``ignore_index=True`` will drop all name references. .. ipython:: python - concat([df1, s1],axis=1,ignore_index=True) + result = concat([df1, s1], axis=1, ignore_index=True) + +.. ipython:: python + :suppress: + + @savefig merging_concat_series_ignore_index.png + p.plot([df1, s1], result, + labels=['df1', 's1'], vertical=False); + plt.close('all'); More concatenating with group keys ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -260,43 +357,71 @@ Let's consider a variation on the first example presented: .. ipython:: python - df = DataFrame(np.random.randn(10, 4)) - df + result = concat(frames, keys=['x', 'y', 'z']) - # break it into pieces - pieces = [df.ix[:, [0, 1]], df.ix[:, [2]], df.ix[:, [3]]] +.. ipython:: python + :suppress: - result = concat(pieces, axis=1, keys=['one', 'two', 'three']) - result + @savefig merging_concat_group_keys2.png + p.plot(frames, result, + labels=['df1', 'df2', 'df3'], vertical=True); + plt.close('all'); You can also pass a dict to ``concat`` in which case the dict keys will be used for the ``keys`` argument (unless other keys are specified): .. ipython:: python - pieces = {'one': df.ix[:, [0, 1]], - 'two': df.ix[:, [2]], - 'three': df.ix[:, [3]]} - concat(pieces, axis=1) - concat(pieces, keys=['three', 'two']) + pieces = {'x': df1, 'y': df2, 'z': df3} + result = concat(pieces) + +.. ipython:: python + :suppress: + + @savefig merging_concat_dict.png + p.plot([df1, df2, df3], result, + labels=['df1', 'df2', 'df3'], vertical=True); + plt.close('all'); + +.. ipython:: python + + result = concat(pieces, keys=['z', 'y']) + +.. 
ipython:: python + :suppress: + + @savefig merging_concat_dict_keys.png + p.plot([df1, df2, df3], result, + labels=['df1', 'df2', 'df3'], vertical=True); + plt.close('all'); The MultiIndex created has levels that are constructed from the passed keys and -the columns of the DataFrame pieces: +the index of the DataFrame pieces: .. ipython:: python - result.columns.levels + result.index.levels If you wish to specify other levels (as will occasionally be the case), you can do so using the ``levels`` argument: .. ipython:: python - result = concat(pieces, axis=1, keys=['one', 'two', 'three'], - levels=[['three', 'two', 'one', 'zero']], + result = concat(pieces, keys=['x', 'y', 'z'], + levels=[['z', 'y', 'x', 'w']], names=['group_key']) - result - result.columns.levels + +.. ipython:: python + :suppress: + + @savefig merging_concat_dict_keys_names.png + p.plot([df1, df2, df3], result, + labels=['df1', 'df2', 'df3'], vertical=True); + plt.close('all'); + +.. ipython:: python + + result.index.levels Yes, this is fairly esoteric, but is actually necessary for implementing things like GroupBy where the order of a categorical variable is meaningful. @@ -312,10 +437,16 @@ which returns a new DataFrame as above. .. ipython:: python - df = DataFrame(np.random.randn(8, 4), columns=['A','B','C','D']) - df - s = df.xs(3) - df.append(s, ignore_index=True) + s2 = Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) + result = df1.append(s2, ignore_index=True) + +.. ipython:: python + :suppress: + + @savefig merging_append_series_as_row.png + p.plot([df1, s2], result, + labels=['df1', 's2'], vertical=True); + plt.close('all'); You should use ``ignore_index`` with this method to instruct DataFrame to discard its index. If you wish to preserve the index, you should construct an @@ -325,12 +456,17 @@ You can also pass a list of dicts or Series: .. 
ipython:: python - df = DataFrame(np.random.randn(5, 4), - columns=['foo', 'bar', 'baz', 'qux']) - dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4}, - {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}] - result = df.append(dicts, ignore_index=True) - result + dicts = [{'A': 1, 'B': 2, 'C': 3, 'X': 4}, + {'A': 5, 'B': 6, 'C': 7, 'Y': 8}] + result = df1.append(dicts, ignore_index=True) + +.. ipython:: python + :suppress: + + @savefig merging_append_dits.png + p.plot([df1, DataFrame(dicts)], result, + labels=['df1', 'dicts'], vertical=True); + plt.close('all'); .. _merging.join: @@ -354,7 +490,7 @@ standard database join operations between DataFrame objects: :: - merge(left, right, how='left', on=None, left_on=None, right_on=None, + merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=True, suffixes=('_x', '_y'), copy=True) @@ -430,24 +566,46 @@ key combination: .. ipython:: python - left = DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]}) - right = DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]}) - left - right - merge(left, right, on='key') + left = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) + + right = DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) + result = merge(left, right, on='key') + +.. ipython:: python + :suppress: + + @savefig merging_merge_on_key.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); Here is a more complicated example with multiple join keys: .. 
ipython:: python - left = DataFrame({'key1': ['foo', 'foo', 'bar'], - 'key2': ['one', 'two', 'one'], - 'lval': [1, 2, 3]}) - right = DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'], - 'key2': ['one', 'one', 'one', 'two'], - 'rval': [4, 5, 6, 7]}) - merge(left, right, how='outer') - merge(left, right, how='inner') + left = DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1'], + 'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3']}) + + right = DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], + 'key2': ['K0', 'K0', 'K0', 'K0'], + 'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}) + + result = merge(left, right, on=['key1', 'key2']) + +.. ipython:: python + :suppress: + + @savefig merging_merge_on_key_multiple.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); The ``how`` argument to ``merge`` specifies how to determine which keys are to be included in the resulting table. If a key combination **does not appear** in @@ -463,6 +621,53 @@ either the left or right tables, the values in the joined table will be ``outer``, ``FULL OUTER JOIN``, Use union of keys from both frames ``inner``, ``INNER JOIN``, Use intersection of keys from both frames +.. ipython:: python + + result = merge(left, right, how='left', on=['key1', 'key2']) + +.. ipython:: python + :suppress: + + @savefig merging_merge_on_key_left.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +.. ipython:: python + + result = merge(left, right, how='right', on=['key1', 'key2']) + +.. ipython:: python + :suppress: + + @savefig merging_merge_on_key_right.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + +.. ipython:: python + + result = merge(left, right, how='outer', on=['key1', 'key2']) + +.. 
ipython:: python + :suppress: + + @savefig merging_merge_on_key_outer.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +.. ipython:: python + + result = merge(left, right, how='inner', on=['key1', 'key2']) + +.. ipython:: python + :suppress: + + @savefig merging_merge_on_key_inner.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + .. _merging.join.index: Joining on index @@ -474,14 +679,47 @@ is a very basic example: .. ipython:: python - df = DataFrame(np.random.randn(8, 4), columns=['A','B','C','D']) - df1 = df.ix[1:, ['A', 'B']] - df2 = df.ix[:5, ['C', 'D']] - df1 - df2 - df1.join(df2) - df1.join(df2, how='outer') - df1.join(df2, how='inner') + left = DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=['K0', 'K1', 'K2']) + + right = DataFrame({'C': ['C0', 'C2', 'C3'], + 'D': ['D0', 'D2', 'D3']}, + index=['K0', 'K2', 'K3']) + + result = left.join(right) + +.. ipython:: python + :suppress: + + @savefig merging_join.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +.. ipython:: python + + result = left.join(right, how='outer') + +.. ipython:: python + :suppress: + + @savefig merging_join_outer.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +.. ipython:: python + + result = left.join(right, how='inner') + +.. ipython:: python + :suppress: + + @savefig merging_join_inner.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); The data alignment here is on the indexes (row labels). This same behavior can be achieved using ``merge`` plus additional arguments instructing it to use the @@ -489,7 +727,27 @@ indexes: .. ipython:: python - merge(df1, df2, left_index=True, right_index=True, how='outer') + result = merge(left, right, left_index=True, right_index=True, how='outer') + +.. 
ipython:: python + :suppress: + + @savefig merging_merge_index_outer.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +.. ipython:: python + + result = merge(left, right, left_index=True, right_index=True, how='inner'); + +.. ipython:: python + :suppress: + + @savefig merging_merge_index_inner.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); Joining key columns on an index ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -511,14 +769,36 @@ key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python - df['key'] = ['foo', 'bar'] * 4 - to_join = DataFrame(randn(2, 2), index=['bar', 'foo'], - columns=['j1', 'j2']) - df - to_join - df.join(to_join, on='key') - merge(df, to_join, left_on='key', right_index=True, - how='left', sort=False) + left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key': ['K0', 'K1', 'K0', 'K1']}) + + right = DataFrame({'C': ['C0', 'C1'], + 'D': ['D0', 'D1']}, + index=['K0', 'K1']) + + result = left.join(right, on='key') + +.. ipython:: python + :suppress: + + @savefig merging_join_key_columns.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +.. ipython:: python + + result = merge(left, right, left_on='key', right_index=True, + how='left', sort=False); + +.. ipython:: python + :suppress: + + @savefig merging_merge_key_columns.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); .. _merging.multikey_join: @@ -526,31 +806,30 @@ To join on multiple keys, the passed DataFrame must have a ``MultiIndex``: .. 
ipython:: python - index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], - ['one', 'two', 'three']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - to_join = DataFrame(np.random.randn(10, 3), index=index, - columns=['j_one', 'j_two', 'j_three']) - - # a little relevant example with NAs - key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux', - 'qux', 'snap'] - key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two', - 'three', 'one'] + left = DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], + 'B': ['B0', 'B1', 'B2', 'B3'], + 'key1': ['K0', 'K0', 'K1', 'K2'], + 'key2': ['K0', 'K1', 'K0', 'K1']}) - data = np.random.randn(len(key1)) - data = DataFrame({'key1' : key1, 'key2' : key2, - 'data' : data}) - data - to_join + index = MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'), + ('K2', 'K0'), ('K2', 'K1')]) + right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=index) Now this can be joined by passing the two key column names: .. ipython:: python - data.join(to_join, on=['key1', 'key2']) + result = left.join(right, on=['key1', 'key2']) + +.. ipython:: python + :suppress: + + @savefig merging_join_multikeys.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); .. _merging.df_inner_join: @@ -561,10 +840,92 @@ easily performed: .. ipython:: python - data.join(to_join, on=['key1', 'key2'], how='inner') + result = left.join(right, on=['key1', 'key2'], how='inner') + +.. ipython:: python + :suppress: + + @savefig merging_join_multikeys_inner.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); As you can see, this drops any rows where there was no match. +.. _merging.join_on_mi: + +Joining a single Index to a Multi-index +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
versionadded:: 0.14.0 + +You can join a singly-indexed ``DataFrame`` with a level of a multi-indexed ``DataFrame``. +The level will match on the name of the index of the singly-indexed frame against +a level name of the multi-indexed frame. + +.. ipython:: python + + left = DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=Index(['K0', 'K1', 'K2'], name='key')) + + index = MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'), + ('K2', 'Y2'), ('K2', 'Y3')], + names=['key', 'Y']) + right = DataFrame({'C': ['C0', 'C1', 'C2', 'C3'], + 'D': ['D0', 'D1', 'D2', 'D3']}, + index=index) + + result = left.join(right, how='inner') + +.. ipython:: python + :suppress: + + @savefig merging_join_multiindex_inner.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +This is equivalent but less verbose and more memory efficient / faster than this. + +.. ipython:: python + + result = merge(left.reset_index(), right.reset_index(), + on=['key'], how='inner').set_index(['key','Y']) + +.. ipython:: python + :suppress: + + @savefig merging_merge_multiindex_alternative.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +Joining with two multi-indexes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is not Implemented via ``join`` at-the-moment, however it can be done using the following. + +.. ipython:: python + + index = MultiIndex.from_tuples([('K0', 'X0'), ('K0', 'X1'), + ('K1', 'X2')], + names=['key', 'X']) + left = DataFrame({'A': ['A0', 'A1', 'A2'], + 'B': ['B0', 'B1', 'B2']}, + index=index) + + result = merge(left.reset_index(), right.reset_index(), + on=['key'], how='inner').set_index(['key','X','Y']) + +.. ipython:: python + :suppress: + + @savefig merging_merge_two_multiindex.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + Overlapping value columns ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -574,38 +935,47 @@ columns: .. 
ipython:: python - left = DataFrame({'key': ['foo', 'foo'], 'value': [1, 2]}) - right = DataFrame({'key': ['foo', 'foo'], 'value': [4, 5]}) - merge(left, right, on='key', suffixes=['_left', '_right']) + left = DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) + right = DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) -``DataFrame.join`` has ``lsuffix`` and ``rsuffix`` arguments which behave -similarly. + result = merge(left, right, on='k') -.. _merging.ordered_merge: +.. ipython:: python + :suppress: -Merging Ordered Data -~~~~~~~~~~~~~~~~~~~~ + @savefig merging_merge_overlapped.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); -New in v0.8.0 is the ordered_merge function for combining time series and other -ordered data. In particular it has an optional ``fill_method`` keyword to -fill/interpolate missing data: +.. ipython:: python + + result = merge(left, right, on='k', suffixes=['_l', '_r']) .. ipython:: python :suppress: - A = DataFrame({'key' : ['a', 'c', 'e'] * 2, - 'lvalue' : [1, 2, 3] * 2, - 'group' : ['a', 'a', 'a', 'b', 'b', 'b']}) - B = DataFrame({'key' : ['b', 'c', 'd'], - 'rvalue' : [1, 2, 3]}) + @savefig merging_merge_overlapped_suffix.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); + +``DataFrame.join`` has ``lsuffix`` and ``rsuffix`` arguments which behave +similarly. .. ipython:: python - A + left = left.set_index('k') + right = right.set_index('k') + result = left.join(right, lsuffix='_l', rsuffix='_r') - B +.. ipython:: python + :suppress: - ordered_merge(A, B, fill_method='ffill', left_by='group') + @savefig merging_merge_overlapped_multi_suffix.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=False); + plt.close('all'); .. _merging.multiple_join: @@ -617,11 +987,44 @@ them together on their indexes. The same is true for ``Panel.join``. .. 
ipython:: python - df1 = df.ix[:, ['A', 'B']] - df2 = df.ix[:, ['C', 'D']] - df3 = df.ix[:, ['key']] - df1 - df1.join([df2, df3]) + right2 = DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) + result = left.join([right, right2]) + +.. ipython:: python + :suppress: + + @savefig merging_join_multi_df.png + p.plot([left, right, right2], result, + labels=['left', 'right', 'right2'], vertical=False); + plt.close('all'); + +.. _merging.ordered_merge: + +Merging Ordered Data +~~~~~~~~~~~~~~~~~~~~ + +New in v0.8.0 is the ordered_merge function for combining time series and other +ordered data. In particular it has an optional ``fill_method`` keyword to +fill/interpolate missing data: + +.. ipython:: python + + left = DataFrame({'k': ['K0', 'K1', 'K1', 'K2'], + 'lv': [1, 2, 3, 4], + 's': ['a', 'b', 'c', 'd']}) + + right = DataFrame({'k': ['K1', 'K2', 'K4'], + 'rv': [1, 2, 3]}) + + result = ordered_merge(left, right, fill_method='ffill', left_by='s') + +.. ipython:: python + :suppress: + + @savefig merging_ordered_merge.png + p.plot([left, right], result, + labels=['left', 'right'], vertical=True); + plt.close('all'); .. _merging.combine_first.update: @@ -643,87 +1046,33 @@ For this, use the ``combine_first`` method: .. ipython:: python - df1.combine_first(df2) + result = df1.combine_first(df2) + +.. ipython:: python + :suppress: + + @savefig merging_combine_first.png + p.plot([df1, df2], result, + labels=['df1', 'df2'], vertical=False); + plt.close('all'); Note that this method only takes values from the right DataFrame if they are missing in the left DataFrame. A related method, ``update``, alters non-NA values inplace: .. ipython:: python + :suppress: - df1.update(df2) - df1 - -.. _merging.on_mi: - -Merging with Multi-indexes --------------------------- - -.. _merging.join_on_mi: - -Joining a single Index to a Multi-index -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
versionadded:: 0.14.0 - -You can join a singly-indexed DataFrame with a level of a multi-indexed DataFrame. -The level will match on the name of the index of the singly-indexed frame against -a level name of the multi-indexed frame. - -.. ipython:: python - - household = DataFrame(dict(household_id = [1,2,3], - male = [0,1,0], - wealth = [196087.3,316478.7,294750]), - columns = ['household_id','male','wealth'] - ).set_index('household_id') - household - portfolio = DataFrame(dict(household_id = [1,2,2,3,3,3,4], - asset_id = ["nl0000301109","nl0000289783","gb00b03mlx29", - "gb00b03mlx29","lu0197800237","nl0000289965",np.nan], - name = ["ABN Amro","Robeco","Royal Dutch Shell","Royal Dutch Shell", - "AAB Eastern Europe Equity Fund","Postbank BioTech Fonds",np.nan], - share = [1.0,0.4,0.6,0.15,0.6,0.25,1.0]), - columns = ['household_id','asset_id','name','share'] - ).set_index(['household_id','asset_id']) - portfolio - - household.join(portfolio, how='inner') - -This is equivalent but less verbose and more memory efficient / faster than this. - -.. code-block:: python - - merge(household.reset_index(), - portfolio.reset_index(), - on=['household_id'], - how='inner' - ).set_index(['household_id','asset_id']) - -Joining with two multi-indexes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is not Implemented via ``join`` at-the-moment, however it can be done using the following. + df1_copy = df1.copy() .. 
ipython:: python - household = DataFrame(dict(household_id = [1,2,2,3,3,3,4], - asset_id = ["nl0000301109","nl0000301109","gb00b03mlx29", - "gb00b03mlx29","lu0197800237","nl0000289965",np.nan], - share = [1.0,0.4,0.6,0.15,0.6,0.25,1.0]), - columns = ['household_id','asset_id','share'] - ).set_index(['household_id','asset_id']) - household + df1.update(df2) - log_return = DataFrame(dict(asset_id = ["gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", - "lu0197800237", "lu0197800237"], - t = [233, 234, 235, 180, 181], - log_return = [.09604978, -.06524096, .03532373, .03025441, .036997]), - ).set_index(["asset_id","t"]) - log_return +.. ipython:: python + :suppress: - merge(household.reset_index(), - log_return.reset_index(), - on=['asset_id'], - how='inner' - ).set_index(['household_id','asset_id','t']) + @savefig merging_update.png + p.plot([df1_copy, df2], df1, + labels=['df1', 'df2'], vertical=False); + plt.close('all'); diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 4505d256d31f6..04a6302f958a2 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -1,11 +1,19 @@ .. currentmodule:: pandas -.. _missing_data: .. ipython:: python :suppress: - from pandas import * - options.display.max_rows=15 + import numpy as np + import pandas as pd + pd.options.display.max_rows=15 + import matplotlib + try: + matplotlib.style.use('ggplot') + except AttributeError: + pd.options.display.mpl_style = 'default' + import matplotlib.pyplot as plt + +.. _missing_data: ************************* Working with missing data @@ -14,14 +22,6 @@ Working with missing data In this section, we will discuss missing (also referred to as NA) values in pandas. -.. ipython:: python - :suppress: - - import numpy as np; randn = np.random.randn; randint =np.random.randint - from pandas import * - import matplotlib.pyplot as plt - from pandas.compat import lrange - .. 
note:: The choice of using ``NaN`` internally to denote missing data was largely @@ -50,8 +50,8 @@ a data set is by reindexing. For example .. ipython:: python - df = DataFrame(randn(5, 3), index=['a', 'c', 'e', 'f', 'h'], - columns=['one', 'two', 'three']) + df = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f', 'h'], + columns=['one', 'two', 'three']) df['four'] = 'bar' df['five'] = df['one'] > 0 df @@ -118,7 +118,7 @@ the missing value type chosen: .. ipython:: python - s = Series([1, 2, 3]) + s = pd.Series([1, 2, 3]) s.loc[0] = None s @@ -128,7 +128,7 @@ For object containers, pandas will use the value given: .. ipython:: python - s = Series(["a", "b", "c"]) + s = pd.Series(["a", "b", "c"]) s.loc[0] = None s.loc[1] = np.nan s @@ -255,7 +255,7 @@ use case of this is to fill a DataFrame with the mean of that column. .. ipython:: python - dff = DataFrame(np.random.randn(10,3),columns=list('ABC')) + dff = pd.DataFrame(np.random.randn(10,3),columns=list('ABC')) dff.iloc[3:5,0] = np.nan dff.iloc[4:6,1] = np.nan dff.iloc[5:8,2] = np.nan @@ -307,7 +307,7 @@ Interpolation .. versionadded:: 0.13.0 :meth:`~pandas.DataFrame.interpolate`, and :meth:`~pandas.Series.interpolate` have - revamped interpolation methods and functionaility. + revamped interpolation methods and functionality. Both Series and Dataframe objects have an ``interpolate`` method that, by default, performs linear interpolation at missing datapoints. @@ -317,7 +317,7 @@ performs linear interpolation at missing datapoints. np.random.seed(123456) idx = date_range('1/1/2000', periods=100, freq='BM') - ts = Series(randn(100), index=idx) + ts = pd.Series(np.random.randn(100), index=idx) ts[1:20] = np.nan ts[60:80] = np.nan ts = ts.cumsum() @@ -328,7 +328,6 @@ performs linear interpolation at missing datapoints. 
ts.count() ts.interpolate().count() - plt.figure() @savefig series_interpolate.png ts.interpolate().plot() @@ -351,7 +350,7 @@ For a floating-point index, use ``method='values'``: :suppress: idx = [0., 1., 10.] - ser = Series([0., np.nan, 10.], idx) + ser = pd.Series([0., np.nan, 10.], idx) .. ipython:: python @@ -363,7 +362,7 @@ You can also interpolate with a DataFrame: .. ipython:: python - df = DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8], + df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8], 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]}) df df.interpolate() @@ -401,13 +400,12 @@ Compare several methods: np.random.seed(2) - ser = Series(np.arange(1, 10.1, .25)**2 + np.random.randn(37)) + ser = pd.Series(np.arange(1, 10.1, .25)**2 + np.random.randn(37)) bad = np.array([4, 13, 14, 15, 16, 17, 18, 20, 29]) ser[bad] = np.nan methods = ['linear', 'quadratic', 'cubic'] - df = DataFrame({m: ser.interpolate(method=m) for m in methods}) - plt.figure() + df = pd.DataFrame({m: ser.interpolate(method=m) for m in methods}) @savefig compare_interpolations.png df.plot() @@ -419,7 +417,7 @@ at the new values. .. ipython:: python - ser = Series(np.sort(np.random.uniform(size=100))) + ser = pd.Series(np.sort(np.random.uniform(size=100))) # interpolate at new_index new_index = ser.index | Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75]) @@ -438,7 +436,7 @@ observation: .. ipython:: python - ser = Series([1, 3, np.nan, np.nan, np.nan, 11]) + ser = pd.Series([1, 3, np.nan, np.nan, np.nan, 11]) ser.interpolate(limit=2) .. _missing_data.replace: @@ -454,7 +452,7 @@ value: .. ipython:: python - ser = Series([0., 1., 2., 3., 4.]) + ser = pd.Series([0., 1., 2., 3., 4.]) ser.replace(0, 5) @@ -474,7 +472,7 @@ For a DataFrame, you can specify individual values by column: .. 
ipython:: python - df = DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]}) + df = pd.DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]}) df.replace({'a': 0, 'b': 5}, 100) @@ -502,31 +500,24 @@ String/Regular Expression Replacement Replace the '.' with ``nan`` (str -> str) -.. ipython:: python - :suppress: - - from numpy.random import rand, randn - from numpy import nan - from pandas import DataFrame - .. ipython:: python - d = {'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} - df = DataFrame(d) - df.replace('.', nan) + d = {'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} + df = pd.DataFrame(d) + df.replace('.', np.nan) Now do it with a regular expression that removes surrounding whitespace (regex -> regex) .. ipython:: python - df.replace(r'\s*\.\s*', nan, regex=True) + df.replace(r'\s*\.\s*', np.nan, regex=True) Replace a few different values (list -> list) .. ipython:: python - df.replace(['a', '.'], ['b', nan]) + df.replace(['a', '.'], ['b', np.nan]) list of regex -> list of regex @@ -538,14 +529,14 @@ Only search in column ``'b'`` (dict -> dict) .. ipython:: python - df.replace({'b': '.'}, {'b': nan}) + df.replace({'b': '.'}, {'b': np.nan}) Same as the previous example, but use a regular expression for searching instead (dict of regex -> dict) .. ipython:: python - df.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True) + df.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True) You can pass nested dictionaries of regular expressions that use ``regex=True`` @@ -557,7 +548,7 @@ or you can pass the nested dictionary like so .. ipython:: python - df.replace(regex={'b': {r'\s*\.\s*': nan}}) + df.replace(regex={'b': {r'\s*\.\s*': np.nan}}) You can also use the group of a regular expression match when replacing (dict of regex -> dict of regex), this works for lists as well @@ -571,7 +562,7 @@ will be replaced with a scalar (list of regex -> regex) .. 
ipython:: python - df.replace([r'\s*\.\s*', r'a|b'], nan, regex=True) + df.replace([r'\s*\.\s*', r'a|b'], np.nan, regex=True) All of the regular expression examples can also be passed with the ``to_replace`` argument as the ``regex`` argument. In this case the ``value`` @@ -580,7 +571,7 @@ dictionary. The previous example, in this case, would then be .. ipython:: python - df.replace(regex=[r'\s*\.\s*', r'a|b'], value=nan) + df.replace(regex=[r'\s*\.\s*', r'a|b'], value=np.nan) This can be convenient if you do not want to pass ``regex=True`` every time you want to use a regular expression. @@ -595,33 +586,25 @@ Numeric Replacement Similar to ``DataFrame.fillna`` -.. ipython:: python - :suppress: - - from numpy.random import rand, randn - from numpy import nan - from pandas import DataFrame - from pandas.util.testing import assert_frame_equal - .. ipython:: python - df = DataFrame(randn(10, 2)) - df[rand(df.shape[0]) > 0.5] = 1.5 - df.replace(1.5, nan) + df = pd.DataFrame(np.random.randn(10, 2)) + df[np.random.rand(df.shape[0]) > 0.5] = 1.5 + df.replace(1.5, np.nan) Replacing more than one value via lists works as well .. ipython:: python df00 = df.values[0, 0] - df.replace([1.5, df00], [nan, 'a']) + df.replace([1.5, df00], [np.nan, 'a']) df[1].dtype You can also operate on the DataFrame in place .. ipython:: python - df.replace(1.5, nan, inplace=True) + df.replace(1.5, np.nan, inplace=True) .. warning:: @@ -631,7 +614,7 @@ You can also operate on the DataFrame in place .. code-block:: python - s = Series([True, False, True]) + s = pd.Series([True, False, True]) s.replace({'a string': 'new value', True: False}) # raises TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' @@ -643,7 +626,7 @@ You can also operate on the DataFrame in place .. ipython:: python - s = Series([True, False, True]) + s = pd.Series([True, False, True]) s.replace('a string', 'another string') the original ``NDFrame`` object will be returned untouched. 
We're working on @@ -672,7 +655,7 @@ For example: .. ipython:: python - s = Series(randn(5), index=[0, 2, 4, 6, 7]) + s = pd.Series(np.random.randn(5), index=[0, 2, 4, 6, 7]) s > 0 (s > 0).dtype crit = (s > 0).reindex(list(range(8))) diff --git a/doc/source/options.rst b/doc/source/options.rst index 7e36f369bc7e7..4b69015353612 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -18,7 +18,7 @@ Overview pandas has an options system that lets you customize some aspects of its behaviour, display-related options being those the user is most likely to adjust. -Options have a full "dotted-style", case-insensitive name (e.g. ``display.max_rows``), +Options have a full "dotted-style", case-insensitive name (e.g. ``display.max_rows``). You can get/set options directly as attributes of the top-level ``options`` attribute: .. ipython:: python @@ -29,7 +29,7 @@ You can get/set options directly as attributes of the top-level ``options`` attr pd.options.display.max_rows There is also an API composed of 5 relevant functions, available directly from the ``pandas`` -namespace, and they are: +namespace: - :func:`~pandas.get_option` / :func:`~pandas.set_option` - get/set the value of a single option. - :func:`~pandas.reset_option` - reset one or more options to their default value. @@ -412,7 +412,7 @@ mode.use_inf_as_null False True means treat None, NaN, -INF, Number Formatting ------------------ -pandas also allow you to set how numbers are displayed in the console. +pandas also allows you to set how numbers are displayed in the console. This option is not set through the ``set_options`` API. 
Use the ``set_eng_float_format`` function diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 49a788def2854..b1addddc2121d 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -9,7 +9,7 @@ Package overview :mod:`pandas` consists of the following things * A set of labeled array data structures, the primary of which are - Series/TimeSeries and DataFrame + Series and DataFrame * Index objects enabling both simple axis indexing and multi-level / hierarchical axis indexing * An integrated group by engine for aggregating and transforming data sets @@ -32,7 +32,6 @@ Data structures at a glance :widths: 15, 20, 50 1, Series, "1D labeled homogeneously-typed array" - 1, TimeSeries, "Series with index containing datetimes" 2, DataFrame, "General 2D labeled, size-mutable tabular structure with potentially heterogeneously-typed columns" 3, Panel, "General 3D labeled, also size-mutable array" diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst index 826d9e980538e..da37c92c88ecf 100644 --- a/doc/source/r_interface.rst +++ b/doc/source/r_interface.rst @@ -15,7 +15,69 @@ rpy2 / R interface .. warning:: - In v0.16.0, the ``pandas.rpy`` interface has been **deprecated and will be removed in a future version**. Similar functionaility can be accessed thru the `rpy2 <http://rpy.sourceforge.net/>`_ project. + In v0.16.0, the ``pandas.rpy`` interface has been **deprecated and will be + removed in a future version**. Similar functionality can be accessed + through the `rpy2 <http://rpy.sourceforge.net/>`_ project. + See the :ref:`updating <rpy.updating>` section for a guide to port your + code from the ``pandas.rpy`` to ``rpy2`` functions. + + +.. _rpy.updating: + +Updating your code to use rpy2 functions +---------------------------------------- + +In v0.16.0, the ``pandas.rpy`` module has been **deprecated** and users are +pointed to the similar functionality in ``rpy2`` itself (rpy2 >= 2.4). 
+ +Instead of importing ``import pandas.rpy.common as com``, the following imports +should be done to activate the pandas conversion support in rpy2:: + + from rpy2.robjects import pandas2ri + pandas2ri.activate() + +Converting data frames back and forth between rpy2 and pandas should be largely +automated (no need to convert explicitly, it will be done on the fly in most +rpy2 functions). + +To convert explicitly, the functions are ``pandas2ri.py2ri()`` and +``pandas2ri.ri2py()``. So these functions can be used to replace the existing +functions in pandas: + +- ``com.convert_to_r_dataframe(df)`` should be replaced with ``pandas2ri.py2ri(df)`` +- ``com.convert_robj(rdf)`` should be replaced with ``pandas2ri.ri2py(rdf)`` + +Note: these functions are for the latest version (rpy2 2.5.x) and were called +``pandas2ri.pandas2ri()`` and ``pandas2ri.ri2pandas()`` previously. + +Some of the other functionality in `pandas.rpy` can be replaced easily as well. +For example to load R data as done with the ``load_data`` function, the +current method:: + + df_iris = com.load_data('iris') + +can be replaced with:: + + from rpy2.robjects import r + r.data('iris') + df_iris = pandas2ri.ri2py(r[name]) + +The ``convert_to_r_matrix`` function can be replaced by the normal +``pandas2ri.py2ri`` to convert dataframes, with a subsequent call to R +``as.matrix`` function. + +.. warning:: + + Not all conversion functions in rpy2 are working exactly the same as the + current methods in pandas. If you experience problems or limitations in + comparison to the ones in pandas, please report this at the + `issue tracker <https://github.com/pydata/pandas/issues>`_. + +See also the documentation of the `rpy2 <http://rpy.sourceforge.net/>`_ project. + + +R interface with rpy2 +--------------------- If your computer has R and rpy2 (> 2.2) installed (which will be left to the reader), you will be able to leverage the below functionality. 
On Windows, @@ -56,6 +118,7 @@ appropriate pandas object (most likely a DataFrame): .. ipython:: python + :okwarning: import pandas.rpy.common as com infert = com.load_data('infert') diff --git a/doc/source/release.rst b/doc/source/release.rst index 074e686ac1662..f22f95fd0a7d4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -45,6 +45,80 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: http://pypi.python.org/pypi/pandas * Documentation: http://pandas.pydata.org +pandas 0.16.1 +------------- + +**Release date:** (May 11, 2015) + +This is a minor release from 0.16.0 and includes a large number of bug fixes +along with several new features, enhancements, and performance improvements. +A small number of API changes were necessary to fix existing bugs. + +See the :ref:`v0.16.1 Whatsnew <whatsnew_0161>` overview for an extensive list +of all API changes, enhancements and bugs that have been fixed in 0.16.1. + +Thanks +~~~~~~ + +- Alfonso MHC +- Andy Hayden +- Artemy Kolchinsky +- Chris Gilmer +- Chris Grinolds +- Dan Birken +- David BROCHART +- David Hirschfeld +- David Stephens +- Dr. 
Leo +- Evan Wright +- Frans van Dunné +- Hatem Nassrat +- Henning Sperr +- Hugo Herter +- Jan Schulz +- Jeff Blackburne +- Jeff Reback +- Jim Crist +- Jonas Abernot +- Joris Van den Bossche +- Kerby Shedden +- Leo Razoumov +- Manuel Riel +- Mortada Mehyar +- Nick Burns +- Nick Eubank +- Olivier Grisel +- Phillip Cloud +- Pietro Battiston +- Roy Hyunjin Han +- Sam Zhang +- Scott Sanderson +- Stephan Hoyer +- Tiago Antao +- Tom Ajamian +- Tom Augspurger +- Tomaz Berisa +- Vikram Shirgur +- Vladimir Filimonov +- William Hogman +- Yasin A +- Younggun Kim +- behzad nouri +- dsm054 +- floydsoft +- flying-sheep +- gfr +- jnmclarty +- jreback +- ksanghai +- lucas +- mschmohl +- ptype +- rockg +- scls19fr +- sinhrks + + pandas 0.16.0 ------------- diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst index ac9b6c9aecc4a..1992288fd4d00 100644 --- a/doc/source/remote_data.rst +++ b/doc/source/remote_data.rst @@ -25,6 +25,24 @@ Remote Data Access ****************** +.. _remote_data.pandas_datareader: + +.. warning:: + + In pandas 0.17.0, the sub-package ``pandas.io.data`` will be removed in favor of a separately installable `pandas-datareader package <https://github.com/pydata/pandas-datareader>`_. This will allow the data modules to be independently updated to your pandas installation. The API for ``pandas-datareader v0.1.1`` is the same as in ``pandas v0.16.1``. (:issue:`8961`) + + You should replace the imports of the following: + + .. code-block:: python + + from pandas.io import data, wb + + With: + + .. code-block:: python + + from pandas_datareader import data, wb + .. _remote_data.data_reader: Functions from :mod:`pandas.io.data` and :mod:`pandas.io.ga` extract data from various Internet sources into a DataFrame. Currently the following sources are supported: @@ -49,7 +67,7 @@ Yahoo! 
Finance import datetime start = datetime.datetime(2010, 1, 1) end = datetime.datetime(2013, 1, 27) - f=web.DataReader("F", 'yahoo', start, end) + f = web.DataReader("F", 'yahoo', start, end) f.ix['2010-01-04'] .. _remote_data.yahoo_options: @@ -58,10 +76,10 @@ Yahoo! Finance Options ---------------------- ***Experimental*** -The Options class allows the download of options data from Yahoo! Finance. +The ``Options`` class allows the download of options data from Yahoo! Finance. The ``get_all_data`` method downloads and caches option data for all expiry months -and provides a formatted ``DataFrame`` with a hierarchical index, so its easy to get +and provides a formatted ``DataFrame`` with a hierarchical index, so it is easy to get to the specific option you want. .. ipython:: python @@ -71,10 +89,10 @@ to the specific option you want. data = aapl.get_all_data() data.iloc[0:5, 0:5] - #Show the $100 strike puts at all expiry dates: + # Show the $100 strike puts at all expiry dates: data.loc[(100, slice(None), 'put'),:].iloc[0:5, 0:5] - #Show the volume traded of $100 strike puts at all expiry dates: + # Show the volume traded of $100 strike puts at all expiry dates: data.loc[(100, slice(None), 'put'),'Vol'].head() If you don't want to download all the data, more specific requests can be made. @@ -121,7 +139,7 @@ Google Finance import datetime start = datetime.datetime(2010, 1, 1) end = datetime.datetime(2013, 1, 27) - f=web.DataReader("F", 'google', start, end) + f = web.DataReader("F", 'google', start, end) f.ix['2010-01-04'] .. _remote_data.fred: @@ -152,7 +170,7 @@ Dataset names are listed at `Fama/French Data Library .. ipython:: python import pandas.io.data as web - ip=web.DataReader("5_Industry_Portfolios", "famafrench") + ip = web.DataReader("5_Industry_Portfolios", "famafrench") ip[4].ix[192607] .. 
_remote_data.wb: @@ -168,7 +186,7 @@ Indicators ~~~~~~~~~~ Either from exploring the World Bank site, or using the search function included, -every world bank indicator is accessible. +every world bank indicator is accessible. For example, if you wanted to compare the Gross Domestic Products per capita in constant dollars in North America, you would use the ``search`` function: @@ -287,7 +305,7 @@ Country Codes .. versionadded:: 0.15.1 -The ``country`` argument accepts a string or list of mixed +The ``country`` argument accepts a string or list of mixed `two <http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`__ or `three <http://en.wikipedia.org/wiki/ISO_3166-1_alpha-3>`__ character ISO country codes, as well as dynamic `World Bank exceptions <http://data.worldbank.org/node/18>`__ to the ISO standards. @@ -298,13 +316,12 @@ Problematic Country Codes & Indicators .. note:: - The World Bank's country list and indicators are dynamic. As of 0.15.1, + The World Bank's country list and indicators are dynamic. As of 0.15.1, :func:`wb.download()` is more flexible. To achieve this, the warning and exception logic changed. - -The world bank converts some country codes, -in their response, which makes error checking by pandas difficult. -Retired indicators still persist in the search. + +The world bank converts some country codes in their response, which makes error +checking by pandas difficult. Retired indicators still persist in the search. Given the new flexibility of 0.15.1, improved error handling by the user may be necessary for fringe cases. @@ -321,12 +338,12 @@ There are at least 4 kinds of country codes: There are at least 3 kinds of indicators: 1. Current - Returns data. -2. Retired - Appears in search results, yet won't return data. +2. Retired - Appears in search results, yet won't return data. 3. Bad - Will not return data. Use the ``errors`` argument to control warnings and exceptions. Setting errors to ignore or warn, won't stop failed responses. 
(ie, 100% bad -indicators, or a single "bad" (#4 above) country code). +indicators, or a single "bad" (#4 above) country code). See docstrings for more info. @@ -377,15 +394,14 @@ The following will fetch users and pageviews (metrics) data per day of the week, filters = "pagePath=~aboutus;ga:country==France", ) -The only mandatory arguments are ``metrics,`` ``dimensions`` and ``start_date``. We can only strongly recommend you to always specify the ``account_id``, ``profile_id`` and ``property_id`` to avoid accessing the wrong data bucket in Google Analytics. +The only mandatory arguments are ``metrics,`` ``dimensions`` and ``start_date``. We strongly recommend that you always specify the ``account_id``, ``profile_id`` and ``property_id`` to avoid accessing the wrong data bucket in Google Analytics. The ``index_col`` argument indicates which dimension(s) has to be taken as index. -The ``filters`` argument indicates the filtering to apply to the query. In the above example, the page has URL has to contain ``aboutus`` AND the visitors country has to be France. +The ``filters`` argument indicates the filtering to apply to the query. In the above example, the page URL has to contain ``aboutus`` AND the visitors country has to be France. 
-Detailed informations in the followings: +Detailed information in the following: * `pandas & google analytics, by yhat <http://blog.yhathq.com/posts/pandas-google-analytics.html>`__ * `Google Analytics integration in pandas, by Chang She <http://quantabee.wordpress.com/2012/12/17/google-analytics-pandas/>`__ * `Google Analytics Dimensions and Metrics Reference <https://developers.google.com/analytics/devguides/reporting/core/dimsmets>`_ - diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index dc13ce3e5c4da..26aaf9c2be69d 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -6,14 +6,9 @@ import numpy as np np.random.seed(123456) - from pandas import * - options.display.max_rows=15 - from pandas.core.reshape import * - import pandas.util.testing as tm - randn = np.random.randn + import pandas as pd + pd.options.display.max_rows=15 np.set_printoptions(precision=4, suppress=True) - from pandas.tools.tile import * - from pandas.compat import zip ************************** Reshaping and Pivot Tables @@ -56,7 +51,7 @@ For the curious here is how the above DataFrame was created: data = {'value' : frame.values.ravel('F'), 'variable' : np.asarray(frame.columns).repeat(N), 'date' : np.tile(np.asarray(frame.index), K)} - return DataFrame(data, columns=['date', 'variable', 'value']) + return pd.DataFrame(data, columns=['date', 'variable', 'value']) df = unpivot(tm.makeTimeDataFrame()) To select out everything for variable ``A`` we could do: @@ -119,11 +114,11 @@ from the hierarchical indexing section: .. 
ipython:: python tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', - 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', - 'one', 'two', 'one', 'two']])) - index = MultiIndex.from_tuples(tuples, names=['first', 'second']) - df = DataFrame(randn(8, 2), index=index, columns=['A', 'B']) + 'foo', 'foo', 'qux', 'qux'], + ['one', 'two', 'one', 'two', + 'one', 'two', 'one', 'two']])) + index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) + df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B']) df2 = df[:4] df2 @@ -166,8 +161,8 @@ will result in a **sorted** copy of the original DataFrame or Series: .. ipython:: python - index = MultiIndex.from_product([[2,1], ['a', 'b']]) - df = DataFrame(randn(4), index=index, columns=['A']) + index = pd.MultiIndex.from_product([[2,1], ['a', 'b']]) + df = pd.DataFrame(np.random.randn(4), index=index, columns=['A']) df all(df.unstack().stack() == df.sort()) @@ -185,13 +180,13 @@ processed individually. .. ipython:: python - columns = MultiIndex.from_tuples([ + columns = pd.MultiIndex.from_tuples([ ('A', 'cat', 'long'), ('B', 'cat', 'long'), ('A', 'dog', 'short'), ('B', 'dog', 'short') ], names=['exp', 'animal', 'hair_length'] ) - df = DataFrame(randn(4, 4), columns=columns) + df = pd.DataFrame(np.random.randn(4, 4), columns=columns) df df.stack(level=['animal', 'hair_length']) @@ -215,12 +210,13 @@ calling ``sortlevel``, of course). Here is a more complex example: .. 
ipython:: python - columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), - ('B', 'cat'), ('A', 'dog')], - names=['exp', 'animal']) - index = MultiIndex.from_product([('bar', 'baz', 'foo', 'qux'), ('one', 'two')], - names=['first', 'second']) - df = DataFrame(randn(8, 4), index=index, columns=columns) + columns = pd.MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), + ('B', 'cat'), ('A', 'dog')], + names=['exp', 'animal']) + index = pd.MultiIndex.from_product([('bar', 'baz', 'foo', 'qux'), + ('one', 'two')], + names=['first', 'second']) + df = pd.DataFrame(np.random.randn(8, 4), index=index, columns=columns) df2 = df.ix[[0, 1, 2, 4, 5, 7]] df2 @@ -259,13 +255,13 @@ For instance, .. ipython:: python - cheese = DataFrame({'first' : ['John', 'Mary'], - 'last' : ['Doe', 'Bo'], - 'height' : [5.5, 6.0], - 'weight' : [130, 150]}) + cheese = pd.DataFrame({'first' : ['John', 'Mary'], + 'last' : ['Doe', 'Bo'], + 'height' : [5.5, 6.0], + 'weight' : [130, 150]}) cheese - melt(cheese, id_vars=['first', 'last']) - melt(cheese, id_vars=['first', 'last'], var_name='quantity') + pd.melt(cheese, id_vars=['first', 'last']) + pd.melt(cheese, id_vars=['first', 'last'], var_name='quantity') Another way to transform is to use the ``wide_to_long`` panel data convenience function. @@ -324,22 +320,22 @@ Consider a data set like this: .. 
ipython:: python import datetime - df = DataFrame({'A' : ['one', 'one', 'two', 'three'] * 6, - 'B' : ['A', 'B', 'C'] * 8, - 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 4, - 'D' : np.random.randn(24), - 'E' : np.random.randn(24), - 'F' : [datetime.datetime(2013, i, 1) for i in range(1, 13)] + - [datetime.datetime(2013, i, 15) for i in range(1, 13)]}) + df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 6, + 'B': ['A', 'B', 'C'] * 8, + 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 4, + 'D': np.random.randn(24), + 'E': np.random.randn(24), + 'F': [datetime.datetime(2013, i, 1) for i in range(1, 13)] + + [datetime.datetime(2013, i, 15) for i in range(1, 13)]}) df We can produce pivot tables from this data very easily: .. ipython:: python - pivot_table(df, values='D', index=['A', 'B'], columns=['C']) - pivot_table(df, values='D', index=['B'], columns=['A', 'C'], aggfunc=np.sum) - pivot_table(df, values=['D','E'], index=['B'], columns=['A', 'C'], aggfunc=np.sum) + pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C']) + pd.pivot_table(df, values='D', index=['B'], columns=['A', 'C'], aggfunc=np.sum) + pd.pivot_table(df, values=['D','E'], index=['B'], columns=['A', 'C'], aggfunc=np.sum) The result object is a DataFrame having potentially hierarchical indexes on the rows and columns. If the ``values`` column name is not given, the pivot table @@ -348,20 +344,20 @@ hierarchy in the columns: .. ipython:: python - pivot_table(df, index=['A', 'B'], columns=['C']) + pd.pivot_table(df, index=['A', 'B'], columns=['C']) Also, you can use ``Grouper`` for ``index`` and ``columns`` keywords. For detail of ``Grouper``, see :ref:`Grouping with a Grouper specification <groupby.specify>`. .. 
ipython:: python - pivot_table(df, values='D', index=Grouper(freq='M', key='F'), columns='C') + pd.pivot_table(df, values='D', index=Grouper(freq='M', key='F'), columns='C') You can render a nice output of the table omitting the missing values by calling ``to_string`` if you wish: .. ipython:: python - table = pivot_table(df, index=['A', 'B'], columns=['C']) + table = pd.pivot_table(df, index=['A', 'B'], columns=['C']) print(table.to_string(na_rep='')) Note that ``pivot_table`` is also available as an instance method on DataFrame. @@ -397,7 +393,7 @@ For example: a = np.array([foo, foo, bar, bar, foo, foo], dtype=object) b = np.array([one, one, two, one, two, one], dtype=object) c = np.array([dull, dull, shiny, dull, dull, shiny], dtype=object) - crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) + pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) .. _reshaping.pivot.margins: @@ -428,14 +424,14 @@ variables: ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) - cut(ages, bins=3) + pd.cut(ages, bins=3) If the ``bins`` keyword is an integer, then equal-width bins are formed. Alternatively we can specify custom bin-edges: .. ipython:: python - cut(ages, bins=[0, 18, 35, 70]) + pd.cut(ages, bins=[0, 18, 35, 70]) .. _reshaping.dummies: @@ -449,17 +445,16 @@ containing ``k`` columns of 1s and 0s: .. ipython:: python - df = DataFrame({'key': list('bbacab'), 'data1': range(6)}) + df = pd.DataFrame({'key': list('bbacab'), 'data1': range(6)}) - - get_dummies(df['key']) + pd.get_dummies(df['key']) Sometimes it's useful to prefix the column names, for example when merging the result with the original DataFrame: .. ipython:: python - dummies = get_dummies(df['key'], prefix='key') + dummies = pd.get_dummies(df['key'], prefix='key') dummies @@ -469,14 +464,14 @@ This function is often used along with discretization functions like ``cut``: .. 
ipython:: python - values = randn(10) + values = np.random.randn(10) values bins = [0, 0.2, 0.4, 0.6, 0.8, 1] - get_dummies(cut(values, bins)) + pd.get_dummies(pd.cut(values, bins)) See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`. diff --git a/doc/source/text.rst b/doc/source/text.rst index a98153e277fae..d40445d8490f7 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -17,10 +17,10 @@ Working with Text Data .. _text.string_methods: -Series is equipped with a set of string processing methods +Series and Index are equipped with a set of string processing methods that make it easy to operate on each element of the array. Perhaps most importantly, these methods exclude missing/NA values automatically. These are -accessed via the Series's ``str`` attribute and generally have names matching +accessed via the ``str`` attribute and generally have names matching the equivalent (scalar) built-in string methods: .. ipython:: python @@ -30,6 +30,39 @@ the equivalent (scalar) built-in string methods: s.str.upper() s.str.len() +.. ipython:: python + + idx = Index([' jack', 'jill ', ' jesse ', 'frank']) + idx.str.strip() + idx.str.lstrip() + idx.str.rstrip() + +The string methods on Index are especially useful for cleaning up or +transforming DataFrame columns. For instance, you may have columns with +leading or trailing whitespace: + +.. ipython:: python + + df = DataFrame(randn(3, 2), columns=[' Column A ', ' Column B '], + index=range(3)) + df + +Since ``df.columns`` is an Index object, we can use the ``.str`` accessor + +.. ipython:: python + + df.columns.str.strip() + df.columns.str.lower() + +These string methods can then be used to clean up the columns as needed. +Here we are removing leading and trailing whitespaces, lowercasing all names, +and replacing any remaining whitespaces with underscores: + +.. 
ipython:: python + + df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_') + df + Splitting and Replacing Strings ------------------------------- @@ -49,11 +82,11 @@ Elements in the split lists can be accessed using ``get`` or ``[]`` notation: s2.str.split('_').str.get(1) s2.str.split('_').str[1] -Easy to expand this to return a DataFrame using ``return_type``. +Easy to expand this to return a DataFrame using ``expand``. .. ipython:: python - s2.str.split('_', return_type='frame') + s2.str.split('_', expand=True) Methods like ``replace`` and ``findall`` take `regular expressions <https://docs.python.org/2/library/re.html>`__, too: @@ -229,12 +262,18 @@ Method Summary :meth:`~Series.str.strip`,Equivalent to ``str.strip`` :meth:`~Series.str.rstrip`,Equivalent to ``str.rstrip`` :meth:`~Series.str.lstrip`,Equivalent to ``str.lstrip`` + :meth:`~Series.str.partition`,Equivalent to ``str.partition`` + :meth:`~Series.str.rpartition`,Equivalent to ``str.rpartition`` :meth:`~Series.str.lower`,Equivalent to ``str.lower`` :meth:`~Series.str.upper`,Equivalent to ``str.upper`` :meth:`~Series.str.find`,Equivalent to ``str.find`` :meth:`~Series.str.rfind`,Equivalent to ``str.rfind`` - :meth:`~Series.str.capicalize`,Equivalent to ``str.capitalize`` + :meth:`~Series.str.index`,Equivalent to ``str.index`` + :meth:`~Series.str.rindex`,Equivalent to ``str.rindex`` + :meth:`~Series.str.capitalize`,Equivalent to ``str.capitalize`` :meth:`~Series.str.swapcase`,Equivalent to ``str.swapcase`` + :meth:`~Series.str.normalize`,Return Unicode normal form. 
Equivalent to ``unicodedata.normalize`` + :meth:`~Series.str.translate`,Equivalent to ``str.translate`` :meth:`~Series.str.isalnum`,Equivalent to ``str.isalnum`` :meth:`~Series.str.isalpha`,Equivalent to ``str.isalpha`` :meth:`~Series.str.isdigit`,Equivalent to ``str.isdigit`` @@ -243,4 +282,4 @@ Method Summary :meth:`~Series.str.isupper`,Equivalent to ``str.isupper`` :meth:`~Series.str.istitle`,Equivalent to ``str.istitle`` :meth:`~Series.str.isnumeric`,Equivalent to ``str.isnumeric`` - :meth:`~Series.str.isnumeric`,Equivalent to ``str.isdecimal`` + :meth:`~Series.str.isdecimal`,Equivalent to ``str.isdecimal`` diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 786a46d343be1..8215414e425fe 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -29,13 +29,13 @@ Time Deltas Starting in v0.15.0, we introduce a new scalar type ``Timedelta``, which is a subclass of ``datetime.timedelta``, and behaves in a similar manner, but allows compatibility with ``np.timedelta64`` types as well as a host of custom representation, parsing, and attributes. -Timedeltas are differences in times, expressed in difference units, e.g. days,hours,minutes,seconds. +Timedeltas are differences in times, expressed in difference units, e.g. days, hours, minutes, seconds. They can be both positive and negative. Parsing ------- -You can construct a ``Timedelta`` scalar thru various arguments: +You can construct a ``Timedelta`` scalar through various arguments: .. 
ipython:: python @@ -46,7 +46,7 @@ You can construct a ``Timedelta`` scalar thru various arguments: Timedelta('-1 days 2 min 3us') # like datetime.timedelta - # note: these MUST be specified as keyword argments + # note: these MUST be specified as keyword arguments Timedelta(days=1,seconds=1) # integers with a unit @@ -100,7 +100,7 @@ It will construct Series if the input is a Series, a scalar if the input is scal Operations ---------- -You can operate on Series/DataFrames and construct ``timedelta64[ns]`` Series thru +You can operate on Series/DataFrames and construct ``timedelta64[ns]`` Series through subtraction operations on ``datetime64[ns]`` Series, or ``Timestamps``. .. ipython:: python @@ -290,7 +290,7 @@ TimedeltaIndex .. versionadded:: 0.15.0 -To generate an index with time delta, you can use either the TimedeltaIndex or +To generate an index with time delta, you can use either the ``TimedeltaIndex`` or the ``timedelta_range`` constructor. Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``, diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index ac3302ae40fa7..ce1035e91391a 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -4,7 +4,7 @@ .. ipython:: python :suppress: - from datetime import datetime, timedelta + from datetime import datetime, timedelta, time import numpy as np np.random.seed(123456) from pandas import * @@ -243,7 +243,7 @@ variety of frequency aliases. 
The default frequency for ``date_range`` is a rng = bdate_range(start, end) rng -``date_range`` and ``bdate_range`` makes it easy to generate a range of dates +``date_range`` and ``bdate_range`` make it easy to generate a range of dates using various combinations of parameters like ``start``, ``end``, ``periods``, and ``freq``: @@ -353,7 +353,7 @@ This specifies an **exact** stop time (and is not the same as the above) dft['2013-1':'2013-2-28 00:00:00'] -We are stopping on the included end-point as its part of the index +We are stopping on the included end-point as it is part of the index .. ipython:: python @@ -482,6 +482,7 @@ frequency increment. Specific offset logic like "month", "business day", or BYearEnd, "business year end" BYearBegin, "business year begin" FY5253, "retail (aka 52-53 week) year" + BusinessHour, "business hour" Hour, "one hour" Minute, "one minute" Second, "one second" @@ -540,7 +541,7 @@ The ``rollforward`` and ``rollback`` methods do exactly what you would expect: It's definitely worth exploring the ``pandas.tseries.offsets`` module and the various docstrings for the classes. -These operations (``apply``, ``rollforward`` and ``rollback``) preserves time (hour, minute, etc) information by default. To reset time, use ``normalize=True`` keyword when create offset instance. If ``normalize=True``, result is normalized after the function is applied. +These operations (``apply``, ``rollforward`` and ``rollback``) preserves time (hour, minute, etc) information by default. To reset time, use ``normalize=True`` keyword when creating the offset instance. If ``normalize=True``, result is normalized after the function is applied. .. ipython:: python @@ -563,7 +564,7 @@ Parametric offsets ~~~~~~~~~~~~~~~~~~ Some of the offsets can be "parameterized" when created to result in different -behavior. For example, the ``Week`` offset for generating weekly data accepts a +behaviors. 
For example, the ``Week`` offset for generating weekly data accepts a ``weekday`` parameter which results in the generated dates always lying on a particular day of the week: @@ -667,6 +668,102 @@ in the usual way. have to change to fix the timezone issues, the behaviour of the ``CustomBusinessDay`` class may have to change in future versions. +.. _timeseries.businesshour: + +Business Hour +~~~~~~~~~~~~~ + +The ``BusinessHour`` class provides a business hour representation on ``BusinessDay``, +allowing to use specific start and end times. + +By default, ``BusinessHour`` uses 9:00 - 17:00 as business hours. +Adding ``BusinessHour`` will increment ``Timestamp`` by hourly. +If target ``Timestamp`` is out of business hours, move to the next business hour then increment it. +If the result exceeds the business hours end, remaining is added to the next business day. + +.. ipython:: python + + bh = BusinessHour() + bh + + # 2014-08-01 is Friday + Timestamp('2014-08-01 10:00').weekday() + Timestamp('2014-08-01 10:00') + bh + + # Below example is the same as Timestamp('2014-08-01 09:00') + bh + Timestamp('2014-08-01 08:00') + bh + + # If the results is on the end time, move to the next business day + Timestamp('2014-08-01 16:00') + bh + + # Remainings are added to the next day + Timestamp('2014-08-01 16:30') + bh + + # Adding 2 business hours + Timestamp('2014-08-01 10:00') + BusinessHour(2) + + # Subtracting 3 business hours + Timestamp('2014-08-01 10:00') + BusinessHour(-3) + +Also, you can specify ``start`` and ``end`` time by keywords. +Argument must be ``str`` which has ``hour:minute`` representation or ``datetime.time`` instance. +Specifying seconds, microseconds and nanoseconds as business hour results in ``ValueError``. + +.. 
ipython:: python + + bh = BusinessHour(start='11:00', end=time(20, 0)) + bh + + Timestamp('2014-08-01 13:00') + bh + Timestamp('2014-08-01 09:00') + bh + Timestamp('2014-08-01 18:00') + bh + +Passing ``start`` time later than ``end`` represents midnight business hour. +In this case, business hour exceeds midnight and overlap to the next day. +Valid business hours are distinguished by whether it started from valid ``BusinessDay``. + +.. ipython:: python + + bh = BusinessHour(start='17:00', end='09:00') + bh + + Timestamp('2014-08-01 17:00') + bh + Timestamp('2014-08-01 23:00') + bh + + # Although 2014-08-02 is Saturday, + # it is valid because it starts from 08-01 (Friday). + Timestamp('2014-08-02 04:00') + bh + + # Although 2014-08-04 is Monday, + # it is out of business hours because it starts from 08-03 (Sunday). + Timestamp('2014-08-04 04:00') + bh + +Applying ``BusinessHour.rollforward`` and ``rollback`` to out of business hours results in +the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward`` +may output different results from ``apply`` by definition. +
This is because one day's business hour end is equal to next day's business hour start. For example, +under the default business hours (9:00 - 17:00), there is no gap (0 minutes) between ``2014-08-01 17:00`` and +``2014-08-04 09:00``. + +.. ipython:: python + + # This adjusts a Timestamp to business hour edge + BusinessHour().rollback(Timestamp('2014-08-02 15:00')) + BusinessHour().rollforward(Timestamp('2014-08-02 15:00')) + + # It is the same as BusinessHour().apply(Timestamp('2014-08-01 17:00')). 
+ # And it is the same as BusinessHour().apply(Timestamp('2014-08-04 09:00')) + BusinessHour().apply(Timestamp('2014-08-02 15:00')) + + # BusinessDay results (for reference) + BusinessHour().rollforward(Timestamp('2014-08-02')) + + # It is the same as BusinessDay().apply(Timestamp('2014-08-01')) + # The result is the same as rollforward because BusinessDay never overlap. + BusinessHour().apply(Timestamp('2014-08-02')) + + Offset Aliases ~~~~~~~~~~~~~~ @@ -696,6 +793,7 @@ frequencies. We will refer to these aliases as *offset aliases* "BA", "business year end frequency" "AS", "year start frequency" "BAS", "business year start frequency" + "BH", "business hour frequency" "H", "hourly frequency" "T", "minutely frequency" "S", "secondly frequency" @@ -806,7 +904,7 @@ strongly recommended that you switch to using the new offset aliases. "ms", "L" "us", "U" -As you can see, legacy quarterly and annual frequencies are business quarter +As you can see, legacy quarterly and annual frequencies are business quarters and business year ends. Please also note the legacy time rule for milliseconds ``ms`` versus the new offset alias for month start ``MS``. This means that offset alias parsing is case sensitive. @@ -910,10 +1008,9 @@ Time series-related instance methods Shifting / lagging ~~~~~~~~~~~~~~~~~~ -One may want to *shift* or *lag* the values in a TimeSeries back and forward in +One may want to *shift* or *lag* the values in a time series back and forward in time. The method for this is ``shift``, which is available on all of the pandas -objects. In DataFrame, ``shift`` will currently only shift along the ``index`` -and in Panel along the ``major_axis``. +objects. ..
ipython:: python @@ -929,7 +1026,7 @@ The shift method accepts an ``freq`` argument which can accept a ts.shift(5, freq='BM') Rather than changing the alignment of the data and the index, ``DataFrame`` and -``TimeSeries`` objects also have a ``tshift`` convenience method that changes +``Series`` objects also have a ``tshift`` convenience method that changes all the dates in the index by a specified number of offsets: .. ipython:: python @@ -1060,8 +1157,8 @@ frequency periods. Note that 0.8 marks a watershed in the timeseries functionality in pandas. In previous versions, resampling had to be done using a combination of ``date_range``, ``groupby`` with ``asof``, and then calling an aggregation -function on the grouped object. This was not nearly convenient or performant as -the new pandas timeseries API. +function on the grouped object. This was not nearly as convenient or performant +as the new pandas timeseries API. .. _timeseries.periods: @@ -1099,7 +1196,7 @@ frequency. p - 3 -If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have same freq. Otherise, ``ValueError`` will be raised. +If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherwise, ``ValueError`` will be raised. .. ipython:: python @@ -1160,7 +1257,7 @@ objects: ps = Series(randn(len(prng)), prng) ps -``PeriodIndex`` supports addition and subtraction as the same rule as ``Period``. +``PeriodIndex`` supports addition and subtraction with the same rule as ``Period``. .. ipython:: python @@ -1175,7 +1272,7 @@ objects: PeriodIndex Partial String Indexing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can pass in dates and strings to `Series` and `DataFrame` with `PeriodIndex`, as the same manner as `DatetimeIndex`.
For details, refer to :ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>`. +You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodIndex``, in the same manner as ``DatetimeIndex``. For details, refer to :ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>`. .. ipython:: python @@ -1185,7 +1282,7 @@ You can pass in dates and strings to `Series` and `DataFrame` with `PeriodIndex` ps['10/31/2011':'12/31/2011'] -Passing string represents lower frequency than `PeriodIndex` returns partial sliced data. +Passing a string representing a lower frequency than ``PeriodIndex`` returns partial sliced data. .. ipython:: python @@ -1196,7 +1293,7 @@ Passing string represents lower frequency than `PeriodIndex` returns partial sli dfp dfp['2013-01-01 10H'] -As the same as `DatetimeIndex`, the endpoints will be included in the result. Below example slices data starting from 10:00 to 11:59. +As with ``DatetimeIndex``, the endpoints will be included in the result. The example below slices data starting from 10:00 to 11:59. .. ipython:: python @@ -1204,7 +1301,7 @@ As the same as `DatetimeIndex`, the endpoints will be included in the result. Be Frequency Conversion and Resampling with PeriodIndex ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The frequency of Periods and PeriodIndex can be converted via the ``asfreq`` +The frequency of ``Period`` and ``PeriodIndex`` can be converted via the ``asfreq`` method. Let's start with the fiscal year 2011, ending in December: .. ipython:: python @@ -1247,8 +1344,8 @@ period. Period conversions with anchored frequencies are particularly useful for working with various quarterly data common to economics, business, and other fields. Many organizations define quarters relative to the month in which their -fiscal year start and ends. Thus, first quarter of 2011 could start in 2010 or -a few months into 2011. 
Via anchored frequencies, pandas works all quarterly +fiscal year starts and ends. Thus, first quarter of 2011 could start in 2010 or +a few months into 2011. Via anchored frequencies, pandas works for all quarterly frequencies ``Q-JAN`` through ``Q-DEC``. ``Q-DEC`` define regular calendar quarters: @@ -1354,7 +1451,7 @@ Time Zone Handling ------------------ Pandas provides rich support for working with timestamps in different time zones using ``pytz`` and ``dateutil`` libraries. -``dateutil`` support is new [in 0.14.1] and currently only supported for fixed offset and tzfile zones. The default library is ``pytz``. +``dateutil`` support is new in 0.14.1 and currently only supported for fixed offset and tzfile zones. The default library is ``pytz``. Support for ``dateutil`` is provided for compatibility with other applications e.g. if you use ``dateutil`` in other python packages. Working with Time Zones @@ -1472,7 +1569,7 @@ time zones using ``tz_convert``: rng_berlin[5] rng_eastern[5].tz_convert('Europe/Berlin') -Localization of Timestamps functions just like DatetimeIndex and TimeSeries: +Localization of Timestamps functions just like DatetimeIndex and Series: .. ipython:: python @@ -1480,8 +1577,8 @@ Localization of Timestamps functions just like DatetimeIndex and TimeSeries: rng[5].tz_localize('Asia/Shanghai') -Operations between TimeSeries in different time zones will yield UTC -TimeSeries, aligning the data on the UTC timestamps: +Operations between Series in different time zones will yield UTC +Series, aligning the data on the UTC timestamps: .. 
ipython:: python diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 9d4cba2e5ee8c..51912b5d6b106 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -6,20 +6,16 @@ import numpy as np import pandas as pd - from numpy.random import randn, rand, randint np.random.seed(123456) - from pandas import DataFrame, Series, date_range, options - import pandas.util.testing as tm np.set_printoptions(precision=4, suppress=True) - import matplotlib.pyplot as plt - plt.close('all') + pd.options.display.max_rows = 15 import matplotlib try: matplotlib.style.use('ggplot') except AttributeError: - options.display.mpl_style = 'default' - options.display.max_rows = 15 - from pandas.compat import lrange + pd.options.display.mpl_style = 'default' + import matplotlib.pyplot as plt + plt.close('all') ******** Plotting @@ -68,7 +64,7 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around .. ipython:: python - ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) + ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = ts.cumsum() @savefig series_plot_basic.png @@ -87,7 +83,7 @@ On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the column .. ipython:: python - df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) + df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD')) df = df.cumsum() @savefig frame_plot_basic.png @@ -105,8 +101,8 @@ You can plot one column versus another using the `x` and `y` keywords in .. ipython:: python - df3 = DataFrame(randn(1000, 2), columns=['B', 'C']).cumsum() - df3['A'] = Series(list(range(len(df)))) + df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum() + df3['A'] = pd.Series(list(range(len(df)))) @savefig df_plot_xy.png df3.plot(x='A', y='B') @@ -182,7 +178,7 @@ bar plot: .. 
ipython:: python - df2 = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd']) + df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd']) @savefig bar_plot_multi_ex.png df2.plot(kind='bar'); @@ -224,8 +220,8 @@ Histogram can be drawn specifying ``kind='hist'``. .. ipython:: python - df4 = DataFrame({'a': randn(1000) + 1, 'b': randn(1000), - 'c': randn(1000) - 1}, columns=['a', 'b', 'c']) + df4 = pd.DataFrame({'a': np.random.randn(1000) + 1, 'b': np.random.randn(1000), + 'c': np.random.randn(1000) - 1}, columns=['a', 'b', 'c']) plt.figure(); @@ -267,7 +263,7 @@ You can pass other keywords supported by matplotlib ``hist``. For example, horiz plt.close('all') See the :meth:`hist <matplotlib.axes.Axes.hist>` method and the -`matplotlib hist documenation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more. +`matplotlib hist documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`__ for more. The existing interface ``DataFrame.hist`` to plot histogram still can be used. @@ -308,10 +304,10 @@ The ``by`` keyword can be specified to plot grouped histograms: .. ipython:: python - data = Series(randn(1000)) + data = pd.Series(np.random.randn(1000)) @savefig grouped_hist.png - data.hist(by=randint(0, 4, 1000), figsize=(6, 4)) + data.hist(by=np.random.randint(0, 4, 1000), figsize=(6, 4)) .. _visualization.box: @@ -337,7 +333,7 @@ a uniform random variable on [0,1). .. ipython:: python - df = DataFrame(rand(10, 5), columns=['A', 'B', 'C', 'D', 'E']) + df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E']) @savefig box_plot_new.png df.plot(kind='box') @@ -392,7 +388,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used. .. ipython:: python - df = DataFrame(rand(10,5)) + df = pd.DataFrame(np.random.rand(10,5)) plt.figure(); @savefig box_plot_ex.png @@ -410,8 +406,8 @@ groupings. For instance, .. 
ipython:: python :okwarning: - df = DataFrame(rand(10,2), columns=['Col1', 'Col2'] ) - df['X'] = Series(['A','A','A','A','A','B','B','B','B','B']) + df = pd.DataFrame(np.random.rand(10,2), columns=['Col1', 'Col2'] ) + df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B']) plt.figure(); @@ -430,9 +426,9 @@ columns: .. ipython:: python :okwarning: - df = DataFrame(rand(10,3), columns=['Col1', 'Col2', 'Col3']) - df['X'] = Series(['A','A','A','A','A','B','B','B','B','B']) - df['Y'] = Series(['A','B','A','B','A','B','A','B','A','B']) + df = pd.DataFrame(np.random.rand(10,3), columns=['Col1', 'Col2', 'Col3']) + df['X'] = pd.Series(['A','A','A','A','A','B','B','B','B','B']) + df['Y'] = pd.Series(['A','B','A','B','A','B','A','B','A','B']) plt.figure(); @@ -473,7 +469,7 @@ DataFrame. :okwarning: np.random.seed(1234) - df_box = DataFrame(np.random.randn(50, 2)) + df_box = pd.DataFrame(np.random.randn(50, 2)) df_box['g'] = np.random.choice(['A', 'B'], size=50) df_box.loc[df_box['g'] == 'B', 1] += 3 @@ -517,7 +513,7 @@ When input data contains `NaN`, it will be automatically filled by 0. If you wan .. ipython:: python - df = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd']) + df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd']) @savefig area_plot_stacked.png df.plot(kind='area'); @@ -555,7 +551,7 @@ These can be specified by ``x`` and ``y`` keywords each. .. ipython:: python - df = DataFrame(rand(50, 4), columns=['a', 'b', 'c', 'd']) + df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd']) @savefig scatter_plot.png df.plot(kind='scatter', x='a', y='b'); @@ -626,7 +622,7 @@ too dense to plot each point individually. .. ipython:: python - df = DataFrame(randn(1000, 2), columns=['a', 'b']) + df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b']) df['b'] = df['b'] + np.arange(1000) @savefig hexbin_plot.png @@ -654,7 +650,7 @@ given by column ``z``. The bins are aggregated with numpy's ``max`` function. .. 
ipython:: python - df = DataFrame(randn(1000, 2), columns=['a', 'b']) + df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b']) df['b'] = df['b'] = df['b'] + np.arange(1000) df['z'] = np.random.uniform(0, 3, 1000) @@ -689,7 +685,7 @@ A ``ValueError`` will be raised if there are any negative values in your data. .. ipython:: python - series = Series(3 * rand(4), index=['a', 'b', 'c', 'd'], name='series') + series = pd.Series(3 * np.random.rand(4), index=['a', 'b', 'c', 'd'], name='series') @savefig series_pie_plot.png series.plot(kind='pie', figsize=(6, 6)) @@ -716,7 +712,7 @@ A legend will be drawn in each pie plots by default; specify ``legend=False`` to .. ipython:: python - df = DataFrame(3 * rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y']) + df = pd.DataFrame(3 * np.random.rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y']) @savefig df_pie_plot.png df.plot(kind='pie', subplots=True, figsize=(8, 4)) @@ -759,7 +755,7 @@ If you pass values whose sum total is less than 1.0, matplotlib draws a semicirc .. ipython:: python - series = Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2') + series = pd.Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2') @savefig series_pie_plot_semi.png series.plot(kind='pie', figsize=(6, 6)) @@ -835,7 +831,7 @@ You can create a scatter plot matrix using the .. ipython:: python from pandas.tools.plotting import scatter_matrix - df = DataFrame(randn(1000, 4), columns=['a', 'b', 'c', 'd']) + df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd']) @savefig scatter_matrix_kde.png scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') @@ -863,7 +859,7 @@ setting ``kind='kde'``: .. ipython:: python - ser = Series(randn(1000)) + ser = pd.Series(np.random.randn(1000)) @savefig kde_plot.png ser.plot(kind='kde') @@ -888,10 +884,9 @@ of the same class will usually be closer together and form larger structures. .. 
ipython:: python - from pandas import read_csv from pandas.tools.plotting import andrews_curves - data = read_csv('data/iris.data') + data = pd.read_csv('data/iris.data') plt.figure() @@ -911,10 +906,9 @@ represents one data point. Points that tend to cluster will appear closer togeth .. ipython:: python - from pandas import read_csv from pandas.tools.plotting import parallel_coordinates - data = read_csv('data/iris.data') + data = pd.read_csv('data/iris.data') plt.figure() @@ -946,8 +940,8 @@ implies that the underlying data are not random. plt.figure() - data = Series(0.1 * rand(1000) + - 0.9 * np.sin(np.linspace(-99 * np.pi, 99 * np.pi, num=1000))) + data = pd.Series(0.1 * np.random.rand(1000) + + 0.9 * np.sin(np.linspace(-99 * np.pi, 99 * np.pi, num=1000))) @savefig lag_plot.png lag_plot(data) @@ -981,7 +975,7 @@ confidence band. plt.figure() - data = Series(0.7 * rand(1000) + + data = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(np.linspace(-9 * np.pi, 9 * np.pi, num=1000))) @savefig autocorrelation_plot.png @@ -1012,7 +1006,7 @@ are what constitutes the bootstrap plot. from pandas.tools.plotting import bootstrap_plot - data = Series(rand(1000)) + data = pd.Series(np.random.rand(1000)) @savefig bootstrap_plot.png bootstrap_plot(data, size=50, samples=500, color='grey') @@ -1042,10 +1036,9 @@ be colored differently. .. ipython:: python - from pandas import read_csv from pandas.tools.plotting import radviz - data = read_csv('data/iris.data') + data = pd.read_csv('data/iris.data') plt.figure() @@ -1095,7 +1088,7 @@ shown by default. .. ipython:: python - df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) + df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD')) df = df.cumsum() @savefig frame_plot_basic_noleg.png @@ -1119,7 +1112,7 @@ You may pass ``logy`` to get a log-scale Y axis. .. 
ipython:: python - ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) + ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = np.exp(ts.cumsum()) @savefig series_plot_logy.png @@ -1227,8 +1220,6 @@ in ``pandas.plot_params`` can be used in a `with statement`: .. ipython:: python - import pandas as pd - plt.figure() @savefig ser_plot_suppress_context.png @@ -1325,10 +1316,10 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a :suppress: np.random.seed(123456) - ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) + ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = ts.cumsum() - df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) + df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD')) df = df.cumsum() .. ipython:: python @@ -1410,7 +1401,7 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and : .. ipython:: python fig, ax = plt.subplots(1, 1) - df = DataFrame(rand(5, 3), columns=['a', 'b', 'c']) + df = pd.DataFrame(np.random.rand(5, 3), columns=['a', 'b', 'c']) ax.get_xaxis().set_visible(False) # Hide Ticks @savefig line_plot_table_true.png @@ -1482,7 +1473,7 @@ To use the cubehelix colormap, we can simply pass ``'cubehelix'`` to ``colormap= .. ipython:: python - df = DataFrame(randn(1000, 10), index=ts.index) + df = pd.DataFrame(np.random.randn(1000, 10), index=ts.index) df = df.cumsum() plt.figure() @@ -1520,7 +1511,7 @@ Colormaps can also be used other plot types, like bar charts: .. ipython:: python - dd = DataFrame(randn(10, 10)).applymap(abs) + dd = pd.DataFrame(np.random.randn(10, 10)).applymap(abs) dd = dd.cumsum() plt.figure() @@ -1587,8 +1578,8 @@ when plotting a large number of points. .. 
ipython:: python - price = Series(randn(150).cumsum(), - index=date_range('2000-1-1', periods=150, freq='B')) + price = pd.Series(np.random.randn(150).cumsum(), + index=pd.date_range('2000-1-1', periods=150, freq='B')) ma = pd.rolling_mean(price, 20) mstd = pd.rolling_std(price, 20) @@ -1624,18 +1615,8 @@ Trellis plotting interface .. ipython:: python :suppress: - import numpy as np - np.random.seed(123456) - from pandas import * - options.display.max_rows=15 - import pandas.util.testing as tm - randn = np.random.randn - np.set_printoptions(precision=4, suppress=True) - import matplotlib.pyplot as plt - tips_data = read_csv('data/tips.csv') - iris_data = read_csv('data/iris.data') - from pandas import read_csv - from pandas.tools.plotting import radviz + tips_data = pd.read_csv('data/tips.csv') + iris_data = pd.read_csv('data/iris.data') plt.close('all') @@ -1646,8 +1627,7 @@ Trellis plotting interface .. code-block:: python - from pandas import read_csv - tips_data = read_csv('tips.csv') + tips_data = pd.read_csv('tips.csv') from the directory where you downloaded the file. @@ -1668,7 +1648,6 @@ In the example below, data from the tips data set is arranged by the attributes values, the resulting grid has two columns and two rows. A histogram is displayed for each cell of the grid. - .. ipython:: python plt.figure() diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index d05c19a5e4bea..c8e32ac2a3309 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,8 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.16.2.txt + .. include:: whatsnew/v0.16.1.txt .. 
include:: whatsnew/v0.16.0.txt diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt index 02de919e3f83e..6a14a4024ba5a 100644 --- a/doc/source/whatsnew/v0.15.2.txt +++ b/doc/source/whatsnew/v0.15.2.txt @@ -49,7 +49,7 @@ API changes In [3]: cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']) In [4]: cat - Out[4]: + Out[4]: [a, b, a] Categories (3, object): [a < b < c] diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index aa35434802799..f9bef3d9c7f4a 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -474,10 +474,11 @@ Other API Changes - ``Series.values_counts`` and ``Series.describe`` for categorical data will now put ``NaN`` entries at the end. (:issue:`9443`) - ``Series.describe`` for categorical data will now give counts and frequencies of 0, not ``NaN``, for unused categories (:issue:`9443`) -- Due to a bug fix, looking up a partial string label with ``DatetimeIndex.asof`` now includes values that match the string, even if they are after the start of the partial string label (:issue:`9258`). Old behavior: +- Due to a bug fix, looking up a partial string label with ``DatetimeIndex.asof`` now includes values that match the string, even if they are after the start of the partial string label (:issue:`9258`). - .. ipython:: python - :verbatim: + Old behavior: + + .. code-block:: python In [4]: pd.to_datetime(['2000-01-31', '2000-02-28']).asof('2000-02') Out[4]: Timestamp('2000-01-31 00:00:00') diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt old mode 100644 new mode 100755 index 05c762b91b925..fa82a90f2a429 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -1,37 +1,262 @@ .. 
_whatsnew_0161: -v0.16.1 (April ??, 2015) ------------------------- +v0.16.1 (May 11, 2015) +---------------------- This is a minor bug-fix release from 0.16.0 and includes a a large number of bug fixes along several new features, enhancements, and performance improvements. We recommend that all users upgrade to this version. +Highlights include: + +- Support for a ``CategoricalIndex``, a category based index, see :ref:`here <whatsnew_0161.enhancements.categoricalindex>` +- New section on how-to-contribute to *pandas*, see :ref:`here <contributing>` +- Revised "Merge, join, and concatenate" documentation, including graphical examples to make it easier to understand each operations, see :ref:`here <merging>` +- New method ``sample`` for drawing random samples from Series, DataFrames and Panels. See :ref:`here <whatsnew_0161.enhancements.sample>` +- The default ``Index`` printing has changed to a more uniform format, see :ref:`here <whatsnew_0161.index_repr>` +- ``BusinessHour`` datetime-offset is now supported, see :ref:`here <timeseries.businesshour>` + +- Further enhancement to the ``.str`` accessor to make string operations easier, see :ref:`here <whatsnew_0161.enhancements.string>` + .. contents:: What's new in v0.16.1 :local: :backlinks: none - .. _whatsnew_0161.enhancements: +.. warning:: + + In pandas 0.17.0, the sub-package ``pandas.io.data`` will be removed in favor of a separately installable package. See :ref:`here for details <remote_data.pandas_datareader>` (:issue:`8961`) + Enhancements ~~~~~~~~~~~~ -- Added ``StringMethods.capitalize()`` and ``swapcase`` which behave as the same as standard ``str`` (:issue:`9766`) +.. _whatsnew_0161.enhancements.categoricalindex: + +CategoricalIndex +^^^^^^^^^^^^^^^^ + +We introduce a ``CategoricalIndex``, a new type of index object that is useful for supporting +indexing with duplicates. 
This is a container around a ``Categorical`` (introduced in v0.15.0) +and allows efficient indexing and storage of an index with a large number of duplicated elements. Prior to 0.16.1, +setting the index of a ``DataFrame/Series`` with a ``category`` dtype would convert this to regular object-based ``Index``. + +.. ipython :: python + + df = DataFrame({'A' : np.arange(6), + 'B' : Series(list('aabbca')).astype('category', + categories=list('cab')) + }) + df + df.dtypes + df.B.cat.categories + +setting the index, will create create a ``CategoricalIndex`` + +.. ipython :: python + + df2 = df.set_index('B') + df2.index + +indexing with ``__getitem__/.iloc/.loc/.ix`` works similarly to an Index with duplicates. +The indexers MUST be in the category or the operation will raise. + +.. ipython :: python + + df2.loc['a'] + +and preserves the ``CategoricalIndex`` + +.. ipython :: python + + df2.loc['a'].index + +sorting will order by the order of the categories + +.. ipython :: python + + df2.sort_index() + +groupby operations on the index will preserve the index nature as well + +.. ipython :: python + + df2.groupby(level=0).sum() + df2.groupby(level=0).sum().index + +reindexing operations, will return a resulting index based on the type of the passed +indexer, meaning that passing a list will return a plain-old-``Index``; indexing with +a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories +of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with +values NOT in the categories, similarly to how you can reindex ANY pandas index. + +.. ipython :: python + + df2.reindex(['a','e']) + df2.reindex(['a','e']).index + df2.reindex(pd.Categorical(['a','e'],categories=list('abcde'))) + df2.reindex(pd.Categorical(['a','e'],categories=list('abcde'))).index + +See the :ref:`documentation <advanced.categoricalindex>` for more. (:issue:`7629`, :issue:`10038`, :issue:`10039`) + +.. 
_whatsnew_0161.enhancements.sample: + +Sample +^^^^^^ + +Series, DataFrames, and Panels now have a new method: :meth:`~pandas.DataFrame.sample`. +The method accepts a specific number of rows or columns to return, or a fraction of the +total number or rows or columns. It also has options for sampling with or without replacement, +for passing in a column for weights for non-uniform sampling, and for setting seed values to +facilitate replication. (:issue:`2419`) + +.. ipython :: python + + example_series = Series([0,1,2,3,4,5]) + + # When no arguments are passed, returns 1 + example_series.sample() + + # One may specify either a number of rows: + example_series.sample(n=3) + + # Or a fraction of the rows: + example_series.sample(frac=0.5) + + # weights are accepted. + example_weights = [0, 0, 0.2, 0.2, 0.2, 0.4] + example_series.sample(n=3, weights=example_weights) + + # weights will also be normalized if they do not sum to one, + # and missing values will be treated as zeros. + example_weights2 = [0.5, 0, 0, 0, None, np.nan] + example_series.sample(n=1, weights=example_weights2) + + +When applied to a DataFrame, one may pass the name of a column to specify sampling weights +when sampling from rows. + +.. ipython :: python + + df = DataFrame({'col1':[9,8,7,6], 'weight_column':[0.5, 0.4, 0.1, 0]}) + df.sample(n=3, weights='weight_column') + + +.. _whatsnew_0161.enhancements.string: + +String Methods Enhancements +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:ref:`Continuing from v0.16.0 <whatsnew_0160.enhancements.string>`, the following +enhancements make string operations easier and more consistent with standard python string operations. + + +- Added ``StringMethods`` (``.str`` accessor) to ``Index`` (:issue:`9068`) + + The ``.str`` accessor is now available for both ``Series`` and ``Index``. + + .. 
ipython:: python + + idx = Index([' jack', 'jill ', ' jesse ', 'frank']) + idx.str.strip() + + One special case for the `.str` accessor on ``Index`` is that if a string method returns ``bool``, the ``.str`` accessor + will return a ``np.array`` instead of a boolean ``Index`` (:issue:`8875`). This enables the following expression + to work naturally: + + .. ipython:: python + + idx = Index(['a1', 'a2', 'b1', 'b2']) + s = Series(range(4), index=idx) + s + idx.str.startswith('a') + s[s.index.str.startswith('a')] + +- The following new methods are accesible via ``.str`` accessor to apply the function to each values. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`) + + ================ =============== =============== =============== ================ + .. .. Methods .. .. + ================ =============== =============== =============== ================ + ``capitalize()`` ``swapcase()`` ``normalize()`` ``partition()`` ``rpartition()`` + ``index()`` ``rindex()`` ``translate()`` + ================ =============== =============== =============== ================ + +- ``split`` now takes ``expand`` keyword to specify whether to expand dimensionality. ``return_type`` is deprecated. (:issue:`9847`) + + .. ipython:: python + + s = Series(['a,b', 'a,c', 'b,c']) + + # return Series + s.str.split(',') + + # return DataFrame + s.str.split(',', expand=True) + + idx = Index(['a,b', 'a,c', 'b,c']) + + # return Index + idx.str.split(',') + + # return MultiIndex + idx.str.split(',', expand=True) + + +- Improved ``extract`` and ``get_dummies`` methods for ``Index.str`` (:issue:`9980`) + + +.. _whatsnew_0161.enhancements.other: + +Other Enhancements +^^^^^^^^^^^^^^^^^^ + +- ``BusinessHour`` offset is now supported, which represents business hours starting from 09:00 - 17:00 on ``BusinessDay`` by default. See :ref:`Here <timeseries.businesshour>` for details. (:issue:`7905`) + + .. 
ipython:: python + + from pandas.tseries.offsets import BusinessHour + Timestamp('2014-08-01 09:00') + BusinessHour() + Timestamp('2014-08-01 07:00') + BusinessHour() + Timestamp('2014-08-01 16:30') + BusinessHour() + +- ``DataFrame.diff`` now takes an ``axis`` parameter that determines the direction of differencing (:issue:`9727`) + +- Allow ``clip``, ``clip_lower``, and ``clip_upper`` to accept array-like arguments as thresholds (This is a regression from 0.11.0). These methods now have an ``axis`` parameter which determines how the Series or DataFrame will be aligned with the threshold(s). (:issue:`6966`) - ``DataFrame.mask()`` and ``Series.mask()`` now support same keywords as ``where`` (:issue:`8801`) +- ``drop`` function can now accept ``errors`` keyword to suppress ``ValueError`` raised when any of label does not exist in the target data. (:issue:`6736`) + .. ipython:: python + df = DataFrame(np.random.randn(3, 3), columns=['A', 'B', 'C']) + df.drop(['A', 'X'], axis=1, errors='ignore') -.. _whatsnew_0161.api: +- Add support for separating years and quarters using dashes, for + example 2014-Q1. (:issue:`9688`) -API changes -~~~~~~~~~~~ +- Allow conversion of values with dtype ``datetime64`` or ``timedelta64`` to strings using ``astype(str)`` (:issue:`9757`) +- ``get_dummies`` function now accepts ``sparse`` keyword. If set to ``True``, the return ``DataFrame`` is sparse, e.g. ``SparseDataFrame``. (:issue:`8823`) +- ``Period`` now accepts ``datetime64`` as value input. (:issue:`9054`) + +- Allow timedelta string conversion when leading zero is missing from time definition, ie `0:00:00` vs `00:00:00`. (:issue:`9570`) +- Allow ``Panel.shift`` with ``axis='items'`` (:issue:`9890`) +- Trying to write an excel file now raises ``NotImplementedError`` if the ``DataFrame`` has a ``MultiIndex`` instead of writing a broken Excel file. (:issue:`9794`) +- Allow ``Categorical.add_categories`` to accept ``Series`` or ``np.array``. 
(:issue:`9927`) +- Add/delete ``str/dt/cat`` accessors dynamically from ``__dir__``. (:issue:`9910`) +- Add ``normalize`` as a ``dt`` accessor method. (:issue:`10047`) +- ``DataFrame`` and ``Series`` now have ``_constructor_expanddim`` property as overridable constructor for one higher dimensionality data. This should be used only when it is really needed, see :ref:`here <ref-subclassing-pandas>` +- ``pd.lib.infer_dtype`` now returns ``'bytes'`` in Python 3 where appropriate. (:issue:`10032`) + + +.. _whatsnew_0161.api: + +API changes +~~~~~~~~~~~ - When passing in an ax to ``df.plot( ..., ax=ax)``, the `sharex` kwarg will now default to `False`. The result is that the visibility of xlabels and xticklabels will not anymore be changed. You @@ -40,43 +265,282 @@ API changes If pandas creates the subplots itself (e.g. no passed in `ax` kwarg), then the default is still ``sharex=True`` and the visibility changes are applied. +- :meth:`~pandas.DataFrame.assign` now inserts new columns in alphabetical order. Previously + the order was arbitrary. (:issue:`9777`) +- By default, ``read_csv`` and ``read_table`` will now try to infer the compression type based on the file extension. Set ``compression=None`` to restore the previous behavior (no decompression). (:issue:`9770`) -- Add support for separating years and quarters using dashes, for - example 2014-Q1. (:issue:`9688`) +.. _whatsnew_0161.deprecations: -.. _whatsnew_0161.performance: +Deprecations +^^^^^^^^^^^^ -Performance Improvements -~~~~~~~~~~~~~~~~~~~~~~~~ +- ``Series.str.split``'s ``return_type`` keyword was removed in favor of ``expand`` (:issue:`9847`) +.. _whatsnew_0161.index_repr: +Index Representation +~~~~~~~~~~~~~~~~~~~~ +The string representation of ``Index`` and its sub-classes have now been unified. 
These will show a single-line display if there are few values; a wrapped multi-line display for a lot of values (but less than ``display.max_seq_items``; if lots of items (> ``display.max_seq_items``) will show a truncated display (the head and tail of the data). The formatting for ``MultiIndex`` is unchanges (a multi-line wrapped display). The display width responds to the option ``display.max_seq_items``, which is defaulted to 100. (:issue:`6482`) +Previous Behavior -.. _whatsnew_0161.bug_fixes: +.. code-block:: python -Bug Fixes -~~~~~~~~~ + In [2]: pd.Index(range(4),name='foo') + Out[2]: Int64Index([0, 1, 2, 3], dtype='int64') -- Fixed bug (:issue:`9542`) where labels did not appear properly in legend of ``DataFrame.plot()``. Passing ``label=`` args also now works, and series indices are no longer mutated. -- Bug in json serialization when frame has length zero.(:issue:`9805`) + In [3]: pd.Index(range(104),name='foo') + Out[3]: Int64Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, ...], dtype='int64') + In [4]: pd.date_range('20130101',periods=4,name='foo',tz='US/Eastern') + Out[4]: + <class 'pandas.tseries.index.DatetimeIndex'> + [2013-01-01 00:00:00-05:00, ..., 2013-01-04 00:00:00-05:00] + Length: 4, Freq: D, Timezone: US/Eastern -- Bug in ``scatter_matrix`` draws unexpected axis ticklabels (:issue:`5662`) + In [5]: pd.date_range('20130101',periods=104,name='foo',tz='US/Eastern') + Out[5]: + <class 'pandas.tseries.index.DatetimeIndex'> + [2013-01-01 00:00:00-05:00, ..., 2013-04-14 00:00:00-04:00] + Length: 104, Freq: D, Timezone: US/Eastern +New Behavior +.. 
ipython:: python + pd.set_option('display.width', 80) + pd.Index(range(4), name='foo') + pd.Index(range(30), name='foo') + pd.Index(range(104), name='foo') + pd.CategoricalIndex(['a','bb','ccc','dddd'], ordered=True, name='foobar') + pd.CategoricalIndex(['a','bb','ccc','dddd']*10, ordered=True, name='foobar') + pd.CategoricalIndex(['a','bb','ccc','dddd']*100, ordered=True, name='foobar') + pd.date_range('20130101',periods=4, name='foo', tz='US/Eastern') + pd.date_range('20130101',periods=25, freq='D') + pd.date_range('20130101',periods=104, name='foo', tz='US/Eastern') -- Bug in ``transform`` causing length mismatch when null entries were present and a fast aggregator was being used (:issue:`9697`) +.. _whatsnew_0161.performance: -- Bug in ``DataFrame`` slicing may not retain metadata (:issue:`9776`) -- Bug where ``TimdeltaIndex`` were not properly serialized in fixed ``HDFStore`` (:issue:`9635`) +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- Improved csv write performance with mixed dtypes, including datetimes by up to 5x (:issue:`9940`) +- Improved csv write performance generally by 2x (:issue:`9940`) +- Improved the performance of ``pd.lib.max_len_string_array`` by 5-7x (:issue:`10024`) -- Bug in plotting continuously using ``secondary_y`` may not show legend properly. (:issue:`9610`, :issue:`9779`) +.. _whatsnew_0161.bug_fixes: + +Bug Fixes +~~~~~~~~~ +- Bug where labels did not appear properly in the legend of ``DataFrame.plot()``, passing ``label=`` arguments works, and Series indices are no longer mutated. (:issue:`9542`) +- Bug in json serialization causing a segfault when a frame had zero length. (:issue:`9805`) +- Bug in ``read_csv`` where missing trailing delimiters would cause segfault. (:issue:`5664`) +- Bug in retaining index name on appending (:issue:`9862`) +- Bug in ``scatter_matrix`` draws unexpected axis ticklabels (:issue:`5662`) +- Fixed bug in ``StataWriter`` resulting in changes to input ``DataFrame`` upon save (:issue:`9795`). 
+- Bug in ``transform`` causing length mismatch when null entries were present and a fast aggregator was being used (:issue:`9697`) +- Bug in ``equals`` causing false negatives when block order differed (:issue:`9330`) +- Bug in grouping with multiple ``pd.Grouper`` where one is non-time based (:issue:`10063`) +- Bug in ``read_sql_table`` error when reading postgres table with timezone (:issue:`7139`) +- Bug in ``DataFrame`` slicing may not retain metadata (:issue:`9776`) +- Bug where ``TimdeltaIndex`` were not properly serialized in fixed ``HDFStore`` (:issue:`9635`) +- Bug with ``TimedeltaIndex`` constructor ignoring ``name`` when given another ``TimedeltaIndex`` as data (:issue:`10025`). +- Bug in ``DataFrameFormatter._get_formatted_index`` with not applying ``max_colwidth`` to the ``DataFrame`` index (:issue:`7856`) +- Bug in ``.loc`` with a read-only ndarray data source (:issue:`10043`) +- Bug in ``groupby.apply()`` that would raise if a passed user defined function either returned only ``None`` (for all input). (:issue:`9685`) +- Always use temporary files in pytables tests (:issue:`9992`) +- Bug in plotting continuously using ``secondary_y`` may not show legend properly. (:issue:`9610`, :issue:`9779`) +- Bug in ``DataFrame.plot(kind="hist")`` results in ``TypeError`` when ``DataFrame`` contains non-numeric columns (:issue:`9853`) +- Bug where repeated plotting of ``DataFrame`` with a ``DatetimeIndex`` may raise ``TypeError`` (:issue:`9852`) +- Bug in ``setup.py`` that would allow an incompat cython version to build (:issue:`9827`) +- Bug in plotting ``secondary_y`` incorrectly attaches ``right_ax`` property to secondary axes specifying itself recursively. 
(:issue:`9861`) - Bug in ``Series.quantile`` on empty Series of type ``Datetime`` or ``Timedelta`` (:issue:`9675`) - Bug in ``where`` causing incorrect results when upcasting was required (:issue:`9731`) +<<<<<<< HEAD + +- Bug in ``ParserBase.convert_to_nd_arrays`` when called by ``DataFrame.read_fwf`` (:issue:`9266``) +======= +- Bug in ``FloatArrayFormatter`` where decision boundary for displaying "small" floats in decimal format is off by one order of magnitude for a given display.precision (:issue:`9764`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> ce988b4... DOC: Update whatsnew for 0.16.1 (#9764) +======= + +======= +>>>>>>> 39fa180... FIX: timeseries asfreq would drop the name of the index, closes #9854 +- Fixed bug where ``DataFrame.plot()`` raised an error when both ``color`` and ``style`` keywords were passed and there was no color symbol in the style strings (:issue:`9671`) +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> f00d6bb... Fixed bug #9671 where 'DataFrame.plot()' raised an error when both 'color' and 'style' keywords were passed and there was no color symbol in the style strings (this should be allowed) +======= +======= + +>>>>>>> 2997e70... BUG: read_csv skips lines with initial whitespace + one non-space character (GH9710) +======= +- Not showing a ``DeprecationWarning`` on combining list-likes with an ``Index`` (:issue:`10083`) +>>>>>>> 90a3f26... DOC: additional whatsnew +- Bug in ``read_csv`` and ``read_table`` when using ``skip_rows`` parameter if blank lines are present. (:issue:`9832`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> e67893f... BUG: skiprows doesn't handle blank lines properly when engine='c' (GH #9832) +======= + +======= +>>>>>>> 39fa180... FIX: timeseries asfreq would drop the name of the index, closes #9854 +- Bug in ``read_csv()`` interprets ``index_col=True`` as ``1`` (:issue:`9798`) +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> 53f2ea4... 
BUG: Issue 9798 fixed +======= + +======= +- Bug in index equality comparisons using ``==`` failing on Index/MultiIndex type incompatibility (:issue:`9875`) +>>>>>>> 07257a0... BUG: Fixing == __eq__ operator for MultiIndex ... closes (GH9785) +======= +- Bug in index equality comparisons using ``==`` failing on Index/MultiIndex type incompatibility (:issue:`9785`) +>>>>>>> ad1abce... DOC: fix incorrect issue numbers in whatsnew +- Bug in which ``SparseDataFrame`` could not take `nan` as a column name (:issue:`8822`) +<<<<<<< HEAD +<<<<<<< HEAD + +<<<<<<< HEAD +>>>>>>> 7879205... Fix to allow sparse dataframes to have nan column labels +======= +======= +- Bug in ``Series.quantile`` on empty Series of type ``Datetime`` or ``Timedelta`` (:issue:`9675`) +======= +>>>>>>> 1f9b699... BUG: where behaves badly when dtype of self is datetime or timedelta, and dtype of other is not (GH9804) +- Bug in ``to_msgpack`` and ``read_msgpack`` zlib and blosc compression support (:issue:`9783`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> 39fa180... FIX: timeseries asfreq would drop the name of the index, closes #9854 +- Bug in unequal comparisons between a ``Series`` of dtype `"category"` and a scalar (e.g. ``Series(Categorical(list("abc"), categories=list("cba"), ordered=True)) > "b"``, which wouldn't use the order of the categories but use the lexicographical order. (:issue:`9848`) +<<<<<<< HEAD +>>>>>>> f0ac930... Fix: unequal comparisons of categorical and scalar +======= +======= +>>>>>>> ad1abce... DOC: fix incorrect issue numbers in whatsnew + +======= +>>>>>>> fad6079... DOC: last clean-up of whatsnew file 0.16.1 +- Bug ``GroupBy.size`` doesn't attach index name properly if grouped by ``TimeGrouper`` (:issue:`9925`) +- Bug causing an exception in slice assignments because ``length_of_indexer`` returns wrong results (:issue:`9995`) +- Bug in csv parser causing lines with initial whitespace plus one non-space character to be skipped. 
(:issue:`9710`) +- Bug in C csv parser causing spurious NaNs when data started with newline followed by whitespace. (:issue:`10022`) +- Bug causing elements with a null group to spill into the final group when grouping by a ``Categorical`` (:issue:`9603`) +- Bug where .iloc and .loc behavior is not consistent on empty dataframes (:issue:`9964`) +- Bug in invalid attribute access on a ``TimedeltaIndex`` incorrectly raised ``ValueError`` instead of ``AttributeError`` (:issue:`9680`) +- Bug in unequal comparisons between categorical data and a scalar, which was not in the categories (e.g. ``Series(Categorical(list("abc"), ordered=True)) > "d"``. This returned ``False`` for all elements, but now raises a ``TypeError``. Equality comparisons also now return ``False`` for ``==`` and ``True`` for ``!=``. (:issue:`9848`) +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> 35b20d8... BUG: Fix for comparisons of categorical and an scalar not in categories, xref GH9836 +======= + +======= + +======= +>>>>>>> a97113c... ENH: Raise error on trying to write excel file with a MultiIndexed DataFrame. closes #9794 +- Bug in DataFrame ``__setitem__`` when right hand side is a dictionary (:issue:`9874`) +- Bug in ``where`` when dtype is ``datetime64/timedelta64``, but dtype of other is not (:issue:`9804`) +<<<<<<< HEAD +>>>>>>> 1f9b699... BUG: where behaves badly when dtype of self is datetime or timedelta, and dtype of other is not (GH9804) +- Bug in ``MultiIndex.sortlevel()`` results in unicode level name breaks (:issue:`9875`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> f72aa71... BUG: unstack on unicode name level breaks GH9856 +======= + +======= +>>>>>>> 39fa180... FIX: timeseries asfreq would drop the name of the index, closes #9854 +======= +- Bug in ``MultiIndex.sortlevel()`` results in unicode level name breaks (:issue:`9856`) +>>>>>>> ad1abce... 
DOC: fix incorrect issue numbers in whatsnew +- Bug in which ``groupby.transform`` incorrectly enforced output dtypes to match input dtypes. (:issue:`9807`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> 30580e7... Groupby transform preserves output dtype +======= + +======= +- Bug in ``DataFrame`` constructor when ``columns`` parameter is set, and ``data`` is an empty list (:issue:`9939`) +>>>>>>> 514fe2d... BUG: DataFrame constructor fails when columns is set and data=[] (GH9948/9939) +- Bug in bar plot with ``log=True`` raises ``TypeError`` if all values are less than 1 (:issue:`9905`) +- Bug in horizontal bar plot ignores ``log=True`` (:issue:`9905`) +- Bug in PyTables queries that did not return proper results using the index (:issue:`8265`, :issue:`9676`) +- Bug where dividing a dataframe containing values of type ``Decimal`` by another ``Decimal`` would raise. (:issue:`9787`) +<<<<<<< HEAD +>>>>>>> bed38f2... FIX: division of Decimal would crash on fill because Decimal does not support type or dtype. (GH9787) +======= +- Bug where using DataFrames asfreq would remove the name of the index. (:issue:`9885`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> 39fa180... FIX: timeseries asfreq would drop the name of the index, closes #9854 +======= + +- Bug in DataFrame ``__setitem__`` when right hand side is a dictionary (:issue:`9874`) +>>>>>>> 99aabee... What's new and doc +======= +======= +- Bug causing extra index point when resample BM/BQ (:issue:`9756`) +>>>>>>> c0d4339... BUG: Resample BM/BQ adds extra index point #9756 +- Changed caching in ``AbstractHolidayCalendar`` to be at the instance level rather than at the class level as the latter can result in unexpected behaviour. (:issue:`9552`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> b1e8d9f... Moved caching in `AbstractHolidayCalendar` to the instance level +======= + +======= +>>>>>>> fad6079... 
DOC: last clean-up of whatsnew file 0.16.1 +- Fixed latex output for multi-indexed dataframes (:issue:`9778`) +<<<<<<< HEAD +>>>>>>> 4d1268e... BUG: Fixed latex output for multi-indexed dataframes - GH9778 +======= +- Bug causing an exception when setting an empty range using ``DataFrame.loc`` (:issue:`9596`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> a21f2ce... BUG: Exception when setting an empty range using DataFrame.loc +======= + + + + +======= +>>>>>>> fad6079... DOC: last clean-up of whatsnew file 0.16.1 +- Bug in hiding ticklabels with subplots and shared axes when adding a new plot to an existing grid of axes (:issue:`9158`) +<<<<<<< HEAD +>>>>>>> d3ccb70... BUG: hidden ticklabels with sharex and secondary +======= +- Bug in ``transform`` and ``filter`` when grouping on a categorical variable (:issue:`9921`) +- Bug in ``transform`` when groups are equal in number and dtype to the input index (:issue:`9700`) +<<<<<<< HEAD +>>>>>>> 3d73550... BUG: transform and filter misbehave when grouping on categorical data (GH 9921) +======= +- Google BigQuery connector now imports dependencies on a per-method basis.(:issue:`9713`) +- Updated BigQuery connector to no longer use deprecated ``oauth2client.tools.run()`` (:issue:`8327`) +<<<<<<< HEAD +>>>>>>> 2cf4132... Updates to Google BigQuery connector (#9713, #8327) +======= +- Bug in subclassed ``DataFrame``. It may not return the correct class, when slicing or subsetting it. (:issue:`9632`) +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> 5805889... Return correct subclass when slicing DataFrame. +======= +- BUG in median() where non-float null values are not handled correctly (:issue:`10040`) +>>>>>>> df730a3... BUG: median() not correctly handling non-float null values (fixes #10040) +======= +- Bug in ``.median()`` where non-float null values are not handled correctly (:issue:`10040`) +<<<<<<< HEAD +>>>>>>> 6c80f68... 
DOC: prepare for 0.16.1 release +======= +- Bug in Series.fillna() where it raises if a numerically convertible string is given (:issue:`10092`) +>>>>>>> 8ccc9b3... BUG: Series.fillna() raises if given a numerically convertible string diff --git a/doc/source/whatsnew/v0.16.2.txt b/doc/source/whatsnew/v0.16.2.txt new file mode 100644 index 0000000000000..b571aab0b19a5 --- /dev/null +++ b/doc/source/whatsnew/v0.16.2.txt @@ -0,0 +1,85 @@ +.. _whatsnew_0162: + +v0.16.2 (June 12, 2015) +----------------------- + +This is a minor bug-fix release from 0.16.1 and includes a large number of +bug fixes along with several new features, enhancements, and performance improvements. +We recommend that all users upgrade to this version. + +Highlights include: + +Check the :ref:`API Changes <whatsnew_0162.api>` before updating. + +.. contents:: What's new in v0.16.2 + :local: + :backlinks: none + +.. _whatsnew_0162.enhancements: + +New features +~~~~~~~~~~~~ + +.. _whatsnew_0162.enhancements.other: + +Other enhancements +^^^^^^^^^^^^^^^^^^ + +.. _whatsnew_0162.api: + +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0162.api_breaking: + +.. _whatsnew_0162.api_breaking.other: + +Other API Changes +^^^^^^^^^^^^^^^^^ + +- ``Holiday`` now raises ``NotImplementedError`` if both ``offset`` and ``observance`` are used in constructor. (:issue:`10217`) + +.. _whatsnew_0162.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- Improved ``Series.resample`` performance with dtype=datetime64[ns] (:issue:`7754`) + +.. 
_whatsnew_0162.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +- Bug where read_hdf store.select modifies the passed columns list when + multi-indexed (:issue:`7212`) +- Bug in ``Categorical`` repr with ``display.width`` of ``None`` in Python 3 (:issue:`10087`) + + +- Bug in ``mean()`` where integer dtypes can overflow (:issue:`10172`) +- Bug where Panel.from_dict does not set dtype when specified (:issue:`10058`) +- Bug in ``Index.union`` raises ``AttributeError`` when passing array-likes. (:issue:`10149`) +- Bug in ``Timestamp``'s ``microsecond``, ``quarter``, ``dayofyear``, ``week`` and ``daysinmonth`` properties return ``np.int`` type, not built-in ``int``. (:issue:`10050`) +- Bug in ``NaT`` raises ``AttributeError`` when accessing to ``daysinmonth``, ``dayofweek`` properties. (:issue:`10096`) + + +- Bug in getting timezone data with ``dateutil`` on various platforms ( :issue:`9059`, :issue:`8639`, :issue:`9663`, :issue:`10121`) +- Bug in display datetimes with mixed frequencies uniformly; display 'ms' datetimes to the proper precision. (:issue:`10170`) + +- Bug in ``Series`` arithmetic methods may incorrectly hold names (:issue:`10068`) + +- Bug in ``DatetimeIndex`` and ``TimedeltaIndex`` names are lost after timedelta arithmetics ( :issue:`9926`) + + +- Bug in `Series.plot(label="LABEL")` not correctly setting the label (:issue:`10119`) + +- Bug in `plot` not defaulting to matplotlib `axes.grid` setting (:issue:`9792`) + +- Bug in ``Series.align`` resets ``name`` when ``fill_value`` is specified (:issue:`10067`) +- Bug in ``SparseSeries.abs`` resets ``name`` (:issue:`10241`) + + +- Bug in GroupBy.get_group raises ValueError when group key contains NaT (:issue:`6992`) + + +- Bug where infer_freq infers timerule (WOM-5XXX) unsupported by to_offset (:issue:`9425`) diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt new file mode 100644 index 0000000000000..6ad108dc020c2 --- /dev/null +++ b/doc/source/whatsnew/v0.17.0.txt @@ -0,0 +1,60 @@ +.. 
_whatsnew_0170: + +v0.17.0 (July 31, 2015) +----------------------- + +This is a major release from 0.16.2 and includes a small number of API changes, several new features, +enhancements, and performance improvements along with a large number of bug fixes. We recommend that all +users upgrade to this version. + +Highlights include: + + +Check the :ref:`API Changes <whatsnew_0170.api>` and :ref:`deprecations <whatsnew_0170.deprecations>` before updating. + +.. contents:: What's new in v0.17.0 + :local: + :backlinks: none + +.. _whatsnew_0170.enhancements: + +New features +~~~~~~~~~~~~ + +.. _whatsnew_0170.enhancements.other: + +Other enhancements +^^^^^^^^^^^^^^^^^^ + +.. _whatsnew_0170.api: + +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0170.api_breaking: + +.. _whatsnew_0170.api_breaking.other: + +Other API Changes +^^^^^^^^^^^^^^^^^ + +.. _whatsnew_0170.deprecations: + +Deprecations +^^^^^^^^^^^^ + +.. _whatsnew_0170.prior_deprecations: + +Removal of prior version deprecations/changes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. _whatsnew_0170.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0170.bug_fixes: + +Bug Fixes +~~~~~~~~~ +- Fixed bug in csv parsing when using a converter that specified uint8 (:issue:`9266`) diff --git a/pandas/__init__.py b/pandas/__init__.py index 939495d3687ad..2a142a6ff2072 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -4,17 +4,13 @@ __docformat__ = 'restructuredtext' try: - from . 
import hashtable, tslib, lib -except Exception: # pragma: no cover - import sys - e = sys.exc_info()[1] # Py25 and Py3 current exception syntax conflict - print(e) - if 'No module named lib' in str(e): - raise ImportError('C extensions not built: if you installed already ' - 'verify that you are not importing from the source ' - 'directory') - else: - raise + from pandas import hashtable, tslib, lib +except ImportError as e: # pragma: no cover + module = str(e).lstrip('cannot import name ') # hack but overkill to use re + raise ImportError("C extension: {0} not built. If you want to import " + "pandas from the source directory, you may need to run " + "'python setup.py build_ext --inplace' to build the C " + "extensions first.".format(module)) from datetime import datetime import numpy as np diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index bff6eb1f95abc..2a273629544cb 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -26,6 +26,7 @@ Other items: * OrderedDefaultDict +* platform checker """ # pylint disable=W0611 import functools @@ -37,6 +38,8 @@ PY3 = (sys.version_info[0] >= 3) PY3_2 = sys.version_info[:2] == (3, 2) +PY2 = sys.version_info[0] == 2 + try: import __builtin__ as builtins @@ -752,3 +755,16 @@ def __missing__(self, key): def __reduce__(self): # optional, for pickle support args = self.default_factory if self.default_factory else tuple() return type(self), args, None, None, list(self.items()) + + +# https://github.com/pydata/pandas/pull/9123 +def is_platform_windows(): + return sys.platform == 'win32' or sys.platform == 'cygwin' + + +def is_platform_linux(): + return sys.platform == 'linux2' + + +def is_platform_mac(): + return sys.platform == 'darwin' diff --git a/pandas/core/api.py b/pandas/core/api.py index a8b10342593ce..fde9bc77c4bd9 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -8,7 +8,7 @@ from pandas.core.categorical import Categorical from pandas.core.groupby import Grouper from 
pandas.core.format import set_eng_float_format -from pandas.core.index import Index, Int64Index, Float64Index, MultiIndex +from pandas.core.index import Index, CategoricalIndex, Int64Index, Float64Index, MultiIndex from pandas.core.series import Series, TimeSeries from pandas.core.frame import DataFrame diff --git a/pandas/core/base.py b/pandas/core/base.py index dde2e74132c4b..540b900844a9e 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1,16 +1,14 @@ """ Base and utility classes for pandas objects. """ -import datetime - from pandas import compat import numpy as np from pandas.core import common as com import pandas.core.nanops as nanops -import pandas.tslib as tslib import pandas.lib as lib from pandas.util.decorators import Appender, cache_readonly - +from pandas.core.strings import StringMethods +from pandas.core.common import AbstractMethodError _shared_docs = dict() _indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='', @@ -31,7 +29,7 @@ class StringMixin(object): # Formatting def __unicode__(self): - raise NotImplementedError + raise AbstractMethodError(self) def __str__(self): """ @@ -85,16 +83,22 @@ def __unicode__(self): # Should be overwritten by base classes return object.__repr__(self) - def _local_dir(self): - """ provide addtional __dir__ for this object """ - return [] + def _dir_additions(self): + """ add addtional __dir__ for this object """ + return set() + + def _dir_deletions(self): + """ delete unwanted __dir__ for this object """ + return set() def __dir__(self): """ Provide method name lookup and completion Only provide 'public' methods """ - return list(sorted(list(set(dir(type(self)) + self._local_dir())))) + rv = set(dir(type(self))) + rv = (rv - self._dir_deletions()) | self._dir_additions() + return sorted(rv) def _reset_cache(self, key=None): """ @@ -120,7 +124,7 @@ def _delegate_method(self, name, *args, **kwargs): raise TypeError("You cannot call method {name}".format(name=name)) @classmethod - def 
_add_delegate_accessors(cls, delegate, accessors, typ): + def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False): """ add accessors to cls from the delegate class @@ -130,6 +134,8 @@ def _add_delegate_accessors(cls, delegate, accessors, typ): delegate : the class to get methods/properties & doc-strings acccessors : string list of accessors to add typ : 'property' or 'method' + overwrite : boolean, default False + overwrite the method/property in the target class if it exists """ @@ -163,7 +169,7 @@ def f(self, *args, **kwargs): f = _create_delegator_method(name) # don't overwrite existing methods/properties - if not hasattr(cls, name): + if overwrite or not hasattr(cls, name): setattr(cls,name,f) @@ -497,6 +503,41 @@ def searchsorted(self, key, side='left'): #### needs tests/doc-string return self.values.searchsorted(key, side=side) + # string methods + def _make_str_accessor(self): + from pandas.core.series import Series + from pandas.core.index import Index + if isinstance(self, Series) and not com.is_object_dtype(self.dtype): + # this really should exclude all series with any non-string values, + # but that isn't practical for performance reasons until we have a + # str dtype (GH 9343) + raise AttributeError("Can only use .str accessor with string " + "values, which use np.object_ dtype in " + "pandas") + elif isinstance(self, Index): + # see scc/inferrence.pyx which can contain string values + allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer') + if self.inferred_type not in allowed_types: + message = ("Can only use .str accessor with string values " + "(i.e. 
inferred_type is 'string', 'unicode' or 'mixed')") + raise AttributeError(message) + if self.nlevels > 1: + message = "Can only use .str accessor with Index, not MultiIndex" + raise AttributeError(message) + return StringMethods(self) + + str = AccessorProperty(StringMethods, _make_str_accessor) + + def _dir_additions(self): + return set() + + def _dir_deletions(self): + try: + getattr(self, 'str') + except AttributeError: + return set(['str']) + return set() + _shared_docs['drop_duplicates'] = ( """Return %(klass)s with duplicate values removed @@ -547,4 +588,4 @@ def duplicated(self, take_last=False): # abstracts def _update_inplace(self, result, **kwargs): - raise NotImplementedError + raise AbstractMethodError(self) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 991678a8e7d79..c5cd8390359dc 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -9,20 +9,18 @@ from pandas.core.algorithms import factorize from pandas.core.base import PandasObject, PandasDelegate -from pandas.core.index import Index, _ensure_index -from pandas.tseries.period import PeriodIndex import pandas.core.common as com -from pandas.util.decorators import cache_readonly +from pandas.util.decorators import cache_readonly, deprecate_kwarg -from pandas.core.common import (CategoricalDtype, ABCSeries, isnull, notnull, +from pandas.core.common import (CategoricalDtype, ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex, + isnull, notnull, is_dtype_equal, is_categorical_dtype, is_integer_dtype, is_object_dtype, _possibly_infer_to_datetimelike, get_dtype_kinds, is_list_like, is_sequence, is_null_slice, is_bool, _ensure_platform_int, _ensure_object, _ensure_int64, - _coerce_indexer_dtype, _values_from_object, take_1d) + _coerce_indexer_dtype, take_1d) from pandas.util.terminal import get_terminal_size from pandas.core.config import get_option -from pandas.core import format as fmt def _cat_compare_op(op): def f(self, other): @@ -61,7 +59,14 
@@ def f(self, other): i = self.categories.get_loc(other) return getattr(self._codes, op)(i) else: - return np.repeat(False, len(self)) + if op == '__eq__': + return np.repeat(False, len(self)) + elif op == '__ne__': + return np.repeat(True, len(self)) + else: + msg = "Cannot compare a Categorical for op {op} with a scalar, " \ + "which is not a category." + raise TypeError(msg.format(op=op)) else: # allow categorical vs object dtype array comparisons for equality @@ -79,7 +84,7 @@ def f(self, other): def maybe_to_categorical(array): """ coerce to a categorical if a series is given """ - if isinstance(array, ABCSeries): + if isinstance(array, (ABCSeries, ABCCategoricalIndex)): return array.values return array @@ -229,15 +234,17 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F # sanitize input if is_categorical_dtype(values): - # we are either a Series or a Categorical - cat = values - if isinstance(values, ABCSeries): - cat = values.values + # we are either a Series or a CategoricalIndex + if isinstance(values, (ABCSeries, ABCCategoricalIndex)): + values = values.values + + if ordered is None: + ordered = values.ordered if categories is None: - categories = cat.categories + categories = values.categories values = values.__array__() - elif isinstance(values, Index): + elif isinstance(values, ABCIndexClass): pass else: @@ -288,11 +295,11 @@ def __init__(self, values, categories=None, ordered=False, name=None, fastpath=F warn("Values and categories have different dtypes. Did you mean to use\n" "'Categorical.from_codes(codes, categories)'?", RuntimeWarning) - if is_integer_dtype(values) and (codes == -1).all(): + if len(values) and is_integer_dtype(values) and (codes == -1).all(): warn("None of the categories were found in values. 
Did you mean to use\n" "'Categorical.from_codes(codes, categories)'?", RuntimeWarning) - self.set_ordered(ordered, inplace=True) + self.set_ordered(ordered or False, inplace=True) self.categories = categories self.name = name self._codes = _coerce_indexer_dtype(codes, categories) @@ -302,11 +309,27 @@ def copy(self): return Categorical(values=self._codes.copy(),categories=self.categories, name=self.name, ordered=self.ordered, fastpath=True) + def astype(self, dtype): + """ coerce this type to another dtype """ + if is_categorical_dtype(dtype): + return self + return np.array(self, dtype=dtype) + @cache_readonly def ndim(self): """Number of dimensions of the Categorical """ return self._codes.ndim + @cache_readonly + def size(self): + """ return the len of myself """ + return len(self) + + @cache_readonly + def itemsize(self): + """ return the size of a single category """ + return self.categories.itemsize + def reshape(self, new_shape, **kwargs): """ compat with .reshape """ return self @@ -388,7 +411,8 @@ def _set_codes(self, codes): codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) def _get_labels(self): - """ Get the category labels (deprecated). + """ + Get the category labels (deprecated). Deprecated, use .codes! 
""" @@ -402,8 +426,10 @@ def _get_labels(self): @classmethod def _validate_categories(cls, categories): - """" Validates that we have good categories """ - if not isinstance(categories, Index): + """ + Validates that we have good categories + """ + if not isinstance(categories, ABCIndexClass): dtype = None if not hasattr(categories, "dtype"): categories = _convert_to_list_like(categories) @@ -414,6 +440,8 @@ def _validate_categories(cls, categories): with_na = np.array(categories) if with_na.dtype != without_na.dtype: dtype = "object" + + from pandas import Index categories = Index(categories, dtype=dtype) if not categories.is_unique: raise ValueError('Categorical categories must be unique') @@ -680,7 +708,7 @@ def add_categories(self, new_categories, inplace=False): if len(already_included) != 0: msg = "new categories must not include old categories: %s" % str(already_included) raise ValueError(msg) - new_categories = list(self._categories) + (new_categories) + new_categories = list(self._categories) + list(new_categories) new_categories = self._validate_categories(new_categories) cat = self if inplace else self.copy() cat._categories = new_categories @@ -754,6 +782,8 @@ def remove_unused_categories(self, inplace=False): cat = self if inplace else self.copy() _used = sorted(np.unique(cat._codes)) new_categories = cat.categories.take(_ensure_platform_int(_used)) + + from pandas.core.index import _ensure_index new_categories = _ensure_index(new_categories) cat._codes = _get_codes_for_values(cat.__array__(), new_categories) cat._categories = new_categories @@ -783,7 +813,8 @@ def shape(self): return tuple([len(self._codes)]) def __array__(self, dtype=None): - """ The numpy array interface. + """ + The numpy array interface. 
Returns ------- @@ -792,7 +823,7 @@ def __array__(self, dtype=None): dtype as categorical.categories.dtype """ ret = take_1d(self.categories.values, self._codes) - if dtype and dtype != self.categories.dtype: + if dtype and not is_dtype_equal(dtype,self.categories.dtype): return np.asarray(ret, dtype) return ret @@ -990,7 +1021,7 @@ def get_values(self): """ # if we are a period index, return a string repr - if isinstance(self.categories, PeriodIndex): + if isinstance(self.categories, ABCPeriodIndex): return take_1d(np.array(self.categories.to_native_types(), dtype=object), self._codes) @@ -1137,7 +1168,8 @@ def to_dense(self): """ return np.asarray(self) - def fillna(self, fill_value=None, method=None, limit=None): + @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value') + def fillna(self, value=None, method=None, limit=None): """ Fill NA/NaN values using the specified method. Parameters @@ -1149,17 +1181,24 @@ def fillna(self, fill_value=None, method=None, limit=None): value : scalar Value to use to fill holes (e.g. 0) limit : int, default None - Maximum size gap to forward or backward fill (not implemented yet!) + (Not implemented yet for Categorical!) + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. 
Returns ------- filled : Categorical with NA/NaN filled """ - if fill_value is None: - fill_value = np.nan + if value is None: + value = np.nan if limit is not None: - raise NotImplementedError + raise NotImplementedError("specifying a limit for fillna has not " + "been implemented yet") values = self._codes @@ -1171,24 +1210,23 @@ def fillna(self, fill_value=None, method=None, limit=None): # we only have one NA in categories values[values == nan_pos] = -1 - # pad / bfill if method is not None: - values = self.to_dense().reshape(-1,len(self)) + values = self.to_dense().reshape(-1, len(self)) values = com.interpolate_2d( - values, method, 0, None, fill_value).astype(self.categories.dtype)[0] + values, method, 0, None, value).astype(self.categories.dtype)[0] values = _get_codes_for_values(values, self.categories) else: - if not isnull(fill_value) and fill_value not in self.categories: + if not isnull(value) and value not in self.categories: raise ValueError("fill value must be in categories") mask = values==-1 if mask.any(): values = values.copy() - values[mask] = self.categories.get_loc(fill_value) + values[mask] = self.categories.get_loc(value) return Categorical(values, categories=self.categories, ordered=self.ordered, name=self.name, fastpath=True) @@ -1235,7 +1273,8 @@ def __iter__(self): """Returns an Iterator over the values of this Categorical.""" return iter(np.array(self)) - def _tidy_repr(self, max_vals=10): + def _tidy_repr(self, max_vals=10, footer=True): + """ a short repr displaying only max_vals and an optional (but default footer) """ num = max_vals // 2 head = self[:num]._get_repr(length=False, name=False, footer=False) tail = self[-(max_vals - num):]._get_repr(length=False, @@ -1243,28 +1282,35 @@ def _tidy_repr(self, max_vals=10): footer=False) result = '%s, ..., %s' % (head[:-1], tail[1:]) - result = '%s\n%s' % (result, self._repr_footer()) + if footer: + result = '%s\n%s' % (result, self._repr_footer()) return compat.text_type(result) - def 
_repr_categories_info(self): - """ Returns a string representation of the footer.""" - + def _repr_categories(self): + """ return the base repr for the categories """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) + from pandas.core import format as fmt category_strs = fmt.format_array(self.categories.get_values(), None) if len(category_strs) > max_categories: num = max_categories // 2 head = category_strs[:num] tail = category_strs[-(max_categories - num):] category_strs = head + ["..."] + tail + # Strip all leading spaces, which format_array adds for columns... category_strs = [x.strip() for x in category_strs] + return category_strs + + def _repr_categories_info(self): + """ Returns a string representation of the footer.""" + + category_strs = self._repr_categories() levheader = "Categories (%d, %s): " % (len(self.categories), self.categories.dtype) width, height = get_terminal_size() - max_width = (width if get_option("display.width") == 0 - else get_option("display.width")) + max_width = get_option("display.width") or width if com.in_ipython_frontend(): # 0 = no breaks max_width = 0 @@ -1291,8 +1337,11 @@ def _repr_footer(self): len(self), self._repr_categories_info()) def _get_repr(self, name=False, length=True, na_rep='NaN', footer=True): - formatter = fmt.CategoricalFormatter(self, name=name, - length=length, na_rep=na_rep, + from pandas.core import format as fmt + formatter = fmt.CategoricalFormatter(self, + name=name, + length=length, + na_rep=na_rep, footer=footer) result = formatter.to_string() return compat.text_type(result) @@ -1307,9 +1356,9 @@ def __unicode__(self): name=True) else: result = '[], %s' % self._get_repr(name=True, - length=False, - footer=True, - ).replace("\n",", ") + length=False, + footer=True, + ).replace("\n",", ") return result @@ -1350,6 +1399,8 @@ def __setitem__(self, key, value): "categories") rvalue = value if is_list_like(value) else [value] + + from pandas 
import Index to_add = Index(rvalue).difference(self.categories) # no assignments of values not in categories, but it's always ok to set something to np.nan @@ -1508,11 +1559,27 @@ def equals(self, other): ------- are_equal : boolean """ - if not isinstance(other, Categorical): - return False # TODO: should this also test if name is equal? - return (self.categories.equals(other.categories) and self.ordered == other.ordered and - np.array_equal(self._codes, other._codes)) + return self.is_dtype_equal(other) and np.array_equal(self._codes, other._codes) + + def is_dtype_equal(self, other): + """ + Returns True if categoricals are the same dtype + same categories, and same ordered + + Parameters + ---------- + other : Categorical + + Returns + ------- + are_equal : boolean + """ + + try: + return self.categories.equals(other.categories) and self.ordered == other.ordered + except (AttributeError, TypeError): + return False def describe(self): """ Describes this Categorical @@ -1596,18 +1663,20 @@ def _delegate_method(self, name, *args, **kwargs): ##### utility routines ##### def _get_codes_for_values(values, categories): - """" + """ utility routine to turn values into codes given the specified categories """ from pandas.core.algorithms import _get_data_algo, _hashtables - if values.dtype != categories.dtype: + if not is_dtype_equal(values.dtype,categories.dtype): values = _ensure_object(values) categories = _ensure_object(categories) + (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) - t = hash_klass(len(categories)) - t.map_locations(_values_from_object(categories)) - return _coerce_indexer_dtype(t.lookup(values), categories) + (_, _), cats = _get_data_algo(categories, _hashtables) + t = hash_klass(len(cats)) + t.map_locations(cats) + return _coerce_indexer_dtype(t.lookup(vals), cats) def _convert_to_list_like(list_like): if hasattr(list_like, "dtype"): diff --git a/pandas/core/common.py b/pandas/core/common.py index ec805aba34d48..1c9326c047a79 
100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -39,6 +39,17 @@ class AmbiguousIndexError(PandasError, KeyError): pass +class AbstractMethodError(NotImplementedError): + """Raise this error instead of NotImplementedError for abstract methods + while keeping compatibility with Python 2 and Python 3. + """ + def __init__(self, class_instance): + self.class_instance = class_instance + + def __str__(self): + return "This method must be defined on the concrete class of " \ + + self.class_instance.__class__.__name__ + _POSSIBLY_CAST_DTYPES = set([np.dtype(t).name for t in ['O', 'int8', 'uint8', 'int16', 'uint16', 'int32', @@ -72,6 +83,16 @@ def _check(cls, inst): ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)) ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)) ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)) +ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)) +ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ", ("index", + "int64index", + "float64index", + "multiindex", + "datetimeindex", + "timedeltaindex", + "periodindex", + "categoricalindex")) + ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",)) ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",)) @@ -1397,14 +1418,19 @@ def _fill_zeros(result, x, y, name, fill): mask the nan's from x """ - if fill is None or is_float_dtype(result): return result if name.startswith(('r', '__r')): x,y = y,x - if np.isscalar(y): + is_typed_variable = (hasattr(y, 'dtype') or hasattr(y,'type')) + is_scalar = lib.isscalar(y) + + if not is_typed_variable and not is_scalar: + return result + + if is_scalar: y = np.array(y) if is_integer_dtype(y): @@ -2439,8 +2465,27 @@ def _get_dtype_type(arr_or_dtype): return np.dtype(arr_or_dtype).type 
elif isinstance(arr_or_dtype, CategoricalDtype): return CategoricalDtypeType - return arr_or_dtype.dtype.type + elif isinstance(arr_or_dtype, compat.string_types): + if is_categorical_dtype(arr_or_dtype): + return CategoricalDtypeType + return _get_dtype_type(np.dtype(arr_or_dtype)) + try: + return arr_or_dtype.dtype.type + except AttributeError: + raise ValueError('%r is not a dtype' % arr_or_dtype) + +def is_dtype_equal(source, target): + """ return a boolean if the dtypes are equal """ + source = _get_dtype_type(source) + target = _get_dtype_type(target) + + try: + return source == target + except TypeError: + # invalid comparison + # object == category will hit this + return False def is_any_int_dtype(arr_or_dtype): tipo = _get_dtype_type(arr_or_dtype) @@ -2510,7 +2555,11 @@ def is_floating_dtype(arr_or_dtype): def is_bool_dtype(arr_or_dtype): - tipo = _get_dtype_type(arr_or_dtype) + try: + tipo = _get_dtype_type(arr_or_dtype) + except ValueError: + # this isn't even a dtype + return False return issubclass(tipo, np.bool_) def is_categorical(array): @@ -2637,7 +2686,12 @@ def _astype_nansafe(arr, dtype, copy=True): if not isinstance(dtype, np.dtype): dtype = _coerce_to_dtype(dtype) - if is_datetime64_dtype(arr): + if issubclass(dtype.type, compat.text_type): + # in Py3 that's str, in Py2 that's unicode + return lib.astype_unicode(arr.ravel()).reshape(arr.shape) + elif issubclass(dtype.type, compat.string_types): + return lib.astype_str(arr.ravel()).reshape(arr.shape) + elif is_datetime64_dtype(arr): if dtype == object: return tslib.ints_to_pydatetime(arr.view(np.int64)) elif dtype == np.int64: @@ -2675,11 +2729,6 @@ def _astype_nansafe(arr, dtype, copy=True): elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer): # work around NumPy brokenness, #1987 return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape) - elif issubclass(dtype.type, compat.text_type): - # in Py3 that's str, in Py2 that's unicode - return 
lib.astype_unicode(arr.ravel()).reshape(arr.shape) - elif issubclass(dtype.type, compat.string_types): - return lib.astype_str(arr.ravel()).reshape(arr.shape) if copy: return arr.astype(dtype) @@ -3083,7 +3132,7 @@ def in_ipython_frontend(): # working with straight ascii. -def _pprint_seq(seq, _nest_lvl=0, **kwds): +def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds): """ internal. pprinter for iterables. you should probably use pprint_thing() rather then calling this directly. @@ -3095,12 +3144,15 @@ def _pprint_seq(seq, _nest_lvl=0, **kwds): else: fmt = u("[%s]") if hasattr(seq, '__setitem__') else u("(%s)") - nitems = get_option("max_seq_items") or len(seq) + if max_seq_items is False: + nitems = len(seq) + else: + nitems = max_seq_items or get_option("max_seq_items") or len(seq) s = iter(seq) r = [] for i in range(min(nitems, len(seq))): # handle sets, no slicing - r.append(pprint_thing(next(s), _nest_lvl + 1, **kwds)) + r.append(pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)) body = ", ".join(r) if nitems < len(seq): @@ -3111,7 +3163,7 @@ def _pprint_seq(seq, _nest_lvl=0, **kwds): return fmt % body -def _pprint_dict(seq, _nest_lvl=0, **kwds): +def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds): """ internal. pprinter for iterables. you should probably use pprint_thing() rather then calling this directly. 
@@ -3121,11 +3173,14 @@ def _pprint_dict(seq, _nest_lvl=0, **kwds): pfmt = u("%s: %s") - nitems = get_option("max_seq_items") or len(seq) + if max_seq_items is False: + nitems = len(seq) + else: + nitems = max_seq_items or get_option("max_seq_items") or len(seq) for k, v in list(seq.items())[:nitems]: - pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, **kwds), - pprint_thing(v, _nest_lvl + 1, **kwds))) + pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), + pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))) if nitems < len(seq): return fmt % (", ".join(pairs) + ", ...") @@ -3134,7 +3189,7 @@ def _pprint_dict(seq, _nest_lvl=0, **kwds): def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False, - quote_strings=False): + quote_strings=False, max_seq_items=None): """ This function is the sanctioned way of converting objects to a unicode representation. @@ -3153,6 +3208,8 @@ def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False, replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults + max_seq_items : False, int, default None + Pass thru to other pretty printers to limit sequence printing Returns ------- @@ -3191,11 +3248,11 @@ def as_escaped_unicode(thing, escape_chars=escape_chars): return compat.text_type(thing) elif (isinstance(thing, dict) and _nest_lvl < get_option("display.pprint_nest_depth")): - result = _pprint_dict(thing, _nest_lvl, quote_strings=True) + result = _pprint_dict(thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items) elif is_sequence(thing) and _nest_lvl < \ get_option("display.pprint_nest_depth"): result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars, - quote_strings=quote_strings) + quote_strings=quote_strings, max_seq_items=max_seq_items) elif isinstance(thing, compat.string_types) and quote_strings: if compat.PY3: fmt = "'%s'" @@ -3265,8 +3322,42 @@ def 
save(obj, path): # TODO remove in 0.13 def _maybe_match_name(a, b): - a_name = getattr(a, 'name', None) - b_name = getattr(b, 'name', None) - if a_name == b_name: - return a_name + a_has = hasattr(a, 'name') + b_has = hasattr(b, 'name') + if a_has and b_has: + if a.name == b.name: + return a.name + else: + return None + elif a_has: + return a.name + elif b_has: + return b.name return None + +def _random_state(state=None): + """ + Helper function for processing random_state arguments. + + Parameters + ---------- + state : int, np.random.RandomState, None. + If receives an int, passes to np.random.RandomState() as seed. + If receives an np.random.RandomState object, just returns object. + If receives `None`, returns an np.random.RandomState object. + If receives anything else, raises an informative ValueError. + Default None. + + Returns + ------- + np.random.RandomState + """ + + if is_integer(state): + return np.random.RandomState(state) + elif isinstance(state, np.random.RandomState): + return state + elif state is None: + return np.random.RandomState() + else: + raise ValueError("random_state must be an integer, a numpy RandomState, or None") diff --git a/pandas/core/format.py b/pandas/core/format.py index b21ca9050ffd0..3ab41ded1deea 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -14,15 +14,14 @@ from pandas.core.config import get_option, set_option import pandas.core.common as com import pandas.lib as lib -from pandas.tslib import iNaT, Timestamp, Timedelta - +from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime +from pandas.tseries.index import DatetimeIndex +from pandas.tseries.period import PeriodIndex import numpy as np import itertools import csv -from pandas.tseries.period import PeriodIndex, DatetimeIndex - docstring_to_string = """ Parameters ---------- @@ -613,8 +612,12 @@ def get_col_type(dtype): name = any(self.frame.columns.names) for i, lev in enumerate(self.frame.index.levels): lev2 = 
lev.format(name=name) - width = len(lev2[0]) - lev3 = [' ' * width] * clevels + lev2 + blank = ' ' * len(lev2[0]) + lev3 = [blank] * clevels + for level_idx, group in itertools.groupby( + self.frame.index.labels[i]): + count = len(list(group)) + lev3.extend([lev2[level_idx]] + [blank] * (count - 1)) strcols.insert(i, lev3) if column_format is None: @@ -773,6 +776,9 @@ def _get_formatted_index(self, frame): formatter=fmt) else: fmt_index = [index.format(name=show_index_names, formatter=fmt)] + fmt_index = [tuple(_make_fixed_width( + list(x), justify='left', minimum=(self.col_space or 0))) + for x in fmt_index] adjoined = adjoin(1, *fmt_index).split('\n') @@ -1255,9 +1261,10 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, if isinstance(cols, Index): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, - date_format=date_format) + date_format=date_format, + quoting=self.quoting) else: - cols = list(cols) + cols = np.asarray(list(cols)) self.obj = self.obj.loc[:, cols] # update columns to include possible multiplicity of dupes @@ -1266,9 +1273,10 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, if isinstance(cols, Index): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, - date_format=date_format) + date_format=date_format, + quoting=self.quoting) else: - cols = list(cols) + cols = np.asarray(list(cols)) # save it self.cols = cols @@ -1367,8 +1375,10 @@ def strftime_with_nulls(x): values = self.obj.copy() values.index = data_index values.columns = values.columns.to_native_types( - na_rep=na_rep, float_format=float_format, - date_format=date_format) + na_rep=na_rep, + float_format=float_format, + date_format=date_format, + quoting=self.quoting) values = values[cols] series = {} @@ -1539,18 +1549,22 @@ def _save_chunk(self, start_i, end_i): slicer = slice(start_i, end_i) for i in range(len(self.blocks)): b = self.blocks[i] - d = b.to_native_types(slicer=slicer, 
na_rep=self.na_rep, + d = b.to_native_types(slicer=slicer, + na_rep=self.na_rep, float_format=self.float_format, decimal=self.decimal, - date_format=self.date_format) + date_format=self.date_format, + quoting=self.quoting) for col_loc, col in zip(b.mgr_locs, d): # self.data is a preallocated list self.data[col_loc] = col - ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep, + ix = data_index.to_native_types(slicer=slicer, + na_rep=self.na_rep, float_format=self.float_format, - date_format=self.date_format) + date_format=self.date_format, + quoting=self.quoting) lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer) @@ -1996,7 +2010,7 @@ def _format_strings(self): # this is pretty arbitrary for now has_large_values = (abs_vals > 1e8).any() - has_small_values = ((abs_vals < 10 ** (-self.digits)) & + has_small_values = ((abs_vals < 10 ** (-self.digits+1)) & (abs_vals > 0)).any() if too_long and has_large_values: @@ -2026,16 +2040,43 @@ def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs): self.date_format = date_format def _format_strings(self): - formatter = (self.formatter or - _get_format_datetime64_from_values(self.values, - nat_rep=self.nat_rep, - date_format=self.date_format)) - fmt_values = [formatter(x) for x in self.values] + # we may have a tz, if so, then need to process element-by-element + # when DatetimeBlockWithTimezones is a reality this could be fixed + values = self.values + if not isinstance(values, DatetimeIndex): + values = DatetimeIndex(values) + + if values.tz is None: + fmt_values = format_array_from_datetime(values.asi8.ravel(), + format=_get_format_datetime64_from_values(values, self.date_format), + na_rep=self.nat_rep).reshape(values.shape) + fmt_values = fmt_values.tolist() + + else: + + values = values.asobject + is_dates_only = _is_dates_only(values) + formatter = (self.formatter or _get_format_datetime64(is_dates_only, values, date_format=self.date_format)) + fmt_values = [ formatter(x) 
for x in self.values ] return fmt_values +def _is_dates_only(values): + # return a boolean if we are only dates (and don't have a timezone) + values = DatetimeIndex(values) + if values.tz is not None: + return False + + values_int = values.asi8 + consider_values = values_int != iNaT + one_day_nanos = (86400 * 1e9) + even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 + if even_days: + return True + return False + def _format_datetime64(x, tz=None, nat_rep='NaT'): if x is None or lib.checknull(x): return nat_rep @@ -2058,22 +2099,6 @@ def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None): else: return x._date_repr - -def _is_dates_only(values): - # return a boolean if we are only dates (and don't have a timezone) - from pandas import DatetimeIndex - values = DatetimeIndex(values) - if values.tz is not None: - return False - - values_int = values.asi8 - consider_values = values_int != iNaT - one_day_nanos = (86400 * 1e9) - even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 - if even_days: - return True - return False - def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None): if is_dates_only: @@ -2084,13 +2109,12 @@ def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None): return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep) -def _get_format_datetime64_from_values(values, - nat_rep='NaT', - date_format=None): +def _get_format_datetime64_from_values(values, date_format): + """ given values and a date_format, return a string format """ is_dates_only = _is_dates_only(values) - return _get_format_datetime64(is_dates_only=is_dates_only, - nat_rep=nat_rep, - date_format=date_format) + if is_dates_only: + return date_format or "%Y-%m-%d" + return None class Timedelta64Formatter(GenericArrayFormatter): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f700d4316842c..f36108262432d 100644 --- a/pandas/core/frame.py +++ 
b/pandas/core/frame.py @@ -26,8 +26,9 @@ from pandas.core.common import (isnull, notnull, PandasError, _try_sort, _default_index, _maybe_upcast, is_sequence, _infer_dtype_from_scalar, _values_from_object, - is_list_like, _get_dtype, _maybe_box_datetimelike, - is_categorical_dtype, is_object_dtype, _possibly_infer_to_datetimelike) + is_list_like, _maybe_box_datetimelike, + is_categorical_dtype, is_object_dtype, + _possibly_infer_to_datetimelike) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (maybe_droplevels, @@ -66,7 +67,7 @@ # Docstring templates _shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame', - axes_single_arg="{0,1,'index','columns'}") + axes_single_arg="{0, 1, 'index', 'columns'}") _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use @@ -191,6 +192,11 @@ def _constructor(self): _constructor_sliced = Series + @property + def _constructor_expanddim(self): + from pandas.core.panel import Panel + return Panel + def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: @@ -260,8 +266,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: - mgr = self._init_ndarray(data, index, columns, dtype=dtype, - copy=copy) + mgr = self._init_dict({}, index, columns, dtype=dtype) elif isinstance(data, collections.Iterator): raise TypeError("data argument can't be an iterator") else: @@ -657,6 +662,8 @@ def from_dict(cls, data, orient='columns', dtype=None): The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. 
+ dtype : dtype, default None + Data type to force, otherwise infer Returns ------- @@ -794,10 +801,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, return cls() try: - if compat.PY3: - first_row = next(data) - else: - first_row = next(data) + first_row = next(data) except StopIteration: return cls(index=index, columns=columns) @@ -1064,8 +1068,6 @@ def to_panel(self): ------- panel : Panel """ - from pandas.core.panel import Panel - # only support this kind for now if (not isinstance(self.index, MultiIndex) or # pragma: no cover len(self.index.levels) != 2): @@ -1103,7 +1105,7 @@ def to_panel(self): shape=shape, ref_items=selfsorted.columns) - return Panel(new_mgr) + return self._constructor_expanddim(new_mgr) to_wide = deprecate('to_wide', to_panel) @@ -1244,6 +1246,9 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', >>> writer.save() """ from pandas.io.excel import ExcelWriter + if self.columns.nlevels > 1: + raise NotImplementedError("Writing as Excel with a MultiIndex is " + "not yet implemented.") need_save = False if encoding == None: @@ -1738,17 +1743,19 @@ def _ixs(self, i, axis=0): lab_slice = slice(label[0], label[-1]) return self.ix[:, lab_slice] else: - label = self.columns[i] if isinstance(label, Index): return self.take(i, axis=1, convert=True) + index_len = len(self.index) + # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. 
This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) - if not len(values): - values = np.array([np.nan] * len(self.index), dtype=object) + + if index_len and not len(values): + values = np.array([np.nan] * index_len, dtype=object) result = self._constructor_sliced.from_array( values, index=self.index, name=label, fastpath=True) @@ -1835,7 +1842,7 @@ def _getitem_multilevel(self, key): result.columns = result_columns else: new_values = self.values[:, loc] - result = DataFrame(new_values, index=self.index, + result = self._constructor(new_values, index=self.index, columns=result_columns).__finalize__(self) if len(result.columns) == 1: top = result.columns[0] @@ -1843,7 +1850,7 @@ def _getitem_multilevel(self, key): (type(top) == tuple and top[0] == '')): result = result[''] if isinstance(result, Series): - result = Series(result, index=self.index, name=key) + result = self._constructor_sliced(result, index=self.index, name=key) result._set_is_copy(self) return result @@ -2244,10 +2251,11 @@ def assign(self, **kwargs): Notes ----- Since ``kwargs`` is a dictionary, the order of your - arguments may not be preserved, and so the order of the - new columns is not well defined. Assigning multiple - columns within the same ``assign`` is possible, but you cannot - reference other columns created within the same ``assign`` call. + arguments may not be preserved. The make things predicatable, + the columns are inserted in alphabetical order, at the end of + your DataFrame. Assigning multiple columns within the same + ``assign`` is possible, but you cannot reference other columns + created within the same ``assign`` call. Examples -------- @@ -2296,7 +2304,7 @@ def assign(self, **kwargs): results[k] = v # ... 
and then assign - for k, v in results.items(): + for k, v in sorted(results.items()): data[k] = v return data @@ -2512,6 +2520,19 @@ def rename(self, index=None, columns=None, **kwargs): return super(DataFrame, self).rename(index=index, columns=columns, **kwargs) + @Appender(_shared_docs['fillna'] % _shared_doc_kwargs) + def fillna(self, value=None, method=None, axis=None, inplace=False, + limit=None, downcast=None, **kwargs): + return super(DataFrame, self).fillna(value=value, method=method, + axis=axis, inplace=inplace, + limit=limit, downcast=downcast, + **kwargs) + + @Appender(_shared_docs['shift'] % _shared_doc_kwargs) + def shift(self, periods=1, freq=None, axis=0, **kwargs): + return super(DataFrame, self).shift(periods=periods, freq=freq, + axis=axis, **kwargs) + def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ @@ -2724,7 +2745,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, Parameters ---------- - axis : {0, 1}, or tuple/list thereof + axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof Pass tuple or list to drop on multiple axes how : {'any', 'all'} * any : if any NA values are present, drop that label @@ -2869,7 +2890,7 @@ def sort(self, columns=None, axis=0, ascending=True, ascending : boolean or list, default True Sort ascending vs. descending. Specify list for multiple sort orders - axis : {0, 1} + axis : {0 or 'index', 1 or 'columns'}, default 0 Sort index/rows versus columns inplace : boolean, default False Sort the DataFrame without creating a new instance @@ -2898,7 +2919,7 @@ def sort_index(self, axis=0, by=None, ascending=True, inplace=False, Parameters ---------- - axis : {0, 1} + axis : {0 or 'index', 1 or 'columns'}, default 0 Sort index/rows versus columns by : object Column name(s) in frame. 
Accepts a column name or a list @@ -3006,7 +3027,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, Parameters ---------- level : int - axis : {0, 1} + axis : {0 or 'index', 1 or 'columns'}, default 0 ascending : boolean, default True inplace : boolean, default False Sort the DataFrame without creating a new instance @@ -3583,7 +3604,7 @@ def unstack(self, level=-1): #---------------------------------------------------------------------- # Time series-related - def diff(self, periods=1): + def diff(self, periods=1, axis=0): """ 1st discrete difference of object @@ -3591,12 +3612,14 @@ def diff(self, periods=1): ---------- periods : int, default 1 Periods to shift for forming difference + axis : {0 or 'index', 1 or 'columns'}, default 0 Returns ------- diffed : DataFrame """ - new_data = self._data.diff(n=periods) + bm_axis = self._get_block_manager_axis(axis) + new_data = self._data.diff(n=periods, axis=bm_axis) return self._constructor(new_data) #---------------------------------------------------------------------- @@ -3616,9 +3639,9 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, ---------- func : function Function to apply to each column/row - axis : {0, 1} - * 0 : apply function to each column - * 1 : apply function to each row + axis : {0 or 'index', 1 or 'columns'}, default 0 + * 0 or 'index': apply function to each column + * 1 or 'columns': apply function to each row broadcast : boolean, default False For aggregation functions, return object of same size with values propagated @@ -4139,8 +4162,8 @@ def corrwith(self, other, axis=0, drop=False): Parameters ---------- other : DataFrame - axis : {0, 1} - 0 to compute column-wise, 1 for row-wise + axis : {0 or 'index', 1 or 'columns'}, default 0 + 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise drop : boolean, default False Drop missing indices from result, default returns union of all @@ -4191,8 +4214,8 @@ def count(self, axis=0, level=None, 
numeric_only=False): Parameters ---------- - axis : {0, 1} - 0 for row-wise, 1 for column-wise + axis : {0 or 'index', 1 or 'columns'}, default 0 + 0 or 'index' for row-wise, 1 or 'columns' for column-wise level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a DataFrame @@ -4345,8 +4368,8 @@ def idxmin(self, axis=0, skipna=True): Parameters ---------- - axis : {0, 1} - 0 for row-wise, 1 for column-wise + axis : {0 or 'index', 1 or 'columns'}, default 0 + 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA @@ -4376,8 +4399,8 @@ def idxmax(self, axis=0, skipna=True): Parameters ---------- - axis : {0, 1} - 0 for row-wise, 1 for column-wise + axis : {0 or 'index', 1 or 'columns'}, default 0 + 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be first index. @@ -4413,19 +4436,19 @@ def mode(self, axis=0, numeric_only=False): """ Gets the mode(s) of each element along the axis selected. Empty if nothing has 2+ occurrences. Adds a row for each mode per label, fills in gaps - with nan. - + with nan. + Note that there could be multiple values returned for the selected - axis (when more than one item share the maximum frequency), which is the - reason why a dataframe is returned. If you want to impute missing values - with the mode in a dataframe ``df``, you can just do this: + axis (when more than one item share the maximum frequency), which is the + reason why a dataframe is returned. 
If you want to impute missing values + with the mode in a dataframe ``df``, you can just do this: ``df.fillna(df.mode().iloc[0])`` Parameters ---------- - axis : {0, 1, 'index', 'columns'} (default 0) - * 0/'index' : get mode of each column - * 1/'columns' : get mode of each row + axis : {0 or 'index', 1 or 'columns'}, default 0 + * 0 or 'index' : get mode of each column + * 1 or 'columns' : get mode of each row numeric_only : boolean, default False if True, only apply to numeric columns @@ -4530,7 +4553,7 @@ def rank(self, axis=0, numeric_only=None, method='average', Parameters ---------- - axis : {0, 1}, default 0 + axis : {0 or 'index', 1 or 'columns'}, default 0 Ranks over columns (0) or rows (1) numeric_only : boolean, default None Include only float, int, boolean data @@ -4582,7 +4605,7 @@ def to_timestamp(self, freq=None, how='start', axis=0, copy=True): how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end - axis : {0, 1} default 0 + axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default) copy : boolean, default True If false then underlying input data is not copied @@ -4613,7 +4636,7 @@ def to_period(self, freq=None, axis=0, copy=True): Parameters ---------- freq : string, default - axis : {0, 1}, default 0 + axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default) copy : boolean, default True If False then underlying input data is not copied diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 012a73fac1ef4..d6c7d87bb25b1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -17,11 +17,12 @@ import pandas.core.common as com import pandas.core.datetools as datetools from pandas import compat -from pandas.compat import map, zip, lrange, string_types, isidentifier, lmap +from pandas.compat import map, zip, lrange, string_types, isidentifier from pandas.core.common import (isnull, notnull, is_list_like, 
_values_from_object, _maybe_promote, _maybe_box_datetimelike, ABCSeries, - SettingWithCopyError, SettingWithCopyWarning) + SettingWithCopyError, SettingWithCopyWarning, + AbstractMethodError) import pandas.core.nanops as nanops from pandas.util.decorators import Appender, Substitution, deprecate_kwarg from pandas.core import config @@ -137,7 +138,7 @@ def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): @property def _constructor(self): - raise NotImplementedError + raise AbstractMethodError(self) def __unicode__(self): # unicode representation based upon iterating over self @@ -145,13 +146,17 @@ def __unicode__(self): prepr = '[%s]' % ','.join(map(com.pprint_thing, self)) return '%s(%s)' % (self.__class__.__name__, prepr) - def _local_dir(self): + def _dir_additions(self): """ add the string-like attributes from the info_axis """ - return [c for c in self._info_axis - if isinstance(c, string_types) and isidentifier(c)] + return set([c for c in self._info_axis + if isinstance(c, string_types) and isidentifier(c)]) @property def _constructor_sliced(self): + raise AbstractMethodError(self) + + @property + def _constructor_expanddim(self): raise NotImplementedError #---------------------------------------------------------------------- @@ -1100,7 +1105,7 @@ def _iget_item_cache(self, item): return lower def _box_item_values(self, key, values): - raise NotImplementedError + raise AbstractMethodError(self) def _maybe_cache_changed(self, item, value): """ @@ -1557,7 +1562,7 @@ def reindex_like(self, other, method=None, copy=True, limit=None): return self.reindex(**d) - def drop(self, labels, axis=0, level=None, inplace=False): + def drop(self, labels, axis=0, level=None, inplace=False, errors='raise'): """ Return new object with labels in requested axis removed @@ -1569,6 +1574,8 @@ def drop(self, labels, axis=0, level=None, inplace=False): For MultiIndex inplace : bool, default False If True, do operation inplace and return None. 
+ errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and existing labels are dropped. Returns ------- @@ -1582,9 +1589,9 @@ def drop(self, labels, axis=0, level=None, inplace=False): if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') - new_axis = axis.drop(labels, level=level) + new_axis = axis.drop(labels, level=level, errors=errors) else: - new_axis = axis.drop(labels) + new_axis = axis.drop(labels, errors=errors) dropped = self.reindex(**{axis_name: new_axis}) try: dropped.axes[axis_].set_names(axis.names, inplace=True) @@ -1941,6 +1948,103 @@ def tail(self, n=5): return self return self.iloc[-n:] + + def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): + """ + Returns a random sample of items from an axis of object. + + Parameters + ---------- + n : int, optional + Number of items from axis to return. Cannot be used with `frac`. + Default = 1 if `frac` = None. + frac : float, optional + Fraction of axis items to return. Cannot be used with `n`. + replace : boolean, optional + Sample with or without replacement. Default = False. + weights : str or ndarray-like, optional + Default 'None' results in equal probability weighting. + If called on a DataFrame, will accept the name of a column + when axis = 0. + Weights must be same length as axis being sampled. + If weights do not sum to 1, they will be normalized to sum to 1. + Missing values in the weights column will be treated as zero. + inf and -inf values not allowed. + random_state : int or numpy.random.RandomState, optional + Seed for the random number generator (if int), or numpy RandomState + object. + axis : int or string, optional + Axis to sample. Accepts axis number or name. Default is stat axis + for given data type (0 for Series and DataFrames, 1 for Panels). + + Returns + ------- + Same type as caller. 
+ """ + + if axis is None: + axis = self._stat_axis_number + + axis = self._get_axis_number(axis) + axis_length = self.shape[axis] + + # Process random_state argument + rs = com._random_state(random_state) + + # Check weights for compliance + if weights is not None: + + # Strings acceptable if a dataframe and axis = 0 + if isinstance(weights, string_types): + if isinstance(self, pd.DataFrame): + if axis == 0: + try: + weights = self[weights] + except KeyError: + raise KeyError("String passed to weights not a valid column") + else: + raise ValueError("Strings can only be passed to weights when sampling from rows on a DataFrame") + else: + raise ValueError("Strings cannot be passed as weights when sampling from a Series or Panel.") + + weights = pd.Series(weights, dtype='float64') + + if len(weights) != axis_length: + raise ValueError("Weights and axis to be sampled must be of same length") + + if (weights == np.inf).any() or (weights == -np.inf).any(): + raise ValueError("weight vector may not include `inf` values") + + if (weights < 0).any(): + raise ValueError("weight vector many not include negative values") + + # If has nan, set to zero. + weights = weights.fillna(0) + + # Renormalize if don't sum to 1 + if weights.sum() != 1: + weights = weights / weights.sum() + + weights = weights.values + + # If no frac or n, default to n=1. + if n is None and frac is None: + n = 1 + elif n is not None and frac is None and n % 1 != 0: + raise ValueError("Only integers accepted as `n` values") + elif n is None and frac is not None: + n = int(round(frac * axis_length)) + elif n is not None and frac is not None: + raise ValueError('Please enter a value for `frac` OR `n`, not both') + + # Check for negative sizes + if n < 0: + raise ValueError("A negative number of rows requested. 
Please provide positive value.") + + locs = rs.choice(axis_length, size=n, replace=replace, p=weights) + return self.take(locs, axis=axis) + + #---------------------------------------------------------------------- # Attribute access @@ -2261,19 +2365,23 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, Parameters ---------- - convert_dates : if True, attempt to soft convert dates, if 'coerce', - force conversion (and non-convertibles get NaT) - convert_numeric : if True attempt to coerce to numbers (including - strings), non-convertibles get NaN - convert_timedeltas : if True, attempt to soft convert timedeltas, if 'coerce', - force conversion (and non-convertibles get NaT) - copy : Boolean, if True, return copy even if no copy is necessary - (e.g. no conversion was done), default is True. - It is meant for internal use, not to be confused with `inplace` kw. + convert_dates : boolean, default True + If True, convert to date where possible. If 'coerce', force + conversion, with unconvertible values becoming NaT. + convert_numeric : boolean, default False + If True, attempt to coerce to numbers (including strings), with + unconvertible values becoming NaN. + convert_timedeltas : boolean, default True + If True, convert to timedelta where possible. If 'coerce', force + conversion, with unconvertible values becoming NaT. + copy : boolean, default True + If True, return a copy even if no copy is necessary (e.g. no + conversion was done). Note: This is meant for internal use, and + should not be confused with inplace. 
Returns ------- - converted : asm as input object + converted : same as input object """ return self._constructor( self._data.convert(convert_dates=convert_dates, @@ -2284,31 +2392,33 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, #---------------------------------------------------------------------- # Filling NA's - def fillna(self, value=None, method=None, axis=None, inplace=False, - limit=None, downcast=None): + _shared_docs['fillna'] = ( """ Fill NA/NaN values using the specified method Parameters ---------- - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). (values not in the dict/Series/DataFrame will not be filled). This value cannot be a list. - axis : {0, 1}, default 0 - * 0: fill column-by-column - * 1: fill row-by-row + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + axis : %(axes_single_arg)s inplace : boolean, default False If True, fill in place. Note: this will modify any other views on this object, (e.g. a no-copy slice for a column in a DataFrame). limit : int, default None - Maximum size gap to forward or backward fill + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. 
If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. downcast : dict, default is None a dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate @@ -2320,8 +2430,13 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, Returns ------- - filled : same type as caller + filled : %(klass)s """ + ) + + @Appender(_shared_docs['fillna'] % _shared_doc_kwargs) + def fillna(self, value=None, method=None, axis=None, inplace=False, + limit=None, downcast=None): if isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) @@ -2724,7 +2839,8 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, Examples -------- - # Filling in NaNs: + Filling in NaNs + >>> s = pd.Series([0, 1, np.nan, 3]) >>> s.interpolate() 0 0 @@ -2810,37 +2926,77 @@ def notnull(self): """ return notnull(self).__finalize__(self) - def clip(self, lower=None, upper=None, out=None): + def clip(self, lower=None, upper=None, out=None, axis=None): """ Trim values at input threshold(s) Parameters ---------- - lower : float, default None - upper : float, default None + lower : float or array_like, default None + upper : float or array_like, default None + axis : int or string axis name, optional + Align object with lower and upper along the given axis. 
Returns ------- clipped : Series + + Examples + -------- + >>> df + 0 1 + 0 0.335232 -1.256177 + 1 -1.367855 0.746646 + 2 0.027753 -1.176076 + 3 0.230930 -0.679613 + 4 1.261967 0.570967 + >>> df.clip(-1.0, 0.5) + 0 1 + 0 0.335232 -1.000000 + 1 -1.000000 0.500000 + 2 0.027753 -1.000000 + 3 0.230930 -0.679613 + 4 0.500000 0.500000 + >>> t + 0 -0.3 + 1 -0.2 + 2 -0.1 + 3 0.0 + 4 0.1 + dtype: float64 + >>> df.clip(t, t + 1, axis=0) + 0 1 + 0 0.335232 -0.300000 + 1 -0.200000 0.746646 + 2 0.027753 -0.100000 + 3 0.230930 0.000000 + 4 1.100000 0.570967 """ if out is not None: # pragma: no cover raise Exception('out argument is not supported yet') # GH 2747 (arguments were reversed) if lower is not None and upper is not None: - lower, upper = min(lower, upper), max(lower, upper) + if lib.isscalar(lower) and lib.isscalar(upper): + lower, upper = min(lower, upper), max(lower, upper) result = self if lower is not None: - result = result.clip_lower(lower) + result = result.clip_lower(lower, axis) if upper is not None: - result = result.clip_upper(upper) + result = result.clip_upper(upper, axis) return result - def clip_upper(self, threshold): + def clip_upper(self, threshold, axis=None): """ - Return copy of input with values above given value truncated + Return copy of input with values above given value(s) truncated + + Parameters + ---------- + threshold : float or array_like + axis : int or string axis name, optional + Align object with threshold along the given axis. 
See also -------- @@ -2850,14 +3006,21 @@ def clip_upper(self, threshold): ------- clipped : same type as input """ - if isnull(threshold): + if np.any(isnull(threshold)): raise ValueError("Cannot use an NA value as a clip threshold") - return self.where((self <= threshold) | isnull(self), threshold) + subset = self.le(threshold, axis=axis) | isnull(self) + return self.where(subset, threshold, axis=axis) - def clip_lower(self, threshold): + def clip_lower(self, threshold, axis=None): """ - Return copy of the input with values below given value truncated + Return copy of the input with values below given value(s) truncated + + Parameters + ---------- + threshold : float or array_like + axis : int or string axis name, optional + Align object with threshold along the given axis. See also -------- @@ -2867,10 +3030,11 @@ def clip_lower(self, threshold): ------- clipped : same type as input """ - if isnull(threshold): + if np.any(isnull(threshold)): raise ValueError("Cannot use an NA value as a clip threshold") - return self.where((self >= threshold) | isnull(self), threshold) + subset = self.ge(threshold, axis=axis) | isnull(self) + return self.where(subset, threshold, axis=axis) def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False): @@ -2903,13 +3067,13 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, Examples -------- - # DataFrame result - >>> data.groupby(func, axis=0).mean() + DataFrame results - # DataFrame result + >>> data.groupby(func, axis=0).mean() >>> data.groupby(['col1', 'col2'])['col3'].mean() - # DataFrame with hierarchical index + DataFrame with hierarchical index + >>> data.groupby(['col1', 'col2']).mean() Returns @@ -3051,7 +3215,8 @@ def first(self, offset): """ from pandas.tseries.frequencies import to_offset if not isinstance(self.index, DatetimeIndex): - raise NotImplementedError + raise NotImplementedError("'first' only supports a DatetimeIndex " + "index") if 
len(self.index) == 0: return self @@ -3085,7 +3250,8 @@ def last(self, offset): """ from pandas.tseries.frequencies import to_offset if not isinstance(self.index, DatetimeIndex): - raise NotImplementedError + raise NotImplementedError("'last' only supports a DatetimeIndex " + "index") if len(self.index) == 0: return self @@ -3199,11 +3365,10 @@ def _align_series(self, other, join='outer', axis=None, level=None, level=level, return_indexers=True) - left_result = self._reindex_indexer(join_index, lidx, copy) - right_result = other._reindex_indexer(join_index, ridx, copy) + left = self._reindex_indexer(join_index, lidx, copy) + right = other._reindex_indexer(join_index, ridx, copy) else: - # one has > 1 ndim fdata = self._data if axis == 0: @@ -3233,23 +3398,19 @@ def _align_series(self, other, join='outer', axis=None, level=None, if copy and fdata is self._data: fdata = fdata.copy() - left_result = DataFrame(fdata) + left = DataFrame(fdata) if ridx is None: - right_result = other + right = other else: - right_result = other.reindex(join_index, level=level) + right = other.reindex(join_index, level=level) # fill fill_na = notnull(fill_value) or (method is not None) if fill_na: - return (left_result.fillna(fill_value, method=method, limit=limit, - axis=fill_axis), - right_result.fillna(fill_value, method=method, - limit=limit)) - else: - return (left_result.__finalize__(self), - right_result.__finalize__(other)) + left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) + right = right.fillna(fill_value, method=method, limit=limit) + return (left.__finalize__(self), right.__finalize__(other)) _shared_docs['where'] = (""" Return an object of same shape as self and whose corresponding @@ -3326,11 +3487,20 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, except ValueError: new_other = np.array(other) - if not (new_other == np.array(other)).all(): - other = np.array(other) + matches = (new_other == np.array(other)) + if matches 
is False or not matches.all(): - # we can't use our existing dtype - # because of incompatibilities + # coerce other to a common dtype if we can + if com.needs_i8_conversion(self.dtype): + try: + other = np.array(other, dtype=self.dtype) + except: + other = np.array(other) + else: + other = np.asarray(other) + other = np.asarray(other, dtype=np.common_type(other, new_other)) + + # we need to use the new dtype try_quick = False else: other = new_other @@ -3409,8 +3579,7 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, return self.where(~cond, other=other, inplace=inplace, axis=axis, level=level, try_cast=try_cast, raise_on_error=raise_on_error) - def shift(self, periods=1, freq=None, axis=0, **kwargs): - """ + _shared_docs['shift'] = (""" Shift index by desired number of periods with an optional time freq Parameters @@ -3420,6 +3589,7 @@ def shift(self, periods=1, freq=None, axis=0, **kwargs): freq : DateOffset, timedelta, or time rule string, optional Increment to use from datetools module or time rule (e.g. 'EOM'). See Notes. + axis : %(axes_single_arg)s Notes ----- @@ -3429,8 +3599,10 @@ def shift(self, periods=1, freq=None, axis=0, **kwargs): Returns ------- - shifted : same type as caller - """ + shifted : %(klass)s + """) + @Appender(_shared_docs['shift'] % _shared_doc_kwargs) + def shift(self, periods=1, freq=None, axis=0, **kwargs): if periods == 0: return self @@ -3589,8 +3761,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): def tz_convert(self, tz, axis=0, level=None, copy=True): """ - Convert the axis to target time zone. If it is time zone naive, it - will be localized to the passed time zone. + Convert tz-aware axis to target time zone. Parameters ---------- @@ -3604,6 +3775,11 @@ def tz_convert(self, tz, axis=0, level=None, copy=True): Returns ------- + + Raises + ------ + TypeError + If the axis is tz-naive. 
""" axis = self._get_axis_number(axis) ax = self._get_axis(axis) @@ -3662,6 +3838,11 @@ def tz_localize(self, tz, axis=0, level=None, copy=True, Returns ------- + + Raises + ------ + TypeError + If the TimeSeries is tz-aware and tz is not None. """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 6d98b3b99021b..51674bad60f5b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -14,7 +14,7 @@ from pandas.core.categorical import Categorical from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes +from pandas.core.index import Index, MultiIndex, CategoricalIndex, _ensure_index from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series from pandas.core.panel import Panel @@ -25,7 +25,8 @@ notnull, _DATELIKE_DTYPES, is_numeric_dtype, is_timedelta64_dtype, is_datetime64_dtype, is_categorical_dtype, _values_from_object, - is_datetime_or_timedelta_dtype, is_bool_dtype) + is_datetime_or_timedelta_dtype, is_bool, + is_bool_dtype, AbstractMethodError) from pandas.core.config import option_context import pandas.lib as lib from pandas.lib import Timestamp @@ -186,7 +187,7 @@ class Grouper(object): Examples -------- - >>> df.groupby(Grouper(key='A')) : syntatic sugar for df.groupby('A') + >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A') >>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date' >>> df.groupby(Grouper(level='date',freq='60s',axis=1)) : specify a resample on the level 'date' on the columns axis with a frequency of 60s @@ -279,7 +280,10 @@ def _set_grouper(self, obj, sort=False): return self.grouper def _get_binner_for_grouping(self, obj): - raise NotImplementedError + """ default to the standard binner here """ + group_axis = obj._get_axis(self.axis) + return Grouping(group_axis, 
None, obj=obj, name=self.key, + level=self.level, sort=self.sort, in_axis=False) @property def groups(self): @@ -422,7 +426,11 @@ def convert(key, s): return Timestamp(key).asm8 return key - sample = next(iter(self.indices)) + if len(self.indices) > 0: + sample = next(iter(self.indices)) + else: + sample = None # Dummy sample + if isinstance(sample, tuple): if not isinstance(name, tuple): msg = ("must supply a tuple to get_group with multiple" @@ -490,15 +498,15 @@ def _set_result_index_ordered(self, result): # shortcut of we have an already ordered grouper if not self.grouper.is_monotonic: - index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ])) + index = Index(np.concatenate([ indices.get(v, []) for v in self.grouper.result_index])) result.index = index result = result.sort_index() result.index = self.obj.index return result - def _local_dir(self): - return sorted(set(self.obj._local_dir() + list(self._apply_whitelist))) + def _dir_additions(self): + return self.obj._dir_additions() | self._apply_whitelist def __getattr__(self, attr): if attr in self._internal_names_set: @@ -670,7 +678,7 @@ def _python_apply_general(self, f): not_indexed_same=mutated) def aggregate(self, func, *args, **kwargs): - raise NotImplementedError + raise AbstractMethodError(self) @Appender(_agg_doc) def agg(self, func, *args, **kwargs): @@ -680,7 +688,7 @@ def _iterate_slices(self): yield self.name, self._selected_obj def transform(self, func, *args, **kwargs): - raise NotImplementedError + raise AbstractMethodError(self) def mean(self): """ @@ -1127,7 +1135,7 @@ def _python_agg_general(self, func, *args, **kwargs): return self._wrap_aggregated_output(output) def _wrap_applied_output(self, *args, **kwargs): - raise NotImplementedError + raise AbstractMethodError(self) def _concat_objects(self, keys, values, not_indexed_same=False): from pandas.tools.merge import concat @@ -1484,13 +1492,16 @@ def aggregate(self, values, how, axis=0): swapped = True values = 
values.swapaxes(0, axis) if arity > 1: - raise NotImplementedError + raise NotImplementedError("arity of more than 1 is not " + "supported for the 'how' argument") out_shape = (self.ngroups,) + values.shape[1:] is_numeric = is_numeric_dtype(values.dtype) if is_datetime_or_timedelta_dtype(values.dtype): values = values.view('int64') + # GH 7754 + is_numeric = True elif is_bool_dtype(values.dtype): values = _algos.ensure_float64(values) elif com.is_integer_dtype(values): @@ -1556,7 +1567,8 @@ def _aggregate(self, result, counts, values, agg_func, is_numeric): comp_ids, _, ngroups = self.group_info if values.ndim > 3: # punting for now - raise NotImplementedError + raise NotImplementedError("number of dimensions is currently " + "limited to 3") elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): @@ -1777,12 +1789,14 @@ def size(self): Compute group sizes """ - base = Series(np.zeros(len(self.result_index), dtype=np.int64), - index=self.result_index) + index = self.result_index + base = Series(np.zeros(len(index), dtype=np.int64), index=index) indices = self.indices for k, v in compat.iteritems(indices): indices[k] = len(v) bin_counts = Series(indices, dtype=np.int64) + # make bin_counts.index to have same name to preserve it + bin_counts.index.name = index.name result = base.add(bin_counts, fill_value=0) # addition with fill_value changes dtype to float64 result = result.astype(np.int64) @@ -1815,7 +1829,8 @@ def _aggregate(self, result, counts, values, agg_func, is_numeric=True): if values.ndim > 3: # punting for now - raise NotImplementedError + raise NotImplementedError("number of dimensions is currently " + "limited to 3") elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): agg_func(result[:, :, i], counts, chunk, self.bins) @@ -1922,7 +1937,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = com._asarray_tuplesafe(self.grouper) # a passed Categorical - elif 
isinstance(self.grouper, Categorical): + elif is_categorical_dtype(self.grouper): # must have an ordered categorical if self.sort: @@ -1936,8 +1951,15 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # fix bug #GH8868 sort=False being ignored in categorical groupby else: self.grouper = self.grouper.reorder_categories(self.grouper.unique()) + + # we make a CategoricalIndex out of the cat grouper + # preserving the categories / ordered attributes self._labels = self.grouper.codes - self._group_index = self.grouper.categories + + c = self.grouper.categories + self._group_index = CategoricalIndex(Categorical.from_codes(np.arange(len(c)), + categories=c, + ordered=self.grouper.ordered)) if self.name is None: self.name = self.grouper.name @@ -1951,8 +1973,12 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, if self.name is None: self.name = grouper.name + # we are done + if isinstance(self.grouper, Grouping): + self.grouper = self.grouper.grouper + # no level passed - if not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)): + elif not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)): if getattr(self.grouper, 'ndim', 1) != 1: t = self.name or str(type(self.grouper)) raise ValueError("Grouper for '%s' not 1-dimensional" % t) @@ -2125,8 +2151,8 @@ def is_in_obj(gpr): else: in_axis, name = False, None - if isinstance(gpr, Categorical) and len(gpr) != len(obj): - raise ValueError("Categorical grouper must have len(grouper) == len(data)") + if is_categorical_dtype(gpr) and len(gpr) != len(obj): + raise ValueError("Categorical dtype grouper must have len(grouper) == len(data)") ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort, in_axis=in_axis) @@ -2423,6 +2449,8 @@ def transform(self, func, *args, **kwargs): wrapper = lambda x: func(x, *args, **kwargs) for i, (name, group) in enumerate(self): + if name not in self.indices: + continue object.__setattr__(group, 
'name', name) res = wrapper(group) @@ -2438,7 +2466,7 @@ def transform(self, func, *args, **kwargs): except: pass - indexer = self._get_index(name) + indexer = self.indices[name] result[indexer] = res result = _possibly_downcast_to_dtype(result, dtype) @@ -2452,9 +2480,12 @@ def _transform_fast(self, func): """ if isinstance(func, compat.string_types): func = getattr(self,func) + values = func().values - counts = self.size().values + counts = self.size().fillna(0).values values = np.repeat(values, com._ensure_platform_int(counts)) + if any(counts == 0): + values = self._try_cast(values, self._selected_obj) return self._set_result_index_ordered(Series(values)) @@ -2489,8 +2520,11 @@ def true_and_notnull(x, *args, **kwargs): return b and notnull(b) try: - indices = [self._get_index(name) if true_and_notnull(group) else [] - for name, group in self] + indices = [] + for name, group in self: + if true_and_notnull(group) and name in self.indices: + indices.append(self.indices[name]) + except ValueError: raise TypeError("the filter must return a boolean result") except TypeError: @@ -2622,7 +2656,8 @@ def aggregate(self, arg, *args, **kwargs): if self._selection is not None: subset = obj if isinstance(subset, DataFrame): - raise NotImplementedError + raise NotImplementedError("Aggregating on a DataFrame is " + "not supported") for fname, agg_how in compat.iteritems(arg): colg = SeriesGroupBy(subset, selection=self._selection, @@ -2671,7 +2706,7 @@ def _aggregate_multiple_funcs(self, arg): from pandas.tools.merge import concat if self.axis != 0: - raise NotImplementedError + raise NotImplementedError("axis other than 0 is not supported") obj = self._obj_with_exclusions @@ -2721,7 +2756,7 @@ def _aggregate_generic(self, func, *args, **kwargs): return self._wrap_generic_output(result, obj) def _wrap_aggregated_output(self, output, names=None): - raise NotImplementedError + raise AbstractMethodError(self) def _aggregate_item_by_item(self, func, *args, **kwargs): # only for 
axis==0 @@ -2808,7 +2843,12 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # make Nones an empty object if com._count_not_none(*values) != len(values): - v = next(v for v in values if v is not None) + try: + v = next(v for v in values if v is not None) + except StopIteration: + # If all values are None, then this will throw an error. + # We'd prefer it return an empty dataframe. + return DataFrame() if v is None: return DataFrame() elif isinstance(v, NDFrame): @@ -3001,24 +3041,18 @@ def transform(self, func, *args, **kwargs): if not result.columns.equals(obj.columns): return self._transform_general(func, *args, **kwargs) - # a grouped that doesn't preserve the index, remap index based on the grouper - # and broadcast it - if ((not isinstance(obj.index,MultiIndex) and - type(result.index) != type(obj.index)) or - len(result.index) != len(obj.index)): - results = obj.values.copy() - indices = self.indices - for (name, group), (i, row) in zip(self, result.iterrows()): + results = np.empty_like(obj.values, result.values.dtype) + indices = self.indices + for (name, group), (i, row) in zip(self, result.iterrows()): + if name in indices: indexer = indices[name] results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1) - return DataFrame(results,columns=result.columns,index=obj.index).convert_objects() - # we can merge the result in - # GH 7383 - names = result.columns - result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:] - result.columns = names - return result + counts = self.size().fillna(0).values + if any(counts == 0): + results = self._try_cast(results, obj[result.columns]) + + return DataFrame(results,columns=result.columns,index=obj.index).convert_objects() def _define_paths(self, func, *args, **kwargs): if isinstance(func, compat.string_types): @@ -3110,10 +3144,9 @@ def filter(self, func, dropna=True, *args, **kwargs): pass # interpret the result of the filter - if 
(isinstance(res, (bool, np.bool_)) or - np.isscalar(res) and isnull(res)): - if res and notnull(res): - indices.append(self._get_index(name)) + if is_bool(res) or (lib.isscalar(res) and isnull(res)): + if res and notnull(res) and name in self.indices: + indices.append(self.indices[name]) else: # non scalars aren't allowed raise TypeError("filter function returned a %s, " @@ -3245,7 +3278,7 @@ def _reindex_output(self, result): return result elif len(groupings) == 1: return result - elif not any([isinstance(ping.grouper, Categorical) + elif not any([isinstance(ping.grouper, (Categorical, CategoricalIndex)) for ping in groupings]): return result @@ -3283,7 +3316,7 @@ def _iterate_slices(self): slice_axis = self._selection_list slicer = lambda x: self._selected_obj[x] else: - raise NotImplementedError + raise NotImplementedError("axis other than 0 is not supported") for val in slice_axis: if val in self.exclusions: @@ -3348,10 +3381,10 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): new_axes[self.axis] = self.grouper.result_index return Panel._from_axes(result, new_axes) else: - raise NotImplementedError + raise ValueError("axis value must be greater than 0") def _wrap_aggregated_output(self, output, names=None): - raise NotImplementedError + raise AbstractMethodError(self) class NDArrayGroupBy(GroupBy): @@ -3405,7 +3438,7 @@ def _chop(self, sdata, slice_obj): return sdata.iloc[slice_obj] def apply(self, f): - raise NotImplementedError + raise AbstractMethodError(self) class ArraySplitter(DataSplitter): diff --git a/pandas/core/index.py b/pandas/core/index.py index e335d00551bab..2bd96fcec2e42 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2,6 +2,7 @@ import datetime import warnings import operator + from functools import partial from pandas.compat import range, zip, lrange, lzip, u, reduce, filter, map from pandas import compat @@ -13,20 +14,22 @@ import pandas.algos as _algos import pandas.index as _index from pandas.lib import 
Timestamp, Timedelta, is_datetime_array -from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs +from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs, PandasDelegate from pandas.util.decorators import (Appender, Substitution, cache_readonly, deprecate) -from pandas.core.common import isnull, array_equivalent import pandas.core.common as com -from pandas.core.common import (_values_from_object, is_float, is_integer, - ABCSeries, _ensure_object, _ensure_int64, is_bool_indexer, +from pandas.core.common import (isnull, array_equivalent, is_dtype_equal, is_object_dtype, + _values_from_object, is_float, is_integer, is_iterator, is_categorical_dtype, + ABCSeries, ABCCategorical, _ensure_object, _ensure_int64, is_bool_indexer, is_list_like, is_bool_dtype, is_null_slice, is_integer_dtype) from pandas.core.config import get_option from pandas.io.common import PerformanceWarning # simplify -default_pprint = lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n'), - quote_strings=True) +default_pprint = lambda x, max_seq_items=None: com.pprint_thing(x, + escape_chars=('\t', '\r', '\n'), + quote_strings=True, + max_seq_items=max_seq_items) __all__ = ['Index'] @@ -44,27 +47,6 @@ def _try_get_item(x): except AttributeError: return x -def _indexOp(opname): - """ - Wrapper function for index comparison operations, to avoid - code duplication. 
- """ - - def wrapper(self, other): - func = getattr(self._data.view(np.ndarray), opname) - result = func(np.asarray(other)) - - # technically we could support bool dtyped Index - # for now just return the indexing array directly - if is_bool_dtype(result): - return result - try: - return Index(result) - except: # pragma: no cover - return result - return wrapper - - class InvalidIndexError(Exception): pass @@ -163,6 +145,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, return Float64Index(data, copy=copy, dtype=dtype, name=name) elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype('object') + elif is_categorical_dtype(data) or is_categorical_dtype(dtype): + return CategoricalIndex(data, copy=copy, name=name, **kwargs) else: subarr = com._asarray_tuplesafe(data, dtype=object) @@ -171,6 +155,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, if copy: subarr = subarr.copy() + elif is_categorical_dtype(data) or is_categorical_dtype(dtype): + return CategoricalIndex(data, copy=copy, name=name, **kwargs) elif hasattr(data, '__array__'): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs) @@ -259,7 +245,7 @@ def __len__(self): """ return len(self._data) - def __array__(self, result=None): + def __array__(self, dtype=None): """ the array interface, return my values """ return self._data.view(np.ndarray) @@ -283,9 +269,6 @@ def get_values(self): """ return the underlying data as an ndarray """ return self.values - def _array_values(self): - return self._data - # ops compat def tolist(self): """ @@ -411,9 +394,150 @@ def __unicode__(self): Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. 
""" - prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'), - quote_strings=True) - return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype) + klass = self.__class__.__name__ + data = self._format_data() + attrs = self._format_attrs() + space = self._format_space() + + prepr = (u(",%s") % space).join([u("%s=%s") % (k, v) + for k, v in attrs]) + + # no data provided, just attributes + if data is None: + data = '' + + res = u("%s(%s%s)") % (klass, + data, + prepr) + + return res + + def _format_space(self): + + # using space here controls if the attributes + # are line separated or not (the default) + + #max_seq_items = get_option('display.max_seq_items') + #if len(self) > max_seq_items: + # space = "\n%s" % (' ' * (len(klass) + 1)) + return " " + + @property + def _formatter_func(self): + """ + Return the formatted data as a unicode string + """ + return default_pprint + + def _format_data(self): + """ + Return the formatted data as a unicode string + """ + from pandas.core.format import get_console_size + display_width, _ = get_console_size() + if display_width is None: + display_width = get_option('display.width') or 80 + + space1 = "\n%s" % (' ' * (len(self.__class__.__name__) + 1)) + space2 = "\n%s" % (' ' * (len(self.__class__.__name__) + 2)) + + n = len(self) + sep = ',' + max_seq_items = get_option('display.max_seq_items') + formatter = self._formatter_func + + # do we want to justify (only do so for non-objects) + is_justify = not (self.inferred_type == 'string' or self.inferred_type == 'categorical' and is_object_dtype(self.categories)) + + # are we a truncated display + is_truncated = n > max_seq_items + + def _extend_line(s, line, value, display_width, next_line_prefix): + + if len(line.rstrip()) + len(value.rstrip()) >= display_width: + s += line.rstrip() + line = next_line_prefix + line += value + return s, line + + def best_len(values): + if values: + return max([len(x) for x in values]) + else: + return 0 + + if n == 0: + 
summary = '[], ' + elif n == 1: + first = formatter(self[0]) + summary = '[%s], ' % first + elif n == 2: + first = formatter(self[0]) + last = formatter(self[-1]) + summary = '[%s, %s], ' % (first, last) + else: + + if n > max_seq_items: + n = min(max_seq_items//2,10) + head = [ formatter(x) for x in self[:n] ] + tail = [ formatter(x) for x in self[-n:] ] + else: + head = [] + tail = [ formatter(x) for x in self ] + + # adjust all values to max length if needed + if is_justify: + + # however, if we are not truncated and we are only a single line, then don't justify + if is_truncated or not (len(', '.join(head)) < display_width and len(', '.join(tail)) < display_width): + max_len = max(best_len(head), best_len(tail)) + head = [x.rjust(max_len) for x in head] + tail = [x.rjust(max_len) for x in tail] + + summary = "" + line = space2 + + for i in range(len(head)): + word = head[i] + sep + ' ' + summary, line = _extend_line(summary, line, word, + display_width, space2) + if is_truncated: + summary += line + space2 + '...' 
+ line = space2 + + for i in range(len(tail)-1): + word = tail[i] + sep + ' ' + summary, line = _extend_line(summary, line, word, + display_width, space2) + + # last value: no sep added + 1 space of width used for trailing ',' + summary, line = _extend_line(summary, line, tail[-1], + display_width - 2, space2) + summary += line + summary += '],' + + if len(summary) > (display_width): + summary += space1 + else: # one row + summary += ' ' + + # remove initial space + summary = '[' + summary[len(space2):] + + return summary + + def _format_attrs(self): + """ + Return a list of tuples of the (attr,formatted_value) + """ + attrs = [] + attrs.append(('dtype',"'%s'" % self.dtype)) + if self.name is not None: + attrs.append(('name',default_pprint(self.name))) + max_seq_items = get_option('display.max_seq_items') + if len(self) > max_seq_items: + attrs.append(('length',len(self))) + return attrs def to_series(self, **kwargs): """ @@ -430,9 +554,10 @@ def to_series(self, **kwargs): def _to_embed(self, keep_tz=False): """ + *this is an internal non-public method* + return an array repr of this object, potentially casting to object - This is for internal compat """ return self.values @@ -455,8 +580,18 @@ def to_datetime(self, dayfirst=False): return DatetimeIndex(self.values) def _assert_can_do_setop(self, other): + if not com.is_list_like(other): + raise TypeError('Input must be Index or array-like') return True + def _convert_can_do_setop(self, other): + if not isinstance(other, Index): + other = Index(other, name=self.name) + result_name = self.name + else: + result_name = self.name if self.name == other.name else None + return other, result_name + @property def nlevels(self): return 1 @@ -624,7 +759,10 @@ def is_numeric(self): return self.inferred_type in ['integer', 'floating'] def is_object(self): - return self.dtype == np.object_ + return is_object_dtype(self.dtype) + + def is_categorical(self): + return self.inferred_type in ['categorical'] def is_mixed(self): return 
'mixed' in self.inferred_type @@ -773,14 +911,11 @@ def is_int(v): return indexer - def _convert_list_indexer(self, key, kind=None): - """ convert a list indexer. these should be locations """ - return key - - def _convert_list_indexer_for_mixed(self, keyarr, kind=None): - """ passed a key that is tuplesafe that is integer based - and we have a mixed index (e.g. number/labels). figure out - the indexer. return None if we can't help + def _convert_list_indexer(self, keyarr, kind=None): + """ + passed a key that is tuplesafe that is integer based + and we have a mixed index (e.g. number/labels). figure out + the indexer. return None if we can't help """ if (kind is None or kind in ['iloc','ix']) and (is_integer_dtype(keyarr) and not self.is_floating()): if self.inferred_type != 'integer': @@ -955,17 +1090,13 @@ def __getitem__(self, key): else: return result - def append(self, other): + def _ensure_compat_append(self, other): """ - Append a collection of Index options together - - Parameters - ---------- - other : Index or list/tuple of indices + prepare the append Returns ------- - appended : Index + list of to_concat, name of result Index """ name = self.name to_concat = [self] @@ -976,14 +1107,30 @@ def append(self, other): to_concat.append(other) for obj in to_concat: - if isinstance(obj, Index) and obj.name != name: + if (isinstance(obj, Index) and + obj.name != name and + obj.name is not None): name = None break to_concat = self._ensure_compat_concat(to_concat) to_concat = [x.values if isinstance(x, Index) else x for x in to_concat] + return to_concat, name + + def append(self, other): + """ + Append a collection of Index options together + + Parameters + ---------- + other : Index or list/tuple of indices + Returns + ------- + appended : Index + """ + to_concat, name = self._ensure_compat_append(other) return Index(np.concatenate(to_concat), name=name) @staticmethod @@ -1045,10 +1192,12 @@ def _format_with_header(self, header, na_rep='NaN', **kwargs): from 
pandas.core.format import format_array - if values.dtype == np.object_: + if is_categorical_dtype(values.dtype): + values = np.array(values) + elif is_object_dtype(values.dtype): values = lib.maybe_convert_objects(values, safe=1) - if values.dtype == np.object_: + if is_object_dtype(values.dtype): result = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n')) for x in values] @@ -1070,12 +1219,16 @@ def to_native_types(self, slicer=None, **kwargs): values = values[slicer] return values._format_native_types(**kwargs) - def _format_native_types(self, na_rep='', **kwargs): + def _format_native_types(self, na_rep='', quoting=None, **kwargs): """ actually format my specific types """ mask = isnull(self) - values = np.array(self, dtype=object, copy=True) + if not self.is_object() and not quoting: + values = np.asarray(self).astype(str) + else: + values = np.array(self, dtype=object, copy=True) + values[mask] = na_rep - return values.tolist() + return values def equals(self, other): """ @@ -1087,9 +1240,6 @@ def equals(self, other): if not isinstance(other, Index): return False - if type(other) != Index: - return other.equals(self) - return array_equivalent(_values_from_object(self), _values_from_object(other)) def identical(self, other): @@ -1183,26 +1333,26 @@ def argsort(self, *args, **kwargs): return result.argsort(*args, **kwargs) def __add__(self, other): - if isinstance(other, Index): + if com.is_list_like(other): warnings.warn("using '+' to provide set union with Indexes is deprecated, " - "use '|' or .union()",FutureWarning) + "use '|' or .union()", FutureWarning) + if isinstance(other, Index): return self.union(other) return Index(np.array(self) + other) + + def __radd__(self, other): + if com.is_list_like(other): + warnings.warn("using '+' to provide set union with Indexes is deprecated, " + "use '|' or .union()", FutureWarning) + return Index(other + np.array(self)) + __iadd__ = __add__ def __sub__(self, other): - if isinstance(other, Index): - 
warnings.warn("using '-' to provide set differences with Indexes is deprecated, " - "use .difference()",FutureWarning) + warnings.warn("using '-' to provide set differences with Indexes is deprecated, " + "use .difference()",FutureWarning) return self.difference(other) - __eq__ = _indexOp('__eq__') - __ne__ = _indexOp('__ne__') - __lt__ = _indexOp('__lt__') - __gt__ = _indexOp('__gt__') - __le__ = _indexOp('__le__') - __ge__ = _indexOp('__ge__') - def __and__(self, other): return self.intersection(other) @@ -1224,18 +1374,16 @@ def union(self, other): ------- union : Index """ - if not hasattr(other, '__iter__'): - raise TypeError('Input must be iterable.') + self._assert_can_do_setop(other) + other = _ensure_index(other) if len(other) == 0 or self.equals(other): return self if len(self) == 0: - return _ensure_index(other) + return other - self._assert_can_do_setop(other) - - if self.dtype != other.dtype: + if not is_dtype_equal(self.dtype,other.dtype): this = self.astype('O') other = other.astype('O') return this.union(other) @@ -1299,17 +1447,13 @@ def intersection(self, other): ------- intersection : Index """ - if not hasattr(other, '__iter__'): - raise TypeError('Input must be iterable!') - self._assert_can_do_setop(other) - other = _ensure_index(other) if self.equals(other): return self - if self.dtype != other.dtype: + if not is_dtype_equal(self.dtype,other.dtype): this = self.astype('O') other = other.astype('O') return this.intersection(other) @@ -1352,23 +1496,17 @@ def difference(self, other): >>> index.difference(index2) """ - - if not hasattr(other, '__iter__'): - raise TypeError('Input must be iterable!') + self._assert_can_do_setop(other) if self.equals(other): return Index([], name=self.name) - if not isinstance(other, Index): - other = np.asarray(other) - result_name = self.name - else: - result_name = self.name if self.name == other.name else None + other, result_name = self._convert_can_do_setop(other) theDiff = sorted(set(self) - set(other)) 
return Index(theDiff, name=result_name) - diff = deprecate('diff',difference) + diff = deprecate('diff', difference) def sym_diff(self, other, result_name=None): """ @@ -1377,7 +1515,7 @@ def sym_diff(self, other, result_name=None): Parameters ---------- - other : array-like + other : Index or array-like result_name : str Returns @@ -1405,13 +1543,10 @@ def sym_diff(self, other, result_name=None): >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64') """ - if not hasattr(other, '__iter__'): - raise TypeError('Input must be iterable!') - - if not isinstance(other, Index): - other = Index(other) - result_name = result_name or self.name - + self._assert_can_do_setop(other) + other, result_name_update = self._convert_can_do_setop(other) + if result_name is None: + result_name = result_name_update the_diff = sorted(set((self.difference(other)).union(other.difference(self)))) return Index(the_diff, name=result_name) @@ -1468,7 +1603,7 @@ def get_value(self, series, key): raise except TypeError: # generator/iterator-like - if com.is_iterator(key): + if is_iterator(key): raise InvalidIndexError(key) else: raise e1 @@ -1543,7 +1678,7 @@ def get_indexer(self, target, method=None, limit=None): if pself is not self or ptarget is not target: return pself.get_indexer(ptarget, method=method, limit=limit) - if self.dtype != target.dtype: + if not is_dtype_equal(self.dtype,target.dtype): this = self.astype(object) target = target.astype(object) return this.get_indexer(target, method=method, limit=limit) @@ -1642,7 +1777,8 @@ def get_indexer_for(self, target, **kwargs): """ guaranteed return of an indexer even when non-unique """ if self.is_unique: return self.get_indexer(target, **kwargs) - return self.get_indexer_non_unique(target, **kwargs)[0] + indexer, _ = self.get_indexer_non_unique(target, **kwargs) + return indexer def _possibly_promote(self, other): # A hack, but it works @@ -1650,7 +1786,7 @@ def _possibly_promote(self, other): if self.inferred_type == 'date' and 
isinstance(other, DatetimeIndex): return DatetimeIndex(self), other elif self.inferred_type == 'boolean': - if self.dtype != 'object': + if not is_object_dtype(self.dtype): return self.astype('object'), other.astype('object') return self, other @@ -1702,12 +1838,35 @@ def isin(self, values, level=None): value_set = set(values) if level is not None: self._validate_index_level(level) - return lib.ismember(self._array_values(), value_set) + return lib.ismember(np.array(self), value_set) + + def _can_reindex(self, indexer): + """ + *this is an internal non-public method* + + Check if we are allowing reindexing with this particular indexer + + Parameters + ---------- + indexer : an integer indexer + + Raises + ------ + ValueError if its a duplicate axis + """ + + # trying to reindex on an axis with duplicates + if not self.is_unique and len(indexer): + raise ValueError("cannot reindex from a duplicate axis") def reindex(self, target, method=None, level=None, limit=None): """ Create index with target's values (move/add/delete values as necessary) + Parameters + ---------- + target : an iterable + Returns ------- new_index : pd.Index @@ -1728,6 +1887,7 @@ def reindex(self, target, method=None, level=None, limit=None): target = self._simple_new(np.empty(0, dtype=self.dtype), **attrs) else: target = _ensure_index(target) + if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') @@ -1752,9 +1912,72 @@ def reindex(self, target, method=None, level=None, limit=None): return target, indexer + def _reindex_non_unique(self, target): + """ + *this is an internal non-public method* + + Create a new index with target's values (move/add/delete values as necessary) + use with non-unique Index and a possibly non-unique target + + Parameters + ---------- + target : an iterable + + Returns + ------- + new_index : pd.Index + Resulting index + indexer : np.ndarray or None + Indices of output values in original index + + """ + + target = 
_ensure_index(target) + indexer, missing = self.get_indexer_non_unique(target) + check = indexer != -1 + new_labels = self.take(indexer[check]) + new_indexer = None + + if len(missing): + l = np.arange(len(indexer)) + + missing = com._ensure_platform_int(missing) + missing_labels = target.take(missing) + missing_indexer = com._ensure_int64(l[~check]) + cur_labels = self.take(indexer[check]).values + cur_indexer = com._ensure_int64(l[check]) + + new_labels = np.empty(tuple([len(indexer)]), dtype=object) + new_labels[cur_indexer] = cur_labels + new_labels[missing_indexer] = missing_labels + + # a unique indexer + if target.is_unique: + + # see GH5553, make sure we use the right indexer + new_indexer = np.arange(len(indexer)) + new_indexer[cur_indexer] = np.arange(len(cur_labels)) + new_indexer[missing_indexer] = -1 + + # we have a non_unique selector, need to use the original + # indexer here + else: + + # need to retake to have the same size as the indexer + indexer = indexer.values + indexer[~check] = 0 + + # reset the new indexer to account for the new size + new_indexer = np.arange(len(self.take(indexer))) + new_indexer[~check] = -1 + + return self._shallow_copy(new_labels), indexer, new_indexer + def join(self, other, how='left', level=None, return_indexers=False): """ - Internal API method. Compute join_index and indexers to conform data + *this is an internal non-public method* + + Compute join_index and indexers to conform data structures to the new index. 
Parameters @@ -1813,7 +2036,7 @@ def join(self, other, how='left', level=None, return_indexers=False): result = x, z, y return result - if self.dtype != other.dtype: + if not is_dtype_equal(self.dtype,other.dtype): this = self.astype('O') other = other.astype('O') return this.join(other, how=how, @@ -2325,13 +2548,15 @@ def insert(self, loc, item): (_self[:loc], item_idx, _self[loc:])) return Index(idx, name=self.name) - def drop(self, labels): + def drop(self, labels, errors='raise'): """ Make new Index with passed list of labels deleted Parameters ---------- labels : array-like + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and existing labels are dropped. Returns ------- @@ -2341,7 +2566,9 @@ def drop(self, labels): indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): - raise ValueError('labels %s not contained in axis' % labels[mask]) + if errors != 'ignore': + raise ValueError('labels %s not contained in axis' % labels[mask]) + indexer = indexer[~mask] return self.delete(indexer) @Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs) @@ -2360,6 +2587,49 @@ def _evaluate_with_timedelta_like(self, other, op, opstr): def _evaluate_with_datetime_like(self, other, op, opstr): raise TypeError("can only perform ops with datetime like values") + @classmethod + def _add_comparison_methods(cls): + """ add in comparison methods """ + + def _make_compare(op): + + def _evaluate_compare(self, other): + func = getattr(self.values, op) + result = func(np.asarray(other)) + + # technically we could support bool dtyped Index + # for now just return the indexing array directly + if is_bool_dtype(result): + return result + try: + return Index(result) + except TypeError: + return result + + return _evaluate_compare + + cls.__eq__ = _make_compare('__eq__') + cls.__ne__ = _make_compare('__ne__') + cls.__lt__ = _make_compare('__lt__') + cls.__gt__ = _make_compare('__gt__') + cls.__le__ = _make_compare('__le__') + cls.__ge__ = 
_make_compare('__ge__') + + @classmethod + def _add_numericlike_set_methods_disabled(cls): + """ add in the numeric set-like methods to disable """ + + def _make_invalid_op(name): + + def invalid_op(self, other=None): + raise TypeError("cannot perform {name} with this index type: {typ}".format(name=name, + typ=type(self))) + invalid_op.__name__ = name + return invalid_op + + cls.__add__ = cls.__radd__ = __iadd__ = _make_invalid_op('__add__') + cls.__sub__ = __isub__ = _make_invalid_op('__sub__') + @classmethod def _add_numeric_methods_disabled(cls): """ add in numeric methods to disable """ @@ -2414,7 +2684,7 @@ def _evaluate_numeric_binop(self, other): elif isinstance(other, (Timestamp, np.datetime64)): return self._evaluate_with_datetime_like(other, op, opstr) else: - if not (com.is_float(other) or com.is_integer(other)): + if not (is_float(other) or is_integer(other)): raise TypeError("can only perform ops with scalar values") # if we are a reversed non-communative op @@ -2478,7 +2748,7 @@ def _make_logical_function(name, desc, f): @Appender(_doc) def logical_func(self, *args, **kwargs): result = f(self.values) - if isinstance(result, (np.ndarray, com.ABCSeries, Index)) \ + if isinstance(result, (np.ndarray, ABCSeries, Index)) \ and result.ndim == 0: # return NumPy type return result.dtype.type(result.item()) @@ -2510,6 +2780,529 @@ def invalid_op(self, other=None): Index._add_numeric_methods_disabled() Index._add_logical_methods() +Index._add_comparison_methods() + +class CategoricalIndex(Index, PandasDelegate): + """ + + Immutable Index implementing an ordered, sliceable set. CategoricalIndex + represents a sparsely populated Index with an underlying Categorical. 
+
+    Parameters
+    ----------
+    data : array-like or Categorical, (1-dimensional)
+    categories : optional, array-like
+        categories for the CategoricalIndex
+    ordered : boolean,
+        designating if the categories are ordered
+    copy : bool
+        Make a copy of input ndarray
+    name : object
+        Name to be stored in the index
+
+    """
+
+    _typ = 'categoricalindex'
+    _engine_type = _index.Int64Engine
+    _attributes = ['name','categories','ordered']
+
+    def __new__(cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
+
+        if fastpath:
+            return cls._simple_new(data, name=name)
+
+        if isinstance(data, ABCCategorical):
+            data = cls._create_categorical(cls, data, categories, ordered)
+        elif isinstance(data, CategoricalIndex):
+            data = data._data
+            data = cls._create_categorical(cls, data, categories, ordered)
+        else:
+
+            # don't allow scalars
+            # if data is None, then categories must be provided
+            if lib.isscalar(data):
+                if data is not None or categories is None:
+                    cls._scalar_data_error(data)
+                data = []
+            data = cls._create_categorical(cls, data, categories, ordered)
+
+        if copy:
+            data = data.copy()
+
+        return cls._simple_new(data, name=name)
+
+    def _create_from_codes(self, codes, categories=None, ordered=None, name=None):
+        """
+        *this is an internal non-public method*
+
+        create the correct categorical from codes
+
+        Parameters
+        ----------
+        codes : new codes
+        categories : optional categories, defaults to existing
+        ordered : optional ordered attribute, defaults to existing
+        name : optional name attribute, defaults to existing
+
+        Returns
+        -------
+        CategoricalIndex
+        """
+
+        from pandas.core.categorical import Categorical
+        if categories is None:
+            categories = self.categories
+        if ordered is None:
+            ordered = self.ordered
+        if name is None:
+            name = self.name
+        cat = Categorical.from_codes(codes, categories=categories, ordered=ordered)
+        return CategoricalIndex(cat, name=name)
+
+    @staticmethod
+    def 
_create_categorical(self, data, categories=None, ordered=None):
+        """
+        *this is an internal non-public method*
+
+        create the correct categorical from data and the properties
+
+        Parameters
+        ----------
+        data : data for new Categorical
+        categories : optional categories, defaults to existing
+        ordered : optional ordered attribute, defaults to existing
+
+        Returns
+        -------
+        Categorical
+        """
+
+        if not isinstance(data, ABCCategorical):
+            from pandas.core.categorical import Categorical
+            data = Categorical(data, categories=categories, ordered=ordered)
+        else:
+            if categories is not None:
+                data = data.set_categories(categories)
+            if ordered is not None:
+                data = data.set_ordered(ordered)
+        return data
+
+    @classmethod
+    def _simple_new(cls, values, name=None, categories=None, ordered=None, **kwargs):
+        result = object.__new__(cls)
+
+        values = cls._create_categorical(cls, values, categories, ordered)
+        result._data = values
+        result.name = name
+        for k, v in compat.iteritems(kwargs):
+            setattr(result,k,v)
+
+        result._reset_identity()
+        return result
+
+    def _is_dtype_compat(self, other):
+        """
+        *this is an internal non-public method*
+
+        provide a comparison between the dtype of self and other (coercing if needed)
+
+        Raises
+        ------
+        TypeError if the dtypes are not compatible
+        """
+
+        if is_categorical_dtype(other):
+            if isinstance(other, CategoricalIndex):
+                other = other.values
+            if not other.is_dtype_equal(self):
+                raise TypeError("categories must match existing categories when appending")
+        else:
+            values = other
+            other = CategoricalIndex(self._create_categorical(self, other, categories=self.categories, ordered=self.ordered))
+            if not other.isin(values).all():
+                raise TypeError("cannot append a non-category item to a CategoricalIndex")
+
+        return other
+
+    def equals(self, other):
+        """
+        Determines if two CategoricalIndex objects contain the same elements. 
+ """ + if self.is_(other): + return True + + try: + other = self._is_dtype_compat(other) + return array_equivalent(self._data, other) + except (TypeError, ValueError): + pass + + return False + + def _format_attrs(self): + """ + Return a list of tuples of the (attr,formatted_value) + """ + max_categories = (10 if get_option("display.max_categories") == 0 + else get_option("display.max_categories")) + attrs = [('categories', default_pprint(self.categories, max_seq_items=max_categories)), + ('ordered',self.ordered)] + if self.name is not None: + attrs.append(('name',default_pprint(self.name))) + attrs.append(('dtype',"'%s'" % self.dtype)) + max_seq_items = get_option('display.max_seq_items') + if len(self) > max_seq_items: + attrs.append(('length',len(self))) + return attrs + + @property + def inferred_type(self): + return 'categorical' + + @property + def values(self): + """ return the underlying data, which is a Categorical """ + return self._data + + @property + def codes(self): + return self._data.codes + + @property + def categories(self): + return self._data.categories + + @property + def ordered(self): + return self._data.ordered + + def __contains__(self, key): + hash(key) + return key in self.values + + def __array__(self, dtype=None): + """ the array interface, return my values """ + return np.array(self._data, dtype=dtype) + + def argsort(self, *args, **kwargs): + return self.values.argsort(*args, **kwargs) + + @cache_readonly + def _engine(self): + + # we are going to look things up with the codes themselves + return self._engine_type(lambda: self.codes.astype('i8'), len(self)) + + @cache_readonly + def is_unique(self): + return not self.duplicated().any() + + @Appender(_shared_docs['duplicated'] % _index_doc_kwargs) + def duplicated(self, take_last=False): + from pandas.hashtable import duplicated_int64 + return duplicated_int64(self.codes.astype('i8'), take_last) + + def get_loc(self, key, method=None): + """ + Get integer location for requested label 
+ + Parameters + ---------- + key : label + method : {None} + * default: exact matches only. + + Returns + ------- + loc : int if unique index, possibly slice or mask if not + """ + codes = self.categories.get_loc(key) + if (codes == -1): + raise KeyError(key) + indexer, _ = self._engine.get_indexer_non_unique(np.array([codes])) + if (indexer==-1).any(): + raise KeyError(key) + + return indexer + + def _can_reindex(self, indexer): + """ always allow reindexing """ + pass + + def reindex(self, target, method=None, level=None, limit=None): + """ + Create index with target's values (move/add/delete values as necessary) + + Returns + ------- + new_index : pd.Index + Resulting index + indexer : np.ndarray or None + Indices of output values in original index + + """ + + if method is not None: + raise NotImplementedError("argument method is not implemented for CategoricalIndex.reindex") + if level is not None: + raise NotImplementedError("argument level is not implemented for CategoricalIndex.reindex") + if limit is not None: + raise NotImplementedError("argument limit is not implemented for CategoricalIndex.reindex") + + target = _ensure_index(target) + + if not is_categorical_dtype(target) and not target.is_unique: + raise ValueError("cannot reindex with a non-unique indexer") + + indexer, missing = self.get_indexer_non_unique(np.array(target)) + new_target = self.take(indexer) + + + # filling in missing if needed + if len(missing): + cats = self.categories.get_indexer(target) + if (cats==-1).any(): + + # coerce to a regular index here! + result = Index(np.array(self),name=self.name) + new_target, indexer, _ = result._reindex_non_unique(np.array(target)) + + else: + + codes = new_target.codes.copy() + codes[indexer==-1] = cats[missing] + new_target = self._create_from_codes(codes) + + # we always want to return an Index type here + # to be consistent with .reindex for other index types (e.g. 
they don't coerce + # based on the actual values, only on the dtype) + # unless we had an inital Categorical to begin with + # in which case we are going to conform to the passed Categorical + new_target = np.asarray(new_target) + if is_categorical_dtype(target): + new_target = target._shallow_copy(new_target, name=self.name) + else: + new_target = Index(new_target, name=self.name) + + return new_target, indexer + + def _reindex_non_unique(self, target): + """ reindex from a non-unique; which CategoricalIndex's are almost always """ + new_target, indexer = self.reindex(target) + new_indexer = None + + check = indexer==-1 + if check.any(): + new_indexer = np.arange(len(self.take(indexer))) + new_indexer[check] = -1 + + return new_target, indexer, new_indexer + + def get_indexer(self, target, method=None, limit=None): + """ + Compute indexer and mask for new index given the current index. The + indexer should be then used as an input to ndarray.take to align the + current data to the new index. 
The mask determines whether labels are + found or not in the current index + + Parameters + ---------- + target : MultiIndex or Index (of tuples) + method : {'pad', 'ffill', 'backfill', 'bfill'} + pad / ffill: propagate LAST valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + + Notes + ----- + This is a low-level method and probably should be used at your own risk + + Examples + -------- + >>> indexer, mask = index.get_indexer(new_index) + >>> new_values = cur_values.take(indexer) + >>> new_values[-mask] = np.nan + + Returns + ------- + (indexer, mask) : (ndarray, ndarray) + """ + method = com._clean_reindex_fill_method(method) + target = _ensure_index(target) + + if isinstance(target, CategoricalIndex): + target = target.categories + + if method == 'pad' or method == 'backfill': + raise NotImplementedError("method='pad' and method='backfill' not implemented yet " + 'for CategoricalIndex') + elif method == 'nearest': + raise NotImplementedError("method='nearest' not implemented yet " + 'for CategoricalIndex') + else: + + codes = self.categories.get_indexer(target) + indexer, _ = self._engine.get_indexer_non_unique(codes) + + return com._ensure_platform_int(indexer) + + def get_indexer_non_unique(self, target): + """ this is the same for a CategoricalIndex for get_indexer; the API returns the missing values as well """ + target = _ensure_index(target) + + if isinstance(target, CategoricalIndex): + target = target.categories + + codes = self.categories.get_indexer(target) + return self._engine.get_indexer_non_unique(codes) + + def _convert_list_indexer(self, keyarr, kind=None): + """ + we are passed a list indexer. 
+ Return our indexer or raise if all of the values are not included in the categories + """ + codes = self.categories.get_indexer(keyarr) + if (codes==-1).any(): + raise KeyError("a list-indexer must only include values that are in the categories") + + return None + + def take(self, indexer, axis=0): + """ + return a new CategoricalIndex of the values selected by the indexer + + See also + -------- + numpy.ndarray.take + """ + + indexer = com._ensure_platform_int(indexer) + taken = self.codes.take(indexer) + return self._create_from_codes(taken) + + def delete(self, loc): + """ + Make new Index with passed location(-s) deleted + + Returns + ------- + new_index : Index + """ + return self._create_from_codes(np.delete(self.codes, loc)) + + def insert(self, loc, item): + """ + Make new Index inserting new item at location. Follows + Python list.append semantics for negative values + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + new_index : Index + + Raises + ------ + ValueError if the item is not in the categories + + """ + code = self.categories.get_indexer([item]) + if (code == -1): + raise TypeError("cannot insert an item into a CategoricalIndex that is not already an existing category") + + codes = self.codes + codes = np.concatenate( + (codes[:loc], code, codes[loc:])) + return self._create_from_codes(codes) + + def append(self, other): + """ + Append a collection of CategoricalIndex options together + + Parameters + ---------- + other : Index or list/tuple of indices + + Returns + ------- + appended : Index + + Raises + ------ + ValueError if other is not in the categories + """ + to_concat, name = self._ensure_compat_append(other) + to_concat = [ self._is_dtype_compat(c) for c in to_concat ] + codes = np.concatenate([ c.codes for c in to_concat ]) + return self._create_from_codes(codes, name=name) + + @classmethod + def _add_comparison_methods(cls): + """ add in comparison methods """ + + def _make_compare(op): + + def 
_evaluate_compare(self, other): + + # if we have a Categorical type, then must have the same categories + if isinstance(other, CategoricalIndex): + other = other.values + elif isinstance(other, Index): + other = self._create_categorical(self, other.values, categories=self.categories, ordered=self.ordered) + + if isinstance(other, ABCCategorical): + if not (self.values.is_dtype_equal(other) and len(self.values) == len(other)): + raise TypeError("categorical index comparisions must have the same categories and ordered attributes") + + return getattr(self.values, op)(other) + + return _evaluate_compare + + cls.__eq__ = _make_compare('__eq__') + cls.__ne__ = _make_compare('__ne__') + cls.__lt__ = _make_compare('__lt__') + cls.__gt__ = _make_compare('__gt__') + cls.__le__ = _make_compare('__le__') + cls.__ge__ = _make_compare('__ge__') + + + def _delegate_method(self, name, *args, **kwargs): + """ method delegation to the .values """ + method = getattr(self.values, name) + if 'inplace' in kwargs: + raise ValueError("cannot use inplace with CategoricalIndex") + res = method(*args, **kwargs) + if lib.isscalar(res): + return res + return CategoricalIndex(res, name=self.name) + + @classmethod + def _add_accessors(cls): + """ add in Categorical accessor methods """ + + from pandas.core.categorical import Categorical + CategoricalIndex._add_delegate_accessors(delegate=Categorical, + accessors=["rename_categories", + "reorder_categories", + "add_categories", + "remove_categories", + "remove_unused_categories", + "set_categories", + "as_ordered", + "as_unordered", + "min", + "max"], + typ='method', + overwrite=True) + + +CategoricalIndex._add_numericlike_set_methods_disabled() +CategoricalIndex._add_numeric_methods_disabled() +CategoricalIndex._add_logical_methods_disabled() +CategoricalIndex._add_comparison_methods() +CategoricalIndex._add_accessors() class NumericIndex(Index): @@ -2782,7 +3575,7 @@ def equals(self, other): try: if not isinstance(other, Float64Index): other = 
self._constructor(other) - if self.dtype != other.dtype or self.shape != other.shape: + if not is_dtype_equal(self.dtype,other.dtype) or self.shape != other.shape: return False left, right = self.values, other.values return ((left == right) | (self._isnan & other._isnan)).all() @@ -2848,7 +3641,7 @@ def isin(self, values, level=None): value_set = set(values) if level is not None: self._validate_index_level(level) - return lib.ismember_nans(self._array_values(), value_set, + return lib.ismember_nans(np.array(self), value_set, isnull(list(value_set)).any()) @@ -3188,7 +3981,7 @@ def copy(self, names=None, dtype=None, levels=None, labels=None, verify_integrity=False, _set_identity=_set_identity) - def __array__(self, result=None): + def __array__(self, dtype=None): """ the array interface, return my values """ return self.values @@ -3200,10 +3993,6 @@ def view(self, cls=None): _shallow_copy = view - def _array_values(self): - # hack for various methods - return self.values - @cache_readonly def dtype(self): return np.dtype('O') @@ -3216,40 +4005,24 @@ def nbytes(self): names_nbytes = sum(( getsizeof(i) for i in self.names )) return level_nbytes + label_nbytes + names_nbytes - def __repr__(self): - encoding = get_option('display.encoding') - attrs = [('levels', default_pprint(self.levels)), - ('labels', default_pprint(self.labels))] + def _format_attrs(self): + """ + Return a list of tuples of the (attr,formatted_value) + """ + attrs = [('levels', default_pprint(self._levels, max_seq_items=False)), + ('labels', default_pprint(self._labels, max_seq_items=False))] if not all(name is None for name in self.names): attrs.append(('names', default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', default_pprint(self.sortorder))) + return attrs - space = ' ' * (len(self.__class__.__name__) + 1) - prepr = (u(",\n%s") % space).join([u("%s=%s") % (k, v) - for k, v in attrs]) - res = u("%s(%s)") % (self.__class__.__name__, prepr) - - if not compat.PY3: 
- # needs to be str in Python 2 - res = res.encode(encoding) - return res - - def __unicode__(self): - """ - Return a string representation for a particular Index + def _format_space(self): + return "\n%s" % (' ' * (len(self.__class__.__name__) + 1)) - Invoked by unicode(df) in py2 only. Yields a Unicode String in both - py2/py3. - """ - rows = self.format(names=True) - max_rows = get_option('display.max_rows') - if len(rows) > max_rows: - spaces = (len(rows[0]) - 3) // 2 - centered = ' ' * spaces - half = max_rows // 2 - rows = rows[:half] + [centered + '...' + centered] + rows[-half:] - return "\n".join(rows) + def _format_data(self): + # we are formatting thru the attributes + return None def __len__(self): return len(self.labels[0]) @@ -3293,7 +4066,7 @@ def _reference_duplicate_name(self, name): return np.sum(name == np.asarray(self.names)) > 1 def _format_native_types(self, **kwargs): - return self.tolist() + return self.values @property def _constructor(self): @@ -3350,7 +4123,7 @@ def values(self): taken = com.take_1d(lev._box_values(lev.values), lab, fill_value=_get_na_value(lev.dtype.type)) else: - taken = com.take_1d(lev.values, lab) + taken = com.take_1d(np.asarray(lev.values), lab) values.append(taken) self._tuples = lib.fast_zip(values) @@ -3415,7 +4188,7 @@ def _try_mi(k): raise except TypeError: # generator/iterator-like - if com.is_iterator(key): + if is_iterator(key): raise InvalidIndexError(key) else: raise e1 @@ -3847,7 +4620,7 @@ def repeat(self, n): sortorder=self.sortorder, verify_integrity=False) - def drop(self, labels, level=None): + def drop(self, labels, level=None, errors='raise'): """ Make new MultiIndex with passed list of labels deleted @@ -3870,19 +4643,24 @@ def drop(self, labels, level=None): indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): - raise ValueError('labels %s not contained in axis' - % labels[mask]) - return self.delete(indexer) + if errors != 'ignore': + raise ValueError('labels %s not contained 
in axis' + % labels[mask]) + indexer = indexer[~mask] except Exception: pass inds = [] for label in labels: - loc = self.get_loc(label) - if isinstance(loc, int): - inds.append(loc) - else: - inds.extend(lrange(loc.start, loc.stop)) + try: + loc = self.get_loc(label) + if isinstance(loc, int): + inds.append(loc) + else: + inds.extend(lrange(loc.start, loc.stop)) + except KeyError: + if errors != 'ignore': + raise return self.delete(inds) @@ -4014,7 +4792,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): labels = list(self.labels) shape = list(self.levshape) - if isinstance(level, (str, int)): + if isinstance(level, (compat.string_types, int)): level = [level] level = [self._get_level_number(lev) for lev in level] @@ -4081,7 +4859,7 @@ def get_indexer(self, target, method=None, limit=None): if isinstance(target, MultiIndex): target_index = target._tuple_index - if target_index.dtype != object: + if not is_object_dtype(target_index.dtype): return np.ones(len(target_index)) * -1 if not self.is_unique: @@ -4640,9 +5418,9 @@ def equals(self, other): return False for i in range(self.nlevels): - svalues = com.take_nd(self.levels[i].values, self.labels[i], + svalues = com.take_nd(np.asarray(self.levels[i].values), self.labels[i], allow_fill=False) - ovalues = com.take_nd(other.levels[i].values, other.labels[i], + ovalues = com.take_nd(np.asarray(other.levels[i].values), other.labels[i], allow_fill=False) if not array_equivalent(svalues, ovalues): return False @@ -4677,12 +5455,11 @@ def union(self, other): >>> index.union(index2) """ self._assert_can_do_setop(other) + other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self - result_names = self.names if self.names == other.names else None - uniq_tuples = lib.fast_unique_multiple([self.values, other.values]) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) @@ -4700,12 +5477,11 @@ def intersection(self, other): Index 
""" self._assert_can_do_setop(other) + other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self - result_names = self.names if self.names == other.names else None - self_tuples = self.values other_tuples = other.values uniq_tuples = sorted(set(self_tuples) & set(other_tuples)) @@ -4726,18 +5502,10 @@ def difference(self, other): diff : MultiIndex """ self._assert_can_do_setop(other) + other, result_names = self._convert_can_do_setop(other) - if not isinstance(other, MultiIndex): - if len(other) == 0: + if len(other) == 0: return self - try: - other = MultiIndex.from_tuples(other) - except: - raise TypeError('other must be a MultiIndex or a list of' - ' tuples') - result_names = self.names - else: - result_names = self.names if self.names == other.names else None if self.equals(other): return MultiIndex(levels=[[]] * self.nlevels, @@ -4754,15 +5522,30 @@ def difference(self, other): return MultiIndex.from_tuples(difference, sortorder=0, names=result_names) - def _assert_can_do_setop(self, other): - pass - def astype(self, dtype): - if np.dtype(dtype) != np.object_: + if not is_object_dtype(np.dtype(dtype)): raise TypeError('Setting %s dtype to anything other than object ' 'is not supported' % self.__class__) return self._shallow_copy() + def _convert_can_do_setop(self, other): + result_names = self.names + + if not hasattr(other, 'names'): + if len(other) == 0: + other = MultiIndex(levels=[[]] * self.nlevels, + labels=[[]] * self.nlevels, + verify_integrity=False) + else: + msg = 'other must be a MultiIndex or a list of tuples' + try: + other = MultiIndex.from_tuples(other) + except: + raise TypeError(msg) + else: + result_names = self.names if self.names == other.names else None + return other, result_names + def insert(self, loc, item): """ Make new MultiIndex inserting new item at location @@ -4838,7 +5621,7 @@ def _wrap_joined_index(self, joined, other): @Appender(Index.isin.__doc__) def isin(self, values, level=None): if level 
is None: - return lib.ismember(self._array_values(), set(values)) + return lib.ismember(np.array(self), set(values)) else: num = self._get_level_number(level) levs = self.levels[num] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 920e8aa04aa1f..e0f06e22c431b 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,7 +1,6 @@ # pylint: disable=W0223 -from datetime import datetime -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex from pandas.compat import range, zip import pandas.compat as compat import pandas.core.common as com @@ -10,8 +9,6 @@ is_null_slice, ABCSeries, ABCDataFrame, ABCPanel, is_float, _values_from_object, _infer_fill_value, is_integer) -import pandas.lib as lib - import numpy as np # the supported indexers @@ -200,7 +197,6 @@ def _has_valid_positional_setitem_indexer(self, indexer): return True def _setitem_with_indexer(self, indexer, value): - self._has_valid_setitem_indexer(indexer) # also has the side effect of consolidating in-place @@ -254,7 +250,7 @@ def _setitem_with_indexer(self, indexer, value): # just replacing the block manager here # so the object is the same index = self.obj._get_axis(i) - labels = safe_append_to_index(index, key) + labels = index.insert(len(index),key) self.obj._data = self.obj.reindex_axis(labels, i)._data self.obj._maybe_update_cacher(clear=True) self.obj.is_copy=None @@ -275,10 +271,7 @@ def _setitem_with_indexer(self, indexer, value): # and set inplace if self.ndim == 1: index = self.obj.index - if len(index) == 0: - new_index = Index([indexer]) - else: - new_index = safe_append_to_index(index, indexer) + new_index = index.insert(len(index),indexer) # this preserves dtype of the value new_values = Series([value]).values @@ -486,8 +479,8 @@ def can_do_equal_len(): self.obj[item_labels[indexer[info_axis]]] = value return - if isinstance(value, ABCSeries): - value = self._align_series(indexer, value) + if 
isinstance(value, (ABCSeries, dict)): + value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame): value = self._align_frame(indexer, value) @@ -929,24 +922,6 @@ def _getitem_iterable(self, key, axis=0): labels = self.obj._get_axis(axis) - def _reindex(keys, level=None): - - try: - result = self.obj.reindex_axis(keys, axis=axis, level=level) - except AttributeError: - # Series - if axis != 0: - raise AssertionError('axis must be 0') - return self.obj.reindex(keys, level=level) - - # this is an error as we are trying to find - # keys in a multi-index that don't exist - if isinstance(labels, MultiIndex) and level is not None: - if hasattr(result,'ndim') and not np.prod(result.shape) and len(keys): - raise KeyError("cannot index a multi-index axis with these keys") - - return result - if is_bool_indexer(key): key = check_bool_indexer(labels, key) inds, = key.nonzero() @@ -959,8 +934,9 @@ def _reindex(keys, level=None): # asarray can be unsafe, NumPy strings are weird keyarr = _asarray_tuplesafe(key) - # handle a mixed integer scenario - indexer = labels._convert_list_indexer_for_mixed(keyarr, kind=self.name) + # have the index handle the indexer and possibly return + # an indexer or raising + indexer = labels._convert_list_indexer(keyarr, kind=self.name) if indexer is not None: return self.obj.take(indexer, axis=axis) @@ -971,65 +947,48 @@ def _reindex(keys, level=None): else: level = None - keyarr_is_unique = Index(keyarr).is_unique + # existing labels are unique and indexer are unique + if labels.is_unique and Index(keyarr).is_unique: + + try: + result = self.obj.reindex_axis(keyarr, axis=axis, level=level) - # existing labels are unique and indexer is unique - if labels.is_unique and keyarr_is_unique: - return _reindex(keyarr, level=level) + # this is an error as we are trying to find + # keys in a multi-index that don't exist + if isinstance(labels, MultiIndex) and level is not None: + if hasattr(result,'ndim') and not 
np.prod(result.shape) and len(keyarr): + raise KeyError("cannot index a multi-index axis with these keys") + return result + + except AttributeError: + + # Series + if axis != 0: + raise AssertionError('axis must be 0') + return self.obj.reindex(keyarr, level=level) + + # existing labels are non-unique else: - indexer, missing = labels.get_indexer_non_unique(keyarr) - check = indexer != -1 - result = self.obj.take(indexer[check], axis=axis, - convert=False) - - # need to merge the result labels and the missing labels - if len(missing): - l = np.arange(len(indexer)) - - missing = com._ensure_platform_int(missing) - missing_labels = keyarr.take(missing) - missing_indexer = com._ensure_int64(l[~check]) - cur_labels = result._get_axis(axis).values - cur_indexer = com._ensure_int64(l[check]) - - new_labels = np.empty(tuple([len(indexer)]), dtype=object) - new_labels[cur_indexer] = cur_labels - new_labels[missing_indexer] = missing_labels - - # reindex with the specified axis - ndim = self.obj.ndim - if axis + 1 > ndim: - raise AssertionError("invalid indexing error with " - "non-unique index") - - # a unique indexer - if keyarr_is_unique: - - # see GH5553, make sure we use the right indexer - new_indexer = np.arange(len(indexer)) - new_indexer[cur_indexer] = np.arange( - len(result._get_axis(axis)) - ) - new_indexer[missing_indexer] = -1 - # we have a non_unique selector, need to use the original - # indexer here - else: + # reindex with the specified axis + if axis + 1 > self.obj.ndim: + raise AssertionError("invalid indexing error with " + "non-unique index") - # need to retake to have the same size as the indexer - rindexer = indexer.values - rindexer[~check] = 0 - result = self.obj.take(rindexer, axis=axis, - convert=False) + new_target, indexer, new_indexer = labels._reindex_non_unique(keyarr) - # reset the new indexer to account for the new size - new_indexer = np.arange(len(result)) - new_indexer[~check] = -1 + if new_indexer is not None: + result = 
self.obj.take(indexer[indexer!=-1], axis=axis, + convert=False) result = result._reindex_with_indexers({ - axis: [new_labels, new_indexer] - }, copy=True, allow_dups=True) + axis: [new_target, new_indexer] + }, copy=True, allow_dups=True) + + else: + result = self.obj.take(indexer, axis=axis, + convert=False) return result @@ -1106,8 +1065,9 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): else: objarr = _asarray_tuplesafe(obj) - # If have integer labels, defer to label-based indexing - indexer = labels._convert_list_indexer_for_mixed(objarr, kind=self.name) + # The index may want to handle a list indexer differently + # by returning an indexer or raising + indexer = labels._convert_list_indexer(objarr, kind=self.name) if indexer is not None: return indexer @@ -1628,8 +1588,8 @@ def length_of_indexer(indexer, target=None): if step is None: step = 1 elif step < 0: - step = abs(step) - return (stop - start) / step + step = -step + return (stop - start + step-1) // step elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)): return len(indexer) elif not is_list_like_indexer(indexer): @@ -1720,19 +1680,6 @@ def get_indexer(_i, _idx): return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)]) -def safe_append_to_index(index, key): - """ a safe append to an index, if incorrect type, then catch and recreate - """ - try: - return index.insert(len(index), key) - except: - - # raise here as this is basically an unsafe operation and we want - # it to be obvious that you are doing something wrong - raise ValueError("unsafe appending to index of type {0} with a key " - "{1}".format(index.__class__.__name__, key)) - - def maybe_convert_indices(indices, n): """ if we have negative indicies, translate to postive here if have indicies that are out-of-bounds, raise an IndexError diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 7a16fb2b6b0d7..3395ea360165e 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py 
@@ -294,8 +294,9 @@ def fillna(self, value, limit=None, inplace=False, downcast=None): mask = isnull(self.values) if limit is not None: if self.ndim > 2: - raise NotImplementedError - mask[mask.cumsum(self.ndim-1)>limit]=False + raise NotImplementedError("number of dimensions for 'fillna' " + "is currently limited to 2") + mask[mask.cumsum(self.ndim-1) > limit] = False value = self._try_fill(value) blocks = self.putmask(mask, value, inplace=inplace) @@ -483,16 +484,21 @@ def _try_coerce_and_cast_result(self, result, dtype=None): def _try_fill(self, value): return value - def to_native_types(self, slicer=None, na_rep='', **kwargs): + def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] - values = np.array(values, dtype=object) mask = isnull(values) + + if not self.is_object and not quoting: + values = values.astype(str) + else: + values = np.array(values, dtype='object') + values[mask] = na_rep - return values.tolist() + return values # block actions #### def copy(self, deep=True): @@ -581,7 +587,7 @@ def _is_empty_indexer(indexer): if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) - return all([ isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer ]) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False # empty indexers @@ -868,9 +874,9 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): def get_values(self, dtype=None): return self.values - def diff(self, n): + def diff(self, n, axis=1): """ return block for the diff of the values """ - new_values = com.diff(self.values, n, axis=1) + new_values = com.diff(self.values, n, axis=axis) return [make_block(values=new_values, ndim=self.ndim, fastpath=True, placement=self.mgr_locs)] @@ -1220,32 +1226,34 @@ def _try_cast(self, element): return element def 
to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.', - **kwargs): + quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] - values = np.array(values, dtype=object) mask = isnull(values) - values[mask] = na_rep - + formatter = None if float_format and decimal != '.': formatter = lambda v : (float_format % v).replace('.',decimal,1) elif decimal != '.': formatter = lambda v : ('%g' % v).replace('.',decimal,1) elif float_format: formatter = lambda v : float_format % v + + if formatter is None and not quoting: + values = values.astype(str) else: - formatter = None + values = np.array(values, dtype='object') + values[mask] = na_rep if formatter: imask = (~mask).ravel() values.flat[imask] = np.array( [formatter(val) for val in values.ravel()[imask]]) - return values.tolist() + return values def should_store(self, value): # when inserting a column should not coerce integers to floats @@ -1324,13 +1332,11 @@ def _try_fill(self, value): return value def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments - we are going to compare vs i8, so coerce to floats - repring NaT with np.nan so nans propagate - values is always ndarray like, other may not be """ + """ Coerce values and other to float64, with null values converted to + NaN. 
values is always ndarray-like, other may not be """ def masker(v): mask = isnull(v) - v = v.view('i8').astype('float64') + v = v.astype('float64') v[mask] = np.nan return v @@ -1342,6 +1348,8 @@ def masker(v): other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item() if other == tslib.iNaT: other = np.nan + elif lib.isscalar(other): + other = np.float64(other) else: other = masker(other) @@ -1365,7 +1373,7 @@ def _try_coerce_result(self, result): def should_store(self, value): return issubclass(value.dtype.type, np.timedelta64) - def to_native_types(self, slicer=None, na_rep=None, **kwargs): + def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values @@ -1386,7 +1394,7 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs): rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') for val in values.ravel()[imask]], dtype=object) - return rvalues.tolist() + return rvalues def get_values(self, dtype=None): @@ -1681,10 +1689,11 @@ def _slice(self, slicer): def fillna(self, value, limit=None, inplace=False, downcast=None): # we may need to upcast our fill to match our dtype if limit is not None: - raise NotImplementedError + raise NotImplementedError("specifying a limit for 'fillna' has " + "not been implemented yet") values = self.values if inplace else self.values.copy() - return [self.make_block_same_class(values=values.fillna(fill_value=value, + return [self.make_block_same_class(values=values.fillna(value=value, limit=limit), placement=self.mgr_locs)] @@ -1761,18 +1770,19 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None, ndim=self.ndim, placement=self.mgr_locs) - def to_native_types(self, slicer=None, na_rep='', **kwargs): + def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not 
None: # Categorical is always one dimension values = values[slicer] - values = np.array(values, dtype=object) mask = isnull(values) + values = np.array(values, dtype='object') values[mask] = na_rep - # Blocks.to_native_type returns list of lists, but we are always only a list - return [values.tolist()] + + # we are expected to return a 2-d ndarray + return values.reshape(1,len(values)) class DatetimeBlock(Block): __slots__ = () @@ -1807,16 +1817,20 @@ def _try_operate(self, values): return values.view('i8') def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments - we are going to compare vs i8, so coerce to integer - values is always ndarra like, other may not be """ + """ Coerce values and other to dtype 'i8'. NaN and NaT convert to + the smallest i8, and will correctly round-trip to NaT if converted + back in _try_coerce_result. values is always ndarray-like, other + may not be """ values = values.view('i8') + if is_null_datelike_scalar(other): other = tslib.iNaT elif isinstance(other, datetime): other = lib.Timestamp(other).asm8.view('i8') - else: + elif hasattr(other, 'dtype') and com.is_integer_dtype(other): other = other.view('i8') + else: + other = np.array(other, dtype='i8') return values, other @@ -1848,7 +1862,8 @@ def fillna(self, value, limit=None, value = self._try_fill(value) if limit is not None: if self.ndim > 2: - raise NotImplementedError + raise NotImplementedError("number of dimensions for 'fillna' " + "is currently limited to 2") mask[mask.cumsum(self.ndim-1)>limit]=False np.putmask(values, mask, value) @@ -1857,29 +1872,21 @@ def fillna(self, value, limit=None, fastpath=True, placement=self.mgr_locs)] def to_native_types(self, slicer=None, na_rep=None, date_format=None, - **kwargs): + quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: values = values[:, slicer] - mask = isnull(values) - - rvalues = np.empty(values.shape, 
dtype=object) - if na_rep is None: - na_rep = 'NaT' - rvalues[mask] = na_rep - imask = (~mask).ravel() - if date_format is None: - date_formatter = lambda x: Timestamp(x)._repr_base - else: - date_formatter = lambda x: Timestamp(x).strftime(date_format) - - rvalues.flat[imask] = np.array([date_formatter(val) for val in - values.ravel()[imask]], dtype=object) + from pandas.core.format import _get_format_datetime64_from_values + format = _get_format_datetime64_from_values(values, date_format) - return rvalues.tolist() + result = tslib.format_array_from_datetime(values.view('i8').ravel(), + tz=None, + format=format, + na_rep=na_rep).reshape(values.shape) + return result def should_store(self, value): return issubclass(value.dtype.type, np.datetime64) @@ -2011,7 +2018,8 @@ def interpolate(self, method='pad', axis=0, inplace=False, def fillna(self, value, limit=None, inplace=False, downcast=None): # we may need to upcast our fill to match our dtype if limit is not None: - raise NotImplementedError + raise NotImplementedError("specifying a limit for 'fillna' has " + "not been implemented yet") if issubclass(self.dtype.type, np.floating): value = float(value) values = self.values if inplace else self.values.copy() @@ -3126,7 +3134,6 @@ def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, pandas-indexer with -1's only. 
""" - if indexer is None: if new_axis is self.axes[axis] and not copy: return self @@ -3138,10 +3145,9 @@ def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, self._consolidate_inplace() - # trying to reindex on an axis with duplicates - if (not allow_dups and not self.axes[axis].is_unique - and len(indexer)): - raise ValueError("cannot reindex from a duplicate axis") + # some axes don't allow reindexing with dups + if not allow_dups: + self.axes[axis]._can_reindex(indexer) if axis >= self.ndim: raise IndexError("Requested axis not found in manager") @@ -3310,8 +3316,20 @@ def equals(self, other): return False self._consolidate_inplace() other._consolidate_inplace() + if len(self.blocks) != len(other.blocks): + return False + + # canonicalize block order, using a tuple combining the type + # name and then mgr_locs because there might be unconsolidated + # blocks (say, Categorical) which can only be distinguished by + # the iteration order + def canonicalize(block): + return (block.dtype.name, block.mgr_locs.as_array.tolist()) + + self_blocks = sorted(self.blocks, key=canonicalize) + other_blocks = sorted(other.blocks, key=canonicalize) return all(block.equals(oblock) for block, oblock in - zip(self.blocks, other.blocks)) + zip(self_blocks, other_blocks)) class SingleBlockManager(BlockManager): @@ -3999,7 +4017,8 @@ def _putmask_smart(v, m, n): try: nn = n[m] nn_at = nn.astype(v.dtype) - if (nn == nn_at).all(): + comp = (nn == nn_at) + if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index f68f4f9037d97..0df160618b7c3 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1,7 +1,5 @@ -import sys import itertools import functools - import numpy as np try: @@ -10,7 +8,6 @@ except ImportError: # pragma: no cover _USE_BOTTLENECK = False -import pandas.core.common as com import pandas.hashtable as _hash from pandas import compat, lib, algos, tslib from 
pandas.compat import builtins @@ -23,7 +20,7 @@ is_complex_dtype, is_integer_dtype, is_bool_dtype, is_object_dtype, is_datetime64_dtype, is_timedelta64_dtype, - is_datetime_or_timedelta_dtype, + is_datetime_or_timedelta_dtype, _get_dtype, is_int_or_datetime_dtype, is_any_int_dtype) @@ -257,8 +254,16 @@ def nansum(values, axis=None, skipna=True): @bottleneck_switch() def nanmean(values, axis=None, skipna=True): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) - the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_max)) - count = _get_counts(mask, axis) + + dtype_sum = dtype_max + dtype_count = np.float64 + if is_integer_dtype(dtype): + dtype_sum = np.float64 + elif is_float_dtype(dtype): + dtype_sum = dtype + dtype_count = dtype + count = _get_counts(mask, axis, dtype=dtype_count) + the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, 'ndim', False): the_mean = the_sum / count @@ -285,6 +290,7 @@ def get_median(x): if values.dtype != np.float64: values = values.astype('f8') + values[mask] = np.nan if axis is None: values = values.ravel() @@ -559,15 +565,16 @@ def _maybe_arg_null_out(result, axis, mask, skipna): return result -def _get_counts(mask, axis): +def _get_counts(mask, axis, dtype=float): + dtype = _get_dtype(dtype) if axis is None: - return float(mask.size - mask.sum()) + return dtype.type(mask.size - mask.sum()) count = mask.shape[axis] - mask.sum(axis) try: - return count.astype(float) + return count.astype(dtype) except AttributeError: - return np.array(count, dtype=float) + return np.array(count, dtype=dtype) def _maybe_null_out(result, axis, mask): diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 954d2c8a77326..0b62eb1e53ddb 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -213,7 +213,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None, Parameters ---------- - flex_arith_method : function (optional) + flex_arith_method : function factory for 
special arithmetic methods, with op string: f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs) radd_func : function (optional) @@ -571,7 +571,11 @@ def na_op(x, y): return result - def wrapper(self, other): + def wrapper(self, other, axis=None): + # Validate the axis parameter + if axis is not None: + self._get_axis_number(axis) + if isinstance(other, pd.Series): name = _maybe_match_name(self, other) if len(self) != len(other): @@ -594,20 +598,26 @@ def wrapper(self, other): mask = isnull(self) - values = self.get_values() - other = _index.convert_scalar(values,_values_from_object(other)) + if com.is_categorical_dtype(self): + # cats are a special case as get_values() would return an ndarray, which would then + # not take categories ordering into account + # we can go directly to op, as the na_op would just test again and dispatch to it. + res = op(self.values, other) + else: + values = self.get_values() + other = _index.convert_scalar(values,_values_from_object(other)) - if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)): - values = values.view('i8') + if issubclass(values.dtype.type, (np.datetime64, np.timedelta64)): + values = values.view('i8') - # scalars - res = na_op(values, other) - if np.isscalar(res): - raise TypeError('Could not compare %s type with Series' - % type(other)) + # scalars + res = na_op(values, other) + if np.isscalar(res): + raise TypeError('Could not compare %s type with Series' + % type(other)) - # always return a full value series here - res = _values_from_object(res) + # always return a full value series here + res = _values_from_object(res) res = pd.Series(res, index=self.index, name=self.name, dtype='bool') @@ -693,12 +703,35 @@ def _radd_compat(left, right): return output +_op_descriptions = {'add': {'op': '+', 'desc': 'Addition', 'reversed': False, 'reverse': 'radd'}, + 'sub': {'op': '-', 'desc': 'Subtraction', 'reversed': False, 'reverse': 'rsub'}, + 'mul': {'op': '*', 'desc': 'Multiplication', 
'reversed': False, 'reverse': 'rmul'}, + 'mod': {'op': '%', 'desc': 'Modulo', 'reversed': False, 'reverse': 'rmod'}, + 'pow': {'op': '**', 'desc': 'Exponential power', 'reversed': False, 'reverse': 'rpow'}, + 'truediv': {'op': '/', 'desc': 'Floating division', 'reversed': False, 'reverse': 'rtruediv'}, + 'floordiv': {'op': '//', 'desc': 'Integer division', 'reversed': False, 'reverse': 'rfloordiv'}} + +_op_names = list(_op_descriptions.keys()) +for k in _op_names: + reverse_op = _op_descriptions[k]['reverse'] + _op_descriptions[reverse_op] = _op_descriptions[k].copy() + _op_descriptions[reverse_op]['reversed'] = True + _op_descriptions[reverse_op]['reverse'] = k def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs): + op_name = name.replace('__', '') + op_desc = _op_descriptions[op_name] + if op_desc['reversed']: + equiv = 'other ' + op_desc['op'] + ' series' + else: + equiv = 'series ' + op_desc['op'] + ' other' + doc = """ - Binary operator %s with support to substitute a fill_value for missing data - in one of the inputs + %s of series and other, element-wise (binary operator `%s`). + + Equivalent to ``%s``, but with support to substitute a fill_value for + missing data in one of the inputs. Parameters ---------- @@ -713,7 +746,11 @@ def _flex_method_SERIES(op, name, str_rep, default_axis=None, Returns ------- result : Series - """ % name + + See also + -------- + Series.%s + """ % (op_desc['desc'], op_name, equiv, op_desc['reverse']) @Appender(doc) def flex_wrapper(self, other, level=None, fill_value=None, axis=0): @@ -803,7 +840,48 @@ def na_op(x, y): return result - @Appender(_arith_doc_FRAME % name) + if name in _op_descriptions: + op_name = name.replace('__', '') + op_desc = _op_descriptions[op_name] + if op_desc['reversed']: + equiv = 'other ' + op_desc['op'] + ' dataframe' + else: + equiv = 'dataframe ' + op_desc['op'] + ' other' + + doc = """ + %s of dataframe and other, element-wise (binary operator `%s`). 
+ + Equivalent to ``%s``, but with support to substitute a fill_value for + missing data in one of the inputs. + + Parameters + ---------- + other : Series, DataFrame, or constant + axis : {0, 1, 'index', 'columns'} + For Series input, axis to match Series index on + fill_value : None or float value, default None + Fill missing (NaN) values with this value. If both DataFrame locations are + missing, the result will be missing + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + + Notes + ----- + Mismatched indices will be unioned together + + Returns + ------- + result : DataFrame + + See also + -------- + DataFrame.%s + """ % (op_desc['desc'], op_name, equiv, op_desc['reverse']) + else: + doc = _arith_doc_FRAME % name + + @Appender(doc) def f(self, other, axis=default_axis, level=None, fill_value=None): if isinstance(other, pd.DataFrame): # Another DataFrame return self._combine_frame(other, na_op, fill_value, level) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 7df23a54c737d..580510829baff 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -6,7 +6,6 @@ from pandas.compat import (map, zip, range, lrange, lmap, u, OrderedDict, OrderedDefaultdict) from pandas import compat -import sys import warnings import numpy as np from pandas.core.common import (PandasError, _try_sort, _default_index, @@ -27,14 +26,15 @@ deprecate_kwarg) import pandas.core.common as com import pandas.core.ops as ops -import pandas.core.nanops as nanops import pandas.computation.expressions as expressions from pandas import lib +from pandas.core.ops import _op_descriptions + _shared_doc_kwargs = dict( axes='items, major_axis, minor_axis', klass="Panel", - axes_single_arg="{0,1,2,'items','major_axis','minor_axis'}") + axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}") _shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one" "of\n %s" % _shared_doc_kwargs['axes_single_arg']) @@ 
-239,7 +239,8 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' - + dtype : dtype, default None + Data type to force, otherwise infer Returns ------- @@ -1161,6 +1162,14 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, def transpose(self, *args, **kwargs): return super(Panel, self).transpose(*args, **kwargs) + @Appender(_shared_docs['fillna'] % _shared_doc_kwargs) + def fillna(self, value=None, method=None, axis=None, inplace=False, + limit=None, downcast=None, **kwargs): + return super(Panel, self).fillna(value=value, method=method, + axis=axis, inplace=inplace, + limit=limit, downcast=downcast, + **kwargs) + def count(self, axis='major'): """ Return number of observations over requested axis. @@ -1184,13 +1193,17 @@ def count(self, axis='major'): @deprecate_kwarg(old_arg_name='lags', new_arg_name='periods') def shift(self, periods=1, freq=None, axis='major'): """ - Shift major or minor axis by specified number of leads/lags. Drops - periods right now compared with DataFrame.shift + Shift index by desired number of periods with an optional time freq. + The shifted data will not include the dropped periods and the + shifted axis will be smaller than the original. 
This is different + from the behavior of DataFrame.shift() Parameters ---------- - lags : int - axis : {'major', 'minor'} + periods : int + Number of periods to move, can be positive or negative + freq : DateOffset, timedelta, or time rule string, optional + axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- @@ -1199,9 +1212,6 @@ def shift(self, periods=1, freq=None, axis='major'): if freq: return self.tshift(periods, freq, axis=axis) - if axis == 'items': - raise ValueError('Invalid axis') - return super(Panel, self).slice_shift(periods, axis=axis) def tshift(self, periods=1, freq=None, axis='major', **kwds): @@ -1374,6 +1384,7 @@ def _homogenize_dict(self, frames, intersect=True, dtype=None): result[key] = None axes_dict['data'] = result + axes_dict['dtype'] = dtype return axes_dict @staticmethod @@ -1428,7 +1439,7 @@ def _add_aggregate_operations(cls, use_numexpr=True): ---------- other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """ axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """ -Axis to broadcast over + Axis to broadcast over Returns ------- @@ -1450,8 +1461,36 @@ def na_op(x, y): result = com._fill_zeros(result, x, y, name, fill_zeros) return result - @Substitution(name) - @Appender(_agg_doc) + if name in _op_descriptions: + op_name = name.replace('__', '') + op_desc = _op_descriptions[op_name] + if op_desc['reversed']: + equiv = 'other ' + op_desc['op'] + ' panel' + else: + equiv = 'panel ' + op_desc['op'] + ' other' + + _op_doc = """ + %%s of series and other, element-wise (binary operator `%%s`). + Equivalent to ``%%s``. 
+ + Parameters + ---------- + other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """ + axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """ + Axis to broadcast over + + Returns + ------- + """ + cls.__name__ + """ + + See also + -------- + """ + cls.__name__ + ".%s\n" + doc = _op_doc % (op_desc['desc'], op_name, equiv, op_desc['reverse']) + else: + doc = _agg_doc % name + + @Appender(doc) def f(self, other, axis=0): return self._combine(other, na_op, axis=axis) f.__name__ = name diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py index ec0a313ff5767..35e6412efc760 100644 --- a/pandas/core/panelnd.py +++ b/pandas/core/panelnd.py @@ -1,6 +1,5 @@ """ Factory methods to create N-D panels """ -import pandas.lib as lib from pandas.compat import zip import pandas.compat as compat @@ -99,7 +98,7 @@ def _combine_with_constructor(self, other, func): for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter', 'dropna', 'shift']: def func(self, *args, **kwargs): - raise NotImplementedError + raise NotImplementedError("this operation is not supported") setattr(klass, f, func) # add the aggregate operations diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 291a73778197a..3225b4aa33ac2 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -9,9 +9,12 @@ from pandas.core.series import Series from pandas.core.frame import DataFrame +from pandas.core.sparse import SparseDataFrame, SparseSeries +from pandas.sparse.array import SparseArray +from pandas._sparse import IntIndex + from pandas.core.categorical import Categorical -from pandas.core.common import (notnull, _ensure_platform_int, _maybe_promote, - isnull) +from pandas.core.common import notnull, _ensure_platform_int, _maybe_promote from pandas.core.groupby import get_group_index, _compress_group_index import pandas.core.common as com @@ -608,7 +611,7 @@ def _convert_level_number(level_num, columns): new_data[key] = value_slice.ravel() if 
len(drop_cols) > 0: - new_columns = new_columns - drop_cols + new_columns = new_columns.difference(drop_cols) N = len(this) @@ -932,7 +935,7 @@ def melt_stub(df, stub, i, j): return newdf.set_index([i, j]) def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, - columns=None): + columns=None, sparse=False): """ Convert categorical variable into dummy/indicator variables @@ -953,6 +956,8 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object` or `category` dtype will be converted. + sparse : bool, default False + Whether the returned DataFrame should be sparse or not. Returns ------- @@ -1039,16 +1044,17 @@ def check_len(item, name): with_dummies = [result] for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep): - dummy = _get_dummies_1d(data[col], prefix=pre, - prefix_sep=sep, dummy_na=dummy_na) + dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep, + dummy_na=dummy_na, sparse=sparse) with_dummies.append(dummy) result = concat(with_dummies, axis=1) else: - result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na) + result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na, + sparse=sparse) return result -def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False): +def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False, sparse=False): # Series avoids inconsistent NaN handling cat = Categorical.from_array(Series(data), ordered=True) levels = cat.categories @@ -1059,19 +1065,17 @@ def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False): index = data.index else: index = np.arange(len(data)) - return DataFrame(index=index) - - number_of_cols = len(levels) - if dummy_na: - number_of_cols += 1 - - dummy_mat = np.eye(number_of_cols).take(cat.codes, axis=0) + if not sparse: + return DataFrame(index=index) + else: + return SparseDataFrame(index=index) + codes = cat.codes.copy() if dummy_na: + 
codes[codes == -1] = len(cat.categories) levels = np.append(cat.categories, np.nan) - else: - # reset NaN GH4446 - dummy_mat[cat.codes == -1] = 0 + + number_of_cols = len(levels) if prefix is not None: dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) @@ -1084,7 +1088,31 @@ def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False): else: index = None - return DataFrame(dummy_mat, index=index, columns=dummy_cols) + if sparse: + sparse_series = {} + N = len(data) + sp_indices = [ [] for _ in range(len(dummy_cols)) ] + for ndx, code in enumerate(codes): + if code == -1: + # Blank entries if not dummy_na and code == -1, #GH4446 + continue + sp_indices[code].append(ndx) + + for col, ixs in zip(dummy_cols, sp_indices): + sarr = SparseArray(np.ones(len(ixs)), sparse_index=IntIndex(N, ixs), + fill_value=0) + sparse_series[col] = SparseSeries(data=sarr, index=index) + + return SparseDataFrame(sparse_series, index=index, columns=dummy_cols) + + else: + dummy_mat = np.eye(number_of_cols).take(codes, axis=0) + + if not dummy_na: + # reset NaN GH4446 + dummy_mat[codes == -1] = 0 + + return DataFrame(dummy_mat, index=index, columns=dummy_cols) def make_axis_dummies(frame, axis='minor', transform=None): diff --git a/pandas/core/series.py b/pandas/core/series.py index 68f3a6032402f..c54bd96f64c73 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -19,8 +19,8 @@ is_list_like, _values_from_object, _possibly_cast_to_datetime, _possibly_castable, _possibly_convert_platform, _try_sort, - ABCSparseArray, _maybe_match_name, _coerce_to_dtype, - _ensure_object, SettingWithCopyError, + ABCSparseArray, _maybe_match_name, + _coerce_to_dtype, SettingWithCopyError, _maybe_box_datetimelike, ABCDataFrame) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, _ensure_index) @@ -28,7 +28,6 @@ from pandas.core import generic, base from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical, CategoricalAccessor -from 
pandas.core.strings import StringMethods from pandas.tseries.common import (maybe_to_datetimelike, CombinedDatetimelikeProperties) from pandas.tseries.index import DatetimeIndex @@ -60,7 +59,7 @@ _shared_doc_kwargs = dict( axes='index', klass='Series', - axes_single_arg="{0,'index'}", + axes_single_arg="{0, 'index'}", inplace="""inplace : boolean, default False If True, performs operation inplace and returns None.""", duplicated='Series' @@ -141,7 +140,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None, dtype = self._validate_dtype(dtype) if isinstance(data, MultiIndex): - raise NotImplementedError + raise NotImplementedError("initializing a Series from a " + "MultiIndex is not supported") elif isinstance(data, Index): # need to copy to avoid aliasing issues if name is None: @@ -236,6 +236,11 @@ def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, def _constructor(self): return Series + @property + def _constructor_expanddim(self): + from pandas.core.frame import DataFrame + return DataFrame + # types @property def _can_hold_na(self): @@ -1047,11 +1052,10 @@ def to_frame(self, name=None): ------- data_frame : DataFrame """ - from pandas.core.frame import DataFrame if name is None: - df = DataFrame(self) + df = self._constructor_expanddim(self) else: - df = DataFrame({name: self}) + df = self._constructor_expanddim({name: self}) return df @@ -1438,7 +1442,7 @@ def searchsorted(self, v, side='left', sorter=None): def append(self, to_append, verify_integrity=False): """ - Concatenate two or more Series. The indexes must not overlap + Concatenate two or more Series. 
Parameters ---------- @@ -1504,7 +1508,12 @@ def _binop(self, other, func, level=None, fill_value=None): result = func(this_vals, other_vals) name = _maybe_match_name(self, other) - return self._constructor(result, index=new_index).__finalize__(self) + result = self._constructor(result, index=new_index, name=name) + result = result.__finalize__(self) + if name is None: + # When name is None, __finalize__ overwrites current name + result.name = None + return result def combine(self, other, func, fill_value=nan): """ @@ -2139,6 +2148,19 @@ def rename(self, index=None, **kwargs): def reindex(self, index=None, **kwargs): return super(Series, self).reindex(index=index, **kwargs) + @Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs) + def fillna(self, value=None, method=None, axis=None, inplace=False, + limit=None, downcast=None, **kwargs): + return super(Series, self).fillna(value=value, method=method, + axis=axis, inplace=inplace, + limit=limit, downcast=downcast, + **kwargs) + + @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) + def shift(self, periods=1, freq=None, axis=0, **kwargs): + return super(Series, self).shift(periods=periods, freq=freq, + axis=axis, **kwargs) + def reindex_axis(self, labels, axis=0, **kwargs): """ for compatibility with higher dims """ if axis != 0: @@ -2494,21 +2516,6 @@ def to_period(self, freq=None, copy=True): return self._constructor(new_values, index=new_index).__finalize__(self) - #------------------------------------------------------------------------------ - # string methods - - def _make_str_accessor(self): - if not com.is_object_dtype(self.dtype): - # this really should exclude all series with any non-string values, - # but that isn't practical for performance reasons until we have a - # str dtype (GH 9343) - raise AttributeError("Can only use .str accessor with string " - "values, which use np.object_ dtype in " - "pandas") - return StringMethods(self) - - str = base.AccessorProperty(StringMethods, 
_make_str_accessor) - #------------------------------------------------------------------------------ # Datetimelike delegation methods @@ -2532,6 +2539,21 @@ def _make_cat_accessor(self): cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor) + def _dir_deletions(self): + return self._accessors + + def _dir_additions(self): + rv = set() + # these accessors are mutually exclusive, so break loop when one exists + for accessor in self._accessors: + try: + getattr(self, accessor) + rv.add(accessor) + break + except AttributeError: + pass + return rv + Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0}) Series._add_numeric_operations() @@ -2605,8 +2627,9 @@ def _try_cast(arr, take_fast_path): # GH #846 if isinstance(data, (np.ndarray, Index, Series)): - subarr = np.array(data, copy=False) + if dtype is not None: + subarr = np.array(data, copy=False) # possibility of nan -> garbage if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype): diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 97f6752fb5851..f4ac0166cf44b 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1,9 +1,9 @@ import numpy as np from pandas.compat import zip -from pandas.core.common import isnull, _values_from_object +from pandas.core.common import isnull, _values_from_object, is_bool_dtype import pandas.compat as compat -from pandas.util.decorators import Appender +from pandas.util.decorators import Appender, deprecate_kwarg import re import pandas.lib as lib import warnings @@ -27,19 +27,42 @@ def _get_array_list(arr, others): def str_cat(arr, others=None, sep=None, na_rep=None): """ - Concatenate arrays of strings with given separator + Concatenate strings in the Series/Index with given separator. 
Parameters ---------- - arr : list or array-like - others : list or array, or list of arrays + others : list-like, or list of list-likes + If None, returns str concatenating strings of the Series sep : string or None, default None na_rep : string or None, default None If None, an NA in any array will propagate Returns ------- - concat : array + concat : Series/Index of objects or str + + Examples + -------- + If ``others`` is specified, corresponding values are + concatenated with the separator. Result will be a Series of strings. + + >>> Series(['a', 'b', 'c']).str.cat(['A', 'B', 'C'], sep=',') + 0 a,A + 1 b,B + 2 c,C + dtype: object + + Otherwise, strings in the Series are concatenated. Result will be a string. + + >>> Series(['a', 'b', 'c']).str.cat(sep=',') + 'a,b,c' + + Also, you can pass a list of list-likes. + + >>> Series(['a', 'b']).str.cat([['x', 'y'], ['1', '2']], sep=',') + 0 a,x,1 + 1 b,y,2 + dtype: object """ if sep is None: sep = '' @@ -130,18 +153,17 @@ def g(x): def str_count(arr, pat, flags=0): """ - Count occurrences of pattern in each string + Count occurrences of pattern in each string of the Series/Index. Parameters ---------- - arr : list or array-like pat : string, valid regular expression flags : int, default 0 (no flags) re module flags, e.g. re.IGNORECASE Returns ------- - counts : arrays + counts : Series/Index of integer values """ regex = re.compile(pat, flags=flags) f = lambda x: len(regex.findall(x)) @@ -150,7 +172,8 @@ def str_count(arr, pat, flags=0): def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): """ - Check whether given pattern is contained in each string in the array + Return boolean Series/``array`` whether given pattern/regex is + contained in each string in the Series/Index. 
Parameters ---------- @@ -166,7 +189,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): Returns ------- - Series of boolean values + contained : Series/array of boolean values See Also -------- @@ -197,8 +220,9 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): def str_startswith(arr, pat, na=np.nan): """ - Return boolean array indicating whether each string starts with passed - pattern + Return boolean Series/``array`` indicating whether each string in the + Series/Index starts with passed pattern. Equivalent to + :meth:`str.startswith`. Parameters ---------- @@ -208,7 +232,7 @@ def str_startswith(arr, pat, na=np.nan): Returns ------- - startswith : array (boolean) + startswith : Series/array of boolean values """ f = lambda x: x.startswith(pat) return _na_map(f, arr, na, dtype=bool) @@ -216,8 +240,9 @@ def str_startswith(arr, pat, na=np.nan): def str_endswith(arr, pat, na=np.nan): """ - Return boolean array indicating whether each string ends with passed - pattern + Return boolean Series indicating whether each string in the + Series/Index ends with passed pattern. Equivalent to + :meth:`str.endswith`. Parameters ---------- @@ -227,7 +252,7 @@ def str_endswith(arr, pat, na=np.nan): Returns ------- - endswith : array (boolean) + endswith : Series/array of boolean values """ f = lambda x: x.endswith(pat) return _na_map(f, arr, na, dtype=bool) @@ -235,7 +260,9 @@ def str_endswith(arr, pat, na=np.nan): def str_replace(arr, pat, repl, n=-1, case=True, flags=0): """ - Replace + Replace occurrences of pattern/regex in the Series/Index with + some other string. Equivalent to :meth:`str.replace` or + :func:`re.sub`. 
Parameters ---------- @@ -252,7 +279,7 @@ def str_replace(arr, pat, repl, n=-1, case=True, flags=0): Returns ------- - replaced : array + replaced : Series/Index of objects """ use_re = not case or len(pat) > 1 or flags @@ -272,7 +299,8 @@ def f(x): def str_repeat(arr, repeats): """ - Duplicate each string in the array by indicated number of times + Duplicate each string in the Series/Index by indicated number + of times. Parameters ---------- @@ -281,7 +309,7 @@ def str_repeat(arr, repeats): Returns ------- - repeated : array + repeated : Series/Index of objects """ if np.isscalar(repeats): def rep(x): @@ -305,7 +333,8 @@ def rep(x, r): def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False): """ - Deprecated: Find groups in each string using passed regular expression. + Deprecated: Find groups in each string in the Series/Index + using passed regular expression. If as_indexer=True, determine if each string matches a regular expression. Parameters @@ -322,9 +351,9 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False): Returns ------- - Series of boolean values + Series/array of boolean values if as_indexer=True - Series of tuples + Series/Index of tuples if as_indexer=False, default but deprecated See Also @@ -359,6 +388,7 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False): if (not as_indexer) and regex.groups > 0: dtype = object + def f(x): m = regex.match(x) if m: @@ -382,7 +412,8 @@ def _get_single_group_name(rx): def str_extract(arr, pat, flags=0): """ - Find groups in each string using passed regular expression + Find groups in each string in the Series using passed regular + expression. 
Parameters ---------- @@ -435,12 +466,14 @@ def str_extract(arr, pat, flags=0): """ from pandas.core.series import Series from pandas.core.frame import DataFrame + from pandas.core.index import Index regex = re.compile(pat, flags=flags) # just to be safe, check this if regex.groups == 0: raise ValueError("This pattern contains no groups to capture.") empty_row = [np.nan]*regex.groups + def f(x): if not isinstance(x, compat.string_types): return empty_row @@ -449,11 +482,14 @@ def f(x): return [np.nan if item is None else item for item in m.groups()] else: return empty_row + if regex.groups == 1: - result = Series([f(val)[0] for val in arr], - name=_get_single_group_name(regex), - index=arr.index, dtype=object) + result = np.array([f(val)[0] for val in arr], dtype=object) + name = _get_single_group_name(regex) else: + if isinstance(arr, Index): + raise ValueError("only one regex group is supported with Index") + name = None names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) columns = [names.get(1 + i, i) for i in range(regex.groups)] if arr.empty: @@ -463,12 +499,22 @@ def f(x): columns=columns, index=arr.index, dtype=object) - return result + return result, name def str_get_dummies(arr, sep='|'): """ - Split each string by sep and return a frame of dummy/indicator variables. + Split each string in the Series by sep and return a frame of + dummy/indicator variables. + + Parameters + ---------- + sep : string, default "|" + String to split on. + + Returns + ------- + dummies : DataFrame Examples -------- @@ -478,16 +524,22 @@ def str_get_dummies(arr, sep='|'): 1 1 0 0 2 1 0 1 - >>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies() + >>> Series(['a|b', np.nan, 'a|c']).str.get_dummies() a b c 0 1 1 0 1 0 0 0 2 1 0 1 - See also ``pd.get_dummies``. 
- + See Also + -------- + pandas.get_dummies """ from pandas.core.frame import DataFrame + from pandas.core.index import Index + + # GH9980, Index.str does not support get_dummies() as it returns a frame + if isinstance(arr, Index): + raise TypeError("get_dummies is not supported for string methods on Index") # TODO remove this hack? arr = arr.fillna('') @@ -511,7 +563,8 @@ def str_get_dummies(arr, sep='|'): def str_join(arr, sep): """ - Join lists contained as elements in array, a la str.join + Join lists contained as elements in the Series/Index with + passed delimiter. Equivalent to :meth:`str.join`. Parameters ---------- @@ -520,14 +573,15 @@ def str_join(arr, sep): Returns ------- - joined : array + joined : Series/Index of objects """ return _na_map(sep.join, arr) def str_findall(arr, pat, flags=0): """ - Find all occurrences of pattern or regular expression + Find all occurrences of pattern or regular expression in the + Series/Index. Equivalent to :func:`re.findall`. Parameters ---------- @@ -538,7 +592,7 @@ def str_findall(arr, pat, flags=0): Returns ------- - matches : array + matches : Series/Index of lists """ regex = re.compile(pat, flags=flags) return _na_map(regex.findall, arr) @@ -546,8 +600,8 @@ def str_findall(arr, pat, flags=0): def str_find(arr, sub, start=0, end=None, side='left'): """ - Return indexes in each strings where the substring is - fully contained between [start:end]. Return -1 on failure. + Return indexes in each strings in the Series/Index where the + substring is fully contained between [start:end]. Return -1 on failure. 
Parameters ---------- @@ -562,7 +616,7 @@ def str_find(arr, sub, start=0, end=None, side='left'): Returns ------- - found : array + found : Series/Index of integer values """ if not isinstance(sub, compat.string_types): @@ -584,13 +638,33 @@ def str_find(arr, sub, start=0, end=None, side='left'): return _na_map(f, arr, dtype=int) +def str_index(arr, sub, start=0, end=None, side='left'): + if not isinstance(sub, compat.string_types): + msg = 'expected a string object, not {0}' + raise TypeError(msg.format(type(sub).__name__)) + + if side == 'left': + method = 'index' + elif side == 'right': + method = 'rindex' + else: # pragma: no cover + raise ValueError('Invalid side') + + if end is None: + f = lambda x: getattr(x, method)(sub, start) + else: + f = lambda x: getattr(x, method)(sub, start, end) + + return _na_map(f, arr, dtype=int) + + def str_pad(arr, width, side='left', fillchar=' '): """ - Pad strings with an additional character + Pad strings in the Series/Index with an additional character to + specified side. Parameters ---------- - arr : list or array-like width : int Minimum width of resulting string; additional characters will be filled with spaces @@ -600,7 +674,7 @@ def str_pad(arr, width, side='left', fillchar=' '): Returns ------- - padded : array + padded : Series/Index of objects """ if not isinstance(fillchar, compat.string_types): @@ -622,33 +696,26 @@ def str_pad(arr, width, side='left', fillchar=' '): return _na_map(f, arr) -def str_split(arr, pat=None, n=None, return_type='series'): +def str_split(arr, pat=None, n=None): """ - Split each string (a la re.split) in array by given pattern, propagating NA - values + Split each string (a la re.split) in the Series/Index by given + pattern, propagating NA values. Equivalent to :meth:`str.split`. Parameters ---------- pat : string, default None String or regular expression to split on. 
If None, splits on whitespace - n : int, default None (all) - return_type : {'series', 'frame'}, default 'series - If frame, returns a DataFrame (elements are strings) - If series, returns an Series (elements are lists of strings). - - Notes - ----- - Both 0 and -1 will be interpreted as return all splits + n : int, default -1 (all) + None, 0 and -1 will be interpreted as return all splits + expand : bool, default False + * If True, return DataFrame/MultiIndex expanding dimensionality. + * If False, return Series/Index. + return_type : deprecated, use `expand` Returns ------- - split : array + split : Series/Index or DataFrame/MultiIndex of objects """ - from pandas.core.series import Series - from pandas.core.frame import DataFrame - - if return_type not in ('series', 'frame'): - raise ValueError("return_type must be {'series', 'frame'}") if pat is None: if n is None or n == 0: n = -1 @@ -663,16 +730,13 @@ def str_split(arr, pat=None, n=None, return_type='series'): n = 0 regex = re.compile(pat) f = lambda x: regex.split(x, maxsplit=n) - if return_type == 'frame': - res = DataFrame((Series(x) for x in _na_map(f, arr)), index=arr.index) - else: - res = _na_map(f, arr) + res = _na_map(f, arr) return res def str_slice(arr, start=None, stop=None, step=None): """ - Slice substrings from each element in array + Slice substrings from each element in the Series/Index Parameters ---------- @@ -682,7 +746,7 @@ def str_slice(arr, start=None, stop=None, step=None): Returns ------- - sliced : array + sliced : Series/Index of objects """ obj = slice(start, stop, step) f = lambda x: x[obj] @@ -691,17 +755,19 @@ def str_slice(arr, start=None, stop=None, step=None): def str_slice_replace(arr, start=None, stop=None, repl=None): """ - Replace a slice of each string with another string. + Replace a slice of each string in the Series/Index with another + string. 
Parameters ---------- start : int or None stop : int or None repl : str or None + String for replacement Returns ------- - replaced : array + replaced : Series/Index of objects """ if repl is None: repl = '' @@ -721,96 +787,78 @@ def f(x): return _na_map(f, arr) -def str_strip(arr, to_strip=None): - """ - Strip whitespace (including newlines) from each string in the array - - Parameters - ---------- - to_strip : str or unicode - - Returns - ------- - stripped : array - """ - return _na_map(lambda x: x.strip(to_strip), arr) - - -def str_lstrip(arr, to_strip=None): - """ - Strip whitespace (including newlines) from left side of each string in the - array - - Parameters - ---------- - to_strip : str or unicode - - Returns - ------- - stripped : array - """ - return _na_map(lambda x: x.lstrip(to_strip), arr) - - -def str_rstrip(arr, to_strip=None): +def str_strip(arr, to_strip=None, side='both'): """ - Strip whitespace (including newlines) from right side of each string in the - array + Strip whitespace (including newlines) from each string in the + Series/Index. Parameters ---------- to_strip : str or unicode + side : {'left', 'right', 'both'}, default 'both' Returns ------- - stripped : array + stripped : Series/Index of objects """ - return _na_map(lambda x: x.rstrip(to_strip), arr) + if side == 'both': + f = lambda x: x.strip(to_strip) + elif side == 'left': + f = lambda x: x.lstrip(to_strip) + elif side == 'right': + f = lambda x: x.rstrip(to_strip) + else: # pragma: no cover + raise ValueError('Invalid side') + return _na_map(f, arr) def str_wrap(arr, width, **kwargs): - """ - Wrap long strings to be formatted in paragraphs + r""" + Wrap long strings in the Series/Index to be formatted in + paragraphs with length less than a given width. + + This method has the same keyword parameters and defaults as + :class:`textwrap.TextWrapper`. 
Parameters ---------- - Same keyword parameters and defaults as :class:`textwrap.TextWrapper` width : int Maximum line-width expand_tabs : bool, optional If true, tab characters will be expanded to spaces (default: True) replace_whitespace : bool, optional - If true, each whitespace character (as defined by string.whitespace) remaining - after tab expansion will be replaced by a single space (default: True) + If true, each whitespace character (as defined by string.whitespace) + remaining after tab expansion will be replaced by a single space + (default: True) drop_whitespace : bool, optional - If true, whitespace that, after wrapping, happens to end up at the beginning - or end of a line is dropped (default: True) + If true, whitespace that, after wrapping, happens to end up at the + beginning or end of a line is dropped (default: True) break_long_words : bool, optional - If true, then words longer than width will be broken in order to ensure that - no lines are longer than width. If it is false, long words will not be broken, - and some lines may be longer than width. (default: True) + If true, then words longer than width will be broken in order to ensure + that no lines are longer than width. If it is false, long words will + not be broken, and some lines may be longer than width. (default: True) break_on_hyphens : bool, optional - If true, wrapping will occur preferably on whitespace and right after hyphens - in compound words, as it is customary in English. If false, only whitespaces - will be considered as potentially good places for line breaks, but you need - to set break_long_words to false if you want truly insecable words. - (default: True) + If true, wrapping will occur preferably on whitespace and right after + hyphens in compound words, as it is customary in English. If false, + only whitespaces will be considered as potentially good places for line + breaks, but you need to set break_long_words to false if you want truly + insecable words. 
(default: True) Returns ------- - wrapped : array + wrapped : Series/Index of objects Notes ----- - Internally, this method uses a :class:`textwrap.TextWrapper` instance with default - settings. To achieve behavior matching R's stringr library str_wrap function, use - the arguments: + Internally, this method uses a :class:`textwrap.TextWrapper` instance with + default settings. To achieve behavior matching R's stringr library str_wrap + function, use the arguments: - expand_tabs = False - replace_whitespace = True - drop_whitespace = True - break_long_words = False - break_on_hyphens = False + - expand_tabs = False + - replace_whitespace = True + - drop_whitespace = True + - break_long_words = False + - break_on_hyphens = False Examples -------- @@ -827,9 +875,48 @@ def str_wrap(arr, width, **kwargs): return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr) +def str_translate(arr, table, deletechars=None): + """ + Map all characters in the string through the given mapping table. + Equivalent to standard :meth:`str.translate`. Note that the optional + argument deletechars is only valid if you are using python 2. For python 3, + character deletion should be specified via the table argument. + + Parameters + ---------- + table : dict (python 3), str or None (python 2) + In python 3, table is a mapping of Unicode ordinals to Unicode ordinals, + strings, or None. Unmapped characters are left untouched. Characters + mapped to None are deleted. :meth:`str.maketrans` is a helper function + for making translation tables. + In python 2, table is either a string of length 256 or None. If the + table argument is None, no translation is applied and the operation + simply removes the characters in deletechars. :func:`string.maketrans` + is a helper function for making translation tables. + deletechars : str, optional (python 2) + A string of characters to delete. This argument is only valid + in python 2. 
+ + Returns + ------- + translated : Series/Index of objects + """ + if deletechars is None: + f = lambda x: x.translate(table) + else: + from pandas import compat + if compat.PY3: + raise ValueError("deletechars is not a valid argument for " + "str.translate in python 3. You should simply " + "specify character deletions in the table argument") + f = lambda x: x.translate(table, deletechars) + return _na_map(f, arr) + + def str_get(arr, i): """ - Extract element from lists, tuples, or strings in each element in the array + Extract element from lists, tuples, or strings in each element in the + Series/Index. Parameters ---------- @@ -838,7 +925,7 @@ def str_get(arr, i): Returns ------- - items : array + items : Series/Index of objects """ f = lambda x: x[i] if len(x) > i else np.nan return _na_map(f, arr) @@ -846,7 +933,8 @@ def str_get(arr, i): def str_decode(arr, encoding, errors="strict"): """ - Decode character string to unicode using indicated encoding + Decode character string in the Series/Index to unicode + using indicated encoding. Equivalent to :meth:`str.decode`. Parameters ---------- @@ -855,7 +943,7 @@ def str_decode(arr, encoding, errors="strict"): Returns ------- - decoded : array + decoded : Series/Index of objects """ f = lambda x: x.decode(encoding, errors) return _na_map(f, arr) @@ -863,7 +951,8 @@ def str_decode(arr, encoding, errors="strict"): def str_encode(arr, encoding, errors="strict"): """ - Encode character string to some other encoding using indicated encoding + Encode character string in the Series/Index to some other encoding + using indicated encoding. Equivalent to :meth:`str.encode`. Parameters ---------- @@ -872,7 +961,7 @@ def str_encode(arr, encoding, errors="strict"): Returns ------- - encoded : array + encoded : Series/Index of objects """ f = lambda x: x.encode(encoding, errors) return _na_map(f, arr) @@ -926,9 +1015,9 @@ def do_copy(target): class StringMethods(object): """ - Vectorized string functions for Series. 
NAs stay NA unless handled - otherwise by a particular method. Patterned after Python's string methods, - with some inspiration from R's stringr package. + Vectorized string functions for Series and Index. NAs stay NA unless + handled otherwise by a particular method. Patterned after Python's string + methods, with some inspiration from R's stringr package. Examples -------- @@ -954,29 +1043,132 @@ def __iter__(self): i += 1 g = self.get(i) - def _wrap_result(self, result): + def _wrap_result(self, result, **kwargs): + + # leave as it is to keep extract and get_dummies results + # can be merged to _wrap_result_expand in v0.17 from pandas.core.series import Series from pandas.core.frame import DataFrame + from pandas.core.index import Index if not hasattr(result, 'ndim'): return result - elif result.ndim == 1: - name = getattr(result, 'name', None) - return Series(result, index=self.series.index, - name=name or self.series.name) + name = kwargs.get('name') or getattr(result, 'name', None) or self.series.name + + if result.ndim == 1: + if isinstance(self.series, Index): + # if result is a boolean np.array, return the np.array + # instead of wrapping it into a boolean Index (GH 8875) + if is_bool_dtype(result): + return result + return Index(result, name=name) + return Series(result, index=self.series.index, name=name) else: assert result.ndim < 3 return DataFrame(result, index=self.series.index) + def _wrap_result_expand(self, result, expand=False): + if not isinstance(expand, bool): + raise ValueError("expand must be True or False") + + from pandas.core.index import Index, MultiIndex + if not hasattr(result, 'ndim'): + return result + + if isinstance(self.series, Index): + name = getattr(result, 'name', None) + # if result is a boolean np.array, return the np.array + # instead of wrapping it into a boolean Index (GH 8875) + if hasattr(result, 'dtype') and is_bool_dtype(result): + return result + + if expand: + result = list(result) + return 
MultiIndex.from_tuples(result, names=name) + else: + return Index(result, name=name) + else: + index = self.series.index + if expand: + cons_row = self.series._constructor + cons = self.series._constructor_expanddim + data = [cons_row(x) for x in result] + return cons(data, index=index) + else: + name = getattr(result, 'name', None) + cons = self.series._constructor + return cons(result, name=name, index=index) + @copy(str_cat) def cat(self, others=None, sep=None, na_rep=None): result = str_cat(self.series, others=others, sep=sep, na_rep=na_rep) return self._wrap_result(result) + @deprecate_kwarg('return_type', 'expand', + mapping={'series': False, 'frame': True}) @copy(str_split) - def split(self, pat=None, n=-1, return_type='series'): - result = str_split(self.series, pat, n=n, return_type=return_type) - return self._wrap_result(result) + def split(self, pat=None, n=-1, expand=False): + result = str_split(self.series, pat, n=n) + return self._wrap_result_expand(result, expand=expand) + + _shared_docs['str_partition'] = (""" + Split the string at the %(side)s occurrence of `sep`, and return 3 elements + containing the part before the separator, the separator itself, + and the part after the separator. + If the separator is not found, return %(return)s. + + Parameters + ---------- + pat : string, default whitespace + String to split on. + expand : bool, default True + * If True, return DataFrame/MultiIndex expanding dimensionality. + * If False, return Series/Index. 
+ + Returns + ------- + split : DataFrame/MultiIndex or Series/Index of objects + + See Also + -------- + %(also)s + + Examples + -------- + + >>> s = Series(['A_B_C', 'D_E_F', 'X']) + 0 A_B_C + 1 D_E_F + 2 X + dtype: object + + >>> s.str.partition('_') + 0 1 2 + 0 A _ B_C + 1 D _ E_F + 2 X + + >>> s.str.rpartition('_') + 0 1 2 + 0 A_B _ C + 1 D_E _ F + 2 X + """) + @Appender(_shared_docs['str_partition'] % {'side': 'first', + 'return': '3 elements containing the string itself, followed by two empty strings', + 'also': 'rpartition : Split the string at the last occurrence of `sep`'}) + def partition(self, pat=' ', expand=True): + f = lambda x: x.partition(pat) + result = _na_map(f, self.series) + return self._wrap_result_expand(result, expand=expand) + + @Appender(_shared_docs['str_partition'] % {'side': 'last', + 'return': '3 elements containing two empty strings, followed by the string itself', + 'also': 'partition : Split the string at the first occurrence of `sep`'}) + def rpartition(self, pat=' ', expand=True): + f = lambda x: x.rpartition(pat) + result = _na_map(f, self.series) + return self._wrap_result_expand(result, expand=expand) @copy(str_get) def get(self, i): @@ -997,7 +1189,7 @@ def contains(self, pat, case=True, flags=0, na=np.nan, regex=True): @copy(str_match) def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=False): result = str_match(self.series, pat, case=case, flags=flags, - na=na, as_indexer=as_indexer) + na=na, as_indexer=as_indexer) return self._wrap_result(result) @copy(str_replace) @@ -1017,7 +1209,8 @@ def pad(self, width, side='left', fillchar=' '): return self._wrap_result(result) _shared_docs['str_pad'] = (""" - Filling %s side of strings with an additional character + Filling %(side)s side of strings in the Series/Index with an + additional character. Equivalent to :meth:`str.%(method)s`. 
Parameters ---------- @@ -1029,34 +1222,36 @@ def pad(self, width, side='left', fillchar=' '): Returns ------- - filled : array + filled : Series/Index of objects """) - @Appender(_shared_docs['str_pad'] % 'left and right') + @Appender(_shared_docs['str_pad'] % dict(side='left and right', + method='center')) def center(self, width, fillchar=' '): return self.pad(width, side='both', fillchar=fillchar) - @Appender(_shared_docs['str_pad'] % 'right') + @Appender(_shared_docs['str_pad'] % dict(side='right', method='right')) def ljust(self, width, fillchar=' '): return self.pad(width, side='right', fillchar=fillchar) - @Appender(_shared_docs['str_pad'] % 'left') + @Appender(_shared_docs['str_pad'] % dict(side='left', method='left')) def rjust(self, width, fillchar=' '): return self.pad(width, side='left', fillchar=fillchar) def zfill(self, width): """" - Filling left side with 0 + Filling left side of strings in the Series/Index with 0. + Equivalent to :meth:`str.zfill`. Parameters ---------- width : int - Minimum width of resulting string; additional characters will be filled - with 0 + Minimum width of resulting string; additional characters will be + filled with 0 Returns ------- - filled : array + filled : Series/Index of objects """ result = str_pad(self.series, width, side='left', fillchar='0') return self._wrap_result(result) @@ -1081,19 +1276,31 @@ def encode(self, encoding, errors="strict"): result = str_encode(self.series, encoding, errors) return self._wrap_result(result) - @copy(str_strip) + _shared_docs['str_strip'] = (""" + Strip whitespace (including newlines) from each string in the + Series/Index from %(side)s. Equivalent to :meth:`str.%(method)s`. 
+ + Returns + ------- + stripped : Series/Index of objects + """) + + @Appender(_shared_docs['str_strip'] % dict(side='left and right sides', + method='strip')) def strip(self, to_strip=None): - result = str_strip(self.series, to_strip) + result = str_strip(self.series, to_strip, side='both') return self._wrap_result(result) - @copy(str_lstrip) + @Appender(_shared_docs['str_strip'] % dict(side='left side', + method='lstrip')) def lstrip(self, to_strip=None): - result = str_lstrip(self.series, to_strip) + result = str_strip(self.series, to_strip, side='left') return self._wrap_result(result) - @copy(str_rstrip) + @Appender(_shared_docs['str_strip'] % dict(side='right side', + method='rstrip')) def rstrip(self, to_strip=None): - result = str_rstrip(self.series, to_strip) + result = str_strip(self.series, to_strip, side='right') return self._wrap_result(result) @copy(str_wrap) @@ -1106,16 +1313,25 @@ def get_dummies(self, sep='|'): result = str_get_dummies(self.series, sep) return self._wrap_result(result) + @copy(str_translate) + def translate(self, table, deletechars=None): + result = str_translate(self.series, table, deletechars) + return self._wrap_result(result) + count = _pat_wrapper(str_count, flags=True) startswith = _pat_wrapper(str_startswith, na=True) endswith = _pat_wrapper(str_endswith, na=True) findall = _pat_wrapper(str_findall, flags=True) - extract = _pat_wrapper(str_extract, flags=True) + + @copy(str_extract) + def extract(self, pat, flags=0): + result, name = str_extract(self.series, pat, flags=flags) + return self._wrap_result(result, name=name) _shared_docs['find'] = (""" - Return %(side)s indexes in each strings where the substring is - fully contained between [start:end]. Return -1 on failure. - Equivalent to standard ``str.%(method)s``. + Return %(side)s indexes in each strings in the Series/Index + where the substring is fully contained between [start:end]. + Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`. 
Parameters ---------- @@ -1128,7 +1344,7 @@ def get_dummies(self, sep='|'): Returns ------- - found : array + found : Series/Index of integer values See Also -------- @@ -1147,46 +1363,107 @@ def rfind(self, sub, start=0, end=None): result = str_find(self.series, sub, start=start, end=end, side='right') return self._wrap_result(result) + def normalize(self, form): + """Return the Unicode normal form for the strings in the Series/Index. + For more information on the forms, see the + :func:`unicodedata.normalize`. + + Parameters + ---------- + form : {'NFC', 'NFKC', 'NFD', 'NFKD'} + Unicode form + + Returns + ------- + normalized : Series/Index of objects + """ + import unicodedata + f = lambda x: unicodedata.normalize(form, compat.u_safe(x)) + result = _na_map(f, self.series) + return self._wrap_result(result) + + _shared_docs['index'] = (""" + Return %(side)s indexes in each strings where the substring is + fully contained between [start:end]. This is the same as ``str.%(similar)s`` + except instead of returning -1, it raises a ValueError when the substring + is not found. Equivalent to standard ``str.%(method)s``. 
+ + Parameters + ---------- + sub : str + Substring being searched + start : int + Left edge index + end : int + Right edge index + + Returns + ------- + found : Series/Index of objects + + See Also + -------- + %(also)s + """) + + @Appender(_shared_docs['index'] % dict(side='lowest', similar='find', method='index', + also='rindex : Return highest indexes in each strings')) + def index(self, sub, start=0, end=None): + result = str_index(self.series, sub, start=start, end=end, side='left') + return self._wrap_result(result) + + @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex', + also='index : Return lowest indexes in each strings')) + def rindex(self, sub, start=0, end=None): + result = str_index(self.series, sub, start=start, end=end, side='right') + return self._wrap_result(result) + _shared_docs['len'] = (""" - Compute length of each string in array. + Compute length of each string in the Series/Index. Returns ------- - lengths : array + lengths : Series/Index of integer values """) len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int) _shared_docs['casemethods'] = (""" - Convert strings in array to %(type)s. - Equivalent to ``str.%(method)s``. + Convert strings in the Series/Index to %(type)s. + Equivalent to :meth:`str.%(method)s`. 
Returns ------- - converted : array + converted : Series/Index of objects """) _shared_docs['lower'] = dict(type='lowercase', method='lower') _shared_docs['upper'] = dict(type='uppercase', method='upper') _shared_docs['title'] = dict(type='titlecase', method='title') - _shared_docs['capitalize'] = dict(type='be capitalized', method='capitalize') + _shared_docs['capitalize'] = dict(type='be capitalized', + method='capitalize') _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase') lower = _noarg_wrapper(lambda x: x.lower(), - docstring=_shared_docs['casemethods'] % _shared_docs['lower']) + docstring=_shared_docs['casemethods'] % + _shared_docs['lower']) upper = _noarg_wrapper(lambda x: x.upper(), - docstring=_shared_docs['casemethods'] % _shared_docs['upper']) + docstring=_shared_docs['casemethods'] % + _shared_docs['upper']) title = _noarg_wrapper(lambda x: x.title(), - docstring=_shared_docs['casemethods'] % _shared_docs['title']) + docstring=_shared_docs['casemethods'] % + _shared_docs['title']) capitalize = _noarg_wrapper(lambda x: x.capitalize(), - docstring=_shared_docs['casemethods'] % _shared_docs['capitalize']) + docstring=_shared_docs['casemethods'] % + _shared_docs['capitalize']) swapcase = _noarg_wrapper(lambda x: x.swapcase(), - docstring=_shared_docs['casemethods'] % _shared_docs['swapcase']) + docstring=_shared_docs['casemethods'] % + _shared_docs['swapcase']) _shared_docs['ismethods'] = (""" - Check whether all characters in each string in the array are %(type)s. - Equivalent to ``str.%(method)s``. + Check whether all characters in each string in the Series/Index + are %(type)s. Equivalent to :meth:`str.%(method)s`. 
Returns ------- - Series of boolean values + is : Series/array of boolean values """) _shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum') _shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha') @@ -1198,20 +1475,29 @@ def rfind(self, sub, start=0, end=None): _shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric') _shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal') isalnum = _noarg_wrapper(lambda x: x.isalnum(), - docstring=_shared_docs['ismethods'] % _shared_docs['isalnum']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isalnum']) isalpha = _noarg_wrapper(lambda x: x.isalpha(), - docstring=_shared_docs['ismethods'] % _shared_docs['isalpha']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isalpha']) isdigit = _noarg_wrapper(lambda x: x.isdigit(), - docstring=_shared_docs['ismethods'] % _shared_docs['isdigit']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isdigit']) isspace = _noarg_wrapper(lambda x: x.isspace(), - docstring=_shared_docs['ismethods'] % _shared_docs['isspace']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isspace']) islower = _noarg_wrapper(lambda x: x.islower(), - docstring=_shared_docs['ismethods'] % _shared_docs['islower']) + docstring=_shared_docs['ismethods'] % + _shared_docs['islower']) isupper = _noarg_wrapper(lambda x: x.isupper(), - docstring=_shared_docs['ismethods'] % _shared_docs['isupper']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isupper']) istitle = _noarg_wrapper(lambda x: x.istitle(), - docstring=_shared_docs['ismethods'] % _shared_docs['istitle']) + docstring=_shared_docs['ismethods'] % + _shared_docs['istitle']) isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(), - docstring=_shared_docs['ismethods'] % _shared_docs['isnumeric']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isnumeric']) isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(), - docstring=_shared_docs['ismethods'] % 
_shared_docs['isdecimal']) + docstring=_shared_docs['ismethods'] % + _shared_docs['isdecimal']) diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx index 8bdcfb44242ff..c4cd788216018 100644 --- a/pandas/hashtable.pyx +++ b/pandas/hashtable.pyx @@ -211,7 +211,6 @@ cdef class StringHashTable(HashTable): def unique(self, ndarray[object] values): cdef: Py_ssize_t i, n = len(values) - Py_ssize_t idx, count = 0 int ret = 0 object val char *buf @@ -223,12 +222,9 @@ cdef class StringHashTable(HashTable): buf = util.get_c_string(val) k = kh_get_str(self.table, buf) if k == self.table.n_buckets: - k = kh_put_str(self.table, buf, &ret) - # print 'putting %s, %s' % (val, count) - count += 1 + kh_put_str(self.table, buf, &ret) uniques.append(val) - # return None return uniques.to_array() def factorize(self, ndarray[object] values): @@ -258,7 +254,6 @@ cdef class StringHashTable(HashTable): labels[i] = count count += 1 - # return None return reverse, labels cdef class Int32HashTable(HashTable): @@ -319,7 +314,6 @@ cdef class Int32HashTable(HashTable): def lookup(self, ndarray[int32_t] values): cdef: Py_ssize_t i, n = len(values) - int ret = 0 int32_t val khiter_t k ndarray[int32_t] locs = np.empty(n, dtype=np.int64) @@ -357,7 +351,6 @@ cdef class Int32HashTable(HashTable): labels[i] = count count += 1 - # return None return reverse, labels cdef class Int64HashTable: #(HashTable): @@ -518,7 +511,6 @@ cdef class Int64HashTable: #(HashTable): def unique(self, ndarray[int64_t] values): cdef: Py_ssize_t i, n = len(values) - Py_ssize_t idx, count = 0 int ret = 0 ndarray result int64_t val @@ -529,9 +521,8 @@ cdef class Int64HashTable: #(HashTable): val = values[i] k = kh_get_int64(self.table, val) if k == self.table.n_buckets: - k = kh_put_int64(self.table, val, &ret) + kh_put_int64(self.table, val, &ret) uniques.append(val) - count += 1 result = uniques.to_array() @@ -644,7 +635,6 @@ cdef class Float64HashTable(HashTable): def unique(self, ndarray[float64_t] values): cdef: 
Py_ssize_t i, n = len(values) - Py_ssize_t idx, count = 0 int ret = 0 float64_t val khiter_t k @@ -657,9 +647,8 @@ cdef class Float64HashTable(HashTable): if val == val: k = kh_get_float64(self.table, val) if k == self.table.n_buckets: - k = kh_put_float64(self.table, val, &ret) + kh_put_float64(self.table, val, &ret) uniques.append(val) - count += 1 elif not seen_na: seen_na = 1 uniques.append(ONAN) @@ -786,7 +775,6 @@ cdef class PyObjectHashTable(HashTable): def unique(self, ndarray[object] values): cdef: Py_ssize_t i, n = len(values) - Py_ssize_t idx, count = 0 int ret = 0 object val ndarray result @@ -800,7 +788,7 @@ cdef class PyObjectHashTable(HashTable): if not _checknan(val): k = kh_get_pymap(self.table, <PyObject*>val) if k == self.table.n_buckets: - k = kh_put_pymap(self.table, <PyObject*>val, &ret) + kh_put_pymap(self.table, <PyObject*>val, &ret) uniques.append(val) elif not seen_na: seen_na = 1 @@ -918,7 +906,7 @@ cdef class Int64Factorizer: cdef build_count_table_int64(ndarray[int64_t] values, kh_int64_t *table): cdef: - int k + khiter_t k Py_ssize_t i, n = len(values) int ret = 0 @@ -938,7 +926,6 @@ cpdef value_count_int64(ndarray[int64_t] values): cdef: Py_ssize_t i kh_int64_t *table - int ret = 0 int k table = kh_init_int64() @@ -961,7 +948,7 @@ cdef build_count_table_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask, kh_pymap_t *table): cdef: - int k + khiter_t k Py_ssize_t i, n = len(values) int ret = 0 @@ -983,7 +970,7 @@ cdef build_count_table_object(ndarray[object] values, cpdef value_count_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask): cdef: - Py_ssize_t i = len(values) + Py_ssize_t i kh_pymap_t *table int k @@ -1008,9 +995,7 @@ def mode_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask): int count, max_count = 2 int j = -1 # so you can do += int k - Py_ssize_t i, n = len(values) kh_pymap_t *table - int ret = 0 table = kh_init_pymap() build_count_table_object(values, mask, table) @@ -1036,11 
+1021,10 @@ def mode_object(ndarray[object] values, ndarray[uint8_t, cast=True] mask): def mode_int64(ndarray[int64_t] values): cdef: - int val, max_val = 2 + int count, max_count = 2 int j = -1 # so you can do += int k kh_int64_t *table - list uniques = [] table = kh_init_int64() @@ -1049,12 +1033,12 @@ def mode_int64(ndarray[int64_t] values): modes = np.empty(table.n_buckets, dtype=np.int64) for k in range(table.n_buckets): if kh_exist_int64(table, k): - val = table.vals[k] + count = table.vals[k] - if val == max_val: + if count == max_count: j += 1 - elif val > max_val: - max_val = val + elif count > max_count: + max_count = count j = 0 else: continue diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 9ecffb382e151..f1fcc822adeaf 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -13,75 +13,22 @@ from pandas.tools.merge import concat from pandas.core.common import PandasError -_IMPORTS = False -_GOOGLE_API_CLIENT_INSTALLED = False -_GOOGLE_API_CLIENT_VALID_VERSION = False -_GOOGLE_FLAGS_INSTALLED = False -_GOOGLE_FLAGS_VALID_VERSION = False -_HTTPLIB2_INSTALLED = False -_SETUPTOOLS_INSTALLED = False -def _importers(): - # import things we need - # but make this done on a first use basis - - global _IMPORTS - if _IMPORTS: - return - - _IMPORTS = True - - if not compat.PY3: - - global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \ - _GOOGLE_FLAGS_INSTALLED, _GOOGLE_FLAGS_VALID_VERSION, \ - _HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED - - try: - import pkg_resources - _SETUPTOOLS_INSTALLED = True - except ImportError: - _SETUPTOOLS_INSTALLED = False - - if _SETUPTOOLS_INSTALLED: - try: - from apiclient.discovery import build - from apiclient.http import MediaFileUpload - from apiclient.errors import HttpError - - from oauth2client.client import OAuth2WebServerFlow - from oauth2client.client import AccessTokenRefreshError - from oauth2client.client import flow_from_clientsecrets - from oauth2client.file import Storage - from 
oauth2client.tools import run - _GOOGLE_API_CLIENT_INSTALLED=True - _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version - - if LooseVersion(_GOOGLE_API_CLIENT_VERSION) >= '1.2.0': - _GOOGLE_API_CLIENT_VALID_VERSION = True - - except ImportError: - _GOOGLE_API_CLIENT_INSTALLED = False - - - try: - import gflags as flags - _GOOGLE_FLAGS_INSTALLED = True - - _GOOGLE_FLAGS_VERSION = pkg_resources.get_distribution('python-gflags').version +def _check_google_client_version(): + if compat.PY3: + raise NotImplementedError("Google's libraries do not support Python 3 yet") - if LooseVersion(_GOOGLE_FLAGS_VERSION) >= '2.0': - _GOOGLE_FLAGS_VALID_VERSION = True + try: + import pkg_resources - except ImportError: - _GOOGLE_FLAGS_INSTALLED = False + except ImportError: + raise ImportError('Could not import pkg_resources (setuptools).') - try: - import httplib2 - _HTTPLIB2_INSTALLED = True - except ImportError: - _HTTPLIB2_INSTALLED = False + _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version + if LooseVersion(_GOOGLE_API_CLIENT_VERSION) < '1.2.0': + raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google " + "BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION) logger = logging.getLogger('pandas.io.gbq') logger.setLevel(logging.ERROR) @@ -142,6 +89,16 @@ def __init__(self, project_id, reauth=False): self.service = self.get_service(self.credentials) def get_credentials(self): + try: + from oauth2client.client import OAuth2WebServerFlow + from oauth2client.file import Storage + from oauth2client.tools import run_flow, argparser + + except ImportError: + raise ImportError('Could not import Google API Client.') + + _check_google_client_version() + flow = OAuth2WebServerFlow(client_id='495642085510-k0tmvj2m941jhre2nbqka17vqpjfddtd.apps.googleusercontent.com', client_secret='kOc9wMptUtxkcIFbtZCcrEAc', scope='https://www.googleapis.com/auth/bigquery', @@ -151,11 
+108,25 @@ def get_credentials(self): credentials = storage.get() if credentials is None or credentials.invalid or self.reauth: - credentials = run(flow, storage) + credentials = run_flow(flow, storage, argparser.parse_args([])) return credentials def get_service(self, credentials): + try: + import httplib2 + + except ImportError: + raise ImportError("pandas requires httplib2 for Google BigQuery support") + + try: + from apiclient.discovery import build + + except ImportError: + raise ImportError('Could not import Google API Client.') + + _check_google_client_version() + http = httplib2.Http() http = credentials.authorize(http) bigquery_service = build('bigquery', 'v2', http=http) @@ -163,6 +134,15 @@ def get_service(self, credentials): return bigquery_service def run_query(self, query): + try: + from apiclient.errors import HttpError + from oauth2client.client import AccessTokenRefreshError + + except ImportError: + raise ImportError('Could not import Google API Client.') + + _check_google_client_version() + job_collection = self.service.jobs() job_data = { 'configuration': { @@ -313,38 +293,6 @@ def _parse_entry(field_value, field_type): return field_value == 'true' return field_value -def _test_imports(): - - _importers() - _GOOGLE_API_CLIENT_INSTALLED - _GOOGLE_API_CLIENT_VALID_VERSION - _GOOGLE_FLAGS_INSTALLED - _GOOGLE_FLAGS_VALID_VERSION - _HTTPLIB2_INSTALLED - _SETUPTOOLS_INSTALLED - - if compat.PY3: - raise NotImplementedError("Google's libraries do not support Python 3 yet") - - if not _SETUPTOOLS_INSTALLED: - raise ImportError('Could not import pkg_resources (setuptools).') - - if not _GOOGLE_API_CLIENT_INSTALLED: - raise ImportError('Could not import Google API Client.') - - if not _GOOGLE_FLAGS_INSTALLED: - raise ImportError('Could not import Google Command Line Flags Module.') - - if not _GOOGLE_API_CLIENT_VALID_VERSION: - raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google " - "BigQuery support, current version " + 
_GOOGLE_API_CLIENT_VERSION) - - if not _GOOGLE_FLAGS_VALID_VERSION: - raise ImportError("pandas requires python-gflags >= 2.0.0 for Google " - "BigQuery support, current version " + _GOOGLE_FLAGS_VERSION) - - if not _HTTPLIB2_INSTALLED: - raise ImportError("pandas requires httplib2 for Google BigQuery support") def read_gbq(query, project_id = None, index_col=None, col_order=None, reauth=False): """Load data from Google BigQuery. @@ -379,7 +327,6 @@ def read_gbq(query, project_id = None, index_col=None, col_order=None, reauth=Fa """ - _test_imports() if not project_id: raise TypeError("Missing required parameter: project_id") @@ -450,7 +397,6 @@ def to_gbq(dataframe, destination_table, project_id=None, chunksize=10000, if multiple accounts are used. """ - _test_imports() if not project_id: raise TypeError("Missing required parameter: project_id") diff --git a/pandas/io/html.py b/pandas/io/html.py index 9f5c10ce128d2..b806b5147c4a5 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -19,6 +19,7 @@ raise_with_traceback, binary_type) from pandas.core import common as com from pandas import Series +from pandas.core.common import AbstractMethodError _IMPORTS = False _HAS_BS4 = False @@ -229,7 +230,7 @@ def _text_getter(self, obj): text : str or unicode The text from an individual DOM node. """ - raise NotImplementedError + raise AbstractMethodError(self) def _parse_td(self, obj): """Return the td elements from a row element. @@ -243,7 +244,7 @@ def _parse_td(self, obj): columns : list of node-like These are the elements of each row, i.e., the columns. """ - raise NotImplementedError + raise AbstractMethodError(self) def _parse_tables(self, doc, match, attrs): """Return all tables from the parsed DOM. @@ -270,7 +271,7 @@ def _parse_tables(self, doc, match, attrs): tables : list of node-like A list of <table> elements to be parsed into raw data. 
""" - raise NotImplementedError + raise AbstractMethodError(self) def _parse_tr(self, table): """Return the list of row elements from the parsed table element. @@ -285,7 +286,7 @@ def _parse_tr(self, table): rows : list of node-like A list row elements of a table, usually <tr> or <th> elements. """ - raise NotImplementedError + raise AbstractMethodError(self) def _parse_thead(self, table): """Return the header of a table. @@ -300,7 +301,7 @@ def _parse_thead(self, table): thead : node-like A <thead>...</thead> element. """ - raise NotImplementedError + raise AbstractMethodError(self) def _parse_tbody(self, table): """Return the body of the table. @@ -315,7 +316,7 @@ def _parse_tbody(self, table): tbody : node-like A <tbody>...</tbody> element. """ - raise NotImplementedError + raise AbstractMethodError(self) def _parse_tfoot(self, table): """Return the footer of the table if any. @@ -330,7 +331,7 @@ def _parse_tfoot(self, table): tfoot : node-like A <tfoot>...</tfoot> element. """ - raise NotImplementedError + raise AbstractMethodError(self) def _build_doc(self): """Return a tree-like object that can be used to iterate over the DOM. 
@@ -339,7 +340,7 @@ def _build_doc(self): ------- obj : tree-like """ - raise NotImplementedError + raise AbstractMethodError(self) def _build_table(self, table): header = self._parse_raw_thead(table) diff --git a/pandas/io/json.py b/pandas/io/json.py index 9e8ef74545ef2..0659e34c3f27b 100644 --- a/pandas/io/json.py +++ b/pandas/io/json.py @@ -11,6 +11,7 @@ from pandas import compat, isnull from pandas import Series, DataFrame, to_datetime from pandas.io.common import get_filepath_or_buffer +from pandas.core.common import AbstractMethodError import pandas.core.common as com loads = _json.loads @@ -33,7 +34,7 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch', double_precision=double_precision, ensure_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler).write() else: - raise NotImplementedError + raise NotImplementedError("'obj' should be a Series or a DataFrame") if isinstance(path_or_buf, compat.string_types): with open(path_or_buf, 'w') as fh: @@ -64,7 +65,7 @@ def __init__(self, obj, orient, date_format, double_precision, self._format_axes() def _format_axes(self): - raise NotImplementedError + raise AbstractMethodError(self) def write(self): return dumps( @@ -282,7 +283,7 @@ def _convert_axes(self): setattr(self.obj, axis, new_axis) def _try_convert_types(self): - raise NotImplementedError + raise AbstractMethodError(self) def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): @@ -395,7 +396,7 @@ def _try_convert_to_date(self, data): return data, False def _try_convert_dates(self): - raise NotImplementedError + raise AbstractMethodError(self) class SeriesParser(Parser): diff --git a/pandas/io/packers.py b/pandas/io/packers.py index b3e2e16af54c2..75ca44fd1ef3e 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -65,26 +65,7 @@ # until we can pass this into our conversion functions, # this is pretty hacky compressor = None -_IMPORTS = False -_BLOSC = False -def _importers(): - # import things 
we need - # but make this done on a first use basis - - global _IMPORTS - if _IMPORTS: - return - - _IMPORTS = True - - global _BLOSC - import zlib - try: - import blosc - _BLOSC = True - except: - pass def to_msgpack(path_or_buf, *args, **kwargs): """ @@ -103,7 +84,6 @@ def to_msgpack(path_or_buf, *args, **kwargs): compress : type of compressor (zlib or blosc), default to None (no compression) """ - _importers() global compressor compressor = kwargs.pop('compress', None) append = kwargs.pop('append', None) @@ -146,7 +126,6 @@ def read_msgpack(path_or_buf, iterator=False, **kwargs): obj : type of object stored in file """ - _importers() path_or_buf, _ = get_filepath_or_buffer(path_or_buf) if iterator: return Iterator(path_or_buf) @@ -232,9 +211,10 @@ def convert(values): # convert to a bytes array v = v.tostring() + import zlib return zlib.compress(v) - elif compressor == 'blosc' and _BLOSC: + elif compressor == 'blosc': # return string arrays like they are if dtype == np.object_: @@ -242,6 +222,7 @@ def convert(values): # convert to a bytes array v = v.tostring() + import blosc return blosc.compress(v, typesize=dtype.itemsize) # ndarray (on original dtype) @@ -253,23 +234,20 @@ def unconvert(values, dtype, compress=None): if dtype == np.object_: return np.array(values, dtype=object) - if compress == 'zlib': + values = values.encode('latin1') + if compress == 'zlib': + import zlib values = zlib.decompress(values) return np.frombuffer(values, dtype=dtype) elif compress == 'blosc': - - if not _BLOSC: - raise Exception("cannot uncompress w/o blosc") - - # decompress + import blosc values = blosc.decompress(values) - return np.frombuffer(values, dtype=dtype) # from a string - return np.fromstring(values.encode('latin1'), dtype=dtype) + return np.fromstring(values, dtype=dtype) def encode(obj): @@ -285,7 +263,8 @@ def encode(obj): 'name': getattr(obj, 'name', None), 'freq': getattr(obj, 'freqstr', None), 'dtype': obj.dtype.num, - 'data': convert(obj.asi8)} + 'data': 
convert(obj.asi8), + 'compress': compressor} elif isinstance(obj, DatetimeIndex): tz = getattr(obj, 'tz', None) @@ -299,19 +278,22 @@ def encode(obj): 'dtype': obj.dtype.num, 'data': convert(obj.asi8), 'freq': getattr(obj, 'freqstr', None), - 'tz': tz} + 'tz': tz, + 'compress': compressor} elif isinstance(obj, MultiIndex): return {'typ': 'multi_index', 'klass': obj.__class__.__name__, 'names': getattr(obj, 'names', None), 'dtype': obj.dtype.num, - 'data': convert(obj.values)} + 'data': convert(obj.values), + 'compress': compressor} else: return {'typ': 'index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'dtype': obj.dtype.num, - 'data': convert(obj.values)} + 'data': convert(obj.values), + 'compress': compressor} elif isinstance(obj, Series): if isinstance(obj, SparseSeries): raise NotImplementedError( diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py old mode 100644 new mode 100755 index 637612d5fb09d..1ca396935ae78 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -14,6 +14,7 @@ from pandas.core.frame import DataFrame import datetime import pandas.core.common as com +from pandas.core.common import AbstractMethodError from pandas.core.config import get_option from pandas.io.date_converters import generic_parser from pandas.io.common import get_filepath_or_buffer @@ -55,8 +56,11 @@ class ParserWarning(Warning): dtype : Type name or dict of column -> type Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} (Unsupported with engine='python') -compression : {'gzip', 'bz2', None}, default None - For on-the-fly decompression of on-disk data +compression : {'gzip', 'bz2', 'infer', None}, default 'infer' + For on-the-fly decompression of on-disk data. If 'infer', then use gzip or + bz2 if filepath_or_buffer is a string ending in '.gz' or '.bz2', + respectively, and no decompression otherwise. Set to None for no + decompression. dialect : string or csv.Dialect instance, default None If None defaults to Excel dialect. 
Ignored if sep longer than 1 char See csv.Dialect documentation for more details @@ -294,7 +298,7 @@ def _read(filepath_or_buffer, kwds): 'verbose': False, 'encoding': None, 'squeeze': False, - 'compression': None, + 'compression': 'infer', 'mangle_dupe_cols': True, 'tupleize_cols': False, 'infer_datetime_format': False, @@ -334,7 +338,7 @@ def _make_parser_function(name, sep=','): def parser_f(filepath_or_buffer, sep=sep, dialect=None, - compression=None, + compression='infer', doublequote=True, escapechar=None, @@ -652,6 +656,8 @@ def _clean_options(self, options, engine): # really delete this one keep_default_na = result.pop('keep_default_na') + if index_col is True: + raise ValueError("The value of index_col couldn't be 'True'") if _is_index_col(index_col): if not isinstance(index_col, (list, tuple, np.ndarray)): index_col = [index_col] @@ -705,7 +711,7 @@ def _make_engine(self, engine='c'): self._engine = klass(self.f, **self.options) def _failover_to_python(self): - raise NotImplementedError + raise AbstractMethodError(self) def read(self, nrows=None): if nrows is not None: @@ -991,7 +997,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, try: values = lib.map_infer(values, conv_f) except ValueError: - mask = lib.ismember(values, na_values).view(np.uin8) + mask = lib.ismember(values, na_values).view(np.uint8) values = lib.map_infer_mask(values, conv_f, mask) coerce_type = False @@ -1314,6 +1320,7 @@ def _wrap_compressed(f, compression, encoding=None): """ compression = compression.lower() encoding = encoding or get_option('display.encoding') + if compression == 'gzip': import gzip @@ -1386,6 +1393,17 @@ def __init__(self, f, **kwds): self.comment = kwds['comment'] self._comment_lines = [] + if self.compression == 'infer': + if isinstance(f, compat.string_types): + if f.endswith('.gz'): + self.compression = 'gzip' + elif f.endswith('.bz2'): + self.compression = 'bz2' + else: + self.compression = None + else: + self.compression = 
None + if isinstance(f, compat.string_types): f = com._get_handle(f, 'r', encoding=self.encoding, compression=self.compression) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 458a245da6bdb..4cbc7aeaa3df7 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3453,6 +3453,10 @@ def get_blk_items(mgr, blocks): def process_axes(self, obj, columns=None): """ process axes filters """ + # make a copy to avoid side effects + if columns is not None: + columns = list(columns) + # make sure to include levels if we have them if columns is not None and self.is_multi_index: for n in self.levels: diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 117d7b4a9ceaa..ad88d74a5aa91 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -83,14 +83,14 @@ def _handle_date_column(col, format=None): return to_datetime(col, **format) else: if format in ['D', 's', 'ms', 'us', 'ns']: - return to_datetime(col, coerce=True, unit=format) + return to_datetime(col, coerce=True, unit=format, utc=True) elif (issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer)): # parse dates as timestamp format = 's' if format is None else format - return to_datetime(col, coerce=True, unit=format) + return to_datetime(col, coerce=True, unit=format, utc=True) else: - return to_datetime(col, coerce=True, format=format) + return to_datetime(col, coerce=True, format=format, utc=True) def _parse_date_columns(data_frame, parse_dates): @@ -318,6 +318,10 @@ def read_sql_table(table_name, con, schema=None, index_col=None, ------- DataFrame + Notes + ----- + Any datetime values with time zone information will be converted to UTC + See also -------- read_sql_query : Read SQL query into a DataFrame. 
@@ -390,6 +394,11 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, ------- DataFrame + Notes + ----- + Any datetime values with time zone information parsed via the `parse_dates` + parameter will be converted to UTC + See also -------- read_sql_table : Read SQL database table into a DataFrame @@ -451,7 +460,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (and for backward compatibility) and will delegate to the specific function depending on the provided input (database - table name or sql query). + table name or sql query). The delegated function might have more specific + notes about their functionality not listed here. See also -------- @@ -531,7 +541,8 @@ def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail', if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): - raise NotImplementedError + raise NotImplementedError("'frame' argument should be either a " + "Series or a DataFrame") pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, @@ -1434,7 +1445,8 @@ def __init__(self, con, flavor, is_cursor=False): if flavor is None: flavor = 'sqlite' if flavor not in ['sqlite', 'mysql']: - raise NotImplementedError + raise NotImplementedError("flavors other than SQLite and MySQL " + "are not supported") else: self.flavor = flavor diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7dd32fd00a4d2..eecc225d06beb 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1626,7 +1626,7 @@ def _dtype_to_stata_type(dtype, column): elif dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? 
- itemsize = max_len_string_array(column.values) + itemsize = max_len_string_array(com._ensure_object(column.values)) return chr(max(itemsize, 1)) elif dtype == np.float64: return chr(255) @@ -1664,7 +1664,7 @@ def _dtype_to_default_stata_fmt(dtype, column): if not (inferred_dtype in ('string', 'unicode') or len(column) == 0): raise ValueError('Writing general object arrays is not supported') - itemsize = max_len_string_array(column.values) + itemsize = max_len_string_array(com._ensure_object(column.values)) if itemsize > 244: raise ValueError(excessive_string_length_error % column.name) return "%" + str(max(itemsize, 1)) + "s" @@ -1885,6 +1885,8 @@ def _prepare_pandas(self, data): #NOTE: we might need a different API / class for pandas objects so # we can set different semantics - handle this with a PR to pandas.io + data = data.copy() + if self._write_index: data = data.reset_index() @@ -2013,7 +2015,7 @@ def _write_variable_labels(self, labels=None): self._write(_pad_bytes("", 81)) def _prepare_data(self): - data = self.data.copy() + data = self.data typlist = self.typlist convert_dates = self._convert_dates # 1. 
Convert dates diff --git a/pandas/io/tests/data/test1.csv.bz2 b/pandas/io/tests/data/test1.csv.bz2 new file mode 100644 index 0000000000000..f96f26a8e7419 Binary files /dev/null and b/pandas/io/tests/data/test1.csv.bz2 differ diff --git a/pandas/io/tests/data/test1.csv.gz b/pandas/io/tests/data/test1.csv.gz new file mode 100644 index 0000000000000..1336db6e2af7e Binary files /dev/null and b/pandas/io/tests/data/test1.csv.gz differ diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py index ad6f071d738ff..93d55c654de90 100644 --- a/pandas/io/tests/test_cparser.py +++ b/pandas/io/tests/test_cparser.py @@ -336,6 +336,28 @@ def test_empty_field_eof(self): 2: np.array(['3', ''], dtype=object)} assert_array_dicts_equal(result, expected) + # GH5664 + a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c']) + b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], + columns=list('abcd'), + index=[1, 1]) + c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan], + [8, 9, 10, 11], [13, 14, nan, nan]], + columns=list('abcd'), + index=[0, 5, 7, 12]) + + for _ in range(100): + df = read_csv(StringIO('a,b\nc\n'), skiprows=0, + names=['a'], engine='c') + assert_frame_equal(df, a) + + df = read_csv(StringIO('1,1,1,1,0\n'*2 + '\n'*2), + names=list("abcd"), engine='c') + assert_frame_equal(df, b) + + df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'), + names=list('abcd'), engine='c') + assert_frame_equal(df, c) def assert_array_dicts_equal(left, right): for k, v in compat.iteritems(left): diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 70a25a45c0ad4..63ed26ea7d931 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -33,7 +33,7 @@ def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning): all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in compat.iteritems(obj))) n_all_nan_cols = all_nan_cols.sum() - valid_warnings = pd.Series([wng for wng in wngs if isinstance(wng, cls)]) 
+ valid_warnings = pd.Series([wng for wng in wngs if wng.category == cls]) assert_equal(len(valid_warnings), n_all_nan_cols) failed_symbols = all_nan_cols[all_nan_cols].index msgs = valid_warnings.map(lambda x: x.message) @@ -79,7 +79,7 @@ def test_get_goog_volume(self): for locale in self.locales: with tm.set_locale(locale): df = web.get_data_google('GOOG').sort_index() - self.assertEqual(df.Volume.ix['OCT-08-2010'], 2863473) + self.assertEqual(df.Volume.ix['JAN-02-2015'], 1446662) @network def test_get_multi1(self): @@ -87,10 +87,10 @@ def test_get_multi1(self): sl = ['AAPL', 'AMZN', 'GOOG'] with tm.set_locale(locale): pan = web.get_data_google(sl, '2012') - ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] + ts = pan.Close.GOOG.index[pan.Close.AAPL < pan.Close.GOOG] if (hasattr(pan, 'Close') and hasattr(pan.Close, 'GOOG') and hasattr(pan.Close, 'AAPL')): - self.assertEqual(ts[0].dayofyear, 96) + self.assertEqual(ts[0].dayofyear, 3) else: self.assertRaises(AttributeError, lambda: pan.Close) @@ -105,6 +105,7 @@ def test_get_multi_all_invalid(self): sl = ['INVALID', 'INVALID2', 'INVALID3'] self.assertRaises(RemoteDataError, web.get_data_google, sl, '2012') + @network def test_get_multi2(self): with warnings.catch_warnings(record=True) as w: for locale in self.locales: @@ -135,7 +136,7 @@ def test_dtypes(self): def test_unicode_date(self): #GH8967 data = web.get_data_google('F', start='JAN-01-10', end='JAN-27-13') - self.assertEquals(data.index.name, 'Date') + self.assertEqual(data.index.name, 'Date') class TestYahoo(tm.TestCase): diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 699d1212556cc..768aa40696cbc 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1132,31 +1132,29 @@ def roundtrip(df, header=True, parser_hdr=0): nrows = 5 ncols = 3 - - for i in range(1, 4): # row multindex upto nlevel=3 - for j in range(1, 4): # col "" - df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) - res 
= roundtrip(df) - # shape - self.assertEqual(res.shape, (nrows, ncols + i)) - - # no nans - for r in range(len(res.index)): - for c in range(len(res.columns)): - self.assertTrue(res.ix[r, c] is not np.nan) - - for i in range(1, 4): # row multindex upto nlevel=3 - for j in range(1, 4): # col "" - df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) - res = roundtrip(df, False) - # shape - self.assertEqual(res.shape, ( - nrows - 1, ncols + i)) # first row taken as columns - - # no nans - for r in range(len(res.index)): - for c in range(len(res.columns)): - self.assertTrue(res.ix[r, c] is not np.nan) + for use_headers in (True, False): + for i in range(1, 4): # row multindex upto nlevel=3 + for j in range(1, 4): # col "" + df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) + + #this if will be removed once multi column excel writing + #is implemented for now fixing #9794 + if j>1: + with tm.assertRaises(NotImplementedError): + res = roundtrip(df, use_headers) + else: + res = roundtrip(df, use_headers) + + if use_headers: + self.assertEqual(res.shape, (nrows, ncols + i)) + else: + # first row taken as columns + self.assertEqual(res.shape, (nrows - 1, ncols + i)) + + # no nans + for r in range(len(res.index)): + for c in range(len(res.columns)): + self.assertTrue(res.ix[r, c] is not np.nan) res = roundtrip(DataFrame([0])) self.assertEqual(res.shape, (1, 1)) @@ -1394,6 +1392,29 @@ class XlwtTests(ExcelWriterBase, tm.TestCase): engine_name = 'xlwt' check_skip = staticmethod(_skip_if_no_xlwt) + def test_excel_raise_not_implemented_error_on_multiindex_columns(self): + _skip_if_no_xlwt() + #MultiIndex as columns is not yet implemented 9794 + cols = pd.MultiIndex.from_tuples([('site',''), + ('2014','height'), + ('2014','weight')]) + df = pd.DataFrame(np.random.randn(10,3), columns=cols) + with tm.assertRaises(NotImplementedError): + with ensure_clean(self.ext) as path: + df.to_excel(path, index=False) + + def test_excel_multiindex_index(self): + _skip_if_no_xlwt() 
+ #MultiIndex as index works so assert no error #9794 + cols = pd.MultiIndex.from_tuples([('site',''), + ('2014','height'), + ('2014','weight')]) + df = pd.DataFrame(np.random.randn(3,10), index=cols) + with ensure_clean(self.ext) as path: + df.to_excel(path, index=False) + + + def test_to_excel_styleconverter(self): _skip_if_no_xlwt() diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 2f79cc8ba1826..5417842d3f863 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -12,6 +12,9 @@ import numpy as np +from distutils.version import LooseVersion +from pandas import compat + from pandas import NaT from pandas.compat import u from pandas.core.frame import DataFrame @@ -22,6 +25,12 @@ VERSION = platform.python_version() +_IMPORTS = False +_GOOGLE_API_CLIENT_INSTALLED = False +_GOOGLE_API_CLIENT_VALID_VERSION = False +_HTTPLIB2_INSTALLED = False +_SETUPTOOLS_INSTALLED = False + def missing_bq(): try: subprocess.call('bq') @@ -29,9 +38,64 @@ def missing_bq(): except OSError: return True +def _test_imports(): + if not compat.PY3: + + global _GOOGLE_API_CLIENT_INSTALLED, _GOOGLE_API_CLIENT_VALID_VERSION, \ + _HTTPLIB2_INSTALLED, _SETUPTOOLS_INSTALLED + + try: + import pkg_resources + _SETUPTOOLS_INSTALLED = True + except ImportError: + _SETUPTOOLS_INSTALLED = False + + if _SETUPTOOLS_INSTALLED: + try: + from apiclient.discovery import build + from apiclient.errors import HttpError + + from oauth2client.client import OAuth2WebServerFlow + from oauth2client.client import AccessTokenRefreshError + + from oauth2client.file import Storage + from oauth2client.tools import run_flow + _GOOGLE_API_CLIENT_INSTALLED=True + _GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version + + if LooseVersion(_GOOGLE_API_CLIENT_VERSION) >= '1.2.0': + _GOOGLE_API_CLIENT_VALID_VERSION = True + + except ImportError: + _GOOGLE_API_CLIENT_INSTALLED = False + + + try: + import httplib2 + _HTTPLIB2_INSTALLED = True 
+ except ImportError: + _HTTPLIB2_INSTALLED = False + + + if compat.PY3: + raise NotImplementedError("Google's libraries do not support Python 3 yet") + + if not _SETUPTOOLS_INSTALLED: + raise ImportError('Could not import pkg_resources (setuptools).') + + if not _GOOGLE_API_CLIENT_INSTALLED: + raise ImportError('Could not import Google API Client.') + + if not _GOOGLE_API_CLIENT_VALID_VERSION: + raise ImportError("pandas requires google-api-python-client >= 1.2.0 for Google " + "BigQuery support, current version " + _GOOGLE_API_CLIENT_VERSION) + + if not _HTTPLIB2_INSTALLED: + raise ImportError("pandas requires httplib2 for Google BigQuery support") + def test_requirements(): try: - gbq._test_imports() + _test_imports() except (ImportError, NotImplementedError) as import_exception: raise nose.SkipTest(import_exception) diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index 1e8ce7afa9492..26fae0717f956 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -324,12 +324,14 @@ def test_frame_to_json_except(self): def test_frame_empty(self): df = DataFrame(columns=['jim', 'joe']) self.assertFalse(df._is_mixed_type) - assert_frame_equal(read_json(df.to_json()), df) + assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df) + def test_frame_empty_mixedtype(self): # mixed type + df = DataFrame(columns=['jim', 'joe']) df['joe'] = df['joe'].astype('i8') self.assertTrue(df._is_mixed_type) - assert_frame_equal(read_json(df.to_json()), df) + assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df) def test_v12_compat(self): df = DataFrame( diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py index 9633f567ab098..92e0d7ba1a338 100644 --- a/pandas/io/tests/test_packers.py +++ b/pandas/io/tests/test_packers.py @@ -446,6 +446,45 @@ def test_sparse_panel(self): check_panel_type=True) +class TestCompression(TestPackers): + """See 
https://github.com/pydata/pandas/pull/9783 + """ + + def setUp(self): + super(TestCompression, self).setUp() + data = { + 'A': np.arange(1000, dtype=np.float64), + 'B': np.arange(1000, dtype=np.int32), + 'C': list(100 * 'abcdefghij'), + 'D': date_range(datetime.datetime(2015, 4, 1), periods=1000), + 'E': [datetime.timedelta(days=x) for x in range(1000)], + } + self.frame = { + 'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])), + 'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])), + 'mixed': DataFrame(data), + } + + def test_plain(self): + i_rec = self.encode_decode(self.frame) + for k in self.frame.keys(): + assert_frame_equal(self.frame[k], i_rec[k]) + + def test_compression_zlib(self): + i_rec = self.encode_decode(self.frame, compress='zlib') + for k in self.frame.keys(): + assert_frame_equal(self.frame[k], i_rec[k]) + + def test_compression_blosc(self): + try: + import blosc + except ImportError: + raise nose.SkipTest('no blosc') + i_rec = self.encode_decode(self.frame, compress='blosc') + for k in self.frame.keys(): + assert_frame_equal(self.frame[k], i_rec[k]) + + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py old mode 100644 new mode 100755 index 35530a7f5e07f..48d625744c787 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -17,6 +17,7 @@ from pandas.compat import( StringIO, BytesIO, PY3, range, long, lrange, lmap, u ) + from pandas.io.common import URLError import pandas.io.parsers as parsers from pandas.io.parsers import (read_csv, read_table, read_fwf, @@ -272,7 +273,7 @@ def test_squeeze(self): b,2 c,3 """ - expected = Series([1, 2, 3], ['a', 'b', 'c']) + expected = Series([1, 2, 3], index=Index(['a', 'b', 'c'], name=0)) result = self.read_table(StringIO(data), sep=',', index_col=0, header=None, squeeze=True) tm.assert_isinstance(result, Series) @@ -520,6 +521,11 @@ def 
test_usecols_index_col_False(self): df = self.read_csv(StringIO(s_malformed), usecols=cols, index_col=False) tm.assert_frame_equal(expected, df) + def test_index_col_is_True(self): + # Issue 9798 + self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data), + index_col=True) + def test_converter_index_col_bug(self): # 1835 data = "A;B\n1;2\n3;4" @@ -839,6 +845,28 @@ def test_deep_skiprows(self): condensed_data = self.read_csv(StringIO(condensed_text)) tm.assert_frame_equal(data, condensed_data) + def test_skiprows_blank(self): + # GH 9832 + text = """#foo,a,b,c +#foo,a,b,c + +#foo,a,b,c +#foo,a,b,c + +1/1/2000,1.,2.,3. +1/2/2000,4,5,6 +1/3/2000,7,8,9 +""" + data = self.read_csv(StringIO(text), skiprows=6, header=None, + index_col=0, parse_dates=True) + + expected = DataFrame(np.arange(1., 10.).reshape((3, 3)), + columns=[1, 2, 3], + index=[datetime(2000, 1, 1), datetime(2000, 1, 2), + datetime(2000, 1, 3)]) + expected.index.name = 0 + tm.assert_frame_equal(data, expected) + def test_detect_string_na(self): data = """A,B foo,bar @@ -954,8 +982,8 @@ def test_yy_format(self): parse_dates=[['date', 'time']]) idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0), datetime(2009, 2, 28, 10, 20, 0), - datetime(2009, 3, 31, 8, 30, 0)]).asobject - idx.name = 'date_time' + datetime(2009, 3, 31, 8, 30, 0)], + dtype=object, name='date_time') xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx) tm.assert_frame_equal(rs, xp) @@ -963,8 +991,8 @@ def test_yy_format(self): parse_dates=[[0, 1]]) idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0), datetime(2009, 2, 28, 10, 20, 0), - datetime(2009, 3, 31, 8, 30, 0)]).asobject - idx.name = 'date_time' + datetime(2009, 3, 31, 8, 30, 0)], + dtype=object, name='date_time') xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx) tm.assert_frame_equal(rs, xp) @@ -1071,6 +1099,21 @@ def test_read_csv_no_index_name(self): self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']].values.dtype, np.float64) tm.assert_frame_equal(df, df2) + def 
test_read_csv_infer_compression(self): + # GH 9770 + expected = self.read_csv(self.csv1, index_col=0, parse_dates=True) + + inputs = [self.csv1, self.csv1 + '.gz', + self.csv1 + '.bz2', open(self.csv1)] + + for f in inputs: + df = self.read_csv(f, index_col=0, parse_dates=True, + compression='infer') + + tm.assert_frame_equal(expected, df) + + inputs[3].close() + def test_read_table_unicode(self): fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8')) df1 = read_table(fin, sep=";", encoding="utf-8", header=None) @@ -2231,6 +2274,26 @@ def test_nrows_and_chunksize_raises_notimplemented(self): self.assertRaises(NotImplementedError, self.read_csv, StringIO(data), nrows=10, chunksize=5) + def test_single_char_leading_whitespace(self): + # GH 9710 + data = """\ +MyColumn + a + b + a + b\n""" + + expected = DataFrame({'MyColumn' : list('abab')}) + + result = self.read_csv(StringIO(data), skipinitialspace=True) + tm.assert_frame_equal(result, expected) + + def test_chunk_begins_with_newline_whitespace(self): + # GH 10022 + data = '\n hello\nworld\n' + result = self.read_csv(StringIO(data), header=None) + self.assertEqual(len(result), 2) + class TestPythonParser(ParserTests, tm.TestCase): def test_negative_skipfooter_raises(self): @@ -2984,6 +3047,25 @@ def test_variable_width_unicode(self): tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')), header=None, encoding='utf8')) + def test_convert_to_nd_arrays(self): + #GH 9266 + with tm.ensure_clean('test.txt') as path: + with open(path,'w') as f: + f.write( + """1421302964.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127 \n""" + + """1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" + ) + + result = pd.read_fwf('test.txt', colspecs=[(0,17),(25,26),(33,37),(49,51),(58,62),(63,1000)], + names=['time','pri','pgn','dst','src','data'], + converters={'pgn':lambda x: int(x,16), + 'src':lambda x: int(x,16), + 'dst':lambda x: int(x,16), + 'data':lambda x: len(x.split(' '))}, 
+ index_col='time') + self.assertEqual(result['dst'].dtype,np.uint8) + + class TestCParserHighMemory(ParserTests, tm.TestCase): @@ -3068,17 +3150,17 @@ def test_skiprows_lineterminator(self): expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'], ['2007/01/01', '02:00', 0.2141, 'M', 'O'], ['2007/01/01', '04:00', 0.2142, 'D', 'M']], - columns=['date', 'time', 'var', 'flag', + columns=['date', 'time', 'var', 'flag', 'oflag']) # test with the three default lineterminators LF, CR and CRLF df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True, names=['date', 'time', 'var', 'flag', 'oflag']) tm.assert_frame_equal(df, expected) - df = self.read_csv(StringIO(data.replace('\n', '\r')), + df = self.read_csv(StringIO(data.replace('\n', '\r')), skiprows=1, delim_whitespace=True, names=['date', 'time', 'var', 'flag', 'oflag']) tm.assert_frame_equal(df, expected) - df = self.read_csv(StringIO(data.replace('\n', '\r\n')), + df = self.read_csv(StringIO(data.replace('\n', '\r\n')), skiprows=1, delim_whitespace=True, names=['date', 'time', 'var', 'flag', 'oflag']) tm.assert_frame_equal(df, expected) @@ -3271,6 +3353,25 @@ def test_buffer_overflow(self): except Exception as cperr: self.assertIn('Buffer overflow caught - possible malformed input file.', str(cperr)) + def test_single_char_leading_whitespace(self): + # GH 9710 + data = """\ +MyColumn + a + b + a + b\n""" + + expected = DataFrame({'MyColumn' : list('abab')}) + + result = self.read_csv(StringIO(data), delim_whitespace=True, + skipinitialspace=True) + tm.assert_frame_equal(result, expected) + + result = self.read_csv(StringIO(data), lineterminator='\n', + skipinitialspace=True) + tm.assert_frame_equal(result, expected) + class TestCParserLowMemory(ParserTests, tm.TestCase): def read_csv(self, *args, **kwds): @@ -3692,6 +3793,25 @@ def test_buffer_overflow(self): except Exception as cperr: self.assertIn('Buffer overflow caught - possible malformed input file.', str(cperr)) + def 
test_single_char_leading_whitespace(self): + # GH 9710 + data = """\ +MyColumn + a + b + a + b\n""" + + expected = DataFrame({'MyColumn' : list('abab')}) + + result = self.read_csv(StringIO(data), delim_whitespace=True, + skipinitialspace=True) + tm.assert_frame_equal(result, expected) + + result = self.read_csv(StringIO(data), lineterminator='\n', + skipinitialspace=True) + tm.assert_frame_equal(result, expected) + class TestMiscellaneous(tm.TestCase): # for tests that don't fit into any of the other classes, e.g. those that diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index acdc991c92efe..7d9c3c051344f 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -156,50 +156,51 @@ def tearDown(self): pass def test_factory_fun(self): + path = create_tempfile(self.path) try: - with get_store(self.path) as tbl: + with get_store(path) as tbl: raise ValueError('blah') except ValueError: pass finally: - safe_remove(self.path) + safe_remove(path) try: - with get_store(self.path) as tbl: + with get_store(path) as tbl: tbl['a'] = tm.makeDataFrame() - with get_store(self.path) as tbl: + with get_store(path) as tbl: self.assertEqual(len(tbl), 1) self.assertEqual(type(tbl['a']), DataFrame) finally: safe_remove(self.path) def test_context(self): + path = create_tempfile(self.path) try: - with HDFStore(self.path) as tbl: + with HDFStore(path) as tbl: raise ValueError('blah') except ValueError: pass finally: - safe_remove(self.path) + safe_remove(path) try: - with HDFStore(self.path) as tbl: + with HDFStore(path) as tbl: tbl['a'] = tm.makeDataFrame() - with HDFStore(self.path) as tbl: + with HDFStore(path) as tbl: self.assertEqual(len(tbl), 1) self.assertEqual(type(tbl['a']), DataFrame) finally: - safe_remove(self.path) + safe_remove(path) def test_conv_read_write(self): - + path = create_tempfile(self.path) try: - def roundtrip(key, obj,**kwargs): - obj.to_hdf(self.path, key,**kwargs) - return read_hdf(self.path, 
key) + obj.to_hdf(path, key,**kwargs) + return read_hdf(path, key) o = tm.makeTimeSeries() assert_series_equal(o, roundtrip('series',o)) @@ -215,12 +216,12 @@ def roundtrip(key, obj,**kwargs): # table df = DataFrame(dict(A=lrange(5), B=lrange(5))) - df.to_hdf(self.path,'table',append=True) - result = read_hdf(self.path, 'table', where = ['index>2']) + df.to_hdf(path,'table',append=True) + result = read_hdf(path, 'table', where = ['index>2']) assert_frame_equal(df[df.index>2],result) finally: - safe_remove(self.path) + safe_remove(path) def test_long_strings(self): @@ -1593,9 +1594,10 @@ def make_index(names=None): # series _maybe_remove(store, 's') - s = Series(np.zeros(12), index=make_index(['date',None,None])) + s = Series(np.zeros(12), index=make_index(['date', None, None])) store.append('s',s) - tm.assert_series_equal(store.select('s'),s) + xp = Series(np.zeros(12), index=make_index(['date', 'level_1', 'level_2'])) + tm.assert_series_equal(store.select('s'), xp) # dup with column _maybe_remove(store, 'df') @@ -3612,7 +3614,7 @@ def test_frame_select_complex(self): # invert ok for filters result = store.select('df', "~(columns=['A','B'])") - expected = df.loc[:,df.columns-['A','B']] + expected = df.loc[:,df.columns.difference(['A','B'])] tm.assert_frame_equal(result, expected) # in @@ -4328,13 +4330,14 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): df = tm.makeDataFrame() try: - st = HDFStore(self.path) + path = create_tempfile(self.path) + st = HDFStore(path) st.append('df', df, data_columns = ['A']) st.close() - do_copy(f = self.path) - do_copy(f = self.path, propindexes = False) + do_copy(f = path) + do_copy(f = path, propindexes = False) finally: - safe_remove(self.path) + safe_remove(path) def test_legacy_table_write(self): raise nose.SkipTest("cannot write legacy tables") @@ -4584,22 +4587,59 @@ def test_duplicate_column_name(self): with ensure_clean_path(self.path) as path: self.assertRaises(ValueError, df.to_hdf, path, 
'df', format='fixed') + df.to_hdf(path, 'df', format='table') + other = read_hdf(path, 'df') + + tm.assert_frame_equal(df, other) + self.assertTrue(df.equals(other)) + self.assertTrue(other.equals(df)) + + def test_round_trip_equals(self): + # GH 9330 + df = DataFrame({"B": [1,2], "A": ["x","y"]}) + + with ensure_clean_path(self.path) as path: df.to_hdf(path, 'df', format='table') other = read_hdf(path, 'df') tm.assert_frame_equal(df, other) + self.assertTrue(df.equals(other)) + self.assertTrue(other.equals(df)) def test_preserve_timedeltaindex_type(self): - # GH9635 + # GH9635 # Storing TimedeltaIndexed DataFrames in fixed stores did not preserve # the type of the index. df = DataFrame(np.random.normal(size=(10,5))) df.index = timedelta_range(start='0s',periods=10,freq='1s',name='example') with ensure_clean_store(self.path) as store: - + store['df'] = df assert_frame_equal(store['df'], df) + def test_colums_multiindex_modified(self): + # BUG: 7212 + # read_hdf store.select modified the passed columns parameters + # when multi-indexed. 
+ + df = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + df.index.name = 'letters' + df = df.set_index(keys='E', append=True) + + data_columns = df.index.names+df.columns.tolist() + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'df', + mode='a', + append=True, + data_columns=data_columns, + index=False) + cols2load = list('BCD') + cols2load_original = list(cols2load) + df_loaded = read_hdf(path, 'df', columns=cols2load) + self.assertTrue(cols2load_original == cols2load) + def _test_sort(obj): if isinstance(obj, DataFrame): diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 2db6f1e104770..9576f80696350 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -29,7 +29,7 @@ from datetime import datetime, date, time from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat -from pandas import date_range, to_datetime, to_timedelta +from pandas import date_range, to_datetime, to_timedelta, Timestamp import pandas.compat as compat from pandas.compat import StringIO, range, lrange, string_types from pandas.core.datetools import format as date_format @@ -100,6 +100,7 @@ 'postgresql': """CREATE TABLE types_test_data ( "TextCol" TEXT, "DateCol" TIMESTAMP, + "DateColWithTz" TIMESTAMP WITH TIME ZONE, "IntDateCol" INTEGER, "FloatCol" DOUBLE PRECISION, "IntCol" INTEGER, @@ -109,18 +110,36 @@ )""" }, 'insert_test_types': { - 'sqlite': """ + 'sqlite': { + 'query': """ INSERT INTO types_test_data VALUES(?, ?, ?, ?, ?, ?, ?, ?) 
""", - 'mysql': """ + 'fields': ( + 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', + 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + ) + }, + 'mysql': { + 'query': """ INSERT INTO types_test_data VALUES("%s", %s, %s, %s, %s, %s, %s, %s) """, - 'postgresql': """ + 'fields': ( + 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', + 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + ) + }, + 'postgresql': { + 'query': """ INSERT INTO types_test_data - VALUES(%s, %s, %s, %s, %s, %s, %s, %s) - """ + VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) + """, + 'fields': ( + 'TextCol', 'DateCol', 'DateColWithTz', 'IntDateCol', 'FloatCol', + 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + ) + }, }, 'read_parameters': { 'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?", @@ -218,11 +237,36 @@ def _load_raw_sql(self): self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor]) ins = SQL_STRINGS['insert_test_types'][self.flavor] - data = [( - 'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False), - ('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)] + data = [ + { + 'TextCol': 'first', + 'DateCol': '2000-01-03 00:00:00', + 'DateColWithTz': '2000-01-01 00:00:00-08:00', + 'IntDateCol': 535852800, + 'FloatCol': 10.10, + 'IntCol': 1, + 'BoolCol': False, + 'IntColWithNull': 1, + 'BoolColWithNull': False, + }, + { + 'TextCol': 'first', + 'DateCol': '2000-01-04 00:00:00', + 'DateColWithTz': '2000-06-01 00:00:00-07:00', + 'IntDateCol': 1356998400, + 'FloatCol': 10.10, + 'IntCol': 1, + 'BoolCol': False, + 'IntColWithNull': None, + 'BoolColWithNull': None, + }, + ] + for d in data: - self._get_exec().execute(ins, d) + self._get_exec().execute( + ins['query'], + [d[field] for field in ins['fields']] + ) def _count_rows(self, table_name): result = self._get_exec().execute( @@ -1212,10 +1256,14 @@ def test_transactions(self): self._transaction_test() def test_get_schema_create_table(self): - 
self._load_test2_data() + # Use a dataframe without a bool column, since MySQL converts bool to + # TINYINT (which read_sql_table returns as an int and causes a dtype + # mismatch) + + self._load_test3_data() tbl = 'test_get_schema_create_table' - create_sql = sql.get_schema(self.test_frame2, tbl, con=self.conn) - blank_test_df = self.test_frame2.iloc[:0] + create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn) + blank_test_df = self.test_frame3.iloc[:0] self.drop_table(tbl) self.conn.execute(create_sql) @@ -1279,19 +1327,19 @@ def test_double_precision(self): 'i64':Series([5,], dtype='int64'), }) - df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace', + df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace', dtype={'f64_as_f32':sqlalchemy.Float(precision=23)}) res = sql.read_sql_table('test_dtypes', self.conn) - + # check precision of float64 - self.assertEqual(np.round(df['f64'].iloc[0],14), + self.assertEqual(np.round(df['f64'].iloc[0],14), np.round(res['f64'].iloc[0],14)) # check sql types meta = sqlalchemy.schema.MetaData(bind=self.conn) meta.reflect() col_dict = meta.tables['test_dtypes'].columns - self.assertEqual(str(col_dict['f32'].type), + self.assertEqual(str(col_dict['f32'].type), str(col_dict['f64_as_f32'].type)) self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float)) self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float)) @@ -1512,6 +1560,19 @@ def test_schema_support(self): res2 = pdsql.read_table('test_schema_other2') tm.assert_frame_equal(res1, res2) + def test_datetime_with_time_zone(self): + # Test to see if we read the date column with timezones that + # the timezone information is converted to utc and into a + # np.datetime64 (GH #7139) + df = sql.read_sql_table("types_test_data", self.conn) + self.assertTrue(issubclass(df.DateColWithTz.dtype.type, np.datetime64), + "DateColWithTz loaded with incorrect type") + + # "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00" + 
self.assertEqual(df.DateColWithTz[0], Timestamp('2000-01-01 08:00:00')) + + # "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00" + self.assertEqual(df.DateColWithTz[1], Timestamp('2000-06-01 07:00:00')) #------------------------------------------------------------------------------ #--- Test Sqlite / MySQL fallback @@ -1672,11 +1733,11 @@ def test_illegal_names(self): df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) # Raise error on blank - self.assertRaises(ValueError, df.to_sql, "", self.conn, + self.assertRaises(ValueError, df.to_sql, "", self.conn, flavor=self.flavor) for ndx, weird_name in enumerate(['test_weird_name]','test_weird_name[', - 'test_weird_name`','test_weird_name"', 'test_weird_name\'', + 'test_weird_name`','test_weird_name"', 'test_weird_name\'', '_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"']): df.to_sql(weird_name, self.conn, flavor=self.flavor) sql.table_exists(weird_name, self.conn) @@ -1782,12 +1843,12 @@ def test_illegal_names(self): for ndx, illegal_name in enumerate(['test_illegal_name]','test_illegal_name[', 'test_illegal_name`','test_illegal_name"', 'test_illegal_name\'', '']): df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) - self.assertRaises(ValueError, df.to_sql, illegal_name, self.conn, + self.assertRaises(ValueError, df.to_sql, illegal_name, self.conn, flavor=self.flavor, index=False) df2 = DataFrame([[1, 2], [3, 4]], columns=['a', illegal_name]) c_tbl = 'test_illegal_col_name%d'%ndx - self.assertRaises(ValueError, df2.to_sql, 'test_illegal_col_name', + self.assertRaises(ValueError, df2.to_sql, 'test_illegal_col_name', self.conn, flavor=self.flavor, index=False) @@ -1964,7 +2025,7 @@ def test_tquery(self): frame = tm.makeTimeDataFrame() sql.write_frame(frame, name='test_table', con=self.db) result = sql.tquery("select A from test_table", self.db) - expected = frame.A + expected = Series(frame.A, frame.index) # not to have name result = Series(result, frame.index) tm.assert_series_equal(result, 
expected) @@ -2134,6 +2195,13 @@ def setUp(self): "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. ") + def tearDown(self): + from pymysql.err import Error + try: + self.db.close() + except Error: + pass + def test_basic(self): _skip_if_no_pymysql() frame = tm.makeTimeDataFrame() @@ -2302,7 +2370,7 @@ def test_tquery(self): cur.execute(drop_sql) sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql') result = sql.tquery("select A from test_table", self.db) - expected = frame.A + expected = Series(frame.A, frame.index) # not to have name result = Series(result, frame.index) tm.assert_series_equal(result, expected) diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 8b44be61d5f66..97bbfb0edf92c 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -290,6 +290,15 @@ def test_stata_doc_examples(self): df = DataFrame(np.random.randn(10, 2), columns=list('AB')) df.to_stata(path) + def test_write_preserves_original(self): + # 9795 + np.random.seed(423) + df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd')) + df.ix[2, 'a':'c'] = np.nan + df_copy = df.copy() + df.to_stata('test.dta', write_index=False) + tm.assert_frame_equal(df, df_copy) + def test_encoding(self): # GH 4626, proper encoding handling @@ -866,8 +875,8 @@ def test_categorical_sorting(self): parsed_117.index = np.arange(parsed_117.shape[0]) codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4] categories = ["Poor", "Fair", "Good", "Very good", "Excellent"] - expected = pd.Series(pd.Categorical.from_codes(codes=codes, - categories=categories)) + cat = pd.Categorical.from_codes(codes=codes, categories=categories) + expected = pd.Series(cat, name='srh') tm.assert_series_equal(expected, parsed_115["srh"]) tm.assert_series_equal(expected, parsed_117["srh"]) diff --git a/pandas/lib.pyx b/pandas/lib.pyx index 5ab2ee4327177..cc4c43494176e 100644 --- a/pandas/lib.pyx +++ b/pandas/lib.pyx @@ -1,6 +1,7 @@ 
cimport numpy as np cimport cython import numpy as np +import sys from numpy cimport * @@ -10,6 +11,7 @@ cdef extern from "numpy/arrayobject.h": cdef enum NPY_TYPES: NPY_intp "NPY_INTP" + from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem, PyDict_Contains, PyDict_Keys, Py_INCREF, PyTuple_SET_ITEM, @@ -18,7 +20,14 @@ from cpython cimport (PyDict_New, PyDict_GetItem, PyDict_SetItem, PyBytes_Check, PyTuple_SetItem, PyTuple_New, - PyObject_SetAttrString) + PyObject_SetAttrString, + PyBytes_GET_SIZE, + PyUnicode_GET_SIZE) + +try: + from cpython cimport PyString_GET_SIZE +except ImportError: + from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE cdef extern from "Python.h": Py_ssize_t PY_SSIZE_T_MAX @@ -32,7 +41,6 @@ cdef extern from "Python.h": Py_ssize_t *slicelength) except -1 - cimport cpython isnan = np.isnan @@ -896,23 +904,32 @@ def clean_index_list(list obj): return maybe_convert_objects(converted), 0 + +ctypedef fused pandas_string: + str + unicode + bytes + + @cython.boundscheck(False) @cython.wraparound(False) -def max_len_string_array(ndarray arr): +cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr): """ return the maximum size of elements in a 1-dim string array """ cdef: - int i, m, l - int length = arr.shape[0] - object v + Py_ssize_t i, m = 0, l = 0, length = arr.shape[0] + pandas_string v - m = 0 - for i from 0 <= i < length: + for i in range(length): v = arr[i] - if PyString_Check(v) or PyBytes_Check(v) or PyUnicode_Check(v): - l = len(v) + if PyString_Check(v): + l = PyString_GET_SIZE(v) + elif PyBytes_Check(v): + l = PyBytes_GET_SIZE(v) + elif PyUnicode_Check(v): + l = PyUnicode_GET_SIZE(v) - if l > m: - m = l + if l > m: + m = l return m @@ -933,7 +950,7 @@ def string_array_replace_from_nan_rep(ndarray[object, ndim=1] arr, object nan_re @cython.boundscheck(False) @cython.wraparound(False) -def write_csv_rows(list data, list data_index, int nlevels, list cols, object writer): +def write_csv_rows(list data, ndarray 
data_index, int nlevels, ndarray cols, object writer): cdef int N, j, i, ncols cdef list rows @@ -1306,9 +1323,10 @@ def duplicated(ndarray[object] values, take_last=False): def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups): cdef: - Py_ssize_t i, group_size, n, lab, start + Py_ssize_t i, group_size, n, start + int64_t lab object slobj - ndarray[int64_t] starts + ndarray[int64_t] starts, ends n = len(labels) @@ -1318,13 +1336,16 @@ def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups): start = 0 group_size = 0 for i in range(n): - group_size += 1 lab = labels[i] - if i == n - 1 or lab != labels[i + 1]: - starts[lab] = start - ends[lab] = start + group_size - start += group_size - group_size = 0 + if lab < 0: + start += 1 + else: + group_size += 1 + if i == n - 1 or lab != labels[i + 1]: + starts[lab] = start + ends[lab] = start + group_size + start += group_size + group_size = 0 return starts, ends diff --git a/pandas/parser.pyx b/pandas/parser.pyx index d13781d6fa132..b28e0587264d4 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -175,7 +175,7 @@ cdef extern from "parser/tokenizer.h": int col void coliter_setup(coliter_t *it, parser_t *parser, int i, int start) - char* COLITER_NEXT(coliter_t it) + void COLITER_NEXT(coliter_t, const char *) parser_t* parser_new() @@ -212,7 +212,7 @@ cdef extern from "parser/tokenizer.h": inline int to_longlong(char *item, long long *p_value) # inline int to_longlong_thousands(char *item, long long *p_value, # char tsep) - int to_boolean(char *item, uint8_t *val) + int to_boolean(const char *item, uint8_t *val) cdef extern from "parser/io.h": @@ -541,6 +541,17 @@ cdef class TextReader: self.parser.cb_io = NULL self.parser.cb_cleanup = NULL + if self.compression == 'infer': + if isinstance(source, basestring): + if source.endswith('.gz'): + self.compression = 'gzip' + elif source.endswith('.bz2'): + self.compression = 'bz2' + else: + self.compression = None + else: + self.compression = None + if 
self.compression: if self.compression == 'gzip': import gzip @@ -1279,7 +1290,7 @@ cdef _string_box_factorize(parser_t *parser, int col, Py_ssize_t i size_t lines coliter_t it - char *word + const char *word = NULL ndarray[object] result int ret = 0 @@ -1296,7 +1307,7 @@ cdef _string_box_factorize(parser_t *parser, int col, coliter_setup(&it, parser, col, line_start) for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) if na_filter: k = kh_get_str(na_hashset, word) @@ -1333,7 +1344,7 @@ cdef _string_box_utf8(parser_t *parser, int col, Py_ssize_t i size_t lines coliter_t it - char *word + const char *word = NULL ndarray[object] result int ret = 0 @@ -1350,7 +1361,7 @@ cdef _string_box_utf8(parser_t *parser, int col, coliter_setup(&it, parser, col, line_start) for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) if na_filter: k = kh_get_str(na_hashset, word) @@ -1388,7 +1399,7 @@ cdef _string_box_decode(parser_t *parser, int col, Py_ssize_t i, size size_t lines coliter_t it - char *word + const char *word = NULL ndarray[object] result int ret = 0 @@ -1407,7 +1418,7 @@ cdef _string_box_decode(parser_t *parser, int col, coliter_setup(&it, parser, col, line_start) for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) if na_filter: k = kh_get_str(na_hashset, word) @@ -1444,7 +1455,7 @@ cdef _to_fw_string(parser_t *parser, int col, int line_start, int error Py_ssize_t i, j coliter_t it - char *word + const char *word = NULL char *data ndarray result @@ -1454,7 +1465,7 @@ cdef _to_fw_string(parser_t *parser, int col, int line_start, coliter_setup(&it, parser, col, line_start) for i in range(line_end - line_start): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) strncpy(data, word, width) data += width @@ -1469,7 +1480,7 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end, int error, na_count = 0 size_t i, lines coliter_t it - char *word + const char *word = NULL char *p_end double 
*data double NA = na_values[np.float64] @@ -1485,7 +1496,7 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end, if na_filter: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) k = kh_get_str(na_hashset, word) # in the hash table @@ -1509,7 +1520,7 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end, data += 1 else: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci, parser.thousands, 1) if errno != 0 or p_end[0] or p_end == word: @@ -1530,7 +1541,7 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end, int error, na_count = 0 size_t i, lines coliter_t it - char *word + const char *word = NULL int64_t *data ndarray result @@ -1544,7 +1555,7 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end, if na_filter: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) k = kh_get_str(na_hashset, word) # in the hash table if k != na_hashset.n_buckets: @@ -1561,7 +1572,7 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end, return None, None else: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) data[i] = str_to_int64(word, INT64_MIN, INT64_MAX, &error, parser.thousands) if error != 0: @@ -1578,7 +1589,7 @@ cdef _try_bool(parser_t *parser, int col, int line_start, int line_end, int error, na_count = 0 size_t i, lines coliter_t it - char *word + const char *word = NULL uint8_t *data ndarray result @@ -1592,7 +1603,7 @@ cdef _try_bool(parser_t *parser, int col, int line_start, int line_end, if na_filter: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) k = kh_get_str(na_hashset, word) # in the hash table @@ -1608,7 +1619,7 @@ cdef _try_bool(parser_t *parser, int col, int line_start, int line_end, data += 1 else: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) 
error = to_boolean(word, data) if error != 0: @@ -1625,7 +1636,7 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end, int error, na_count = 0 size_t i, lines coliter_t it - char *word + const char *word = NULL uint8_t *data ndarray result @@ -1639,7 +1650,7 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end, if na_filter: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) k = kh_get_str(na_hashset, word) # in the hash table @@ -1667,7 +1678,7 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end, data += 1 else: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) k = kh_get_str(true_hashset, word) if k != true_hashset.n_buckets: @@ -1688,33 +1699,6 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end, return result.view(np.bool_), na_count -cdef _get_na_mask(parser_t *parser, int col, int line_start, int line_end, - kh_str_t *na_hashset): - cdef: - int error - Py_ssize_t i - size_t lines - coliter_t it - char *word - ndarray[uint8_t, cast=True] result - khiter_t k - - lines = line_end - line_start - result = np.empty(lines, dtype=np.bool_) - - coliter_setup(&it, parser, col, line_start) - for i in range(lines): - word = COLITER_NEXT(it) - - k = kh_get_str(na_hashset, word) - # in the hash table - if k != na_hashset.n_buckets: - result[i] = 1 - else: - result[i] = 0 - - return result - cdef kh_str_t* kset_from_list(list values) except NULL: # caller takes responsibility for freeing the hash table cdef: @@ -1897,7 +1881,7 @@ cdef _apply_converter(object f, parser_t *parser, int col, Py_ssize_t i size_t lines coliter_t it - char *word + const char *word = NULL char *errors = "strict" ndarray[object] result object val @@ -1909,17 +1893,17 @@ cdef _apply_converter(object f, parser_t *parser, int col, if not PY3 and c_encoding == NULL: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) val = 
PyBytes_FromString(word) result[i] = f(val) elif ((PY3 and c_encoding == NULL) or c_encoding == b'utf-8'): for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) val = PyUnicode_FromString(word) result[i] = f(val) else: for i in range(lines): - word = COLITER_NEXT(it) + COLITER_NEXT(it, word) val = PyUnicode_Decode(word, strlen(word), c_encoding, errors) result[i] = f(val) diff --git a/pandas/rpy/__init__.py b/pandas/rpy/__init__.py index 899b684ecbff9..bad7ebc580ce2 100644 --- a/pandas/rpy/__init__.py +++ b/pandas/rpy/__init__.py @@ -5,7 +5,10 @@ import warnings warnings.warn("The pandas.rpy module is deprecated and will be " "removed in a future version. We refer to external packages " - "like rpy2, found here: http://rpy.sourceforge.net", FutureWarning) + "like rpy2. " + "\nSee here for a guide on how to port your code to rpy2: " + "http://pandas.pydata.org/pandas-docs/stable/r_interface.html", + FutureWarning) try: from .common import importr, r, load_data diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py index 30b06c8a93142..83278fe12d641 100644 --- a/pandas/sparse/frame.py +++ b/pandas/sparse/frame.py @@ -100,7 +100,7 @@ def __init__(self, data=None, index=None, columns=None, mgr = self._init_mgr( data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif data is None: - data = {} + data = DataFrame() if index is None: index = Index([]) @@ -115,7 +115,7 @@ def __init__(self, data=None, index=None, columns=None, index=index, kind=self._default_kind, fill_value=self._default_fill_value) - mgr = dict_to_manager(data, columns, index) + mgr = df_to_manager(data, columns, index) if dtype is not None: mgr = mgr.astype(dtype) @@ -155,7 +155,7 @@ def _init_dict(self, data, index, columns, dtype=None): kind=self._default_kind, fill_value=self._default_fill_value, copy=True) - sdict = {} + sdict = DataFrame() for k, v in compat.iteritems(data): if isinstance(v, Series): # Force alignment, no copy necessary @@ -181,7 +181,7 
@@ def _init_dict(self, data, index, columns, dtype=None): if c not in sdict: sdict[c] = sp_maker(nan_vec) - return dict_to_manager(sdict, columns, index) + return df_to_manager(sdict, columns, index) def _init_matrix(self, data, index, columns, dtype=None): data = _prep_ndarray(data, copy=False) @@ -228,12 +228,12 @@ def _unpickle_sparse_frame_compat(self, state): else: index = idx - series_dict = {} + series_dict = DataFrame() for col, (sp_index, sp_values) in compat.iteritems(series): series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index, fill_value=fv) - self._data = dict_to_manager(series_dict, columns, index) + self._data = df_to_manager(series_dict, columns, index) self._default_fill_value = fv self._default_kind = kind @@ -418,7 +418,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): new_index, new_columns = this.index, this.columns if level is not None: - raise NotImplementedError + raise NotImplementedError("'level' argument is not supported") if self.empty and other.empty: return SparseDataFrame(index=new_index).__finalize__(self) @@ -459,9 +459,9 @@ def _combine_match_index(self, other, func, level=None, fill_value=None): new_data = {} if fill_value is not None: - raise NotImplementedError + raise NotImplementedError("'fill_value' argument is not supported") if level is not None: - raise NotImplementedError + raise NotImplementedError("'level' argument is not supported") new_index = self.index.union(other.index) this = self @@ -494,9 +494,9 @@ def _combine_match_columns(self, other, func, level=None, fill_value=None): # possible for this to happen, which is bothersome if fill_value is not None: - raise NotImplementedError + raise NotImplementedError("'fill_value' argument is not supported") if level is not None: - raise NotImplementedError + raise NotImplementedError("'level' argument is not supported") new_data = {} @@ -567,10 +567,10 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None, raise 
TypeError('Reindex by level not supported for sparse') if com.notnull(fill_value): - raise NotImplementedError + raise NotImplementedError("'fill_value' argument is not supported") if limit: - raise NotImplementedError + raise NotImplementedError("'limit' argument is not supported") # TODO: fill value handling sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns) @@ -737,13 +737,13 @@ def applymap(self, func): """ return self.apply(lambda x: lmap(func, x)) -def dict_to_manager(sdict, columns, index): - """ create and return the block manager from a dict of series, columns, index """ +def df_to_manager(sdf, columns, index): + """ create and return the block manager from a dataframe of series, columns, index """ # from BlockManager perspective axes = [_ensure_index(columns), _ensure_index(index)] - return create_block_manager_from_arrays([sdict[c] for c in columns], columns, axes) + return create_block_manager_from_arrays([sdf[c] for c in columns], columns, axes) def stack_sparse_frame(frame): diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index d3f3f59f264c5..34256acfb0e60 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -32,7 +32,7 @@ def __set__(self, obj, value): value = _ensure_index(value) if isinstance(value, MultiIndex): - raise NotImplementedError + raise NotImplementedError("value cannot be a MultiIndex") for v in compat.itervalues(obj._frames): setattr(v, self.frame_attr, value) @@ -159,7 +159,7 @@ def _get_items(self): def _set_items(self, new_items): new_items = _ensure_index(new_items) if isinstance(new_items, MultiIndex): - raise NotImplementedError + raise NotImplementedError("itemps cannot be a MultiIndex") # need to create new frames dict diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 2c328e51b5090..f53cc66bee961 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -399,7 +399,7 @@ def abs(self): res_sp_values = np.abs(self.sp_values) return 
self._constructor(res_sp_values, index=self.index, sparse_index=self.sp_index, - fill_value=self.fill_value) + fill_value=self.fill_value).__finalize__(self) def get(self, label, default=None): """ diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index f187e7f883e11..a7a78ba226a0b 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -36,7 +36,7 @@ import pandas.tests.test_panel as test_panel import pandas.tests.test_series as test_series -from .test_array import assert_sp_array_equal +from pandas.sparse.tests.test_array import assert_sp_array_equal import warnings warnings.filterwarnings(action='ignore', category=FutureWarning) @@ -281,7 +281,7 @@ def test_constructor_nonnan(self): arr = [0, 0, 0, nan, nan] sp_series = SparseSeries(arr, fill_value=0) assert_equal(sp_series.values.values, arr) - + # GH 9272 def test_constructor_empty(self): sp = SparseSeries() @@ -509,6 +509,21 @@ def _check_inplace_op(iop, op): _check_inplace_op( getattr(operator, "i%s" % op), getattr(operator, op)) + def test_abs(self): + s = SparseSeries([1, 2, -3], name='x') + expected = SparseSeries([1, 2, 3], name='x') + result = s.abs() + assert_sp_series_equal(result, expected) + self.assertEqual(result.name, 'x') + + result = abs(s) + assert_sp_series_equal(result, expected) + self.assertEqual(result.name, 'x') + + result = np.abs(s) + assert_sp_series_equal(result, expected) + self.assertEqual(result.name, 'x') + def test_reindex(self): def _compare_with_series(sps, new_index): spsre = sps.reindex(new_index) @@ -997,7 +1012,7 @@ def test_constructor_ndarray(self): ValueError, "^Column length", SparseDataFrame, self.frame.values, columns=self.frame.columns[:-1]) - # GH 9272 + # GH 9272 def test_constructor_empty(self): sp = SparseDataFrame() self.assertEqual(len(sp.index), 0) @@ -1283,7 +1298,9 @@ def _check_frame(frame): frame['E'] = to_insert expected = to_insert.to_dense().reindex( 
frame.index).fillna(to_insert.fill_value) - assert_series_equal(frame['E'].to_dense(), expected) + result = frame['E'].to_dense() + assert_series_equal(result, expected, check_names=False) + self.assertEqual(result.name, 'E') # insert Series frame['F'] = frame['A'].to_dense() @@ -1663,6 +1680,12 @@ def test_as_blocks(self): self.assertEqual(list(df_blocks.keys()), ['float64']) assert_frame_equal(df_blocks['float64'], df) + def test_nan_columnname(self): + # GH 8822 + nan_colname = DataFrame(Series(1.0,index=[0]),columns=[nan]) + nan_colname_sparse = nan_colname.to_sparse() + self.assertTrue(np.isnan(nan_colname_sparse.columns[0])) + def _dense_series_compare(s, f): result = f(s) @@ -1741,8 +1764,8 @@ def test_constructor(self): with tm.assertRaisesRegexp(TypeError, "input must be a dict, a 'list' was passed"): SparsePanel(['a', 'b', 'c']) - - # GH 9272 + + # GH 9272 def test_constructor_empty(self): sp = SparsePanel() self.assertEqual(len(sp.items), 0) diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index 575fcf386f570..598cdff30e4f7 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -37,6 +37,8 @@ cimport util from util cimport is_array, _checknull, _checknan, get_nat +cimport lib +from lib cimport is_null_datetimelike cdef int64_t iNaT = get_nat() @@ -93,12 +95,7 @@ def take_1d_%(name)s_%(dest)s(ndarray[%(c_type_in)s] values, """ -take_2d_axis0_template = """@cython.wraparound(False) -@cython.boundscheck(False) -def take_2d_axis0_%(name)s_%(dest)s(%(c_type_in)s[:, :] values, - ndarray[int64_t] indexer, - %(c_type_out)s[:, :] out, - fill_value=np.nan): +inner_take_2d_axis0_template = """\ cdef: Py_ssize_t i, j, k, n, idx %(c_type_out)s fv @@ -140,12 +137,34 @@ def take_2d_axis0_%(name)s_%(dest)s(%(c_type_in)s[:, :] values, """ -take_2d_axis1_template = """@cython.wraparound(False) +take_2d_axis0_template = """\ +@cython.wraparound(False) @cython.boundscheck(False) -def 
take_2d_axis1_%(name)s_%(dest)s(%(c_type_in)s[:, :] values, +cdef inline take_2d_axis0_%(name)s_%(dest)s_memview(%(c_type_in)s[:, :] values, + int64_t[:] indexer, + %(c_type_out)s[:, :] out, + fill_value=np.nan): +""" + inner_take_2d_axis0_template + """ + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_%(name)s_%(dest)s(ndarray[%(c_type_in)s, ndim=2] values, ndarray[int64_t] indexer, %(c_type_out)s[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_%(name)s_%(dest)s_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. +""" + inner_take_2d_axis0_template + + +inner_take_2d_axis1_template = """\ cdef: Py_ssize_t i, j, k, n, idx %(c_type_out)s fv @@ -165,9 +184,36 @@ def take_2d_axis1_%(name)s_%(dest)s(%(c_type_in)s[:, :] values, out[i, j] = fv else: out[i, j] = %(preval)svalues[i, idx]%(postval)s - """ +take_2d_axis1_template = """\ +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_%(name)s_%(dest)s_memview(%(c_type_in)s[:, :] values, + int64_t[:] indexer, + %(c_type_out)s[:, :] out, + fill_value=np.nan): +""" + inner_take_2d_axis1_template + """ + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_%(name)s_%(dest)s(ndarray[%(c_type_in)s, ndim=2] values, + ndarray[int64_t] indexer, + %(c_type_out)s[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_%(name)s_%(dest)s_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+""" + inner_take_2d_axis1_template + + take_2d_multi_template = """@cython.wraparound(False) @cython.boundscheck(False) def take_2d_multi_%(name)s_%(dest)s(ndarray[%(c_type_in)s, ndim=2] values, @@ -629,7 +675,7 @@ def groupby_%(name)s(ndarray[%(c_type)s] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -1653,7 +1699,8 @@ def group_ohlc_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, b = 0 if K > 1: - raise NotImplementedError + raise NotImplementedError("Argument 'values' must have only " + "one dimension") else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index cab3a84f6ffe8..428decd4dca10 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -28,6 +28,8 @@ ctypedef unsigned char UChar cimport util from util cimport is_array, _checknull, _checknan, get_nat +cimport lib +from lib cimport is_null_datetimelike cdef int64_t iNaT = get_nat() @@ -2096,7 +2098,7 @@ def groupby_float64(ndarray[float64_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2124,7 +2126,7 @@ def groupby_float32(ndarray[float32_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2152,7 +2154,7 @@ def groupby_object(ndarray[object] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2180,7 +2182,7 @@ def groupby_int32(ndarray[int32_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2208,7 +2210,7 @@ def groupby_int64(ndarray[int64_t] 
index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2236,7 +2238,7 @@ def groupby_bool(ndarray[uint8_t] index, ndarray labels): for i in range(length): key = util.get_value_1d(labels, i) - if _checknull(key): + if is_null_datetimelike(key): continue idx = index[i] @@ -2704,10 +2706,10 @@ def take_1d_object_object(ndarray[object] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_bool_bool(uint8_t[:, :] values, - ndarray[int64_t] indexer, - uint8_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_bool_bool_memview(uint8_t[:, :] values, + int64_t[:] indexer, + uint8_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx uint8_t fv @@ -2747,30 +2749,41 @@ def take_2d_axis0_bool_bool(uint8_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_bool_object(uint8_t[:, :] values, +def take_2d_axis0_bool_bool(ndarray[uint8_t, ndim=2] values, ndarray[int64_t] indexer, - object[:, :] out, + uint8_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_bool_bool_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - object fv + uint8_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF False: + IF True: cdef: - object *v - object *o + uint8_t *v + uint8_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(object) and - sizeof(object) * n >= 256): + values.strides[1] == sizeof(uint8_t) and + sizeof(uint8_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -2780,7 +2793,7 @@ def take_2d_axis0_bool_object(uint8_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(object) * k)) + memmove(o, v, <size_t>(sizeof(uint8_t) * k)) return for i from 0 <= i < n: @@ -2790,32 +2803,32 @@ def take_2d_axis0_bool_object(uint8_t[:, :] values, out[i, j] = fv else: for j from 0 <= j < k: - out[i, j] = True if values[idx, j] > 0 else False + out[i, j] = values[idx, j] @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int8_int8(int8_t[:, :] values, - ndarray[int64_t] indexer, - int8_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_bool_object_memview(uint8_t[:, :] values, + int64_t[:] indexer, + object[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int8_t fv + object fv n = len(indexer) k = values.shape[1] fv = fill_value - IF True: + IF False: cdef: - int8_t *v - int8_t *o + object *v + object *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int8_t) and - sizeof(int8_t) * n >= 256): + values.strides[1] == sizeof(object) and + sizeof(object) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -2825,7 +2838,7 @@ def take_2d_axis0_int8_int8(int8_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int8_t) * k)) + memmove(o, v, <size_t>(sizeof(object) * k)) return for i from 0 <= i < n: @@ -2835,17 +2848,28 @@ def take_2d_axis0_int8_int8(int8_t[:, :] values, out[i, j] = fv else: for j from 0 <= j < k: - out[i, j] = values[idx, j] + out[i, j] = True if 
values[idx, j] > 0 else False + + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int8_int32(int8_t[:, :] values, +def take_2d_axis0_bool_object(ndarray[uint8_t, ndim=2] values, ndarray[int64_t] indexer, - int32_t[:, :] out, + object[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_bool_object_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. cdef: Py_ssize_t i, j, k, n, idx - int32_t fv + object fv n = len(indexer) k = values.shape[1] @@ -2854,13 +2878,13 @@ def take_2d_axis0_int8_int32(int8_t[:, :] values, IF False: cdef: - int32_t *v - int32_t *o + object *v + object *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int32_t) and - sizeof(int32_t) * n >= 256): + values.strides[1] == sizeof(object) and + sizeof(object) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -2870,7 +2894,7 @@ def take_2d_axis0_int8_int32(int8_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int32_t) * k)) + memmove(o, v, <size_t>(sizeof(object) * k)) return for i from 0 <= i < n: @@ -2880,32 +2904,32 @@ def take_2d_axis0_int8_int32(int8_t[:, :] values, out[i, j] = fv else: for j from 0 <= j < k: - out[i, j] = values[idx, j] + out[i, j] = True if values[idx, j] > 0 else False @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int8_int64(int8_t[:, :] values, - ndarray[int64_t] indexer, - int64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int8_int8_memview(int8_t[:, :] values, + int64_t[:] indexer, + int8_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int64_t fv + int8_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF False: + IF True: 
cdef: - int64_t *v - int64_t *o + int8_t *v + int8_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int64_t) and - sizeof(int64_t) * n >= 256): + values.strides[1] == sizeof(int8_t) and + sizeof(int8_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -2915,7 +2939,7 @@ def take_2d_axis0_int8_int64(int8_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int64_t) * k)) + memmove(o, v, <size_t>(sizeof(int8_t) * k)) return for i from 0 <= i < n: @@ -2927,30 +2951,41 @@ def take_2d_axis0_int8_int64(int8_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int8_float64(int8_t[:, :] values, +def take_2d_axis0_int8_int8(ndarray[int8_t, ndim=2] values, ndarray[int64_t] indexer, - float64_t[:, :] out, + int8_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int8_int8_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + int8_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF False: + IF True: cdef: - float64_t *v - float64_t *o + int8_t *v + int8_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(float64_t) and - sizeof(float64_t) * n >= 256): + values.strides[1] == sizeof(int8_t) and + sizeof(int8_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -2960,7 +2995,7 @@ def take_2d_axis0_int8_float64(int8_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(float64_t) * k)) + memmove(o, v, <size_t>(sizeof(int8_t) * k)) return for i from 0 <= i < n: @@ -2974,28 +3009,28 @@ def take_2d_axis0_int8_float64(int8_t[:, :] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int16_int16(int16_t[:, :] values, - ndarray[int64_t] indexer, - int16_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int8_int32_memview(int8_t[:, :] values, + int64_t[:] indexer, + int32_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int16_t fv + int32_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF True: + IF False: cdef: - int16_t *v - int16_t *o + int32_t *v + int32_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int16_t) and - sizeof(int16_t) * n >= 256): + values.strides[1] == sizeof(int32_t) and + sizeof(int32_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3005,7 +3040,7 @@ def take_2d_axis0_int16_int16(int16_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int16_t) * k)) + memmove(o, v, <size_t>(sizeof(int32_t) * k)) return for i from 0 <= i < n: @@ -3017,12 +3052,23 @@ def take_2d_axis0_int16_int16(int16_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int16_int32(int16_t[:, :] values, +def 
take_2d_axis0_int8_int32(ndarray[int8_t, ndim=2] values, ndarray[int64_t] indexer, int32_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int8_int32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. cdef: Py_ssize_t i, j, k, n, idx int32_t fv @@ -3064,10 +3110,10 @@ def take_2d_axis0_int16_int32(int16_t[:, :] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int16_int64(int16_t[:, :] values, - ndarray[int64_t] indexer, - int64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int8_int64_memview(int8_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx int64_t fv @@ -3107,15 +3153,26 @@ def take_2d_axis0_int16_int64(int16_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int16_float64(int16_t[:, :] values, +def take_2d_axis0_int8_int64(ndarray[int8_t, ndim=2] values, ndarray[int64_t] indexer, - float64_t[:, :] out, + int64_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int8_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + int64_t fv n = len(indexer) k = values.shape[1] @@ -3124,13 +3181,13 @@ def take_2d_axis0_int16_float64(int16_t[:, :] values, IF False: cdef: - float64_t *v - float64_t *o + int64_t *v + int64_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(float64_t) and - sizeof(float64_t) * n >= 256): + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3140,7 +3197,7 @@ def take_2d_axis0_int16_float64(int16_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(float64_t) * k)) + memmove(o, v, <size_t>(sizeof(int64_t) * k)) return for i from 0 <= i < n: @@ -3154,28 +3211,28 @@ def take_2d_axis0_int16_float64(int16_t[:, :] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int32_int32(int32_t[:, :] values, - ndarray[int64_t] indexer, - int32_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int8_float64_memview(int8_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int32_t fv + float64_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF True: + IF False: cdef: - int32_t *v - int32_t *o + float64_t *v + float64_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int32_t) and - sizeof(int32_t) * n >= 256): + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3185,7 +3242,7 @@ def take_2d_axis0_int32_int32(int32_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int32_t) * k)) + memmove(o, v, <size_t>(sizeof(float64_t) * k)) return for i from 0 <= i < n: @@ -3197,15 +3254,26 @@ def take_2d_axis0_int32_int32(int32_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + @cython.wraparound(False) @cython.boundscheck(False) -def 
take_2d_axis0_int32_int64(int32_t[:, :] values, +def take_2d_axis0_int8_float64(ndarray[int8_t, ndim=2] values, ndarray[int64_t] indexer, - int64_t[:, :] out, + float64_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int8_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. cdef: Py_ssize_t i, j, k, n, idx - int64_t fv + float64_t fv n = len(indexer) k = values.shape[1] @@ -3214,13 +3282,13 @@ def take_2d_axis0_int32_int64(int32_t[:, :] values, IF False: cdef: - int64_t *v - int64_t *o + float64_t *v + float64_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int64_t) and - sizeof(int64_t) * n >= 256): + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3230,7 +3298,7 @@ def take_2d_axis0_int32_int64(int32_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int64_t) * k)) + memmove(o, v, <size_t>(sizeof(float64_t) * k)) return for i from 0 <= i < n: @@ -3244,28 +3312,28 @@ def take_2d_axis0_int32_int64(int32_t[:, :] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int32_float64(int32_t[:, :] values, - ndarray[int64_t] indexer, - float64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int16_int16_memview(int16_t[:, :] values, + int64_t[:] indexer, + int16_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + int16_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF False: + IF True: cdef: - float64_t *v - float64_t *o + int16_t *v + int16_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(float64_t) and - 
sizeof(float64_t) * n >= 256): + values.strides[1] == sizeof(int16_t) and + sizeof(int16_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3275,7 +3343,7 @@ def take_2d_axis0_int32_float64(int32_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(float64_t) * k)) + memmove(o, v, <size_t>(sizeof(int16_t) * k)) return for i from 0 <= i < n: @@ -3287,15 +3355,26 @@ def take_2d_axis0_int32_float64(int32_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int64_int64(int64_t[:, :] values, +def take_2d_axis0_int16_int16(ndarray[int16_t, ndim=2] values, ndarray[int64_t] indexer, - int64_t[:, :] out, + int16_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int16_int16_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - int64_t fv + int16_t fv n = len(indexer) k = values.shape[1] @@ -3304,13 +3383,13 @@ def take_2d_axis0_int64_int64(int64_t[:, :] values, IF True: cdef: - int64_t *v - int64_t *o + int16_t *v + int16_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(int64_t) and - sizeof(int64_t) * n >= 256): + values.strides[1] == sizeof(int16_t) and + sizeof(int16_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3320,7 +3399,7 @@ def take_2d_axis0_int64_int64(int64_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(int64_t) * k)) + memmove(o, v, <size_t>(sizeof(int16_t) * k)) return for i from 0 <= i < n: @@ -3334,13 +3413,13 @@ def take_2d_axis0_int64_int64(int64_t[:, :] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_int64_float64(int64_t[:, :] values, - ndarray[int64_t] indexer, - float64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int16_int32_memview(int16_t[:, :] values, + int64_t[:] indexer, + int32_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + int32_t fv n = len(indexer) k = values.shape[1] @@ -3349,13 +3428,13 @@ def take_2d_axis0_int64_float64(int64_t[:, :] values, IF False: cdef: - float64_t *v - float64_t *o + int32_t *v + int32_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(float64_t) and - sizeof(float64_t) * n >= 256): + values.strides[1] == sizeof(int32_t) and + sizeof(int32_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3365,7 +3444,7 @@ def take_2d_axis0_int64_float64(int64_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(float64_t) * k)) + memmove(o, v, <size_t>(sizeof(int32_t) * k)) return for i from 0 <= i < n: @@ -3377,30 +3456,41 @@ def take_2d_axis0_int64_float64(int64_t[:, :] values, for j from 0 <= j < k: out[i, j] = values[idx, j] + + 
@cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_float32_float32(float32_t[:, :] values, +def take_2d_axis0_int16_int32(ndarray[int16_t, ndim=2] values, ndarray[int64_t] indexer, - float32_t[:, :] out, + int32_t[:, :] out, fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int16_int32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. cdef: Py_ssize_t i, j, k, n, idx - float32_t fv + int32_t fv n = len(indexer) k = values.shape[1] fv = fill_value - IF True: + IF False: cdef: - float32_t *v - float32_t *o + int32_t *v + int32_t *o #GH3130 if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(float32_t) and - sizeof(float32_t) * n >= 256): + values.strides[1] == sizeof(int32_t) and + sizeof(int32_t) * n >= 256): for i from 0 <= i < n: idx = indexer[i] @@ -3410,7 +3500,7 @@ def take_2d_axis0_float32_float32(float32_t[:, :] values, else: v = &values[idx, 0] o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(float32_t) * k)) + memmove(o, v, <size_t>(sizeof(int32_t) * k)) return for i from 0 <= i < n: @@ -3424,13 +3514,13 @@ def take_2d_axis0_float32_float32(float32_t[:, :] values, @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_float32_float64(float32_t[:, :] values, - ndarray[int64_t] indexer, - float64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis0_int16_int64_memview(int16_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + int64_t fv n = len(indexer) k = values.shape[1] @@ -3439,9 +3529,166 @@ def take_2d_axis0_float32_float64(float32_t[:, :] values, IF False: cdef: - float64_t *v - float64_t *o - + int64_t *v + int64_t *o + + #GH3130 + if 
(values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_int16_int64(ndarray[int16_t, ndim=2] values, + ndarray[int64_t] indexer, + int64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int16_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + int64_t *v + int64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_int16_float64_memview(int16_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_int16_float64(ndarray[int16_t, ndim=2] values, + ndarray[int64_t] indexer, + float64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int16_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation 
of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + #GH3130 if (values.strides[1] == out.strides[1] and values.strides[1] == sizeof(float64_t) and @@ -3459,114 +3706,1528 @@ def take_2d_axis0_float32_float64(float32_t[:, :] values, return for i from 0 <= i < n: - idx = indexer[i] - if idx == -1: - for j from 0 <= j < k: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_int32_int32_memview(int32_t[:, :] values, + int64_t[:] indexer, + int32_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int32_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + int32_t *v + int32_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int32_t) and + sizeof(int32_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int32_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_int32_int32(ndarray[int32_t, ndim=2] values, + ndarray[int64_t] indexer, + int32_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int32_int32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of 
Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + int32_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + int32_t *v + int32_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int32_t) and + sizeof(int32_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int32_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_int32_int64_memview(int32_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + int64_t *v + int64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_int32_int64(ndarray[int32_t, ndim=2] values, + ndarray[int64_t] indexer, + int64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int32_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We 
cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + int64_t *v + int64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_int32_float64_memview(int32_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_int32_float64(ndarray[int32_t, ndim=2] values, + ndarray[int64_t] indexer, + float64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the 
code + take_2d_axis0_int32_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_int64_int64_memview(int64_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + int64_t *v + int64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_int64_int64(ndarray[int64_t, ndim=2] values, + ndarray[int64_t] indexer, + int64_t[:, :] out, + 
fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int64_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + int64_t *v + int64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(int64_t) and + sizeof(int64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(int64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_int64_float64_memview(int64_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def 
take_2d_axis0_int64_float64(ndarray[int64_t, ndim=2] values, + ndarray[int64_t] indexer, + float64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_int64_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_float32_float32_memview(float32_t[:, :] values, + int64_t[:] indexer, + float32_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float32_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + float32_t *v + float32_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float32_t) and + sizeof(float32_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float32_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: 
+ for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_float32_float32(ndarray[float32_t, ndim=2] values, + ndarray[int64_t] indexer, + float32_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_float32_float32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + float32_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + float32_t *v + float32_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float32_t) and + sizeof(float32_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float32_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_float32_float64_memview(float32_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * 
k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_float32_float64(ndarray[float32_t, ndim=2] values, + ndarray[int64_t] indexer, + float64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_float32_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_float64_float64_memview(float64_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j 
from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_float64_float64(ndarray[float64_t, ndim=2] values, + ndarray[int64_t] indexer, + float64_t[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_float64_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF True: + cdef: + float64_t *v + float64_t *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(float64_t) and + sizeof(float64_t) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(float64_t) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis0_object_object_memview(object[:, :] values, + int64_t[:] indexer, + object[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + object fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + object *v + object *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(object) 
and + sizeof(object) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(object) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis0_object_object(ndarray[object, ndim=2] values, + ndarray[int64_t] indexer, + object[:, :] out, + fill_value=np.nan): + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis0_object_object_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + object fv + + n = len(indexer) + k = values.shape[1] + + fv = fill_value + + IF False: + cdef: + object *v + object *o + + #GH3130 + if (values.strides[1] == out.strides[1] and + values.strides[1] == sizeof(object) and + sizeof(object) * n >= 256): + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + v = &values[idx, 0] + o = &out[i, 0] + memmove(o, v, <size_t>(sizeof(object) * k)) + return + + for i from 0 <= i < n: + idx = indexer[i] + if idx == -1: + for j from 0 <= j < k: + out[i, j] = fv + else: + for j from 0 <= j < k: + out[i, j] = values[idx, j] + + +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_bool_bool_memview(uint8_t[:, :] values, + int64_t[:] indexer, + uint8_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + uint8_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j 
from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_bool_bool(ndarray[uint8_t, ndim=2] values, + ndarray[int64_t] indexer, + uint8_t[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_bool_bool_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. + cdef: + Py_ssize_t i, j, k, n, idx + uint8_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_bool_object_memview(uint8_t[:, :] values, + int64_t[:] indexer, + object[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + object fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = True if values[i, idx] > 0 else False + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_bool_object(ndarray[uint8_t, ndim=2] values, + ndarray[int64_t] indexer, + object[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_bool_object_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + object fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = True if values[i, idx] > 0 else False +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int8_int8_memview(int8_t[:, :] values, + int64_t[:] indexer, + int8_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int8_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_int8_int8(ndarray[int8_t, ndim=2] values, + ndarray[int64_t] indexer, + int8_t[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int8_int8_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + int8_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int8_int32_memview(int8_t[:, :] values, + int64_t[:] indexer, + int32_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int32_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_int8_int32(ndarray[int8_t, ndim=2] values, + ndarray[int64_t] indexer, + int32_t[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int8_int32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + int32_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int8_int64_memview(int8_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_int8_int64(ndarray[int8_t, ndim=2] values, + ndarray[int64_t] indexer, + int64_t[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int8_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int8_float64_memview(int8_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_int8_float64(ndarray[int8_t, ndim=2] values, + ndarray[int64_t] indexer, + float64_t[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int8_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int16_int16_memview(int16_t[:, :] values, + int64_t[:] indexer, + int16_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int16_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def take_2d_axis1_int16_int16(ndarray[int16_t, ndim=2] values, + ndarray[int64_t] indexer, + int16_t[:, :] out, + fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int16_int16_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ cdef: + Py_ssize_t i, j, k, n, idx + int16_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int16_int32_memview(int16_t[:, :] values, + int64_t[:] indexer, + int32_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int32_t fv + + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return + + fv = fill_value + + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: out[i, j] = fv - else: - for j from 0 <= j < k: - out[i, j] = values[idx, j] + else: + out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_float64_float64(float64_t[:, :] values, +def take_2d_axis1_int16_int32(ndarray[int16_t, ndim=2] values, ndarray[int64_t] indexer, - float64_t[:, :] out, + int32_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int16_int32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + int32_t fv - n = len(indexer) - k = values.shape[1] + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return fv = fill_value - IF True: - cdef: - float64_t *v - float64_t *o + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int16_int64_memview(int16_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + int64_t fv - #GH3130 - if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(float64_t) and - sizeof(float64_t) * n >= 256): + n = len(values) + k = len(indexer) - for i from 0 <= i < n: - idx = indexer[i] - if idx == -1: - for j from 0 <= j < k: - out[i, j] = fv - else: - v = &values[idx, 0] - o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(float64_t) * k)) - return + if n == 0 or k == 0: + return + + fv = fill_value for i from 0 <= i < n: - idx = indexer[i] - if idx == -1: - for j from 0 <= j < k: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: out[i, j] = fv - else: - for j from 0 <= j < k: - out[i, j] = values[idx, j] + else: + out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis0_object_object(object[:, :] values, +def take_2d_axis1_int16_int64(ndarray[int16_t, ndim=2] values, ndarray[int64_t] indexer, - object[:, :] out, + int64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int16_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - object fv + int64_t fv - n = len(indexer) - k = values.shape[1] + n = len(values) + k = len(indexer) + + if n == 0 or k == 0: + return fv = fill_value - IF False: - cdef: - object *v - object *o + for i from 0 <= i < n: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: + out[i, j] = fv + else: + out[i, j] = values[i, idx] +@cython.wraparound(False) +@cython.boundscheck(False) +cdef inline take_2d_axis1_int16_float64_memview(int16_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): + cdef: + Py_ssize_t i, j, k, n, idx + float64_t fv - #GH3130 - if (values.strides[1] == out.strides[1] and - values.strides[1] == sizeof(object) and - sizeof(object) * n >= 256): + n = len(values) + k = len(indexer) - for i from 0 <= i < n: - idx = indexer[i] - if idx == -1: - for j from 0 <= j < k: - out[i, j] = fv - else: - v = &values[idx, 0] - o = &out[i, 0] - memmove(o, v, <size_t>(sizeof(object) * k)) - return + if n == 0 or k == 0: + return + + fv = fill_value for i from 0 <= i < n: - idx = indexer[i] - if idx == -1: - for j from 0 <= j < k: + for j from 0 <= j < k: + idx = indexer[j] + if idx == -1: out[i, j] = fv - else: - for j from 0 <= j < k: - out[i, j] = values[idx, j] + else: + out[i, j] = values[i, idx] @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_bool_bool(uint8_t[:, :] values, +def take_2d_axis1_int16_float64(ndarray[int16_t, ndim=2] values, ndarray[int64_t] indexer, - uint8_t[:, :] out, + float64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int16_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - uint8_t fv + float64_t fv n = len(values) k = len(indexer) @@ -3583,16 +5244,15 @@ def take_2d_axis1_bool_bool(uint8_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_bool_object(uint8_t[:, :] values, - ndarray[int64_t] indexer, - object[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_int32_int32_memview(int32_t[:, :] values, + int64_t[:] indexer, + int32_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - object fv + int32_t fv n = len(values) k = len(indexer) @@ -3608,17 +5268,28 @@ def take_2d_axis1_bool_object(uint8_t[:, :] values, if idx == -1: out[i, j] = fv else: - out[i, j] = True if values[i, idx] > 0 else False + out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int8_int8(int8_t[:, :] values, +def take_2d_axis1_int32_int32(ndarray[int32_t, ndim=2] values, ndarray[int64_t] indexer, - int8_t[:, :] out, + int32_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int32_int32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - int8_t fv + int32_t fv n = len(values) k = len(indexer) @@ -3635,16 +5306,15 @@ def take_2d_axis1_int8_int8(int8_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int8_int32(int8_t[:, :] values, - ndarray[int64_t] indexer, - int32_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_int32_int64_memview(int32_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int32_t fv + int64_t fv n = len(values) k = len(indexer) @@ -3662,12 +5332,23 @@ def take_2d_axis1_int8_int32(int8_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int8_int64(int8_t[:, :] values, +def take_2d_axis1_int32_int64(ndarray[int32_t, ndim=2] values, ndarray[int64_t] indexer, int64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int32_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx int64_t fv @@ -3687,13 +5368,12 @@ def take_2d_axis1_int8_int64(int8_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int8_float64(int8_t[:, :] values, - ndarray[int64_t] indexer, - float64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_int32_float64_memview(int32_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx float64_t fv @@ -3714,15 +5394,26 @@ def take_2d_axis1_int8_float64(int8_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int16_int16(int16_t[:, :] values, +def take_2d_axis1_int32_float64(ndarray[int32_t, ndim=2] values, ndarray[int64_t] indexer, - int16_t[:, :] out, + float64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int32_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - int16_t fv + float64_t fv n = len(values) k = len(indexer) @@ -3739,16 +5430,15 @@ def take_2d_axis1_int16_int16(int16_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int16_int32(int16_t[:, :] values, - ndarray[int64_t] indexer, - int32_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_int64_int64_memview(int64_t[:, :] values, + int64_t[:] indexer, + int64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int32_t fv + int64_t fv n = len(values) k = len(indexer) @@ -3766,12 +5456,23 @@ def take_2d_axis1_int16_int32(int16_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int16_int64(int16_t[:, :] values, +def take_2d_axis1_int64_int64(ndarray[int64_t, ndim=2] values, ndarray[int64_t] indexer, int64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int64_int64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx int64_t fv @@ -3791,13 +5492,12 @@ def take_2d_axis1_int16_int64(int16_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int16_float64(int16_t[:, :] values, - ndarray[int64_t] indexer, - float64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_int64_float64_memview(int64_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx float64_t fv @@ -3818,15 +5518,26 @@ def take_2d_axis1_int16_float64(int16_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int32_int32(int32_t[:, :] values, +def take_2d_axis1_int64_float64(ndarray[int64_t, ndim=2] values, ndarray[int64_t] indexer, - int32_t[:, :] out, + float64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_int64_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - int32_t fv + float64_t fv n = len(values) k = len(indexer) @@ -3843,16 +5554,15 @@ def take_2d_axis1_int32_int32(int32_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int32_int64(int32_t[:, :] values, - ndarray[int64_t] indexer, - int64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_float32_float32_memview(float32_t[:, :] values, + int64_t[:] indexer, + float32_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int64_t fv + float32_t fv n = len(values) k = len(indexer) @@ -3870,15 +5580,26 @@ def take_2d_axis1_int32_int64(int32_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int32_float64(int32_t[:, :] values, +def take_2d_axis1_float32_float32(ndarray[float32_t, ndim=2] values, ndarray[int64_t] indexer, - float64_t[:, :] out, + float32_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_float32_float32_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + float32_t fv n = len(values) k = len(indexer) @@ -3895,16 +5616,15 @@ def take_2d_axis1_int32_float64(int32_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int64_int64(int64_t[:, :] values, - ndarray[int64_t] indexer, - int64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_float32_float64_memview(float32_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - int64_t fv + float64_t fv n = len(values) k = len(indexer) @@ -3922,12 +5642,23 @@ def take_2d_axis1_int64_int64(int64_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_int64_float64(int64_t[:, :] values, +def take_2d_axis1_float32_float64(ndarray[float32_t, ndim=2] values, ndarray[int64_t] indexer, float64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_float32_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx float64_t fv @@ -3947,16 +5678,15 @@ def take_2d_axis1_int64_float64(int64_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_float32_float32(float32_t[:, :] values, - ndarray[int64_t] indexer, - float32_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_float64_float64_memview(float64_t[:, :] values, + int64_t[:] indexer, + float64_t[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - float32_t fv + float64_t fv n = len(values) k = len(indexer) @@ -3974,12 +5704,23 @@ def take_2d_axis1_float32_float32(float32_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_float32_float64(float32_t[:, :] values, +def take_2d_axis1_float64_float64(ndarray[float64_t, ndim=2] values, ndarray[int64_t] indexer, float64_t[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_float64_float64_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx float64_t fv @@ -3999,16 +5740,15 @@ def take_2d_axis1_float32_float64(float32_t[:, :] values, out[i, j] = fv else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_float64_float64(float64_t[:, :] values, - ndarray[int64_t] indexer, - float64_t[:, :] out, - fill_value=np.nan): +cdef inline take_2d_axis1_object_object_memview(object[:, :] values, + int64_t[:] indexer, + object[:, :] out, + fill_value=np.nan): cdef: Py_ssize_t i, j, k, n, idx - float64_t fv + object fv n = len(values) k = len(indexer) @@ -4026,12 +5766,23 @@ def take_2d_axis1_float64_float64(float64_t[:, :] values, else: out[i, j] = values[i, idx] + @cython.wraparound(False) @cython.boundscheck(False) -def take_2d_axis1_object_object(object[:, :] values, +def take_2d_axis1_object_object(ndarray[object, ndim=2] values, ndarray[int64_t] indexer, object[:, :] out, fill_value=np.nan): + + if values.flags.writeable: + # We can call the memoryview version of the code + take_2d_axis1_object_object_memview(values, indexer, out, + fill_value=fill_value) + return + + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
cdef: Py_ssize_t i, j, k, n, idx object fv @@ -4052,7 +5803,6 @@ def take_2d_axis1_object_object(object[:, :] values, else: out[i, j] = values[i, idx] - @cython.wraparound(False) @cython.boundscheck(False) def take_2d_multi_bool_bool(ndarray[uint8_t, ndim=2] values, @@ -5784,7 +7534,8 @@ def group_ohlc_float64(ndarray[float64_t, ndim=2] out, b = 0 if K > 1: - raise NotImplementedError + raise NotImplementedError("Argument 'values' must have only " + "one dimension") else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: @@ -5857,7 +7608,8 @@ def group_ohlc_float32(ndarray[float32_t, ndim=2] out, b = 0 if K > 1: - raise NotImplementedError + raise NotImplementedError("Argument 'values' must have only " + "one dimension") else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index dbe6f2f1f8351..55d5e37fc19ee 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -1,8 +1,11 @@ +import sys cimport util from tslib import NaT from datetime import datetime, timedelta iNaT = util.get_nat() +cdef bint PY2 = sys.version_info[0] == 2 + # core.common import for fast inference checks def is_float(object obj): return util.is_float_object(obj) @@ -38,10 +41,10 @@ _TYPE_MAP = { 'f' : 'floating', 'complex128': 'complex', 'c' : 'complex', - 'string': 'string', - 'S' : 'string', - 'unicode': 'unicode', - 'U' : 'unicode', + 'string': 'string' if PY2 else 'bytes', + 'S' : 'string' if PY2 else 'bytes', + 'unicode': 'unicode' if PY2 else 'string', + 'U' : 'unicode' if PY2 else 'string', 'bool': 'boolean', 'b' : 'boolean', 'datetime64[ns]' : 'datetime64', @@ -181,6 +184,10 @@ def infer_dtype(object _values): if is_unicode_array(values): return 'unicode' + elif PyBytes_Check(val): + if is_bytes_array(values): + return 'bytes' + elif is_timedelta(val): if is_timedelta_or_timedelta64_array(values): return 'timedelta' @@ -196,11 +203,6 @@ def infer_dtype(object _values): return 'mixed' -def 
infer_dtype_list(list values): - cdef: - Py_ssize_t i, n = len(values) - pass - def is_possible_datetimelike_array(object arr): # determine if we have a possible datetimelike (or null-like) array @@ -253,7 +255,6 @@ def is_bool_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf - object obj if issubclass(values.dtype.type, np.bool_): return True @@ -277,7 +278,6 @@ def is_integer_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf - object obj if issubclass(values.dtype.type, np.integer): return True @@ -298,7 +298,6 @@ def is_integer_float_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf - object obj if issubclass(values.dtype.type, np.integer): return True @@ -321,7 +320,6 @@ def is_float_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf - object obj if issubclass(values.dtype.type, np.floating): return True @@ -342,9 +340,9 @@ def is_string_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf - object obj - if issubclass(values.dtype.type, (np.string_, np.unicode_)): + if ((PY2 and issubclass(values.dtype.type, np.string_)) or + not PY2 and issubclass(values.dtype.type, np.unicode_)): return True elif values.dtype == np.object_: objbuf = values @@ -363,7 +361,6 @@ def is_unicode_array(ndarray values): cdef: Py_ssize_t i, n = len(values) ndarray[object] objbuf - object obj if issubclass(values.dtype.type, np.unicode_): return True @@ -381,8 +378,29 @@ def is_unicode_array(ndarray values): return False +def is_bytes_array(ndarray values): + cdef: + Py_ssize_t i, n = len(values) + ndarray[object] objbuf + + if issubclass(values.dtype.type, np.bytes_): + return True + elif values.dtype == np.object_: + objbuf = values + + if n == 0: + return False + + for i in range(n): + if not PyBytes_Check(objbuf[i]): + return False + return True + else: + return False + + def is_datetime_array(ndarray[object] values): - cdef 
int i, null_count = 0, n = len(values) + cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: return False @@ -399,7 +417,7 @@ def is_datetime_array(ndarray[object] values): return null_count != n def is_datetime64_array(ndarray values): - cdef int i, null_count = 0, n = len(values) + cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: return False @@ -416,7 +434,7 @@ def is_datetime64_array(ndarray values): return null_count != n def is_timedelta_array(ndarray values): - cdef int i, null_count = 0, n = len(values) + cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: return False @@ -431,7 +449,7 @@ def is_timedelta_array(ndarray values): return null_count != n def is_timedelta64_array(ndarray values): - cdef int i, null_count = 0, n = len(values) + cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: return False @@ -447,7 +465,7 @@ def is_timedelta64_array(ndarray values): def is_timedelta_or_timedelta64_array(ndarray values): """ infer with timedeltas and/or nat/none """ - cdef int i, null_count = 0, n = len(values) + cdef Py_ssize_t i, null_count = 0, n = len(values) cdef object v if n == 0: return False @@ -462,7 +480,7 @@ def is_timedelta_or_timedelta64_array(ndarray values): return null_count != n def is_date_array(ndarray[object] values): - cdef int i, n = len(values) + cdef Py_ssize_t i, n = len(values) if n == 0: return False for i in range(n): @@ -471,7 +489,7 @@ def is_date_array(ndarray[object] values): return True def is_time_array(ndarray[object] values): - cdef int i, n = len(values) + cdef Py_ssize_t i, n = len(values) if n == 0: return False for i in range(n): @@ -484,7 +502,7 @@ def is_period(object o): return isinstance(o,Period) def is_period_array(ndarray[object] values): - cdef int i, n = len(values) + cdef Py_ssize_t i, n = len(values) from pandas.tseries.period import Period if n == 0: diff --git a/pandas/src/parser/tokenizer.c 
b/pandas/src/parser/tokenizer.c index 975142ebacc2a..3be17f17d6afa 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -38,7 +38,7 @@ See LICENSE for the license * RESTORE_FINAL (2): * Put the file position at the next byte after the * data read from the file_buffer. -* +* #define RESTORE_NOT 0 #define RESTORE_INITIAL 1 #define RESTORE_FINAL 2 @@ -304,7 +304,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { self->stream_len, &self->stream_cap, nbytes * 2, sizeof(char), &status); - TRACE(("make_stream_space: self->stream=%p, self->stream_len = %zu, self->stream_cap=%zu, status=%zu\n", + TRACE(("make_stream_space: self->stream=%p, self->stream_len = %zu, self->stream_cap=%zu, status=%zu\n", self->stream, self->stream_len, self->stream_cap, status)) if (status != 0) { @@ -334,7 +334,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { self->words_len, &self->words_cap, nbytes, sizeof(char*), &status); - TRACE(("make_stream_space: grow_buffer(self->self->words, %zu, %zu, %zu, %d)\n", + TRACE(("make_stream_space: grow_buffer(self->self->words, %zu, %zu, %zu, %d)\n", self->words_len, self->words_cap, nbytes, status)) if (status != 0) { return PARSER_OUT_OF_MEMORY; @@ -371,7 +371,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { self->lines + 1, &self->lines_cap, nbytes, sizeof(int), &status); - TRACE(("make_stream_space: grow_buffer(self->line_start, %zu, %zu, %zu, %d)\n", + TRACE(("make_stream_space: grow_buffer(self->line_start, %zu, %zu, %zu, %d)\n", self->lines + 1, self->lines_cap, nbytes, status)) if (status != 0) { return PARSER_OUT_OF_MEMORY; @@ -398,7 +398,7 @@ static int push_char(parser_t *self, char c) { /* TRACE(("pushing %c \n", c)) */ TRACE(("push_char: self->stream[%zu] = %x, stream_cap=%zu\n", self->stream_len+1, c, self->stream_cap)) if (self->stream_len >= self->stream_cap) { - TRACE(("push_char: ERROR!!! 
self->stream_len(%d) >= self->stream_cap(%d)\n", + TRACE(("push_char: ERROR!!! self->stream_len(%d) >= self->stream_cap(%d)\n", self->stream_len, self->stream_cap)) self->error_msg = (char*) malloc(64); sprintf(self->error_msg, "Buffer overflow caught - possible malformed input file.\n"); @@ -463,7 +463,6 @@ static void append_warning(parser_t *self, const char *msg) { static int end_line(parser_t *self) { int fields; - khiter_t k; /* for hash set detection */ int ex_fields = self->expected_fields; char *msg; @@ -483,7 +482,7 @@ static int end_line(parser_t *self) { TRACE(("end_line: Skipping row %d\n", self->file_lines)); // increment file line count self->file_lines++; - + // skip the tokens from this bad line self->line_start[self->lines] += fields; @@ -605,12 +604,11 @@ int parser_set_skipfirstnrows(parser_t *self, int64_t nrows) { static int parser_buffer_bytes(parser_t *self, size_t nbytes) { int status; size_t bytes_read; - void *src = self->source; status = 0; self->datapos = 0; self->data = self->cb_io(self->source, nbytes, &bytes_read, &status); - TRACE(("parser_buffer_bytes self->cb_io: nbytes=%zu, datalen: %d, status=%d\n", + TRACE(("parser_buffer_bytes self->cb_io: nbytes=%zu, datalen: %d, status=%d\n", nbytes, bytes_read, status)); self->datalen = bytes_read; @@ -704,7 +702,7 @@ typedef int (*parser_op)(parser_t *self, size_t line_limit); int skip_this_line(parser_t *self, int64_t rownum) { if (self->skipset != NULL) { - return ( kh_get_int64((kh_int64_t*) self->skipset, self->file_lines) != + return ( kh_get_int64((kh_int64_t*) self->skipset, self->file_lines) != ((kh_int64_t*)self->skipset)->n_buckets ); } else { @@ -757,11 +755,9 @@ int tokenize_delimited(parser_t *self, size_t line_limit) case START_RECORD: // start of record if (skip_this_line(self, self->file_lines)) { + self->state = SKIP_LINE; if (c == '\n') { - END_LINE() - } - else { - self->state = SKIP_LINE; + END_LINE(); } break; } @@ -786,7 +782,7 @@ int tokenize_delimited(parser_t 
*self, size_t line_limit) else self->state = EAT_CRNL; break; - } + } else if (c == self->commentchar) { self->state = EAT_LINE_COMMENT; break; @@ -853,11 +849,12 @@ int tokenize_delimited(parser_t *self, size_t line_limit) ; else { // backtrack /* We have to use i + 1 because buf has been incremented but not i */ - while (i + 1 > self->datapos && *buf != '\n') { + do { --buf; --i; - } - if (i + 1 > self->datapos) // reached a newline rather than the beginning + } while (i + 1 > self->datapos && *buf != '\n'); + + if (*buf == '\n') // reached a newline rather than the beginning { ++buf; // move pointer to first char after newline ++i; @@ -1077,7 +1074,7 @@ int tokenize_delim_customterm(parser_t *self, size_t line_limit) // Next character in file c = *buf++; - TRACE(("Iter: %d Char: %c Line %d field_count %d, state %d\n", + TRACE(("tokenize_delim_customterm - Iter: %d Char: %c Line %d field_count %d, state %d\n", i, c, self->file_lines + 1, self->line_fields[self->lines], self->state)); @@ -1093,11 +1090,9 @@ int tokenize_delim_customterm(parser_t *self, size_t line_limit) case START_RECORD: // start of record if (skip_this_line(self, self->file_lines)) { + self->state = SKIP_LINE; if (c == self->lineterminator) { - END_LINE() - } - else { - self->state = SKIP_LINE; + END_LINE(); } break; } @@ -1172,11 +1167,12 @@ int tokenize_delim_customterm(parser_t *self, size_t line_limit) ; else { // backtrack /* We have to use i + 1 because buf has been incremented but not i */ - while (i + 1 > self->datapos && *buf != self->lineterminator) { + do { --buf; --i; - } - if (i + 1 > self->datapos) // reached a newline rather than the beginning + } while (i + 1 > self->datapos && *buf != self->lineterminator); + + if (*buf == self->lineterminator) // reached a newline rather than the beginning { ++buf; // move pointer to first char after newline ++i; @@ -1342,7 +1338,7 @@ int tokenize_whitespace(parser_t *self, size_t line_limit) // Next character in file c = *buf++; - 
TRACE(("Iter: %d Char: %c Line %d field_count %d, state %d\n", + TRACE(("tokenize_whitespace - Iter: %d Char: %c Line %d field_count %d, state %d\n", i, c, self->file_lines + 1, self->line_fields[self->lines], self->state)); @@ -1391,11 +1387,9 @@ int tokenize_whitespace(parser_t *self, size_t line_limit) case START_RECORD: // start of record if (skip_this_line(self, self->file_lines)) { + self->state = SKIP_LINE; if (c == '\n') { - END_LINE() - } - else { - self->state = SKIP_LINE; + END_LINE(); } break; } else if (c == '\n') { @@ -1756,7 +1750,7 @@ int parser_trim_buffers(parser_t *self) { /* trim stream */ new_cap = _next_pow2(self->stream_len) + 1; - TRACE(("parser_trim_buffers: new_cap = %zu, stream_cap = %zu, lines_cap = %zu\n", + TRACE(("parser_trim_buffers: new_cap = %zu, stream_cap = %zu, lines_cap = %zu\n", new_cap, self->stream_cap, self->lines_cap)); if (new_cap < self->stream_cap) { TRACE(("parser_trim_buffers: new_cap < self->stream_cap, calling safe_realloc\n")); @@ -1877,7 +1871,7 @@ int _tokenize_helper(parser_t *self, size_t nrows, int all) { } } - TRACE(("_tokenize_helper: Trying to process %d bytes, datalen=%d, datapos= %d\n", + TRACE(("_tokenize_helper: Trying to process %d bytes, datalen=%d, datapos= %d\n", self->datalen - self->datapos, self->datalen, self->datapos)); /* TRACE(("sourcetype: %c, status: %d\n", self->sourcetype, status)); */ @@ -2039,7 +2033,7 @@ int P_INLINE to_longlong_thousands(char *item, long long *p_value, char tsep) return status; }*/ -int to_boolean(char *item, uint8_t *val) { +int to_boolean(const char *item, uint8_t *val) { char *tmp; int i, status = 0; @@ -2363,7 +2357,7 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, num_digits++; num_decimals++; } - + if (num_digits >= max_digits) // consume extra decimal digits while (isdigit(*p)) ++p; @@ -2659,4 +2653,4 @@ uint64_t str_to_uint64(const char *p_item, uint64_t uint_max, int *error) *error = 0; return number; } -*/ \ No newline at end of file 
+*/ diff --git a/pandas/src/parser/tokenizer.h b/pandas/src/parser/tokenizer.h index 694a73ec78153..d3777e858b6ca 100644 --- a/pandas/src/parser/tokenizer.h +++ b/pandas/src/parser/tokenizer.h @@ -228,9 +228,12 @@ coliter_t *coliter_new(parser_t *self, int i); /* #define COLITER_NEXT(iter) iter->words[iter->line_start[iter->line++] + iter->col] */ // #define COLITER_NEXT(iter) iter.words[iter.line_start[iter.line++] + iter.col] -#define COLITER_NEXT(iter) iter.words[*iter.line_start++ + iter.col] +#define COLITER_NEXT(iter, word) do { \ + const int i = *iter.line_start++ + iter.col; \ + word = i < *iter.line_start ? iter.words[i]: ""; \ + } while(0) -parser_t* parser_new(); +parser_t* parser_new(void); int parser_init(parser_t *self); @@ -270,6 +273,6 @@ double round_trip(const char *p, char **q, char decimal, char sci, char tsep, in //int P_INLINE to_complex(char *item, double *p_real, double *p_imag, char sci, char decimal); int P_INLINE to_longlong(char *item, long long *p_value); //int P_INLINE to_longlong_thousands(char *item, long long *p_value, char tsep); -int to_boolean(char *item, uint8_t *val); +int to_boolean(const char *item, uint8_t *val); #endif // _PARSER_COMMON_H_ diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx index cc6ad3defe4f3..b4a4930e09d68 100644 --- a/pandas/src/period.pyx +++ b/pandas/src/period.pyx @@ -710,6 +710,10 @@ cdef class Period(object): dt = value if freq is None: raise ValueError('Must supply freq for datetime value') + elif isinstance(value, np.datetime64): + dt = Timestamp(value) + if freq is None: + raise ValueError('Must supply freq for datetime value') elif isinstance(value, date): dt = datetime(year=value.year, month=value.month, day=value.day) if freq is None: diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py index a30286479c847..445530bc5b00c 100644 --- a/pandas/stats/tests/test_moments.py +++ b/pandas/stats/tests/test_moments.py @@ -862,7 +862,7 @@ def _non_null_values(x): if 
mock_mean: # check that mean equals mock_mean expected = mock_mean(x) - assert_equal(mean_x, expected) + assert_equal(mean_x, expected.astype('float64')) # check that correlation of a series with itself is either 1 or NaN corr_x_x = corr(x, x) @@ -1254,7 +1254,8 @@ def _check_pairwise_moment(self, func, *args, **kwargs): actual = panel.ix[:, 1, 5] expected = func(self.frame[1], self.frame[5], *args, **kwargs) - tm.assert_series_equal(actual, expected) + tm.assert_series_equal(actual, expected, check_names=False) + self.assertEqual(actual.name, 5) def test_flex_binary_moment(self): # GH3155 @@ -1549,6 +1550,7 @@ def test_moment_functions_zero_length(self): df1_expected = df1 df1_expected_panel = Panel(items=df1.index, major_axis=df1.columns, minor_axis=df1.columns) df2 = DataFrame(columns=['a']) + df2['a'] = df2['a'].astype('float64') df2_expected = df2 df2_expected_panel = Panel(items=df2.index, major_axis=df2.columns, minor_axis=df2.columns) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index b91c46377267a..e9526f9fad1ac 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -244,6 +244,26 @@ def check_ops_properties(self, props, filter=None, ignore_failures=False): else: self.assertRaises(AttributeError, lambda : getattr(o,op)) + def test_binary_ops_docs(self): + from pandas import DataFrame, Panel + op_map = {'add': '+', + 'sub': '-', + 'mul': '*', + 'mod': '%', + 'pow': '**', + 'truediv': '/', + 'floordiv': '//'} + for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv', 'floordiv']: + for klass in [Series, DataFrame, Panel]: + operand1 = klass.__name__.lower() + operand2 = 'other' + op = op_map[op_name] + expected_str = ' '.join([operand1, op, operand2]) + self.assertTrue(expected_str in getattr(klass, op_name).__doc__) + + # reverse version of the binary ops + expected_str = ' '.join([operand2, op, operand1]) + self.assertTrue(expected_str in getattr(klass, 'r' + op_name).__doc__) class TestIndexOps(Ops): diff 
--git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py old mode 100644 new mode 100755 index 7f4b3fcb94dfa..21b64378cfc24 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -11,7 +11,7 @@ import numpy as np import pandas as pd -from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp +from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp, CategoricalIndex from pandas.core.config import option_context import pandas.core.common as com @@ -93,6 +93,24 @@ def test_constructor_unsortable(self): else: Categorical.from_array(arr, ordered=True) + def test_is_equal_dtype(self): + + # test dtype comparisons between cats + + c1 = Categorical(list('aabca'),categories=list('abc'),ordered=False) + c2 = Categorical(list('aabca'),categories=list('cab'),ordered=False) + c3 = Categorical(list('aabca'),categories=list('cab'),ordered=True) + self.assertTrue(c1.is_dtype_equal(c1)) + self.assertTrue(c2.is_dtype_equal(c2)) + self.assertTrue(c3.is_dtype_equal(c3)) + self.assertFalse(c1.is_dtype_equal(c2)) + self.assertFalse(c1.is_dtype_equal(c3)) + self.assertFalse(c1.is_dtype_equal(Index(list('aabca')))) + self.assertFalse(c1.is_dtype_equal(c1.astype(object))) + self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1))) + self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,categories=list('cab')))) + self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,ordered=True))) + def test_constructor(self): exp_arr = np.array(["a", "b", "c", "a", "b", "c"]) @@ -114,6 +132,9 @@ def f(): Categorical([1,2], [1,2,np.nan, np.nan]) self.assertRaises(ValueError, f) + # The default should be unordered + c1 = Categorical(["a", "b", "c", "a"]) + self.assertFalse(c1.ordered) # Categorical as input c1 = Categorical(["a", "b", "c", "a"]) @@ -221,6 +242,18 @@ def f(): c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) cat = Categorical([1,2], categories=[1,2,3]) + # this is a legitimate constructor + with 
tm.assert_produces_warning(None): + c = Categorical(np.array([],dtype='int64'),categories=[3,2,1],ordered=True) + + def test_constructor_with_index(self): + + ci = CategoricalIndex(list('aabbca'),categories=list('cab')) + self.assertTrue(ci.values.equals(Categorical(ci))) + + ci = CategoricalIndex(list('aabbca'),categories=list('cab')) + self.assertTrue(ci.values.equals(Categorical(ci.astype(object),categories=ci.categories))) + def test_constructor_with_generator(self): # This was raising an Error in isnull(single_val).any() because isnull returned a scalar # for a generator @@ -367,6 +400,13 @@ def f(): self.assertRaises(TypeError, lambda: a < cat) self.assertRaises(TypeError, lambda: a < cat_rev) + # Make sure that unequal comparison take the categories order in account + cat_rev = pd.Categorical(list("abc"), categories=list("cba"), ordered=True) + exp = np.array([True, False, False]) + res = cat_rev > "b" + self.assert_numpy_array_equal(res, exp) + + def test_na_flags_int_categories(self): # #1457 @@ -481,6 +521,15 @@ def test_empty_print(self): expected = ("[], Categories (0, object): []") self.assertEqual(expected, repr(factor)) + def test_print_none_width(self): + # GH10087 + a = pd.Series(pd.Categorical([1,2,3,4], name="a")) + exp = u("0 1\n1 2\n2 3\n3 4\n" + + "Name: a, dtype: category\nCategories (4, int64): [1, 2, 3, 4]") + + with option_context("display.width", None): + self.assertEqual(exp, repr(a)) + def test_periodindex(self): idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', '2014-03', '2014-03'], freq='M') @@ -717,6 +766,19 @@ def f(): cat.add_categories(["d"]) self.assertRaises(ValueError, f) + # GH 9927 + cat = Categorical(list("abc"), ordered=True) + expected = Categorical(list("abc"), categories=list("abcde"), ordered=True) + # test with Series, np.array, index, list + res = cat.add_categories(Series(["d", "e"])) + self.assert_categorical_equal(res, expected) + res = cat.add_categories(np.array(["d", "e"])) + 
self.assert_categorical_equal(res, expected) + res = cat.add_categories(Index(["d", "e"])) + self.assert_categorical_equal(res, expected) + res = cat.add_categories(["d", "e"]) + self.assert_categorical_equal(res, expected) + def test_remove_categories(self): cat = Categorical(["a","b","c","a"], ordered=True) old = cat.copy() @@ -1077,6 +1139,20 @@ def test_reflected_comparison_with_scalars(self): self.assert_numpy_array_equal(cat > cat[0], [False, True, True]) self.assert_numpy_array_equal(cat[0] < cat, [False, True, True]) + def test_comparison_with_unknown_scalars(self): + # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following + # comparisons with scalars not in categories should raise for unequal comps, but not for + # equal/not equal + cat = pd.Categorical([1, 2, 3], ordered=True) + + self.assertRaises(TypeError, lambda: cat < 4) + self.assertRaises(TypeError, lambda: cat > 4) + self.assertRaises(TypeError, lambda: 4 < cat) + self.assertRaises(TypeError, lambda: 4 > cat) + + self.assert_numpy_array_equal(cat == 4 , [False, False, False]) + self.assert_numpy_array_equal(cat != 4 , [True, True, True]) + class TestCategoricalAsBlock(tm.TestCase): _multiprocess_can_split_ = True @@ -1753,6 +1829,35 @@ def f(x): expected['person_name'] = expected['person_name'].astype('object') tm.assert_frame_equal(result, expected) + # GH 9921 + # Monotonic + df = DataFrame({"a": [5, 15, 25]}) + c = pd.cut(df.a, bins=[0,10,20,30,40]) + tm.assert_series_equal(df.a.groupby(c).transform(sum), df['a']) + tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) + + # Filter + tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) + tm.assert_frame_equal(df.groupby(c).filter(np.all), df) + + # Non-monotonic + df = DataFrame({"a": [5, 15, 25, -5]}) + c = pd.cut(df.a, bins=[-10, 
0,10,20,30,40]) + tm.assert_series_equal(df.a.groupby(c).transform(sum), df['a']) + tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) + + # GH 9603 + df = pd.DataFrame({'a': [1, 0, 0, 0]}) + c = pd.cut(df.a, [0, 1, 2, 3, 4]) + result = df.groupby(c).apply(len) + expected = pd.Series([1, 0, 0, 0], index=c.values.categories) + expected.index.name = 'a' + tm.assert_series_equal(result, expected) + def test_pivot_table(self): raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True) @@ -2390,6 +2495,18 @@ def test_comparisons(self): exp = Series([False, False, True]) tm.assert_series_equal(res, exp) + scalar = base[1] + res = cat > scalar + exp = Series([False, False, True]) + exp2 = cat.values > scalar + tm.assert_series_equal(res, exp) + tm.assert_numpy_array_equal(res.values, exp2) + res_rev = cat_rev > scalar + exp_rev = Series([True, False, False]) + exp_rev2 = cat_rev.values > scalar + tm.assert_series_equal(res_rev, exp_rev) + tm.assert_numpy_array_equal(res_rev.values, exp_rev2) + # Only categories with same categories can be compared def f(): cat > cat_rev @@ -2408,9 +2525,29 @@ def f(): self.assertRaises(TypeError, lambda: a < cat) self.assertRaises(TypeError, lambda: a < cat_rev) - # Categoricals can be compared to scalar values - res = cat_rev > base[0] - tm.assert_series_equal(res, exp) + # unequal comparison should raise for unordered cats + cat = Series(Categorical(list("abc"))) + def f(): + cat > "b" + self.assertRaises(TypeError, f) + cat = Series(Categorical(list("abc"), ordered=False)) + def f(): + cat > "b" + self.assertRaises(TypeError, f) + + # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following + # comparisons with scalars not in categories should raise for unequal comps, but not for + # equal/not equal + cat = 
Series(Categorical(list("abc"), ordered=True)) + + self.assertRaises(TypeError, lambda: cat < "d") + self.assertRaises(TypeError, lambda: cat > "d") + self.assertRaises(TypeError, lambda: "d" < cat) + self.assertRaises(TypeError, lambda: "d" > cat) + + self.assert_series_equal(cat == "d" , Series([False, False, False])) + self.assert_series_equal(cat != "d" , Series([True, True, True])) + # And test NaN handling... cat = Series(Categorical(["a","b","c", np.nan])) @@ -2506,6 +2643,8 @@ def f(): dfx['grade'].cat.categories self.assert_numpy_array_equal(df['grade'].cat.categories, dfx['grade'].cat.categories) + def test_concat_preserve(self): + # GH 8641 # series concat not preserving category dtype s = Series(list('abc'),dtype='category') @@ -2523,6 +2662,28 @@ def f(): expected = Series(list('abcabc'),index=[0,1,2,0,1,2]).astype('category') tm.assert_series_equal(result, expected) + a = Series(np.arange(6,dtype='int64')) + b = Series(list('aabbca')) + + df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) }) + result = pd.concat([df2,df2]) + expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) }) + tm.assert_frame_equal(result, expected) + + def test_categorical_index_preserver(self): + + a = Series(np.arange(6,dtype='int64')) + b = Series(list('aabbca')) + + df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) }).set_index('B') + result = pd.concat([df2,df2]) + expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) }).set_index('B') + tm.assert_frame_equal(result, expected) + + # wrong catgories + df3 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('abc')) }).set_index('B') + self.assertRaises(TypeError, lambda : pd.concat([df2,df3])) + def test_append(self): cat = pd.Categorical(["a","b"], categories=["a","b"]) vals = [1,2] @@ -2658,6 +2819,14 @@ def cmp(a,b): self.assertRaises(TypeError, 
lambda : invalid(s)) + def test_astype_categorical(self): + + cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) + tm.assert_categorical_equal(cat,cat.astype('category')) + tm.assert_almost_equal(np.array(cat),cat.astype('object')) + + self.assertRaises(ValueError, lambda : cat.astype(float)) + def test_to_records(self): # GH8626 diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index d0ae7c9988c8d..c3d39fcdf906f 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -524,6 +524,47 @@ def test_is_recompilable(): for f in fails: assert not com.is_re_compilable(f) +def test_random_state(): + import numpy.random as npr + # Check with seed + state = com._random_state(5) + assert_equal(state.uniform(), npr.RandomState(5).uniform()) + + # Check with random state object + state2 = npr.RandomState(10) + assert_equal(com._random_state(state2).uniform(), npr.RandomState(10).uniform()) + + # check with no arg random state + assert isinstance(com._random_state(), npr.RandomState) + + # Error for floats or strings + with tm.assertRaises(ValueError): + com._random_state('test') + + with tm.assertRaises(ValueError): + com._random_state(5.5) + + +def test_maybe_match_name(): + + matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='x')) + assert(matched == 'x') + + matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='y')) + assert(matched is None) + + matched = com._maybe_match_name(Series([1]), Series([2], name='x')) + assert(matched is None) + + matched = com._maybe_match_name(Series([1], name='x'), Series([2])) + assert(matched is None) + + matched = com._maybe_match_name(Series([1], name='x'), [2]) + assert(matched == 'x') + + matched = com._maybe_match_name([1], Series([2], name='y')) + assert(matched == 'y') + class TestTake(tm.TestCase): # standard incompatible fill error @@ -608,8 +649,9 @@ def _test_dtype(dtype, fill_value, out_dtype): _test_dtype(np.bool_, '', np.object_) def 
test_2d_with_out(self): - def _test_dtype(dtype, can_hold_na): + def _test_dtype(dtype, can_hold_na, writeable=True): data = np.random.randint(0, 2, (5, 3)).astype(dtype) + data.flags.writeable = writeable indexer = [2, 1, 0, 1] out0 = np.empty((4, 3), dtype=dtype) @@ -640,18 +682,22 @@ def _test_dtype(dtype, can_hold_na): # no exception o/w data.take(indexer, out=out, axis=i) - _test_dtype(np.float64, True) - _test_dtype(np.float32, True) - _test_dtype(np.uint64, False) - _test_dtype(np.uint32, False) - _test_dtype(np.uint16, False) - _test_dtype(np.uint8, False) - _test_dtype(np.int64, False) - _test_dtype(np.int32, False) - _test_dtype(np.int16, False) - _test_dtype(np.int8, False) - _test_dtype(np.object_, True) - _test_dtype(np.bool, False) + for writeable in [True, False]: + # Check that take_nd works both with writeable arrays (in which + # case fast typed memoryviews implementation) and read-only + # arrays alike. + _test_dtype(np.float64, True, writeable=writeable) + _test_dtype(np.float32, True, writeable=writeable) + _test_dtype(np.uint64, False, writeable=writeable) + _test_dtype(np.uint32, False, writeable=writeable) + _test_dtype(np.uint16, False, writeable=writeable) + _test_dtype(np.uint8, False, writeable=writeable) + _test_dtype(np.int64, False, writeable=writeable) + _test_dtype(np.int32, False, writeable=writeable) + _test_dtype(np.int16, False, writeable=writeable) + _test_dtype(np.int8, False, writeable=writeable) + _test_dtype(np.object_, True, writeable=writeable) + _test_dtype(np.bool, False, writeable=writeable) def test_2d_fill_nonna(self): def _test_dtype(dtype, fill_value, out_dtype): diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index ce32c8af99a73..a7129bca59a7f 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -14,7 +14,7 @@ from numpy.random import randn import numpy as np -from pandas import DataFrame, Series, Index, Timestamp, MultiIndex +from pandas import DataFrame, Series, 
Index, Timestamp, MultiIndex, date_range, NaT import pandas.core.format as fmt import pandas.util.testing as tm @@ -298,6 +298,21 @@ def mkframe(n): com.pprint_thing(df._repr_fits_horizontal_()) self.assertTrue(has_expanded_repr(df)) + def test_str_max_colwidth(self): + # GH 7856 + df = pd.DataFrame([{'a': 'foo', 'b': 'bar', + 'c': 'uncomfortably long line with lots of stuff', + 'd': 1}, + {'a': 'foo', 'b': 'bar', 'c': 'stuff', 'd': 1}]) + df.set_index(['a', 'b', 'c']) + self.assertTrue(str(df) == ' a b c d\n' + '0 foo bar uncomfortably long line with lots of stuff 1\n' + '1 foo bar stuff 1') + with option_context('max_colwidth', 20): + self.assertTrue(str(df) == ' a b c d\n' + '0 foo bar uncomfortably lo... 1\n' + '1 foo bar stuff 1') + def test_auto_detect(self): term_width, term_height = get_terminal_size() fac = 1.05 # Arbitrary large factor to exceed term widht @@ -2194,6 +2209,28 @@ def test_to_latex_multiindex(self): x & y & a \\ \bottomrule \end{tabular} +""" + self.assertEqual(result, expected) + + df = DataFrame.from_dict({ + ('c1', 0): pd.Series(dict((x, x) for x in range(4))), + ('c1', 1): pd.Series(dict((x, x + 4) for x in range(4))), + ('c2', 0): pd.Series(dict((x, x) for x in range(4))), + ('c2', 1): pd.Series(dict((x, x + 4) for x in range(4))), + ('c3', 0): pd.Series(dict((x, x) for x in range(4))), + }).T + result = df.to_latex() + expected = r"""\begin{tabular}{llrrrr} +\toprule + & & 0 & 1 & 2 & 3 \\ +\midrule +c1 & 0 & 0 & 1 & 2 & 3 \\ + & 1 & 4 & 5 & 6 & 7 \\ +c2 & 0 & 0 & 1 & 2 & 3 \\ + & 1 & 4 & 5 & 6 & 7 \\ +c3 & 0 & 0 & 1 & 2 & 3 \\ +\bottomrule +\end{tabular} """ self.assertEqual(result, expected) @@ -2458,7 +2495,7 @@ def test_to_string(self): def test_freq_name_separation(self): s = Series(np.random.randn(10), - index=pd.date_range('1/1/2000', periods=10), name=0) + index=date_range('1/1/2000', periods=10), name=0) result = repr(s) self.assertTrue('Freq: D, Name: 0' in result) @@ -2519,7 +2556,6 @@ def test_float_trim_zeros(self): def 
test_datetimeindex(self): - from pandas import date_range, NaT index = date_range('20130102',periods=6) s = Series(1,index=index) result = s.to_string() @@ -2537,7 +2573,6 @@ def test_datetimeindex(self): def test_timedelta64(self): - from pandas import date_range from datetime import datetime, timedelta Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string() @@ -2986,6 +3021,25 @@ def test_format(self): self.assertEqual(result[0], " 12") self.assertEqual(result[1], " 0") + def test_output_significant_digits(self): + # Issue #9764 + + # In case default display precision changes: + with pd.option_context('display.precision', 7): + # DataFrame example from issue #9764 + d=pd.DataFrame({'col1':[9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7, 5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6, 4.999e-6, 5e-6, 5.0001e-6, 6e-6]}) + + expected_output={ + (0,6):' col1\n0 9.999000e-08\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07', + (1,6):' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07', + (1,8):' col1\n1 1.000000e-07\n2 1.000100e-07\n3 2.000000e-07\n4 4.999000e-07\n5 5.000000e-07\n6 5.000100e-07\n7 6.000000e-07', + (8,16):' col1\n8 9.999000e-07\n9 1.000000e-06\n10 1.000100e-06\n11 2.000000e-06\n12 4.999000e-06\n13 5.000000e-06\n14 5.000100e-06\n15 6.000000e-06', + (9,16):' col1\n9 0.000001\n10 0.000001\n11 0.000002\n12 0.000005\n13 0.000005\n14 0.000005\n15 0.000006' + } + + for (start, stop), v in expected_output.items(): + self.assertEqual(str(d[start:stop]), v) + class TestRepr_timedelta64(tm.TestCase): @@ -3123,6 +3177,44 @@ def test_date_nanos(self): result = fmt.Datetime64Formatter(x).get_result() self.assertEqual(result[0].strip(), "1970-01-01 00:00:00.000000200") + def test_dates_display(self): + + # 10170 + # make sure that we are consistently display date formatting + x = Series(date_range('20130101 09:00:00',periods=5,freq='D')) + x.iloc[1] = np.nan + result = 
fmt.Datetime64Formatter(x).get_result() + self.assertEqual(result[0].strip(), "2013-01-01 09:00:00") + self.assertEqual(result[1].strip(), "NaT") + self.assertEqual(result[4].strip(), "2013-01-05 09:00:00") + + x = Series(date_range('20130101 09:00:00',periods=5,freq='s')) + x.iloc[1] = np.nan + result = fmt.Datetime64Formatter(x).get_result() + self.assertEqual(result[0].strip(), "2013-01-01 09:00:00") + self.assertEqual(result[1].strip(), "NaT") + self.assertEqual(result[4].strip(), "2013-01-01 09:00:04") + + x = Series(date_range('20130101 09:00:00',periods=5,freq='ms')) + x.iloc[1] = np.nan + result = fmt.Datetime64Formatter(x).get_result() + self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000") + self.assertEqual(result[1].strip(), "NaT") + self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.004") + + x = Series(date_range('20130101 09:00:00',periods=5,freq='us')) + x.iloc[1] = np.nan + result = fmt.Datetime64Formatter(x).get_result() + self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000000") + self.assertEqual(result[1].strip(), "NaT") + self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.000004") + + x = Series(date_range('20130101 09:00:00',periods=5,freq='N')) + x.iloc[1] = np.nan + result = fmt.Datetime64Formatter(x).get_result() + self.assertEqual(result[0].strip(), "2013-01-01 09:00:00.000000000") + self.assertEqual(result[1].strip(), "NaT") + self.assertEqual(result[4].strip(), "2013-01-01 09:00:00.000000004") class TestNaTFormatting(tm.TestCase): def test_repr(self): @@ -3159,13 +3251,13 @@ def test_date_explict_date_format(self): class TestDatetimeIndexUnicode(tm.TestCase): def test_dates(self): text = str(pd.to_datetime([datetime(2013,1,1), datetime(2014,1,1)])) - self.assertTrue("[2013-01-01," in text) - self.assertTrue(", 2014-01-01]" in text) + self.assertTrue("['2013-01-01'," in text) + self.assertTrue(", '2014-01-01']" in text) def test_mixed(self): text = str(pd.to_datetime([datetime(2013,1,1), 
datetime(2014,1,1,12), datetime(2014,1,1)])) - self.assertTrue("[2013-01-01 00:00:00," in text) - self.assertTrue(", 2014-01-01 00:00:00]" in text) + self.assertTrue("'2013-01-01 00:00:00'," in text) + self.assertTrue("'2014-01-01 00:00:00']" in text) class TestStringRepTimestamp(tm.TestCase): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index cdda087b27613..4964d13f7ac28 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -16,7 +16,7 @@ from pandas.compat import( map, zip, range, long, lrange, lmap, lzip, - OrderedDict, u, StringIO + OrderedDict, u, StringIO, string_types ) from pandas import compat @@ -31,9 +31,9 @@ import pandas.core.common as com import pandas.core.format as fmt import pandas.core.datetools as datetools -from pandas import (DataFrame, Index, Series, notnull, isnull, +from pandas import (DataFrame, Index, Series, Panel, notnull, isnull, MultiIndex, DatetimeIndex, Timestamp, date_range, - read_csv, timedelta_range, Timedelta, + read_csv, timedelta_range, Timedelta, CategoricalIndex, option_context) import pandas as pd from pandas.parser import CParserError @@ -784,6 +784,16 @@ def test_setitem_None(self): assert_series_equal(self.frame[None], self.frame['A']) repr(self.frame) + def test_setitem_empty(self): + # GH 9596 + df = pd.DataFrame({'a': ['1', '2', '3'], + 'b': ['11', '22', '33'], + 'c': ['111', '222', '333']}) + + result = df.copy() + result.loc[result.b.isnull(), 'a'] = result.a + assert_frame_equal(result, df) + def test_delitem_corner(self): f = self.frame.copy() del f['D'] @@ -2376,6 +2386,36 @@ def test_set_index_pass_arrays(self): expected = df.set_index(['A', 'B'], drop=False) assert_frame_equal(result, expected, check_names=False) # TODO should set_index check_names ? 
+ def test_construction_with_categorical_index(self): + + ci = tm.makeCategoricalIndex(10) + + # with Categorical + df = DataFrame({'A' : np.random.randn(10), + 'B' : ci.values }) + idf = df.set_index('B') + str(idf) + tm.assert_index_equal(idf.index, ci, check_names=False) + self.assertEqual(idf.index.name, 'B') + + # from a CategoricalIndex + df = DataFrame({'A' : np.random.randn(10), + 'B' : ci }) + idf = df.set_index('B') + str(idf) + tm.assert_index_equal(idf.index, ci, check_names=False) + self.assertEqual(idf.index.name, 'B') + + idf = df.set_index('B').reset_index().set_index('B') + str(idf) + tm.assert_index_equal(idf.index, ci, check_names=False) + self.assertEqual(idf.index.name, 'B') + + new_df = idf.reset_index() + new_df.index = df.B + tm.assert_index_equal(new_df.index, ci, check_names=False) + self.assertEqual(idf.index.name, 'B') + def test_set_index_cast_datetimeindex(self): df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)], @@ -2751,6 +2791,59 @@ def test_insert_error_msmgs(self): with assertRaisesRegexp(TypeError, msg): df['gr'] = df.groupby(['b', 'c']).count() + def test_frame_subclassing_and_slicing(self): + # Subclass frame and ensure it returns the right class on slicing it + # In reference to PR 9632 + + class CustomSeries(Series): + @property + def _constructor(self): + return CustomSeries + + def custom_series_function(self): + return 'OK' + + class CustomDataFrame(DataFrame): + "Subclasses pandas DF, fills DF with simulation results, adds some custom plotting functions." + + def __init__(self, *args, **kw): + super(CustomDataFrame, self).__init__(*args, **kw) + + @property + def _constructor(self): + return CustomDataFrame + + _constructor_sliced = CustomSeries + + def custom_frame_function(self): + return 'OK' + + data = {'col1': range(10), + 'col2': range(10)} + cdf = CustomDataFrame(data) + + # Did we get back our own DF class? 
+ self.assertTrue(isinstance(cdf, CustomDataFrame)) + + # Do we get back our own Series class after selecting a column? + cdf_series = cdf.col1 + self.assertTrue(isinstance(cdf_series, CustomSeries)) + self.assertEqual(cdf_series.custom_series_function(), 'OK') + + # Do we get back our own DF class after slicing row-wise? + cdf_rows = cdf[1:5] + self.assertTrue(isinstance(cdf_rows, CustomDataFrame)) + self.assertEqual(cdf_rows.custom_frame_function(), 'OK') + + # Make sure sliced part of multi-index frame is custom class + mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')]) + cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) + self.assertTrue(isinstance(cdf_multi['A'], CustomDataFrame)) + + mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')]) + cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) + self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries)) + def test_constructor_subclass_dict(self): # Test for passing dict subclass to constructor data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)), @@ -3125,6 +3218,19 @@ def test_constructor_empty_list(self): expected = DataFrame(index=[]) assert_frame_equal(df, expected) + # GH 9939 + df = DataFrame([], columns=['A', 'B']) + expected = DataFrame({}, columns=['A', 'B']) + assert_frame_equal(df, expected) + + # Empty generator: list(empty_gen()) == [] + def empty_gen(): + return + yield + + df = DataFrame(empty_gen(), columns=['A', 'B']) + assert_frame_equal(df, expected) + def test_constructor_list_of_lists(self): # GH #484 l = [[1, 'a'], [2, 'b']] @@ -4192,6 +4298,30 @@ def test_astype_cast_nan_int(self): df = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]}) self.assertRaises(ValueError, df.astype, np.int64) + def test_astype_str(self): + # GH9757 + a = Series(date_range('2010-01-04', periods=5)) + b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern')) + c = Series([Timedelta(x, unit='d') for x in range(5)]) + d = Series(range(5)) + e = Series([0.0, 0.2, 
0.4, 0.6, 0.8]) + + df = DataFrame({'a' : a, 'b' : b, 'c' : c, 'd' : d, 'e' : e}) + + # Test str and unicode on python 2.x and just str on python 3.x + for tt in set([str, compat.text_type]): + result = df.astype(tt) + + expected = DataFrame({ + 'a' : list(map(tt, a.values)), + 'b' : list(map(tt, b.values)), + 'c' : list(map(tt, c.values)), + 'd' : list(map(tt, d.values)), + 'e' : list(map(tt, e.values)), + }) + + assert_frame_equal(result, expected) + def test_array_interface(self): result = np.sqrt(self.frame) tm.assert_isinstance(result, type(self.frame)) @@ -5944,6 +6074,20 @@ def test_boolean_comparison(self): self.assertRaises(ValueError, lambda : df == (2,2)) self.assertRaises(ValueError, lambda : df == [2,2]) + def test_equals_different_blocks(self): + # GH 9330 + df0 = pd.DataFrame({"A": ["x","y"], "B": [1,2], + "C": ["w","z"]}) + df1 = df0.reset_index()[["A","B","C"]] + # this assert verifies that the above operations have + # induced a block rearrangement + self.assertTrue(df0._data.blocks[0].dtype != + df1._data.blocks[0].dtype) + # do the real tests + self.assert_frame_equal(df0, df1) + self.assertTrue(df0.equals(df1)) + self.assertTrue(df1.equals(df0)) + def test_to_csv_from_csv(self): pname = '__tmp_to_csv_from_csv__' @@ -7409,6 +7553,26 @@ def test_drop_names(self): self.assertEqual(obj.columns.name, 'second') self.assertEqual(list(df.columns), ['d', 'e', 'f']) + self.assertRaises(ValueError, df.drop, ['g']) + self.assertRaises(ValueError, df.drop, ['g'], 1) + + # errors = 'ignore' + dropped = df.drop(['g'], errors='ignore') + expected = Index(['a', 'b', 'c'], name='first') + self.assert_index_equal(dropped.index, expected) + + dropped = df.drop(['b', 'g'], errors='ignore') + expected = Index(['a', 'c'], name='first') + self.assert_index_equal(dropped.index, expected) + + dropped = df.drop(['g'], axis=1, errors='ignore') + expected = Index(['d', 'e', 'f'], name='second') + self.assert_index_equal(dropped.columns, expected) + + dropped = 
df.drop(['d', 'g'], axis=1, errors='ignore') + expected = Index(['e', 'f'], name='second') + self.assert_index_equal(dropped.columns, expected) + def test_dropEmptyRows(self): N = len(self.frame.index) mat = randn(N) @@ -7787,6 +7951,19 @@ def test_drop(self): assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :]) assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :]) + self.assertRaises(ValueError, simple.drop, 5) + self.assertRaises(ValueError, simple.drop, 'C', 1) + self.assertRaises(ValueError, simple.drop, [1, 5]) + self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1) + + # errors = 'ignore' + assert_frame_equal(simple.drop(5, errors='ignore'), simple) + assert_frame_equal(simple.drop([0, 5], errors='ignore'), + simple.ix[[1, 2, 3], :]) + assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple) + assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'), + simple[['B']]) + #non-unique - wheee! nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')), columns=['a', 'a', 'b']) @@ -9976,6 +10153,12 @@ def test_diff_float_n(self): xp = self.tsframe.diff(1) assert_frame_equal(rs, xp) + def test_diff_axis(self): + # GH 9727 + df = DataFrame([[1., 2.], [3., 4.]]) + assert_frame_equal(df.diff(axis=1), DataFrame([[np.nan, 1.], [np.nan, 1.]])) + assert_frame_equal(df.diff(axis=0), DataFrame([[np.nan, np.nan], [2., 2.]])) + def test_pct_change(self): rs = self.tsframe.pct_change(fill_method=None) assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1) @@ -10663,6 +10846,19 @@ def test_sort_index(self): with assertRaisesRegexp(ValueError, msg): frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5) + def test_sort_index_categorical_index(self): + + df = DataFrame({'A' : np.arange(6,dtype='int64'), + 'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B') + + result = df.sort_index() + expected = df.iloc[[4,0,1,5,2,3]] + assert_frame_equal(result, expected) + 
+ result = df.sort_index(ascending=False) + expected = df.iloc[[3,2,5,1,0,4]] + assert_frame_equal(result, expected) + def test_sort_nan(self): # GH3917 nan = np.nan @@ -11274,6 +11470,39 @@ def test_dataframe_clip(self): self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True) self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True) + def test_clip_against_series(self): + # GH #6966 + + df = DataFrame(np.random.randn(1000, 2)) + lb = Series(np.random.randn(1000)) + ub = lb + 1 + + clipped_df = df.clip(lb, ub, axis=0) + + for i in range(2): + lb_mask = df.iloc[:, i] <= lb + ub_mask = df.iloc[:, i] >= ub + mask = ~lb_mask & ~ub_mask + + assert_series_equal(clipped_df.loc[lb_mask, i], lb[lb_mask]) + assert_series_equal(clipped_df.loc[ub_mask, i], ub[ub_mask]) + assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i]) + + def test_clip_against_frame(self): + df = DataFrame(np.random.randn(1000, 2)) + lb = DataFrame(np.random.randn(1000, 2)) + ub = lb + 1 + + clipped_df = df.clip(lb, ub) + + lb_mask = df <= lb + ub_mask = df >= ub + mask = ~lb_mask & ~ub_mask + + assert_frame_equal(clipped_df[lb_mask], lb[lb_mask]) + assert_frame_equal(clipped_df[ub_mask], ub[ub_mask]) + assert_frame_equal(clipped_df[mask], df[mask]) + def test_get_X_columns(self): # numeric and object columns @@ -11684,16 +11913,14 @@ def test_mode(self): df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11], "B": [10, 10, 10, np.nan, 3, 4], "C": [8, 8, 8, 9, 9, 9], - "D": range(6), + "D": np.arange(6,dtype='int64'), "E": [8, 8, 1, 1, 3, 3]}) assert_frame_equal(df[["A"]].mode(), pd.DataFrame({"A": [12]})) - assert_frame_equal(df[["D"]].mode(), - pd.DataFrame(pd.Series([], dtype="int64"), - columns=["D"])) - assert_frame_equal(df[["E"]].mode(), - pd.DataFrame(pd.Series([1, 3, 8], dtype="int64"), - columns=["E"])) + expected = pd.Series([], dtype='int64', name='D').to_frame() + assert_frame_equal(df[["D"]].mode(), expected) + expected = pd.Series([1, 3, 8], dtype='int64', 
name='E').to_frame() + assert_frame_equal(df[["E"]].mode(), expected) assert_frame_equal(df[["A", "B"]].mode(), pd.DataFrame({"A": [12], "B": [10.]})) assert_frame_equal(df.mode(), @@ -11715,7 +11942,7 @@ def test_mode(self): com.pprint_thing(b) assert_frame_equal(a, b) # should work with heterogeneous types - df = pd.DataFrame({"A": range(6), + df = pd.DataFrame({"A": np.arange(6,dtype='int64'), "B": pd.date_range('2011', periods=6), "C": list('abcdef')}) exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype), @@ -12357,6 +12584,31 @@ def test_unstack_bool(self): ['c', 'l']])) assert_frame_equal(rs, xp) + def test_unstack_level_binding(self): + # GH9856 + mi = pd.MultiIndex( + levels=[[u('foo'), u('bar')], [u('one'), u('two')], + [u('a'), u('b')]], + labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]], + names=[u('first'), u('second'), u('third')]) + s = pd.Series(0, index=mi) + result = s.unstack([1, 2]).stack(0) + + expected_mi = pd.MultiIndex( + levels=[['foo', 'bar'], ['one', 'two']], + labels=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=['first', 'second']) + + expected = pd.DataFrame(np.array([[np.nan, 0], + [0, np.nan], + [np.nan, 0], + [0, np.nan]], + dtype=np.float64), + index=expected_mi, + columns=pd.Index(['a', 'b'], name='third')) + + self.assert_frame_equal(result, expected) + def test_unstack_to_series(self): # check reversibility data = self.frame.unstack() @@ -14059,12 +14311,21 @@ def test_assign(self): assert_frame_equal(result, expected) def test_assign_multiple(self): - df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B']) result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B) - expected = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9], - 'D': [1, 2, 3], 'E': [4, 5, 6]}) - # column order isn't preserved - assert_frame_equal(result.reindex_like(expected), expected) + expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5], + [3, 6, 9, 3, 6]], columns=list('ABCDE')) + 
assert_frame_equal(result, expected) + + def test_assign_alphabetical(self): + # GH 9818 + df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + result = df.assign(D=df.A + df.B, C=df.A - df.B) + expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]], + columns=list('ABCD')) + assert_frame_equal(result, expected) + result = df.assign(C=df.A - df.B, D=df.A + df.B) + assert_frame_equal(result, expected) def test_assign_bad(self): df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) @@ -14099,6 +14360,27 @@ def _constructor(self): # GH9776 self.assertEqual(df.iloc[0:1, :].testattr, 'XXX') + def test_to_panel_expanddim(self): + # GH 9762 + + class SubclassedFrame(DataFrame): + @property + def _constructor_expanddim(self): + return SubclassedPanel + + class SubclassedPanel(Panel): + pass + + index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)]) + df = SubclassedFrame({'X':[1, 2, 3], 'Y': [4, 5, 6]}, index=index) + result = df.to_panel() + self.assertTrue(isinstance(result, SubclassedPanel)) + expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]], + items=['X', 'Y'], major_axis=[0], + minor_axis=[0, 1, 2], + dtype='int64') + tm.assert_panel_equal(result, expected) + def skip_if_no_ne(engine='numexpr'): if engine == 'numexpr': diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 3dd8c2594cd46..3f751310438e4 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -354,6 +354,178 @@ def test_head_tail(self): self._compare(o.head(-3), o.head(7)) self._compare(o.tail(-3), o.tail(7)) + def test_sample(self): + # Fixes issue: 2419 + + o = self._construct(shape=10) + + ### + # Check behavior of random_state argument + ### + + # Check for stability when receives seed or random state -- run 10 times. 
+ for test in range(10): + seed = np.random.randint(0,100) + self._compare(o.sample(n=4, random_state=seed), o.sample(n=4, random_state=seed)) + self._compare(o.sample(frac=0.7,random_state=seed), o.sample(frac=0.7, random_state=seed)) + + self._compare(o.sample(n=4, random_state=np.random.RandomState(test)), + o.sample(n=4, random_state=np.random.RandomState(test))) + + self._compare(o.sample(frac=0.7,random_state=np.random.RandomState(test)), + o.sample(frac=0.7, random_state=np.random.RandomState(test))) + + + # Check for error when random_state argument invalid. + with tm.assertRaises(ValueError): + o.sample(random_state='astring!') + + ### + # Check behavior of `frac` and `N` + ### + + # Giving both frac and N throws error + with tm.assertRaises(ValueError): + o.sample(n=3, frac=0.3) + + # Check that raises right error for negative lengths + with tm.assertRaises(ValueError): + o.sample(n=-3) + with tm.assertRaises(ValueError): + o.sample(frac=-0.3) + + # Make sure float values of `n` give error + with tm.assertRaises(ValueError): + o.sample(n= 3.2) + + # Check lengths are right + self.assertTrue(len(o.sample(n=4) == 4)) + self.assertTrue(len(o.sample(frac=0.34) == 3)) + self.assertTrue(len(o.sample(frac=0.36) == 4)) + + ### + # Check weights + ### + + # Weight length must be right + with tm.assertRaises(ValueError): + o.sample(n=3, weights=[0,1]) + + with tm.assertRaises(ValueError): + bad_weights = [0.5]*11 + o.sample(n=3, weights=bad_weights) + + # Check won't accept negative weights + with tm.assertRaises(ValueError): + bad_weights = [-0.1]*10 + o.sample(n=3, weights=bad_weights) + + # Check inf and -inf throw errors: + with tm.assertRaises(ValueError): + weights_with_inf = [0.1]*10 + weights_with_inf[0] = np.inf + o.sample(n=3, weights=weights_with_inf) + + with tm.assertRaises(ValueError): + weights_with_ninf = [0.1]*10 + weights_with_ninf[0] = -np.inf + o.sample(n=3, weights=weights_with_ninf) + + + # A few dataframe test with degenerate weights. 
+ easy_weight_list = [0]*10 + easy_weight_list[5] = 1 + + df = pd.DataFrame({'col1':range(10,20), + 'col2':range(20,30), + 'colString': ['a']*10, + 'easyweights':easy_weight_list}) + sample1 = df.sample(n=1, weights='easyweights') + assert_frame_equal(sample1, df.iloc[5:6]) + + # Ensure proper error if string given as weight for Series, panel, or + # DataFrame with axis = 1. + s = Series(range(10)) + with tm.assertRaises(ValueError): + s.sample(n=3, weights='weight_column') + + panel = pd.Panel(items = [0,1,2], major_axis = [2,3,4], minor_axis = [3,4,5]) + with tm.assertRaises(ValueError): + panel.sample(n=1, weights='weight_column') + + with tm.assertRaises(ValueError): + df.sample(n=1, weights='weight_column', axis = 1) + + # Check weighting key error + with tm.assertRaises(KeyError): + df.sample(n=3, weights='not_a_real_column_name') + + # Check np.nan are replaced by zeros. + weights_with_nan = [np.nan]*10 + weights_with_nan[5] = 0.5 + self._compare(o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6]) + + # Check None are also replaced by zeros. + weights_with_None = [None]*10 + weights_with_None[5] = 0.5 + self._compare(o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6]) + + # Check that re-normalizes weights that don't sum to one. 
+ weights_less_than_1 = [0]*10 + weights_less_than_1[0] = 0.5 + tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1]) + + + ### + # Test axis argument + ### + + # Test axis argument + df = pd.DataFrame({'col1':range(10), 'col2':['a']*10}) + second_column_weight = [0,1] + assert_frame_equal(df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']]) + + # Different axis arg types + assert_frame_equal(df.sample(n=1, axis='columns', weights=second_column_weight), + df[['col2']]) + + weight = [0]*10 + weight[5] = 0.5 + assert_frame_equal(df.sample(n=1, axis='rows', weights=weight), + df.iloc[5:6]) + assert_frame_equal(df.sample(n=1, axis='index', weights=weight), + df.iloc[5:6]) + + + # Check out of range axis values + with tm.assertRaises(ValueError): + df.sample(n=1, axis=2) + + with tm.assertRaises(ValueError): + df.sample(n=1, axis='not_a_name') + + with tm.assertRaises(ValueError): + s = pd.Series(range(10)) + s.sample(n=1, axis=1) + + # Test weight length compared to correct axis + with tm.assertRaises(ValueError): + df.sample(n=1, axis=1, weights=[0.5]*10) + + # Check weights with axis = 1 + easy_weight_list = [0]*3 + easy_weight_list[2] = 1 + + df = pd.DataFrame({'col1':range(10,20), + 'col2':range(20,30), + 'colString': ['a']*10}) + sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) + assert_frame_equal(sample1, df[['colString']]) + + # Test default axes + p = pd.Panel(items = ['a','b','c'], major_axis=[2,4,6], minor_axis=[1,3,5]) + assert_panel_equal(p.sample(n=3, random_state=42), p.sample(n=3, axis=1, random_state=42)) + assert_frame_equal(df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)) def test_size_compat(self): # GH8846 diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 3ce4e150326a2..82f4b8c05ca06 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -439,6 +439,38 @@ def _check_box_return_type(self, returned, return_type, 
expected_keys=None, else: raise AssertionError + def _check_grid_settings(self, obj, kinds, kws={}): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + + import matplotlib as mpl + + def is_grid_on(): + xoff = all(not g.gridOn for g in self.plt.gca().xaxis.get_major_ticks()) + yoff = all(not g.gridOn for g in self.plt.gca().yaxis.get_major_ticks()) + return not(xoff and yoff) + + spndx=1 + for kind in kinds: + self.plt.subplot(1,4*len(kinds),spndx); spndx+=1 + mpl.rc('axes',grid=False) + obj.plot(kind=kind, **kws) + self.assertFalse(is_grid_on()) + + self.plt.subplot(1,4*len(kinds),spndx); spndx+=1 + mpl.rc('axes',grid=True) + obj.plot(kind=kind, grid=False, **kws) + self.assertFalse(is_grid_on()) + + if kind != 'pie': + self.plt.subplot(1,4*len(kinds),spndx); spndx+=1 + mpl.rc('axes',grid=True) + obj.plot(kind=kind, **kws) + self.assertTrue(is_grid_on()) + + self.plt.subplot(1,4*len(kinds),spndx); spndx+=1 + mpl.rc('axes',grid=False) + obj.plot(kind=kind, grid=True, **kws) + self.assertTrue(is_grid_on()) @tm.mplskip class TestSeriesPlots(TestPlotBase): @@ -553,6 +585,29 @@ def test_ts_area_lim(self): self.assertEqual(xmin, line[0]) self.assertEqual(xmax, line[-1]) + def test_label(self): + s = Series([1, 2]) + ax = s.plot(label='LABEL', legend=True) + self._check_legend_labels(ax, labels=['LABEL']) + self.plt.close() + ax = s.plot(legend=True) + self._check_legend_labels(ax, labels=['None']) + self.plt.close() + # get name from index + s.name = 'NAME' + ax = s.plot(legend=True) + self._check_legend_labels(ax, labels=['NAME']) + self.plt.close() + # override the default + ax = s.plot(legend=True, label='LABEL') + self._check_legend_labels(ax, labels=['LABEL']) + self.plt.close() + # Add lebel info, but don't draw + ax = s.plot(legend=False, label='LABEL') + self.assertEqual(ax.get_legend(), None) # Hasn't been drawn + ax.legend() # draw it + self._check_legend_labels(ax, labels=['LABEL']) + def test_line_area_nan_series(self): values = [1, 2, 
np.nan, 3] s = Series(values) @@ -592,6 +647,26 @@ def test_bar_log(self): ax = Series([200, 500]).plot(log=True, kind='bar') assert_array_equal(ax.yaxis.get_ticklocs(), expected) + tm.close() + + ax = Series([200, 500]).plot(log=True, kind='barh') + assert_array_equal(ax.xaxis.get_ticklocs(), expected) + tm.close() + + # GH 9905 + expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00]) + + if not self.mpl_le_1_2_1: + expected = np.hstack((1.0e-04, expected, 1.0e+01)) + + ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar') + assert_array_equal(ax.get_ylim(), (0.001, 0.10000000000000001)) + assert_array_equal(ax.yaxis.get_ticklocs(), expected) + tm.close() + + ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh') + assert_array_equal(ax.get_xlim(), (0.001, 0.10000000000000001)) + assert_array_equal(ax.xaxis.get_ticklocs(), expected) @slow def test_bar_ignore_index(self): @@ -678,6 +753,18 @@ def test_hist_df_kwargs(self): ax = df.plot(kind='hist', bins=5) self.assertEqual(len(ax.patches), 10) + @slow + def test_hist_df_with_nonnumerics(self): + # GH 9853 + with tm.RNGContext(1): + df = DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D']) + df['E'] = ['x', 'y'] * 5 + ax = df.plot(kind='hist', bins=5) + self.assertEqual(len(ax.patches), 20) + + ax = df.plot(kind='hist') # bins=10 + self.assertEqual(len(ax.patches), 40) + @slow def test_hist_legacy(self): _check_plot_works(self.ts.hist) @@ -1053,6 +1140,12 @@ def test_table(self): _check_plot_works(self.series.plot, table=True) _check_plot_works(self.series.plot, table=self.series) + @slow + def test_series_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + self._check_grid_settings(Series([1,2,3]), + plotting._series_kinds + plotting._common_kinds) + @tm.mplskip class TestDataFramePlots(TestPlotBase): @@ -1154,6 +1247,22 @@ def test_plot(self): self.assertEqual(len(axes), 1) self.assertIs(ax.get_axes(), axes[0]) + def 
test_color_and_style_arguments(self): + df = DataFrame({'x': [1, 2], 'y': [3, 4]}) + # passing both 'color' and 'style' arguments should be allowed + # if there is no color symbol in the style strings: + ax = df.plot(color = ['red', 'black'], style = ['-', '--']) + # check that the linestyles are correctly set: + linestyle = [line.get_linestyle() for line in ax.lines] + self.assertEqual(linestyle, ['-', '--']) + # check that the colors are correctly set: + color = [line.get_color() for line in ax.lines] + self.assertEqual(color, ['red', 'black']) + # passing both 'color' and 'style' arguments should not be allowed + # if there is a color symbol in the style strings: + with tm.assertRaises(ValueError): + df.plot(color = ['red', 'black'], style = ['k-', 'r--']) + def test_nonnumeric_exclude(self): df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}) ax = df.plot() @@ -1486,6 +1595,19 @@ def test_subplots_ts_share_axes(self): for ax in axes[[0, 1, 2], [2]].ravel(): self._check_visible(ax.get_yticklabels(), visible=False) + def test_subplots_sharex_axes_existing_axes(self): + # GH 9158 + d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]} + df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14')) + + axes = df[['A', 'B']].plot(subplots=True) + df['C'].plot(ax=axes[0], secondary_y=True) + + self._check_visible(axes[0].get_xticklabels(), visible=False) + self._check_visible(axes[1].get_xticklabels(), visible=True) + for ax in axes.ravel(): + self._check_visible(ax.get_yticklabels(), visible=True) + def test_negative_log(self): df = - DataFrame(rand(6, 4), index=list(string.ascii_letters[:6]), @@ -1581,7 +1703,10 @@ def test_line_lim(self): self.assertEqual(xmax, lines[0].get_data()[0][-1]) axes = df.plot(secondary_y=True, subplots=True) + self._check_axes_shape(axes, axes_num=3, layout=(3, 1)) for ax in axes: + self.assertTrue(hasattr(ax, 'left_ax')) + self.assertFalse(hasattr(ax, 'right_ax')) xmin, xmax = ax.get_xlim() lines = ax.get_lines() 
self.assertEqual(xmin, lines[0].get_data()[0][0]) @@ -3339,6 +3464,12 @@ def test_sharey_and_ax(self): "y label is invisible but shouldn't") + @slow + def test_df_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + self._check_grid_settings(DataFrame({'a':[1,2,3],'b':[2,3,4]}), + plotting._dataframe_kinds, kws={'x':'a','y':'b'}) + @tm.mplskip class TestDataFrameGroupByPlots(TestPlotBase): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index e7001eb09f20c..0789e20df3945 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -8,7 +8,7 @@ from numpy import nan from pandas import date_range,bdate_range, Timestamp -from pandas.core.index import Index, MultiIndex, Int64Index +from pandas.core.index import Index, MultiIndex, Int64Index, CategoricalIndex from pandas.core.api import Categorical, DataFrame from pandas.core.groupby import (SpecificationError, DataError, _nargsort, _lexsort_indexer) @@ -297,9 +297,9 @@ def test_nth(self): # as it keeps the order in the series (and not the group order) # related GH 7287 expected = s.groupby(g,sort=False).first() - expected.index = range(1,10) - result = s.groupby(g).nth(0,dropna='all') - assert_series_equal(result,expected) + expected.index = pd.Index(range(1,10), name=0) + result = s.groupby(g).nth(0, dropna='all') + assert_series_equal(result, expected) # doc example df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) @@ -430,6 +430,21 @@ def test_grouper_creation_bug(self): expected = s.groupby(level='one').sum() assert_series_equal(result, expected) + def test_grouper_getting_correct_binner(self): + + # GH 10063 + # using a non-time-based grouper and a time-based grouper + # and specifying levels + df = DataFrame({'A' : 1 }, + index=pd.MultiIndex.from_product([list('ab'), + date_range('20130101',periods=80)], + names=['one','two'])) + result = df.groupby([pd.Grouper(level='one'),pd.Grouper(level='two',freq='M')]).sum() 
+ expected = DataFrame({'A' : [31,28,21,31,28,21]}, + index=MultiIndex.from_product([list('ab'),date_range('20130101',freq='M',periods=3)], + names=['one','two'])) + assert_frame_equal(result, expected) + def test_grouper_iter(self): self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo']) @@ -684,7 +699,6 @@ def test_get_group(self): expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1]) assert_panel_equal(gp, expected) - # GH 5267 # be datelike friendly df = DataFrame({'DATE' : pd.to_datetime(['10-Oct-2013', '10-Oct-2013', '10-Oct-2013', @@ -807,9 +821,10 @@ def test_apply_issues(self): # GH 5789 # don't auto coerce dates df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value']) - expected = Series(['00:00','02:00','02:00'],index=['2011.05.16','2011.05.17','2011.05.18']) + exp_idx = pd.Index(['2011.05.16','2011.05.17','2011.05.18'], dtype=object, name='date') + expected = Series(['00:00','02:00','02:00'], index=exp_idx) result = df.groupby('date').apply(lambda x: x['time'][x['value'].idxmax()]) - assert_series_equal(result,expected) + assert_series_equal(result, expected) def test_len(self): df = tm.makeTimeDataFrame() @@ -959,6 +974,12 @@ def demean(arr): g = df.groupby(pd.TimeGrouper('M')) g.transform(lambda x: x-1) + # GH 9700 + df = DataFrame({'a' : range(5, 10), 'b' : range(5)}) + result = df.groupby('a').transform(max) + expected = DataFrame({'b' : range(5)}) + tm.assert_frame_equal(result, expected) + def test_transform_fast(self): df = DataFrame( { 'id' : np.arange( 100000 ) / 3, @@ -1003,6 +1024,14 @@ def test_transform_broadcast(self): for idx in gp.index: assert_fp_equal(res.xs(idx), agged[idx]) + def test_transform_dtype(self): + # GH 9807 + # Check transform dtype output is preserved + df = DataFrame([[1, 3], [2, 3]]) + result = df.groupby(1).transform('mean') + expected = DataFrame([[1.5], [1.5]]) + assert_frame_equal(result, expected) + def test_transform_bug(self): # GH 5712 # transforming on a 
datetime column @@ -1692,7 +1721,8 @@ def test_groupby_as_index_apply(self): # apply doesn't maintain the original ordering # changed in GH5610 as the as_index=False returns a MI here exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)]) - exp_as_apply = MultiIndex.from_tuples([(1, 0), (1, 2), (2, 1), (3, 4)]) + tp = [(1, 0), (1, 2), (2, 1), (3, 4)] + exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None]) assert_index_equal(res_as_apply, exp_as_apply) assert_index_equal(res_not_as_apply, exp_not_as_apply) @@ -1712,6 +1742,8 @@ def test_groupby_head_tail(self): assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1)) empty_not_as = DataFrame(columns=df.columns) + empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype) + empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype) assert_frame_equal(empty_not_as, g_not_as.head(0)) assert_frame_equal(empty_not_as, g_not_as.tail(0)) assert_frame_equal(empty_not_as, g_not_as.head(-1)) @@ -1727,6 +1759,8 @@ def test_groupby_head_tail(self): assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1)) empty_as = DataFrame(index=df_as.index[:0], columns=df.columns) + empty_as['A'] = empty_not_as['A'].astype(df.A.dtype) + empty_as['B'] = empty_not_as['B'].astype(df.B.dtype) assert_frame_equal(empty_as, g_as.head(0)) assert_frame_equal(empty_as, g_as.tail(0)) assert_frame_equal(empty_as, g_as.head(-1)) @@ -1914,6 +1948,8 @@ def _testit(op): for (cat1, cat2), group in grouped: expd.setdefault(cat1, {})[cat2] = op(group['C']) exp = DataFrame(expd).T.stack(dropna=False) + exp.index.names = ['A', 'B'] + result = op(grouped)['C'] assert_series_equal(result, exp) @@ -1966,7 +2002,7 @@ def test_cython_agg_nothing_to_agg_with_dates(self): def test_groupby_timedelta_cython_count(self): df = DataFrame({'g': list('ab' * 2), 'delt': np.arange(4).astype('timedelta64[ns]')}) - expected = Series([2, 2], index=['a', 'b'], name='delt') + expected = Series([2, 2], index=pd.Index(['a', 'b'], name='g'), name='delt') result = 
df.groupby('g').delt.count() tm.assert_series_equal(expected, result) @@ -2377,13 +2413,13 @@ def test_count_object(self): df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3}) result = df.groupby('c').a.count() - expected = pd.Series([3, 3], index=[2, 3], name='a') + expected = pd.Series([3, 3], index=pd.Index([2, 3], name='c'), name='a') tm.assert_series_equal(result, expected) df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3, 'c': [2] * 3 + [3] * 3}) result = df.groupby('c').a.count() - expected = pd.Series([1, 3], index=[2, 3], name='a') + expected = pd.Series([1, 3], index=pd.Index([2, 3], name='c'), name='a') tm.assert_series_equal(result, expected) def test_count_cross_type(self): # GH8169 @@ -2800,6 +2836,49 @@ def test_groupby_list_infer_array_like(self): result = df.groupby(['foo', 'bar']).mean() expected = df.groupby([df['foo'], df['bar']]).mean()[['val']] + def test_groupby_nat_exclude(self): + # GH 6992 + df = pd.DataFrame({'values': np.random.randn(8), + 'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp('2013-02-01'), + np.nan, pd.Timestamp('2013-02-01'), np.nan, pd.Timestamp('2013-01-01')], + 'str': [np.nan, 'a', np.nan, 'a', + np.nan, 'a', np.nan, 'b']}) + grouped = df.groupby('dt') + + expected = [[1, 7], [3, 5]] + keys = sorted(grouped.groups.keys()) + self.assertEqual(len(keys), 2) + for k, e in zip(keys, expected): + # grouped.groups keys are np.datetime64 with system tz + # not to be affected by tz, only compare values + self.assertEqual(grouped.groups[k], e) + + # confirm obj is not filtered + tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df) + self.assertEqual(grouped.ngroups, 2) + expected = {Timestamp('2013-01-01 00:00:00'): np.array([1, 7]), + Timestamp('2013-02-01 00:00:00'): np.array([3, 5])} + for k in grouped.indices: + self.assert_numpy_array_equal(grouped.indices[k], expected[k]) + + tm.assert_frame_equal(grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]]) + 
tm.assert_frame_equal(grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]]) + + self.assertRaises(KeyError, grouped.get_group, pd.NaT) + + nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan], + 'nat': [pd.NaT, pd.NaT, pd.NaT]}) + self.assertEqual(nan_df['nan'].dtype, 'float64') + self.assertEqual(nan_df['nat'].dtype, 'datetime64[ns]') + + for key in ['nan', 'nat']: + grouped = nan_df.groupby(key) + self.assertEqual(grouped.groups, {}) + self.assertEqual(grouped.ngroups, 0) + self.assertEqual(grouped.indices, {}) + self.assertRaises(KeyError, grouped.get_group, np.nan) + self.assertRaises(KeyError, grouped.get_group, pd.NaT) + def test_dictify(self): dict(iter(self.df.groupby('A'))) dict(iter(self.df.groupby(['A', 'B']))) @@ -3370,12 +3449,11 @@ def test_groupby_datetime_categorical(self): cats = Categorical.from_codes(codes, levels, name='myfactor', ordered=True) data = DataFrame(np.random.randn(100, 4)) - result = data.groupby(cats).mean() expected = data.groupby(np.asarray(cats)).mean() expected = expected.reindex(levels) - expected.index.name = 'myfactor' + expected.index = CategoricalIndex(expected.index,categories=expected.index,name='myfactor',ordered=True) assert_frame_equal(result, expected) self.assertEqual(result.index.name, cats.name) @@ -3390,6 +3468,26 @@ def test_groupby_datetime_categorical(self): expected.index.names = ['myfactor', None] assert_frame_equal(desc_result, expected) + def test_groupby_categorical_index(self): + + levels = ['foo', 'bar', 'baz', 'qux'] + codes = np.random.randint(0, 4, size=20) + cats = Categorical.from_codes(codes, levels, name='myfactor', ordered=True) + df = DataFrame(np.repeat(np.arange(20),4).reshape(-1,4), columns=list('abcd')) + df['cats'] = cats + + # with a cat index + result = df.set_index('cats').groupby(level=0).sum() + expected = df[list('abcd')].groupby(cats.codes).sum() + expected.index = CategoricalIndex(Categorical.from_codes([0,1,2,3], levels, ordered=True),name='cats') + 
assert_frame_equal(result, expected) + + # with a cat column, should produce a cat index + result = df.groupby('cats').sum() + expected = df[list('abcd')].groupby(cats.codes).sum() + expected.index = CategoricalIndex(Categorical.from_codes([0,1,2,3], levels, ordered=True),name='cats') + assert_frame_equal(result, expected) + def test_groupby_groups_datetimeindex(self): # #1430 from pandas.tseries.api import DatetimeIndex @@ -3518,6 +3616,8 @@ def test_groupby_categorical_no_compress(self): result = data.groupby(cats).mean() exp = data.groupby(codes).mean() + + exp.index = CategoricalIndex(exp.index,categories=cats.categories,ordered=cats.ordered) assert_series_equal(result, exp) codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3]) @@ -3525,6 +3625,7 @@ def test_groupby_categorical_no_compress(self): result = data.groupby(cats).mean() exp = data.groupby(codes).mean().reindex(cats.categories) + exp.index = CategoricalIndex(exp.index,categories=cats.categories,ordered=cats.ordered) assert_series_equal(result, exp) cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], @@ -5053,6 +5154,17 @@ def test_groupby_categorical_two_columns(self): "C3":[nan,nan,nan,nan, 10,100,nan,nan, nan,nan,200,34]}, index=idx) tm.assert_frame_equal(res, exp) + def test_groupby_apply_all_none(self): + # Tests to make sure no errors if apply function returns all None + # values. Issue 9684. 
+ test_df = DataFrame({'groups': [0,0,1,1], 'random_vars': [8,7,4,5]}) + + def test_func(x): + pass + result = test_df.groupby('groups').apply(test_func) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + def assert_fp_equal(a, b): assert (np.abs(a - b) < 1e-12).all() diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 39db387045f12..ed84c9764dd84 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -12,14 +12,10 @@ import numpy as np from numpy.testing import assert_array_equal -from pandas import period_range, date_range - -from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex, - InvalidIndexError, NumericIndex) -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.tdi import TimedeltaIndex -from pandas.tseries.period import PeriodIndex -from pandas.core.series import Series +from pandas import (period_range, date_range, Categorical, Series, + Index, Float64Index, Int64Index, MultiIndex, + CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex) +from pandas.core.index import InvalidIndexError, NumericIndex from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp, assert_copy) from pandas import compat @@ -41,6 +37,11 @@ class Base(object): _holder = None _compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes'] + def setup_indices(self): + # setup the test indices in the self.indicies dict + for name, ind in self.indices.items(): + setattr(self, name, ind) + def verify_pickle(self,index): unpickled = self.round_trip_pickle(index) self.assertTrue(index.equals(unpickled)) @@ -98,6 +99,7 @@ def f(): def test_reindex_base(self): idx = self.create_index() expected = np.arange(idx.size) + actual = idx.get_indexer(idx) assert_array_equal(expected, actual) @@ -118,28 +120,18 @@ def test_ndarray_compat_properties(self): idx.nbytes idx.values.nbytes + def test_repr_roundtrip(self): -class TestIndex(Base, tm.TestCase): - _holder = Index - 
_multiprocess_can_split_ = True + idx = self.create_index() + tm.assert_index_equal(eval(repr(idx)),idx) - def setUp(self): - self.indices = dict( - unicodeIndex = tm.makeUnicodeIndex(100), - strIndex = tm.makeStringIndex(100), - dateIndex = tm.makeDateIndex(100), - intIndex = tm.makeIntIndex(100), - floatIndex = tm.makeFloatIndex(100), - boolIndex = Index([True,False]), - empty = Index([]), - tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'], - [1, 2, 3])) - ) - for name, ind in self.indices.items(): - setattr(self, name, ind) + def test_str(self): - def create_index(self): - return Index(list('abcde')) + # test the string repr + idx = self.create_index() + idx.name = 'foo' + self.assertTrue("'foo'" in str(idx)) + self.assertTrue(idx.__class__.__name__ in str(idx)) def test_wrong_number_names(self): def testit(ind): @@ -150,14 +142,18 @@ def testit(ind): def test_set_name_methods(self): new_name = "This is the new name for this index" - indices = (self.dateIndex, self.intIndex, self.unicodeIndex, - self.empty) - for ind in indices: + for ind in self.indices.values(): + + # don't tests a MultiIndex here (as its tested separated) + if isinstance(ind, MultiIndex): + continue + original_name = ind.name new_ind = ind.set_names([new_name]) self.assertEqual(new_ind.name, new_name) self.assertEqual(ind.name, original_name) res = ind.rename(new_name, inplace=True) + # should return None self.assertIsNone(res) self.assertEqual(ind.name, new_name) @@ -167,46 +163,258 @@ def test_set_name_methods(self): # ind.set_names("a") with assertRaisesRegexp(ValueError, "Level must be None"): ind.set_names("a", level=0) - # rename in place just leaves tuples and other containers alone - name = ('A', 'B') - ind = self.intIndex - ind.rename(name, inplace=True) - self.assertEqual(ind.name, name) - self.assertEqual(ind.names, [name]) - def test_hash_error(self): - with tm.assertRaisesRegexp(TypeError, - "unhashable type: %r" % - type(self.strIndex).__name__): - hash(self.strIndex) 
+ # rename in place just leaves tuples and other containers alone + name = ('A', 'B') + ind.rename(name, inplace=True) + self.assertEqual(ind.name, name) + self.assertEqual(ind.names, [name]) - def test_new_axis(self): - new_index = self.dateIndex[None, :] - self.assertEqual(new_index.ndim, 2) - tm.assert_isinstance(new_index, np.ndarray) + def test_hash_error(self): + for ind in self.indices.values(): + with tm.assertRaisesRegexp(TypeError, + "unhashable type: %r" % + type(ind).__name__): + hash(ind) def test_copy_and_deepcopy(self): from copy import copy, deepcopy - for func in (copy, deepcopy): - idx_copy = func(self.strIndex) - self.assertIsNot(idx_copy, self.strIndex) - self.assertTrue(idx_copy.equals(self.strIndex)) + for ind in self.indices.values(): - new_copy = self.strIndex.copy(deep=True, name="banana") - self.assertEqual(new_copy.name, "banana") - new_copy2 = self.intIndex.copy(dtype=int) - self.assertEqual(new_copy2.dtype.kind, 'i') + # don't tests a MultiIndex here (as its tested separated) + if isinstance(ind, MultiIndex): + continue + + for func in (copy, deepcopy): + idx_copy = func(ind) + self.assertIsNot(idx_copy, ind) + self.assertTrue(idx_copy.equals(ind)) + + new_copy = ind.copy(deep=True, name="banana") + self.assertEqual(new_copy.name, "banana") def test_duplicates(self): - idx = Index([0, 0, 0]) - self.assertFalse(idx.is_unique) + for ind in self.indices.values(): + + if not len(ind): + continue + idx = self._holder([ind[0]]*5) + self.assertFalse(idx.is_unique) + self.assertTrue(idx.has_duplicates) def test_sort(self): - self.assertRaises(TypeError, self.strIndex.sort) + for ind in self.indices.values(): + self.assertRaises(TypeError, ind.sort) def test_mutability(self): - self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo') + for ind in self.indices.values(): + if not len(ind): + continue + self.assertRaises(TypeError, ind.__setitem__, 0, ind[0]) + + def test_view(self): + for ind in self.indices.values(): + i_view = 
ind.view() + self.assertEqual(i_view.name, ind.name) + + def test_compat(self): + for ind in self.indices.values(): + self.assertEqual(ind.tolist(),list(ind)) + + def test_argsort(self): + for k, ind in self.indices.items(): + + # sep teststed + if k in ['catIndex']: + continue + + result = ind.argsort() + expected = np.array(ind).argsort() + self.assert_numpy_array_equal(result, expected) + + def test_pickle(self): + for ind in self.indices.values(): + self.verify_pickle(ind) + ind.name = 'foo' + self.verify_pickle(ind) + + def test_take(self): + indexer = [4, 3, 0, 2] + for k, ind in self.indices.items(): + + # separate + if k in ['boolIndex','tuples','empty']: + continue + + result = ind.take(indexer) + expected = ind[indexer] + self.assertTrue(result.equals(expected)) + + def test_setops_errorcases(self): + for name, idx in compat.iteritems(self.indices): + # # non-iterable input + cases = [0.5, 'xxx'] + methods = [idx.intersection, idx.union, idx.difference, idx.sym_diff] + + for method in methods: + for case in cases: + assertRaisesRegexp(TypeError, + "Input must be Index or array-like", + method, case) + + def test_intersection_base(self): + for name, idx in compat.iteritems(self.indices): + first = idx[:5] + second = idx[:3] + intersect = first.intersection(second) + + if isinstance(idx, CategoricalIndex): + pass + else: + self.assertTrue(tm.equalContents(intersect, second)) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + if isinstance(idx, PeriodIndex): + msg = "can only call with other PeriodIndex-ed objects" + with tm.assertRaisesRegexp(ValueError, msg): + result = first.intersection(case) + elif isinstance(idx, CategoricalIndex): + pass + else: + result = first.intersection(case) + self.assertTrue(tm.equalContents(result, second)) + + if isinstance(idx, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with tm.assertRaisesRegexp(TypeError, msg): + result = 
first.intersection([1, 2, 3]) + + def test_union_base(self): + for name, idx in compat.iteritems(self.indices): + first = idx[3:] + second = idx[:5] + everything = idx + union = first.union(second) + self.assertTrue(tm.equalContents(union, everything)) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + if isinstance(idx, PeriodIndex): + msg = "can only call with other PeriodIndex-ed objects" + with tm.assertRaisesRegexp(ValueError, msg): + result = first.union(case) + elif isinstance(idx, CategoricalIndex): + pass + else: + result = first.union(case) + self.assertTrue(tm.equalContents(result, everything)) + + if isinstance(idx, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with tm.assertRaisesRegexp(TypeError, msg): + result = first.union([1, 2, 3]) + + def test_difference_base(self): + for name, idx in compat.iteritems(self.indices): + first = idx[2:] + second = idx[:4] + answer = idx[4:] + result = first.difference(second) + + if isinstance(idx, CategoricalIndex): + pass + else: + self.assertTrue(tm.equalContents(result, answer)) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + if isinstance(idx, PeriodIndex): + msg = "can only call with other PeriodIndex-ed objects" + with tm.assertRaisesRegexp(ValueError, msg): + result = first.difference(case) + elif isinstance(idx, CategoricalIndex): + pass + elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): + self.assertEqual(result.__class__, answer.__class__) + self.assert_numpy_array_equal(result.asi8, answer.asi8) + else: + result = first.difference(case) + self.assertTrue(tm.equalContents(result, answer)) + + if isinstance(idx, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with tm.assertRaisesRegexp(TypeError, msg): + result = first.difference([1, 2, 3]) + + def test_symmetric_diff(self): + for name, idx in compat.iteritems(self.indices): + first = 
idx[1:] + second = idx[:-1] + if isinstance(idx, CategoricalIndex): + pass + else: + answer = idx[[0, -1]] + result = first.sym_diff(second) + self.assertTrue(tm.equalContents(result, answer)) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + if isinstance(idx, PeriodIndex): + msg = "can only call with other PeriodIndex-ed objects" + with tm.assertRaisesRegexp(ValueError, msg): + result = first.sym_diff(case) + elif isinstance(idx, CategoricalIndex): + pass + else: + result = first.sym_diff(case) + self.assertTrue(tm.equalContents(result, answer)) + + if isinstance(idx, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with tm.assertRaisesRegexp(TypeError, msg): + result = first.sym_diff([1, 2, 3]) + + +class TestIndex(Base, tm.TestCase): + _holder = Index + _multiprocess_can_split_ = True + + def setUp(self): + self.indices = dict( + unicodeIndex = tm.makeUnicodeIndex(100), + strIndex = tm.makeStringIndex(100), + dateIndex = tm.makeDateIndex(100), + periodIndex = tm.makePeriodIndex(100), + tdIndex = tm.makeTimedeltaIndex(100), + intIndex = tm.makeIntIndex(100), + floatIndex = tm.makeFloatIndex(100), + boolIndex = Index([True,False]), + catIndex = tm.makeCategoricalIndex(100), + empty = Index([]), + tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'], + [1, 2, 3])) + ) + self.setup_indices() + + def create_index(self): + return Index(list('abcde')) + + def test_new_axis(self): + new_index = self.dateIndex[None, :] + self.assertEqual(new_index.ndim, 2) + tm.assert_isinstance(new_index, np.ndarray) + + def test_copy_and_deepcopy(self): + super(TestIndex, self).test_copy_and_deepcopy() + + new_copy2 = self.intIndex.copy(dtype=int) + self.assertEqual(new_copy2.dtype.kind, 'i') def test_constructor(self): # regular instance creation @@ -297,18 +505,22 @@ def test_constructor_simple_new(self): result = idx._simple_new(idx, 'obj') self.assertTrue(result.equals(idx)) - def 
test_copy(self): - i = Index([], name='Foo') - i_copy = i.copy() - self.assertEqual(i_copy.name, 'Foo') + def test_view_with_args(self): - def test_view(self): - i = Index([], name='Foo') - i_view = i.view() - self.assertEqual(i_view.name, 'Foo') + restricted = ['unicodeIndex','strIndex','catIndex','boolIndex','empty'] + + for i in restricted: + ind = self.indices[i] + + # with arguments + self.assertRaises(TypeError, lambda : ind.view('i8')) + + # these are ok + for i in list(set(self.indices.keys())-set(restricted)): + ind = self.indices[i] - # with arguments - self.assertRaises(TypeError, lambda : i.view('i8')) + # with arguments + ind.view('i8') def test_legacy_pickle_identity(self): @@ -330,9 +542,6 @@ def test_astype(self): casted = self.intIndex.astype('i8') self.assertEqual(casted.name, 'foobar') - def test_compat(self): - self.strIndex.tolist() - def test_equals(self): # same self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))) @@ -459,11 +668,6 @@ def test_nanosecond_index_access(self): self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))]) - def test_argsort(self): - result = self.strIndex.argsort() - expected = np.array(self.strIndex).argsort() - self.assert_numpy_array_equal(result, expected) - def test_comparators(self): index = self.dateIndex element = index[len(index) // 2] @@ -546,16 +750,12 @@ def test_intersection(self): first = self.strIndex[:20] second = self.strIndex[:10] intersect = first.intersection(second) - self.assertTrue(tm.equalContents(intersect, second)) # Corner cases inter = first.intersection(first) self.assertIs(inter, first) - # non-iterable input - assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5) - idx1 = Index([1, 2, 3, 4, 5], name='idx') # if target has the same name, it is preserved idx2 = Index([3, 4, 5, 6, 7], name='idx') @@ -597,6 +797,12 @@ def test_union(self): union = first.union(second) self.assertTrue(tm.equalContents(union, 
everything)) + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.union(case) + self.assertTrue(tm.equalContents(result, everything)) + # Corner cases union = first.union(first) self.assertIs(union, first) @@ -607,9 +813,6 @@ def test_union(self): union = Index([]).union(first) self.assertIs(union, first) - # non-iterable input - assertRaisesRegexp(TypeError, "iterable", first.union, 0.5) - # preserve names first.name = 'A' second.name = 'A' @@ -625,6 +828,10 @@ def test_add(self): # - API change GH 8226 with tm.assert_produces_warning(): self.strIndex + self.strIndex + with tm.assert_produces_warning(): + self.strIndex + self.strIndex.tolist() + with tm.assert_produces_warning(): + self.strIndex.tolist() + self.strIndex firstCat = self.strIndex.union(self.dateIndex) secondCat = self.strIndex.union(self.strIndex) @@ -640,6 +847,13 @@ def test_add(self): tm.assert_contains_all(self.strIndex, secondCat) tm.assert_contains_all(self.dateIndex, firstCat) + # test add and radd + idx = Index(list('abc')) + expected = Index(['a1', 'b1', 'c1']) + self.assert_index_equal(idx + '1', expected) + expected = Index(['1a', '1b', '1c']) + self.assert_index_equal('1' + idx, expected) + def test_append_multiple(self): index = Index(['a', 'b', 'c', 'd', 'e', 'f']) @@ -707,9 +921,6 @@ def test_difference(self): self.assertEqual(len(result), 0) self.assertEqual(result.name, first.name) - # non-iterable input - assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5) - def test_symmetric_diff(self): # smoke idx1 = Index([1, 2, 3, 4], name='idx1') @@ -756,26 +967,17 @@ def test_symmetric_diff(self): self.assertTrue(tm.equalContents(result, expected)) self.assertEqual(result.name, 'new_name') - # other isn't iterable - with tm.assertRaises(TypeError): - Index(idx1,dtype='object') - 1 - - def test_pickle(self): - - self.verify_pickle(self.strIndex) - self.strIndex.name = 'foo' - self.verify_pickle(self.strIndex) - 
self.verify_pickle(self.dateIndex) - def test_is_numeric(self): self.assertFalse(self.dateIndex.is_numeric()) self.assertFalse(self.strIndex.is_numeric()) self.assertTrue(self.intIndex.is_numeric()) self.assertTrue(self.floatIndex.is_numeric()) + self.assertFalse(self.catIndex.is_numeric()) def test_is_object(self): self.assertTrue(self.strIndex.is_object()) self.assertTrue(self.boolIndex.is_object()) + self.assertFalse(self.catIndex.is_object()) self.assertFalse(self.intIndex.is_object()) self.assertFalse(self.dateIndex.is_object()) self.assertFalse(self.floatIndex.is_object()) @@ -839,12 +1041,6 @@ def test_format_none(self): idx.format() self.assertIsNone(idx[3]) - def test_take(self): - indexer = [4, 3, 0, 2] - result = self.dateIndex.take(indexer) - expected = self.dateIndex[indexer] - self.assertTrue(result.equals(expected)) - def test_logical_compat(self): idx = self.create_index() self.assertEqual(idx.all(), idx.values.all()) @@ -857,6 +1053,7 @@ def _check_method_works(self, method): method(self.strIndex) method(self.intIndex) method(self.tuples) + method(self.catIndex) def test_get_indexer(self): idx1 = Index([1, 2, 3, 4, 5]) @@ -1036,20 +1233,43 @@ def check_slice(in_slice, expected): def test_drop(self): n = len(self.strIndex) - dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)]) + drop = self.strIndex[lrange(5, 10)] + dropped = self.strIndex.drop(drop) expected = self.strIndex[lrange(5) + lrange(10, n)] self.assertTrue(dropped.equals(expected)) self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar']) + self.assertRaises(ValueError, self.strIndex.drop, ['1', 'bar']) + + # errors='ignore' + mixed = drop.tolist() + ['foo'] + dropped = self.strIndex.drop(mixed, errors='ignore') + expected = self.strIndex[lrange(5) + lrange(10, n)] + self.assert_index_equal(dropped, expected) + + dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore') + expected = self.strIndex[lrange(n)] + self.assert_index_equal(dropped, expected) dropped = 
self.strIndex.drop(self.strIndex[0]) expected = self.strIndex[1:] - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) ser = Index([1, 2, 3]) dropped = ser.drop(1) expected = Index([2, 3]) - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) + + # errors='ignore' + self.assertRaises(ValueError, ser.drop, [3, 4]) + + dropped = ser.drop(4, errors='ignore') + expected = Index([1, 2, 3]) + self.assert_index_equal(dropped, expected) + + dropped = ser.drop([3, 4, 5], errors='ignore') + expected = Index([1, 2]) + self.assert_index_equal(dropped, expected) def test_tuple_union_bug(self): import pandas @@ -1174,6 +1394,49 @@ def test_join_self(self): for kind in kinds: joined = res.join(res, how=kind) self.assertIs(res, joined) + def test_str_attribute(self): + # GH9068 + methods = ['strip', 'rstrip', 'lstrip'] + idx = Index([' jack', 'jill ', ' jesse ', 'frank']) + for method in methods: + expected = Index([getattr(str, method)(x) for x in idx.values]) + tm.assert_index_equal(getattr(Index.str, method)(idx.str), expected) + + # create a few instances that are not able to use .str accessor + indices = [Index(range(5)), + tm.makeDateIndex(10), + MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]), + PeriodIndex(start='2000', end='2010', freq='A')] + for idx in indices: + with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'): + idx.str.repeat(2) + + idx = Index(['a b c', 'd e', 'f']) + expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']]) + tm.assert_index_equal(idx.str.split(), expected) + tm.assert_index_equal(idx.str.split(expand=False), expected) + + expected = MultiIndex.from_tuples([('a', 'b', 'c'), + ('d', 'e', np.nan), + ('f', np.nan, np.nan)]) + tm.assert_index_equal(idx.str.split(expand=True), expected) + + # test boolean case, should return np.array instead of boolean Index + idx = Index(['a1', 'a2', 'b1', 'b2']) + expected = np.array([True, True, False, False]) + 
self.assert_array_equal(idx.str.startswith('a'), expected) + self.assertIsInstance(idx.str.startswith('a'), np.ndarray) + s = Series(range(4), index=idx) + expected = Series(range(2), index=['a1', 'a2']) + tm.assert_series_equal(s[s.index.str.startswith('a')], expected) + + def test_tab_completion(self): + # GH 9910 + idx = Index(list('abcd')) + self.assertTrue('str' in dir(idx)) + + idx = Index(range(4)) + self.assertTrue('str' not in dir(idx)) def test_indexing_doesnt_change_class(self): idx = Index([1, 2, 3, 'a', 'b', 'c']) @@ -1263,6 +1526,387 @@ def test_groupby(self): exp = {1: [0, 1], 2: [2, 3, 4]} tm.assert_dict_equal(groups, exp) + def test_equals_op(self): + # For issue #9785 + index_a = Index(['foo', 'bar', 'baz']) + index_b = Index(['foo', 'bar', 'baz', 'qux']) + # Testing Numpy Results Equivelent + assert_array_equal( + index_a.equals(index_a), + index_a == index_a + ) + assert_array_equal( + index_a.equals(index_b), + index_a == index_b, + ) + assert_array_equal( + index_b.equals(index_a), + index_b == index_a, + ) + +class TestCategoricalIndex(Base, tm.TestCase): + _holder = CategoricalIndex + + def setUp(self): + self.indices = dict(catIndex = tm.makeCategoricalIndex(100)) + self.setup_indices() + + def create_index(self, categories=None, ordered=False): + if categories is None: + categories = list('cab') + return CategoricalIndex(list('aabbca'), categories=categories, ordered=ordered) + + def test_construction(self): + + ci = self.create_index(categories=list('abcd')) + categories = ci.categories + + result = Index(ci) + tm.assert_index_equal(result,ci,exact=True) + self.assertFalse(result.ordered) + + result = Index(ci.values) + tm.assert_index_equal(result,ci,exact=True) + self.assertFalse(result.ordered) + + # empty + result = CategoricalIndex(categories=categories) + self.assertTrue(result.categories.equals(Index(categories))) + self.assert_numpy_array_equal(result.codes,np.array([],dtype='int8')) + self.assertFalse(result.ordered) + + # 
passing categories + result = CategoricalIndex(list('aabbca'),categories=categories) + self.assertTrue(result.categories.equals(Index(categories))) + self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8')) + + c = pd.Categorical(list('aabbca')) + result = CategoricalIndex(c) + self.assertTrue(result.categories.equals(Index(list('abc')))) + self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8')) + self.assertFalse(result.ordered) + + result = CategoricalIndex(c,categories=categories) + self.assertTrue(result.categories.equals(Index(categories))) + self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8')) + self.assertFalse(result.ordered) + + ci = CategoricalIndex(c,categories=list('abcd')) + result = CategoricalIndex(ci) + self.assertTrue(result.categories.equals(Index(categories))) + self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,2,0],dtype='int8')) + self.assertFalse(result.ordered) + + result = CategoricalIndex(ci, categories=list('ab')) + self.assertTrue(result.categories.equals(Index(list('ab')))) + self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8')) + self.assertFalse(result.ordered) + + result = CategoricalIndex(ci, categories=list('ab'), ordered=True) + self.assertTrue(result.categories.equals(Index(list('ab')))) + self.assert_numpy_array_equal(result.codes,np.array([0,0,1,1,-1,0],dtype='int8')) + self.assertTrue(result.ordered) + + # turn me to an Index + result = Index(np.array(ci)) + self.assertIsInstance(result, Index) + self.assertNotIsInstance(result, CategoricalIndex) + + def test_construction_with_dtype(self): + + # specify dtype + ci = self.create_index(categories=list('abc')) + + result = Index(np.array(ci), dtype='category') + tm.assert_index_equal(result,ci,exact=True) + + result = Index(np.array(ci).tolist(), dtype='category') + tm.assert_index_equal(result,ci,exact=True) + + # these are generally only equal when the categories 
are reordered + ci = self.create_index() + + result = Index(np.array(ci), dtype='category').reorder_categories(ci.categories) + tm.assert_index_equal(result,ci,exact=True) + + # make sure indexes are handled + expected = CategoricalIndex([0,1,2], categories=[0,1,2], ordered=True) + idx = Index(range(3)) + result = CategoricalIndex(idx, categories=idx, ordered=True) + tm.assert_index_equal(result, expected, exact=True) + + def test_disallow_set_ops(self): + + # GH 10039 + # set ops (+/-) raise TypeError + idx = pd.Index(pd.Categorical(['a', 'b'])) + + self.assertRaises(TypeError, lambda : idx - idx) + self.assertRaises(TypeError, lambda : idx + idx) + self.assertRaises(TypeError, lambda : idx - ['a','b']) + self.assertRaises(TypeError, lambda : idx + ['a','b']) + self.assertRaises(TypeError, lambda : ['a','b'] - idx) + self.assertRaises(TypeError, lambda : ['a','b'] + idx) + + def test_method_delegation(self): + + ci = CategoricalIndex(list('aabbca'), categories=list('cabdef')) + result = ci.set_categories(list('cab')) + tm.assert_index_equal(result, CategoricalIndex(list('aabbca'), categories=list('cab'))) + + ci = CategoricalIndex(list('aabbca'), categories=list('cab')) + result = ci.rename_categories(list('efg')) + tm.assert_index_equal(result, CategoricalIndex(list('ffggef'), categories=list('efg'))) + + ci = CategoricalIndex(list('aabbca'), categories=list('cab')) + result = ci.add_categories(['d']) + tm.assert_index_equal(result, CategoricalIndex(list('aabbca'), categories=list('cabd'))) + + ci = CategoricalIndex(list('aabbca'), categories=list('cab')) + result = ci.remove_categories(['c']) + tm.assert_index_equal(result, CategoricalIndex(list('aabb') + [np.nan] + ['a'], categories=list('ab'))) + + ci = CategoricalIndex(list('aabbca'), categories=list('cabdef')) + result = ci.as_unordered() + tm.assert_index_equal(result, ci) + + ci = CategoricalIndex(list('aabbca'), categories=list('cabdef')) + result = ci.as_ordered() + tm.assert_index_equal(result, 
CategoricalIndex(list('aabbca'), categories=list('cabdef'), ordered=True)) + + # invalid + self.assertRaises(ValueError, lambda : ci.set_categories(list('cab'), inplace=True)) + + def test_contains(self): + + ci = self.create_index(categories=list('cabdef')) + + self.assertTrue('a' in ci) + self.assertTrue('z' not in ci) + self.assertTrue('e' not in ci) + self.assertTrue(np.nan not in ci) + + # assert codes NOT in index + self.assertFalse(0 in ci) + self.assertFalse(1 in ci) + + ci = CategoricalIndex(list('aabbca'), categories=list('cabdef') + [np.nan]) + self.assertFalse(np.nan in ci) + + ci = CategoricalIndex(list('aabbca') + [np.nan], categories=list('cabdef') + [np.nan]) + self.assertTrue(np.nan in ci) + + def test_min_max(self): + + ci = self.create_index(ordered=False) + self.assertRaises(TypeError, lambda : ci.min()) + self.assertRaises(TypeError, lambda : ci.max()) + + ci = self.create_index(ordered=True) + + self.assertEqual(ci.min(),'c') + self.assertEqual(ci.max(),'b') + + def test_append(self): + + ci = self.create_index() + categories = ci.categories + + # append cats with the same categories + result = ci[:3].append(ci[3:]) + tm.assert_index_equal(result,ci,exact=True) + + foos = [ci[:1], ci[1:3], ci[3:]] + result = foos[0].append(foos[1:]) + tm.assert_index_equal(result,ci,exact=True) + + # empty + result = ci.append([]) + tm.assert_index_equal(result,ci,exact=True) + + # appending with different categories or reoreded is not ok + self.assertRaises(TypeError, lambda : ci.append(ci.values.set_categories(list('abcd')))) + self.assertRaises(TypeError, lambda : ci.append(ci.values.reorder_categories(list('abc')))) + + # with objects + result = ci.append(['c','a']) + expected = CategoricalIndex(list('aabbcaca'), categories=categories) + tm.assert_index_equal(result,expected,exact=True) + + # invalid objects + self.assertRaises(TypeError, lambda : ci.append(['a','d'])) + + def test_insert(self): + + ci = self.create_index() + categories = ci.categories + + 
#test 0th element + result = ci.insert(0, 'a') + expected = CategoricalIndex(list('aaabbca'),categories=categories) + tm.assert_index_equal(result,expected,exact=True) + + #test Nth element that follows Python list behavior + result = ci.insert(-1, 'a') + expected = CategoricalIndex(list('aabbcaa'),categories=categories) + tm.assert_index_equal(result,expected,exact=True) + + #test empty + result = CategoricalIndex(categories=categories).insert(0, 'a') + expected = CategoricalIndex(['a'],categories=categories) + tm.assert_index_equal(result,expected,exact=True) + + # invalid + self.assertRaises(TypeError, lambda : ci.insert(0,'d')) + + def test_delete(self): + + ci = self.create_index() + categories = ci.categories + + result = ci.delete(0) + expected = CategoricalIndex(list('abbca'),categories=categories) + tm.assert_index_equal(result,expected,exact=True) + + result = ci.delete(-1) + expected = CategoricalIndex(list('aabbc'),categories=categories) + tm.assert_index_equal(result,expected,exact=True) + + with tm.assertRaises((IndexError, ValueError)): + # either depeidnig on numpy version + result = ci.delete(10) + + def test_astype(self): + + ci = self.create_index() + result = ci.astype('category') + tm.assert_index_equal(result,ci,exact=True) + + result = ci.astype(object) + self.assertTrue(result.equals(Index(np.array(ci)))) + + # this IS equal, but not the same class + self.assertTrue(result.equals(ci)) + self.assertIsInstance(result, Index) + self.assertNotIsInstance(result, CategoricalIndex) + + def test_reindex_base(self): + + # determined by cat ordering + idx = self.create_index() + expected = np.array([4,0,1,5,2,3]) + + actual = idx.get_indexer(idx) + assert_array_equal(expected, actual) + + with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'): + idx.get_indexer(idx, method='invalid') + + def test_reindexing(self): + + ci = self.create_index() + oidx = Index(np.array(ci)) + + for n in [1,2,5,len(ci)]: + finder = 
oidx[np.random.randint(0,len(ci),size=n)] + expected = oidx.get_indexer_non_unique(finder)[0] + + actual = ci.get_indexer(finder) + assert_array_equal(expected, actual) + + def test_duplicates(self): + + idx = CategoricalIndex([0, 0, 0]) + self.assertFalse(idx.is_unique) + self.assertTrue(idx.has_duplicates) + + def test_get_indexer(self): + + idx1 = CategoricalIndex(list('aabcde'),categories=list('edabc')) + idx2 = CategoricalIndex(list('abf')) + + for indexer in [idx2, list('abf'), Index(list('abf'))]: + r1 = idx1.get_indexer(idx2) + assert_almost_equal(r1, [0, 1, 2, -1]) + + self.assertRaises(NotImplementedError, lambda : idx2.get_indexer(idx1, method='pad')) + self.assertRaises(NotImplementedError, lambda : idx2.get_indexer(idx1, method='backfill')) + self.assertRaises(NotImplementedError, lambda : idx2.get_indexer(idx1, method='nearest')) + + def test_repr_roundtrip(self): + + ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True) + str(ci) + tm.assert_index_equal(eval(repr(ci)),ci,exact=True) + + # formatting + if compat.PY3: + str(ci) + else: + compat.text_type(ci) + + # long format + # this is not reprable + ci = CategoricalIndex(np.random.randint(0,5,size=100)) + if compat.PY3: + str(ci) + else: + compat.text_type(ci) + + def test_isin(self): + + ci = CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b',np.nan]) + self.assert_numpy_array_equal(ci.isin(['c']),np.array([False,False,False,True,False,False])) + self.assert_numpy_array_equal(ci.isin(['c','a','b']),np.array([True]*5 + [False])) + self.assert_numpy_array_equal(ci.isin(['c','a','b',np.nan]),np.array([True]*6)) + + # mismatched categorical -> coerced to ndarray so doesn't matter + self.assert_numpy_array_equal(ci.isin(ci.set_categories(list('abcdefghi'))),np.array([True]*6)) + self.assert_numpy_array_equal(ci.isin(ci.set_categories(list('defghi'))),np.array([False]*5 + [True])) + + def test_identical(self): + + ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], 
ordered=True) + ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'], ordered=True) + self.assertTrue(ci1.identical(ci1)) + self.assertTrue(ci1.identical(ci1.copy())) + self.assertFalse(ci1.identical(ci2)) + + def test_equals(self): + + ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True) + ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'], ordered=True) + + self.assertTrue(ci1.equals(ci1)) + self.assertFalse(ci1.equals(ci2)) + self.assertTrue(ci1.equals(ci1.astype(object))) + self.assertTrue(ci1.astype(object).equals(ci1)) + + self.assertTrue((ci1 == ci1).all()) + self.assertFalse((ci1 != ci1).all()) + self.assertFalse((ci1 > ci1).all()) + self.assertFalse((ci1 < ci1).all()) + self.assertTrue((ci1 <= ci1).all()) + self.assertTrue((ci1 >= ci1).all()) + + self.assertFalse((ci1 == 1).all()) + self.assertTrue((ci1 == Index(['a','b'])).all()) + self.assertTrue((ci1 == ci1.values).all()) + + # invalid comparisons + self.assertRaises(TypeError, lambda : ci1 == Index(['a','b','c'])) + self.assertRaises(TypeError, lambda : ci1 == ci2) + self.assertRaises(TypeError, lambda : ci1 == Categorical(ci1.values, ordered=False)) + self.assertRaises(TypeError, lambda : ci1 == Categorical(ci1.values, categories=list('abc'))) + + # tests + # make sure that we are testing for category inclusion properly + self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b']).equals(list('aabca'))) + self.assertTrue(CategoricalIndex(list('aabca'),categories=['c','a','b',np.nan]).equals(list('aabca'))) + + self.assertFalse(CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b',np.nan]).equals(list('aabca'))) + self.assertTrue(CategoricalIndex(list('aabca') + [np.nan],categories=['c','a','b',np.nan]).equals(list('aabca') + [np.nan])) + class Numeric(Base): @@ -1336,24 +1980,38 @@ def test_ufunc_compat(self): expected = Float64Index(np.sin(np.arange(5,dtype='int64'))) tm.assert_index_equal(result, expected) + def 
test_index_groupby(self): + int_idx = Index(range(6)) + float_idx = Index(np.arange(0, 0.6, 0.1)) + obj_idx = Index('A B C D E F'.split()) + dt_idx = pd.date_range('2013-01-01', freq='M', periods=6) + + for idx in [int_idx, float_idx, obj_idx, dt_idx]: + to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1]) + self.assertEqual(idx.groupby(to_groupby), + {1.0: [idx[0], idx[5]], 2.0: [idx[1], idx[4]]}) + + to_groupby = Index([datetime(2011, 11, 1), datetime(2011, 12, 1), + pd.NaT, pd.NaT, + datetime(2011, 12, 1), datetime(2011, 11, 1)], tz='UTC').values + + ex_keys = pd.tslib.datetime_to_datetime64(np.array([Timestamp('2011-11-01'), Timestamp('2011-12-01')])) + expected = {ex_keys[0][0]: [idx[0], idx[5]], ex_keys[0][1]: [idx[1], idx[4]]} + self.assertEqual(idx.groupby(to_groupby), expected) + class TestFloat64Index(Numeric, tm.TestCase): _holder = Float64Index _multiprocess_can_split_ = True def setUp(self): - self.mixed = Float64Index([1.5, 2, 3, 4, 5]) - self.float = Float64Index(np.arange(5) * 2.5) + self.indices = dict(mixed = Float64Index([1.5, 2, 3, 4, 5]), + float = Float64Index(np.arange(5) * 2.5)) + self.setup_indices() def create_index(self): return Float64Index(np.arange(5,dtype='float64')) - def test_hash_error(self): - with tm.assertRaisesRegexp(TypeError, - "unhashable type: %r" % - type(self.float).__name__): - hash(self.float) - def test_repr_roundtrip(self): for ind in (self.mixed, self.float): tm.assert_index_equal(eval(repr(ind)), ind) @@ -1519,7 +2177,8 @@ class TestInt64Index(Numeric, tm.TestCase): _multiprocess_can_split_ = True def setUp(self): - self.index = Int64Index(np.arange(0, 20, 2)) + self.indices = dict(index = Int64Index(np.arange(0, 20, 2))) + self.setup_indices() def create_index(self): return Int64Index(np.arange(5,dtype='int64')) @@ -1566,27 +2225,23 @@ def test_constructor_corner(self): with tm.assertRaisesRegexp(TypeError, 'casting'): Int64Index(arr_with_floats) - def test_hash_error(self): - with tm.assertRaisesRegexp(TypeError, - 
"unhashable type: %r" % - type(self.index).__name__): - hash(self.index) - def test_copy(self): i = Int64Index([], name='Foo') i_copy = i.copy() self.assertEqual(i_copy.name, 'Foo') def test_view(self): + super(TestInt64Index, self).test_view() + i = Int64Index([], name='Foo') i_view = i.view() self.assertEqual(i_view.name, 'Foo') i_view = i.view('i8') - tm.assert_index_equal(i, Int64Index(i_view)) + tm.assert_index_equal(i, Int64Index(i_view, name='Foo')) i_view = i.view(Int64Index) - tm.assert_index_equal(i, Int64Index(i_view)) + tm.assert_index_equal(i, Int64Index(i_view, name='Foo')) def test_coerce_list(self): # coerce things @@ -1950,7 +2605,7 @@ def test_print_unicode_columns(self): def test_repr_summary(self): with cf.option_context('display.max_seq_items', 10): r = repr(pd.Index(np.arange(1000))) - self.assertTrue(len(r) < 100) + self.assertTrue(len(r) < 200) self.assertTrue("..." in r) def test_repr_roundtrip(self): @@ -1977,7 +2632,25 @@ def test_slice_keep_name(self): class DatetimeLike(Base): + def test_str(self): + + # test the string repr + idx = self.create_index() + idx.name = 'foo' + self.assertFalse("length=%s" % len(idx) in str(idx)) + self.assertTrue("'foo'" in str(idx)) + self.assertTrue(idx.__class__.__name__ in str(idx)) + + if hasattr(idx,'tz'): + if idx.tz is not None: + self.assertTrue("tz='%s'" % idx.tz in str(idx)) + else: + self.assertTrue("tz=None" in str(idx)) + if hasattr(idx,'freq'): + self.assertTrue("freq='%s'" % idx.freqstr in str(idx)) + def test_view(self): + super(DatetimeLike, self).test_view() i = self.create_index() @@ -1993,6 +2666,10 @@ class TestDatetimeIndex(DatetimeLike, tm.TestCase): _holder = DatetimeIndex _multiprocess_can_split_ = True + def setUp(self): + self.indices = dict(index = tm.makeDateIndex(10)) + self.setup_indices() + def create_index(self): return date_range('20130101',periods=5) @@ -2106,13 +2783,47 @@ def test_time_overflow_for_32bit_machines(self): idx2 = pd.date_range(end='2000', periods=periods, 
freq='S') self.assertEqual(len(idx2), periods) + def test_intersection(self): + first = self.index + second = self.index[5:] + intersect = first.intersection(second) + self.assertTrue(tm.equalContents(intersect, second)) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.intersection(case) + self.assertTrue(tm.equalContents(result, second)) + + third = Index(['a', 'b', 'c']) + result = first.intersection(third) + expected = pd.Index([], dtype=object) + self.assert_index_equal(result, expected) + + def test_union(self): + first = self.index[:5] + second = self.index[5:] + everything = self.index + union = first.union(second) + self.assertTrue(tm.equalContents(union, everything)) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.union(case) + self.assertTrue(tm.equalContents(result, everything)) + class TestPeriodIndex(DatetimeLike, tm.TestCase): _holder = PeriodIndex _multiprocess_can_split_ = True + def setUp(self): + self.indices = dict(index = tm.makePeriodIndex(10)) + self.setup_indices() + def create_index(self): - return period_range('20130101',periods=5,freq='D') + return period_range('20130101', periods=5, freq='D') def test_pickle_compat_construction(self): pass @@ -2145,6 +2856,10 @@ class TestTimedeltaIndex(DatetimeLike, tm.TestCase): _holder = TimedeltaIndex _multiprocess_can_split_ = True + def setUp(self): + self.indices = dict(index = tm.makeTimedeltaIndex(10)) + self.setup_indices() + def create_index(self): return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1) @@ -2219,9 +2934,10 @@ def setUp(self): major_labels = np.array([0, 0, 1, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) self.index_names = ['first', 'second'] - self.index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels], - names=self.index_names, verify_integrity=False) + self.indices = dict(index 
= MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=self.index_names, verify_integrity=False)) + self.setup_indices() def create_index(self): return self.index @@ -2257,13 +2973,7 @@ def test_labels_dtypes(self): self.assertTrue((i.labels[0]>=0).all()) self.assertTrue((i.labels[1]>=0).all()) - def test_hash_error(self): - with tm.assertRaisesRegexp(TypeError, - "unhashable type: %r" % - type(self.index).__name__): - hash(self.index) - - def test_set_names_and_rename(self): + def test_set_name_methods(self): # so long as these are synonyms, we don't need to test set_names self.assertEqual(self.index.rename, self.index.set_names) new_names = [name + "SUFFIX" for name in self.index_names] @@ -3402,6 +4112,12 @@ def test_difference(self): # - API change GH 8226 with tm.assert_produces_warning(): first - self.index[-3:] + with tm.assert_produces_warning(): + self.index[-3:] - first + with tm.assert_produces_warning(): + self.index[-3:] - first.tolist() + + self.assertRaises(TypeError, lambda : first.tolist() - self.index[-3:]) expected = MultiIndex.from_tuples(sorted(self.index[:-3].values), sortorder=0, @@ -3529,21 +4245,50 @@ def test_drop(self): dropped2 = self.index.drop(index) expected = self.index[[0, 2, 3, 5]] - self.assertTrue(dropped.equals(expected)) - self.assertTrue(dropped2.equals(expected)) + self.assert_index_equal(dropped, expected) + self.assert_index_equal(dropped2, expected) dropped = self.index.drop(['bar']) expected = self.index[[0, 1, 3, 4, 5]] - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) + + dropped = self.index.drop('foo') + expected = self.index[[2, 3, 4, 5]] + self.assert_index_equal(dropped, expected) index = MultiIndex.from_tuples([('bar', 'two')]) self.assertRaises(KeyError, self.index.drop, [('bar', 'two')]) self.assertRaises(KeyError, self.index.drop, index) + self.assertRaises(KeyError, self.index.drop, ['foo', 'two']) + + # partially correct argument + 
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')]) + self.assertRaises(KeyError, self.index.drop, mixed_index) + + # error='ignore' + dropped = self.index.drop(index, errors='ignore') + expected = self.index[[0, 1, 2, 3, 4, 5]] + self.assert_index_equal(dropped, expected) + + dropped = self.index.drop(mixed_index, errors='ignore') + expected = self.index[[0, 1, 2, 3, 5]] + self.assert_index_equal(dropped, expected) + + dropped = self.index.drop(['foo', 'two'], errors='ignore') + expected = self.index[[2, 3, 4, 5]] + self.assert_index_equal(dropped, expected) # mixed partial / full drop dropped = self.index.drop(['foo', ('qux', 'one')]) expected = self.index[[2, 3, 5]] - self.assertTrue(dropped.equals(expected)) + self.assert_index_equal(dropped, expected) + + # mixed partial / full drop / error='ignore' + mixed_index = ['foo', ('qux', 'one'), 'two'] + self.assertRaises(KeyError, self.index.drop, mixed_index) + dropped = self.index.drop(mixed_index, errors='ignore') + expected = self.index[[2, 3, 5]] + self.assert_index_equal(dropped, expected) def test_droplevel_with_names(self): index = self.index[self.index.get_loc('foo')] @@ -3734,7 +4479,7 @@ def test_reindex_level(self): assertRaisesRegexp(TypeError, "Fill method not supported", idx.reindex, idx, method='bfill', level='first') - def test_has_duplicates(self): + def test_duplicates(self): self.assertFalse(self.index.has_duplicates) self.assertTrue(self.index.append(self.index).has_duplicates) @@ -3848,7 +4593,25 @@ def test_repr_with_unicode_data(self): self.assertFalse("\\u" in repr(index)) # we don't want unicode-escaped def test_repr_roundtrip(self): - tm.assert_index_equal(eval(repr(self.index)), self.index) + + mi = MultiIndex.from_product([list('ab'),range(3)],names=['first','second']) + str(mi) + tm.assert_index_equal(eval(repr(mi)),mi,exact=True) + + # formatting + if compat.PY3: + str(mi) + else: + compat.text_type(mi) + + # long format + mi = 
MultiIndex.from_product([list('abcdefg'),range(10)],names=['first','second']) + result = str(mi) + tm.assert_index_equal(eval(repr(mi)),mi,exact=True) + + def test_str(self): + # tested elsewhere + pass def test_unicode_string_with_unicode(self): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} @@ -3989,6 +4752,61 @@ def test_groupby(self): exp = dict((key, [key]) for key in self.index) tm.assert_dict_equal(groups, exp) + def test_index_name_retained(self): + # GH9857 + result = pd.DataFrame({'x': [1, 2, 6], + 'y': [2, 2, 8], + 'z': [-5, 0, 5]}) + result = result.set_index('z') + result.loc[10] = [9, 10] + df_expected = pd.DataFrame({'x': [1, 2, 6, 9], + 'y': [2, 2, 8, 10], + 'z': [-5, 0, 5, 10]}) + df_expected = df_expected.set_index('z') + tm.assert_frame_equal(result, df_expected) + + def test_equals_operator(self): + # For issue #9785 + self.assertTrue((self.index == self.index).all()) + + def test_index_compare(self): + # For issue #9785 + index_unequal = Index(['foo', 'bar', 'baz']) + index_equal = Index([ + ('foo', 'one'), ('foo', 'two'), ('bar', 'one'), + ('baz', 'two'), ('qux', 'one'), ('qux', 'two') + ], tupleize_cols=False) + # Testing Numpy Results Equivelent + assert_array_equal( + index_unequal.equals(self.index), + index_unequal == self.index, + err_msg = 'Index compared with MultiIndex failed', + ) + assert_array_equal( + self.index.equals(index_unequal), + self.index == index_unequal, + err_msg = 'MultiIndex compared with Index failed', + ) + assert_array_equal( + self.index.equals(index_equal), + self.index == index_equal, + err_msg = 'MultiIndex compared with Similar Index failed', + ) + assert_array_equal( + index_equal.equals(self.index), + index_equal == self.index, + err_msg = 'Index compared with Similar MultiIndex failed', + ) + # Testing that the result is true for the index_equal case + self.assertTrue( + (self.index == index_equal).all(), + msg='Assert Index compared with Similar MultiIndex match' + ) + self.assertTrue( + 
(index_equal == self.index).all(), + msg='Assert MultiIndex compared with Similar Index match' + ) + def test_get_combined_index(): from pandas.core.index import _get_combined_index diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index ee6140828882c..c998ce65791a3 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1063,6 +1063,7 @@ def test_loc_setitem_consistency(self): # empty (essentially noops) expected = DataFrame(columns=['x', 'y']) + expected['x'] = expected['x'].astype(np.int64) df = DataFrame(columns=['x', 'y']) df.loc[:, 'x'] = 1 assert_frame_equal(df,expected) @@ -1438,6 +1439,13 @@ def test_iloc_setitem_series(self): result = s.iloc[:4] assert_series_equal(result, expected) + s= Series([-1]*6) + s.iloc[0::2]= [0,2,4] + s.iloc[1::2]= [1,3,5] + result = s + expected= Series([0,1,2,3,4,5]) + assert_series_equal(result, expected) + def test_iloc_setitem_list_of_lists(self): # GH 7551 @@ -2366,6 +2374,7 @@ def test_dups_fancy_indexing(self): rows = ['C','B','E'] expected = DataFrame({'test' : [11,9,np.nan], 'test1': [7.,6,np.nan], 'other': ['d','c',np.nan]},index=rows) + result = df.ix[rows] assert_frame_equal(result, expected) @@ -3368,7 +3377,7 @@ def f(): expected = DataFrame(columns=['foo']) def f(): df = DataFrame() - df['foo'] = Series([]) + df['foo'] = Series([], dtype='object') return df assert_frame_equal(f(), expected) def f(): @@ -3378,9 +3387,12 @@ def f(): assert_frame_equal(f(), expected) def f(): df = DataFrame() - df['foo'] = Series(range(len(df))) + df['foo'] = df.index return df assert_frame_equal(f(), expected) + + expected = DataFrame(columns=['foo']) + expected['foo'] = expected['foo'].astype('float64') def f(): df = DataFrame() df['foo'] = [] @@ -3388,7 +3400,7 @@ def f(): assert_frame_equal(f(), expected) def f(): df = DataFrame() - df['foo'] = df.index + df['foo'] = Series(range(len(df))) return df assert_frame_equal(f(), expected) def f(): @@ -3421,14 +3433,21 @@ def f(): # 
GH5720, GH5744 # don't create rows when empty + expected = DataFrame(columns=['A','B','New']) + expected['A'] = expected['A'].astype('int64') + expected['B'] = expected['B'].astype('float64') + expected['New'] = expected['New'].astype('float64') df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) y = df[df.A > 5] y['New'] = np.nan - assert_frame_equal(y,DataFrame(columns=['A','B','New'])) + assert_frame_equal(y,expected) + #assert_frame_equal(y,expected) + expected = DataFrame(columns=['a','b','c c','d']) + expected['d'] = expected['d'].astype('int64') df = DataFrame(columns=['a', 'b', 'c c']) df['d'] = 3 - assert_frame_equal(df,DataFrame(columns=['a','b','c c','d'])) + assert_frame_equal(df,expected) assert_series_equal(df['c c'],Series(name='c c',dtype=object)) # reindex columns is ok @@ -3436,6 +3455,9 @@ def f(): y = df[df.A > 5] result = y.reindex(columns=['A','B','C']) expected = DataFrame(columns=['A','B','C']) + expected['A'] = expected['A'].astype('int64') + expected['B'] = expected['B'].astype('float64') + expected['C'] = expected['C'].astype('float64') assert_frame_equal(result,expected) # GH 5756 @@ -4411,6 +4433,250 @@ def test_slice_with_zero_step_raises(self): self.assertRaisesRegexp(ValueError, 'slice step cannot be zero', lambda: s.ix[::0]) + def test_indexing_assignment_dict_already_exists(self): + df = pd.DataFrame({'x': [1, 2, 6], + 'y': [2, 2, 8], + 'z': [-5, 0, 5]}).set_index('z') + expected = df.copy() + rhs = dict(x=9, y=99) + df.loc[5] = rhs + expected.loc[5] = [9, 99] + tm.assert_frame_equal(df, expected) + + def test_indexing_dtypes_on_empty(self): + # Check that .iloc and .ix return correct dtypes GH9983 + df = DataFrame({'a':[1,2,3],'b':['b','b2','b3']}) + df2 = df.ix[[],:] + + self.assertEqual(df2.loc[:,'a'].dtype, np.int64) + assert_series_equal(df2.loc[:,'a'], df2.iloc[:,0]) + assert_series_equal(df2.loc[:,'a'], df2.ix[:,0]) + + + +class TestCategoricalIndex(tm.TestCase): + + def setUp(self): + + self.df = DataFrame({'A' : 
np.arange(6,dtype='int64'), + 'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B') + self.df2 = DataFrame({'A' : np.arange(6,dtype='int64'), + 'B' : Series(list('aabbca')).astype('category',categories=list('cabe')) }).set_index('B') + self.df3 = DataFrame({'A' : np.arange(6,dtype='int64'), + 'B' : Series([1,1,2,1,3,2]).astype('category',categories=[3,2,1],ordered=True) }).set_index('B') + self.df4 = DataFrame({'A' : np.arange(6,dtype='int64'), + 'B' : Series([1,1,2,1,3,2]).astype('category',categories=[3,2,1],ordered=False) }).set_index('B') + + + def test_loc_scalar(self): + + result = self.df.loc['a'] + expected = DataFrame({'A' : [0,1,5], + 'B' : Series(list('aaa')).astype('category',categories=list('cab')) }).set_index('B') + assert_frame_equal(result, expected) + + + df = self.df.copy() + df.loc['a'] = 20 + expected = DataFrame({'A' : [20,20,2,3,4,20], + 'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B') + assert_frame_equal(df, expected) + + # value not in the categories + self.assertRaises(KeyError, lambda : df.loc['d']) + + def f(): + df.loc['d'] = 10 + self.assertRaises(TypeError, f) + + def f(): + df.loc['d','A'] = 10 + self.assertRaises(TypeError, f) + + def f(): + df.loc['d','C'] = 10 + self.assertRaises(TypeError, f) + + def test_loc_listlike(self): + + # list of labels + result = self.df.loc[['c','a']] + expected = self.df.iloc[[4,0,1,5]] + assert_frame_equal(result, expected) + + result = self.df2.loc[['a','b','e']] + expected = DataFrame({'A' : [0,1,5,2,3,np.nan], + 'B' : Series(list('aaabbe')).astype('category',categories=list('cabe')) }).set_index('B') + assert_frame_equal(result, expected) + + # element in the categories but not in the values + self.assertRaises(KeyError, lambda : self.df2.loc['e']) + + # assign is ok + df = self.df2.copy() + df.loc['e'] = 20 + result = df.loc[['a','b','e']] + expected = DataFrame({'A' : [0,1,5,2,3,20], + 'B' : 
Series(list('aaabbe')).astype('category',categories=list('cabe')) }).set_index('B') + assert_frame_equal(result, expected) + + df = self.df2.copy() + result = df.loc[['a','b','e']] + expected = DataFrame({'A' : [0,1,5,2,3,np.nan], + 'B' : Series(list('aaabbe')).astype('category',categories=list('cabe')) }).set_index('B') + assert_frame_equal(result, expected) + + + # not all labels in the categories + self.assertRaises(KeyError, lambda : self.df2.loc[['a','d']]) + + def test_read_only_source(self): + # GH 10043 + rw_array = np.eye(10) + rw_df = DataFrame(rw_array) + + ro_array = np.eye(10) + ro_array.setflags(write=False) + ro_df = DataFrame(ro_array) + + assert_frame_equal(rw_df.iloc[[1,2,3]],ro_df.iloc[[1,2,3]]) + assert_frame_equal(rw_df.iloc[[1]],ro_df.iloc[[1]]) + assert_series_equal(rw_df.iloc[1],ro_df.iloc[1]) + assert_frame_equal(rw_df.iloc[1:3],ro_df.iloc[1:3]) + + assert_frame_equal(rw_df.loc[[1,2,3]],ro_df.loc[[1,2,3]]) + assert_frame_equal(rw_df.loc[[1]],ro_df.loc[[1]]) + assert_series_equal(rw_df.loc[1],ro_df.loc[1]) + assert_frame_equal(rw_df.loc[1:3],ro_df.loc[1:3]) + + def test_reindexing(self): + + # reindexing + # convert to a regular index + result = self.df2.reindex(['a','b','e']) + expected = DataFrame({'A' : [0,1,5,2,3,np.nan], + 'B' : Series(list('aaabbe')) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(['a','b']) + expected = DataFrame({'A' : [0,1,5,2,3], + 'B' : Series(list('aaabb')) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(['e']) + expected = DataFrame({'A' : [np.nan], + 'B' : Series(['e']) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(['d']) + expected = DataFrame({'A' : [np.nan], + 'B' : Series(['d']) }).set_index('B') + assert_frame_equal(result, expected) + + # since we are actually reindexing with a Categorical + # then return a Categorical + cats = list('cabe') + + result = 
self.df2.reindex(pd.Categorical(['a','d'],categories=cats)) + expected = DataFrame({'A' : [0,1,5,np.nan], + 'B' : Series(list('aaad')).astype('category',categories=cats) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(pd.Categorical(['a'],categories=cats)) + expected = DataFrame({'A' : [0,1,5], + 'B' : Series(list('aaa')).astype('category',categories=cats) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(['a','b','e']) + expected = DataFrame({'A' : [0,1,5,2,3,np.nan], + 'B' : Series(list('aaabbe')) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(['a','b']) + expected = DataFrame({'A' : [0,1,5,2,3], + 'B' : Series(list('aaabb')) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(['e']) + expected = DataFrame({'A' : [np.nan], + 'B' : Series(['e']) }).set_index('B') + assert_frame_equal(result, expected) + + # give back the type of categorical that we received + result = self.df2.reindex(pd.Categorical(['a','d'],categories=cats,ordered=True)) + expected = DataFrame({'A' : [0,1,5,np.nan], + 'B' : Series(list('aaad')).astype('category',categories=cats,ordered=True) }).set_index('B') + assert_frame_equal(result, expected) + + result = self.df2.reindex(pd.Categorical(['a','d'],categories=['a','d'])) + expected = DataFrame({'A' : [0,1,5,np.nan], + 'B' : Series(list('aaad')).astype('category',categories=['a','d']) }).set_index('B') + assert_frame_equal(result, expected) + + # passed duplicate indexers are not allowed + self.assertRaises(ValueError, lambda : self.df2.reindex(['a','a'])) + + # args NotImplemented ATM + self.assertRaises(NotImplementedError, lambda : self.df2.reindex(['a'],method='ffill')) + self.assertRaises(NotImplementedError, lambda : self.df2.reindex(['a'],level=1)) + self.assertRaises(NotImplementedError, lambda : self.df2.reindex(['a'],limit=2)) + + def test_loc_slice(self): + + # slicing + # not 
implemented ATM + # GH9748 + + self.assertRaises(TypeError, lambda : self.df.loc[1:5]) + + #result = df.loc[1:5] + #expected = df.iloc[[1,2,3,4]] + #assert_frame_equal(result, expected) + + def test_boolean_selection(self): + + df3 = self.df3 + df4 = self.df4 + + result = df3[df3.index == 'a'] + expected = df3.iloc[[]] + assert_frame_equal(result,expected) + + result = df4[df4.index == 'a'] + expected = df4.iloc[[]] + assert_frame_equal(result,expected) + + result = df3[df3.index == 1] + expected = df3.iloc[[0,1,3]] + assert_frame_equal(result,expected) + + result = df4[df4.index == 1] + expected = df4.iloc[[0,1,3]] + assert_frame_equal(result,expected) + + # since we have an ordered categorical + + # CategoricalIndex([1, 1, 2, 1, 3, 2], + # categories=[3, 2, 1], + # ordered=True, + # name=u'B') + result = df3[df3.index < 2] + expected = df3.iloc[[4]] + assert_frame_equal(result,expected) + + result = df3[df3.index > 1] + expected = df3.iloc[[]] + assert_frame_equal(result,expected) + + # unordered + # cannot be compared + + # CategoricalIndex([1, 1, 2, 1, 3, 2], + # categories=[3, 2, 1], + # ordered=False, + # name=u'B') + self.assertRaises(TypeError, lambda : df4[df4.index < 2]) + self.assertRaises(TypeError, lambda : df4[df4.index > 1]) class TestSeriesNoneCoercion(tm.TestCase): EXPECTED_RESULTS = [ diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index 45f089f5e0a53..36585abd1b98f 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -68,15 +68,15 @@ def create_block(typestr, placement, item_shape=None, num_offset=0): elif typestr in ('object', 'string', 'O'): values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset], shape) - elif typestr in ('bool'): + elif typestr in ('b','bool',): values = np.ones(shape, dtype=np.bool_) elif typestr in ('datetime', 'dt', 'M8[ns]'): values = (mat * 1e9).astype('M8[ns]') elif typestr in ('timedelta', 'td', 'm8[ns]'): values = (mat * 1).astype('m8[ns]') - elif 
typestr in ('category'): + elif typestr in ('category',): values = Categorical([1,1,2,2,3,3,3,3,4,4]) - elif typestr in ('category2'): + elif typestr in ('category2',): values = Categorical(['a','a','a','a','b','b','c','c','c','d']) elif typestr in ('sparse', 'sparse_na'): # FIXME: doesn't support num_rows != 10 @@ -751,6 +751,25 @@ def test_equals(self): bm2 = BlockManager(bm1.blocks[::-1], bm1.axes) self.assertTrue(bm1.equals(bm2)) + def test_equals_block_order_different_dtypes(self): + # GH 9330 + + mgr_strings = [ + "a:i8;b:f8", # basic case + "a:i8;b:f8;c:c8;d:b", # many types + "a:i8;e:dt;f:td;g:string", # more types + "a:i8;b:category;c:category2;d:category2", # categories + "c:sparse;d:sparse_na;b:f8", # sparse + ] + + for mgr_string in mgr_strings: + bm = create_mgr(mgr_string) + block_perms = itertools.permutations(bm.blocks) + for bm_perm in block_perms: + bm_this = BlockManager(bm_perm, bm.axes) + self.assertTrue(bm.equals(bm_this)) + self.assertTrue(bm_this.equals(bm)) + def test_single_mgr_ctor(self): mgr = create_single_mgr('f8', num_rows=5) self.assertEqual(mgr.as_matrix().tolist(), [0., 1., 2., 3., 4.]) diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index bb860269c5144..6d9bea29cf44d 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -6,20 +6,42 @@ import pandas as pd from pandas.lib import isscalar, item_from_zerodim, max_len_string_array import pandas.util.testing as tm -from pandas.compat import u +from pandas.compat import u, PY2 + class TestMisc(tm.TestCase): def test_max_len_string_array(self): - arr = np.array(['foo','b',np.nan],dtype='object') - self.assertTrue(max_len_string_array(arr),3) + arr = a = np.array(['foo', 'b', np.nan], dtype='object') + self.assertTrue(max_len_string_array(arr), 3) # unicode - arr = arr.astype('U') - self.assertTrue(max_len_string_array(arr),3) + arr = a.astype('U').astype(object) + self.assertTrue(max_len_string_array(arr), 3) + + # bytes for python3 + arr = 
a.astype('S').astype(object) + self.assertTrue(max_len_string_array(arr), 3) + + # raises + tm.assertRaises(TypeError, + lambda: max_len_string_array(arr.astype('U'))) + + def test_infer_dtype_bytes(self): + compare = 'string' if PY2 else 'bytes' + + # string array of bytes + arr = np.array(list('abc'), dtype='S1') + self.assertEqual(pd.lib.infer_dtype(arr), compare) + + # object array of bytes + arr = arr.astype(object) + self.assertEqual(pd.lib.infer_dtype(arr), compare) + class TestIsscalar(tm.TestCase): + def test_isscalar_builtin_scalars(self): self.assertTrue(isscalar(None)) self.assertTrue(isscalar(True)) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index e6a0f5d7ef45d..b2efc20aa0694 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -426,8 +426,15 @@ def test_frame_setitem_multi_column(self): # it broadcasts df['B', '1'] = [1, 2, 3] df['A'] = df['B', '1'] - assert_series_equal(df['A', '1'], df['B', '1']) - assert_series_equal(df['A', '2'], df['B', '1']) + + sliced_a1 = df['A', '1'] + sliced_a2 = df['A', '2'] + sliced_b1 = df['B', '1'] + assert_series_equal(sliced_a1, sliced_b1, check_names=False) + assert_series_equal(sliced_a2, sliced_b1, check_names=False) + self.assertEqual(sliced_a1.name, ('A', '1')) + self.assertEqual(sliced_a2.name, ('A', '2')) + self.assertEqual(sliced_b1.name, ('B', '1')) def test_getitem_tuple_plus_slice(self): # GH #671 @@ -461,7 +468,9 @@ def test_getitem_multilevel_index_tuple_unsorted(self): df = df.set_index(index_columns) query_index = df.index[:1] rs = df.ix[query_index, "data"] - xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)])) + + xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c']) + xp = Series(['x'], index=xp_idx, name='data') assert_series_equal(rs, xp) def test_xs(self): @@ -865,7 +874,7 @@ def test_count_level_series(self): def test_count_level_corner(self): s = self.frame['A'][:0] result = s.count(level=0) - expected = 
Series(0, index=s.index.levels[0]) + expected = Series(0, index=s.index.levels[0], name='A') assert_series_equal(result, expected) df = self.frame[:0] @@ -982,7 +991,9 @@ def test_stack_mixed_dtype(self): df = df.sortlevel(1, axis=1) stacked = df.stack() - assert_series_equal(stacked['foo'], df['foo'].stack()) + result = df['foo'].stack() + assert_series_equal(stacked['foo'], result, check_names=False) + self.assertIs(result.name, None) self.assertEqual(stacked['bar'].dtype, np.float_) def test_unstack_bug(self): @@ -1430,11 +1441,13 @@ def test_count(self): result = series.count(level='b') expect = self.series.count(level=1) - assert_series_equal(result, expect) + assert_series_equal(result, expect, check_names=False) + self.assertEqual(result.index.name, 'b') result = series.count(level='a') expect = self.series.count(level=0) - assert_series_equal(result, expect) + assert_series_equal(result, expect, check_names=False) + self.assertEqual(result.index.name, 'a') self.assertRaises(KeyError, series.count, 'x') self.assertRaises(KeyError, frame.count, level='x') @@ -1738,12 +1751,12 @@ def test_mixed_depth_get(self): result = df['a'] expected = df['a', '', ''] - assert_series_equal(result, expected) + assert_series_equal(result, expected, check_names=False) self.assertEqual(result.name, 'a') result = df['routine1', 'result1'] expected = df['routine1', 'result1', ''] - assert_series_equal(result, expected) + assert_series_equal(result, expected, check_names=False) self.assertEqual(result.name, ('routine1', 'result1')) def test_mixed_depth_insert(self): @@ -1825,7 +1838,7 @@ def test_mixed_depth_pop(self): df2 = df.copy() result = df1.pop('a') expected = df2.pop(('a', '', '')) - assert_series_equal(expected, result) + assert_series_equal(expected, result, check_names=False) assert_frame_equal(df1, df2) self.assertEqual(result.name, 'a') diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 2a605cba8a6c0..1adb8a5d9217c 100644 --- 
a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -5,7 +5,7 @@ import numpy as np -from pandas.core.common import isnull +from pandas.core.common import isnull, is_integer_dtype import pandas.core.nanops as nanops import pandas.util.testing as tm @@ -323,6 +323,32 @@ def test_nanmean(self): allow_complex=False, allow_obj=False, allow_str=False, allow_date=False, allow_tdelta=True) + def test_nanmean_overflow(self): + # GH 10155 + # In the previous implementation mean can overflow for int dtypes, it + # is now consistent with numpy + from pandas import Series + + # numpy < 1.9.0 is not computing this correctly + from distutils.version import LooseVersion + if LooseVersion(np.__version__) >= '1.9.0': + for a in [2 ** 55, -2 ** 55, 20150515061816532]: + s = Series(a, index=range(500), dtype=np.int64) + result = s.mean() + np_result = s.values.mean() + self.assertEqual(result, a) + self.assertEqual(result, np_result) + self.assertTrue(result.dtype == np.float64) + + # check returned dtype + for dtype in [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]: + s = Series(range(10), dtype=dtype) + result = s.mean() + if is_integer_dtype(dtype): + self.assertTrue(result.dtype == np.float64) + else: + self.assertTrue(result.dtype == dtype) + def test_nanmedian(self): self.check_funs(nanops.nanmedian, np.median, allow_complex=False, allow_str=False, allow_date=False, diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index cab668b3118fd..57fd465993e14 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -404,6 +404,8 @@ def test_abs(self): expected = np.abs(s) assert_series_equal(result, expected) assert_series_equal(result2, expected) + self.assertEqual(result.name, 'A') + self.assertEqual(result2.name, 'A') class CheckIndexing(object): @@ -509,7 +511,9 @@ def test_major_xs(self): idx = self.panel.major_axis[5] xs = self.panel.major_xs(idx) - assert_series_equal(xs['ItemA'], ref.xs(idx)) + result = 
xs['ItemA'] + assert_series_equal(result, ref.xs(idx), check_names=False) + self.assertEqual(result.name, 'ItemA') # not contained idx = self.panel.major_axis[0] - bday @@ -527,7 +531,7 @@ def test_minor_xs(self): idx = self.panel.minor_axis[1] xs = self.panel.minor_xs(idx) - assert_series_equal(xs['ItemA'], ref[idx]) + assert_series_equal(xs['ItemA'], ref[idx], check_names=False) # not contained self.assertRaises(Exception, self.panel.minor_xs, 'E') @@ -658,7 +662,7 @@ def test_ix_setitem_slice_dataframe(self): def test_ix_align(self): from pandas import Series - b = Series(np.random.randn(10)) + b = Series(np.random.randn(10), name=0) b.sort() df_orig = Panel(np.random.randn(3, 10, 2)) df = df_orig.copy() @@ -960,6 +964,12 @@ def _check_dtype(panel, dtype): panel = Panel(np.random.randn(2,10,5),items=lrange(2),major_axis=lrange(10),minor_axis=lrange(5),dtype=dtype) _check_dtype(panel,dtype) + for dtype in ['float64', 'float32', 'int64', 'int32', 'object']: + df1 = DataFrame(np.random.randn(2, 5), index=lrange(2), columns=lrange(5)) + df2 = DataFrame(np.random.randn(2, 5), index=lrange(2), columns=lrange(5)) + panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype) + _check_dtype(panel, dtype) + def test_constructor_fails_with_not_3d_input(self): with tm.assertRaisesRegexp(ValueError, "The number of dimensions required is 3"): @@ -1696,22 +1706,23 @@ def test_shift(self): # major idx = self.panel.major_axis[0] idx_lag = self.panel.major_axis[1] - shifted = self.panel.shift(1) - assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag)) # minor idx = self.panel.minor_axis[0] idx_lag = self.panel.minor_axis[1] - shifted = self.panel.shift(1, axis='minor') - assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag)) - self.assertRaises(Exception, self.panel.shift, 1, axis='items') + # items + idx = self.panel.items[0] + idx_lag = self.panel.items[1] + shifted = self.panel.shift(1, axis='items') + assert_frame_equal(self.panel[idx], + 
shifted[idx_lag]) # negative numbers, #2164 result = self.panel.shift(-1) @@ -1984,6 +1995,15 @@ def check_drop(drop_val, axis_number, aliases, expected): expected = Panel({"One": df}) check_drop('Two', 0, ['items'], expected) + self.assertRaises(ValueError, panel.drop, 'Three') + + # errors = 'ignore' + dropped = panel.drop('Three', errors='ignore') + assert_panel_equal(dropped, panel) + dropped = panel.drop(['Two', 'Three'], errors='ignore') + expected = Panel({"One": df}) + assert_panel_equal(dropped, expected) + # Major exp_df = DataFrame({"A": [2], "B": [4]}, index=[1]) expected = Panel({"One": exp_df, "Two": exp_df}) diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index 66f5110830c72..346c9e2598985 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -151,6 +151,8 @@ def test_multiindex(self): class TestGetDummies(tm.TestCase): + sparse = False + def setUp(self): self.df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'b', 'c'], 'C': [1, 2, 3]}) @@ -163,20 +165,20 @@ def test_basic(self): expected = DataFrame({'a': {0: 1.0, 1: 0.0, 2: 0.0}, 'b': {0: 0.0, 1: 1.0, 2: 0.0}, 'c': {0: 0.0, 1: 0.0, 2: 1.0}}) - assert_frame_equal(get_dummies(s_list), expected) - assert_frame_equal(get_dummies(s_series), expected) + assert_frame_equal(get_dummies(s_list, sparse=self.sparse), expected) + assert_frame_equal(get_dummies(s_series, sparse=self.sparse), expected) expected.index = list('ABC') - assert_frame_equal(get_dummies(s_series_index), expected) + assert_frame_equal(get_dummies(s_series_index, sparse=self.sparse), expected) def test_just_na(self): just_na_list = [np.nan] just_na_series = Series(just_na_list) just_na_series_index = Series(just_na_list, index = ['A']) - res_list = get_dummies(just_na_list) - res_series = get_dummies(just_na_series) - res_series_index = get_dummies(just_na_series_index) + res_list = get_dummies(just_na_list, sparse=self.sparse) + res_series = get_dummies(just_na_series, 
sparse=self.sparse) + res_series_index = get_dummies(just_na_series_index, sparse=self.sparse) self.assertEqual(res_list.empty, True) self.assertEqual(res_series.empty, True) @@ -188,12 +190,13 @@ def test_just_na(self): def test_include_na(self): s = ['a', 'b', np.nan] - res = get_dummies(s) + res = get_dummies(s, sparse=self.sparse) exp = DataFrame({'a': {0: 1.0, 1: 0.0, 2: 0.0}, 'b': {0: 0.0, 1: 1.0, 2: 0.0}}) assert_frame_equal(res, exp) - res_na = get_dummies(s, dummy_na=True) + # Sparse dataframes do not allow nan labelled columns, see #GH8822 + res_na = get_dummies(s, dummy_na=True, sparse=self.sparse) exp_na = DataFrame({nan: {0: 0.0, 1: 0.0, 2: 1.0}, 'a': {0: 1.0, 1: 0.0, 2: 0.0}, 'b': {0: 0.0, 1: 1.0, 2: 0.0}}).reindex_axis(['a', 'b', nan], 1) @@ -201,7 +204,7 @@ def test_include_na(self): exp_na.columns = res_na.columns assert_frame_equal(res_na, exp_na) - res_just_na = get_dummies([nan], dummy_na=True) + res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse) exp_just_na = DataFrame(Series(1.0,index=[0]),columns=[nan]) assert_array_equal(res_just_na.values, exp_just_na.values) @@ -210,21 +213,21 @@ def test_unicode(self): # See GH 6885 - get_dummies chokes on unicode values e = 'e' eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE') s = [e, eacute, eacute] - res = get_dummies(s, prefix='letter') + res = get_dummies(s, prefix='letter', sparse=self.sparse) exp = DataFrame({'letter_e': {0: 1.0, 1: 0.0, 2: 0.0}, u('letter_%s') % eacute: {0: 0.0, 1: 1.0, 2: 1.0}}) assert_frame_equal(res, exp) def test_dataframe_dummies_all_obj(self): df = self.df[['A', 'B']] - result = get_dummies(df) + result = get_dummies(df, sparse=self.sparse) expected = DataFrame({'A_a': [1., 0, 1], 'A_b': [0., 1, 0], 'B_b': [1., 1, 0], 'B_c': [0., 0, 1]}) assert_frame_equal(result, expected) def test_dataframe_dummies_mix_default(self): df = self.df - result = get_dummies(df) + result = get_dummies(df, sparse=self.sparse) expected = DataFrame({'C': [1, 2, 3], 
'A_a': [1., 0, 1], 'A_b': [0., 1, 0], 'B_b': [1., 1, 0], 'B_c': [0., 0, 1]}) @@ -235,7 +238,7 @@ def test_dataframe_dummies_prefix_list(self): prefixes = ['from_A', 'from_B'] df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'b', 'c'], 'C': [1, 2, 3]}) - result = get_dummies(df, prefix=prefixes) + result = get_dummies(df, prefix=prefixes, sparse=self.sparse) expected = DataFrame({'C': [1, 2, 3], 'from_A_a': [1., 0, 1], 'from_A_b': [0., 1, 0], 'from_B_b': [1., 1, 0], 'from_B_c': [0., 0, 1]}) @@ -243,10 +246,10 @@ def test_dataframe_dummies_prefix_list(self): 'from_B_c']] assert_frame_equal(result, expected) - def test_datafrmae_dummies_prefix_str(self): + def test_dataframe_dummies_prefix_str(self): # not that you should do this... df = self.df - result = get_dummies(df, prefix='bad') + result = get_dummies(df, prefix='bad', sparse=self.sparse) expected = DataFrame([[1, 1., 0., 1., 0.], [2, 0., 1., 1., 0.], [3, 1., 0., 0., 1.]], @@ -256,40 +259,40 @@ def test_datafrmae_dummies_prefix_str(self): def test_dataframe_dummies_subset(self): df = self.df result = get_dummies(df, prefix=['from_A'], - columns=['A']) + columns=['A'], sparse=self.sparse) expected = DataFrame({'from_A_a': [1., 0, 1], 'from_A_b': [0., 1, 0], 'B': ['b', 'b', 'c'], 'C': [1, 2, 3]}) assert_frame_equal(result, expected) def test_dataframe_dummies_prefix_sep(self): df = self.df - result = get_dummies(df, prefix_sep='..') + result = get_dummies(df, prefix_sep='..', sparse=self.sparse) expected = DataFrame({'C': [1, 2, 3], 'A..a': [1., 0, 1], 'A..b': [0., 1, 0], 'B..b': [1., 1, 0], 'B..c': [0., 0, 1]}) expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']] assert_frame_equal(result, expected) - result = get_dummies(df, prefix_sep=['..', '__']) + result = get_dummies(df, prefix_sep=['..', '__'], sparse=self.sparse) expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'}) assert_frame_equal(result, expected) - result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'}) + result = 
get_dummies(df, prefix_sep={'A': '..', 'B': '__'}, sparse=self.sparse) assert_frame_equal(result, expected) def test_dataframe_dummies_prefix_bad_length(self): with tm.assertRaises(ValueError): - get_dummies(self.df, prefix=['too few']) + get_dummies(self.df, prefix=['too few'], sparse=self.sparse) def test_dataframe_dummies_prefix_sep_bad_length(self): with tm.assertRaises(ValueError): - get_dummies(self.df, prefix_sep=['bad']) + get_dummies(self.df, prefix_sep=['bad'], sparse=self.sparse) def test_dataframe_dummies_prefix_dict(self): prefixes = {'A': 'from_A', 'B': 'from_B'} df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'b', 'c'], 'C': [1, 2, 3]}) - result = get_dummies(df, prefix=prefixes) + result = get_dummies(df, prefix=prefixes, sparse=self.sparse) expected = DataFrame({'from_A_a': [1., 0, 1], 'from_A_b': [0., 1, 0], 'from_B_b': [1., 1, 0], 'from_B_c': [0., 0, 1], 'C': [1, 2, 3]}) @@ -298,7 +301,7 @@ def test_dataframe_dummies_prefix_dict(self): def test_dataframe_dummies_with_na(self): df = self.df df.loc[3, :] = [np.nan, np.nan, np.nan] - result = get_dummies(df, dummy_na=True) + result = get_dummies(df, dummy_na=True, sparse=self.sparse) expected = DataFrame({'C': [1, 2, 3, np.nan], 'A_a': [1., 0, 1, 0], 'A_b': [0., 1, 0, 0], 'A_nan': [0., 0, 0, 1], 'B_b': [1., 1, 0, 0], 'B_c': [0., 0, 1, 0], 'B_nan': [0., 0, 0, 1]}) @@ -306,14 +309,14 @@ def test_dataframe_dummies_with_na(self): 'B_nan']] assert_frame_equal(result, expected) - result = get_dummies(df, dummy_na=False) + result = get_dummies(df, dummy_na=False, sparse=self.sparse) expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']] assert_frame_equal(result, expected) def test_dataframe_dummies_with_categorical(self): df = self.df df['cat'] = pd.Categorical(['x', 'y', 'y']) - result = get_dummies(df) + result = get_dummies(df, sparse=self.sparse) expected = DataFrame({'C': [1, 2, 3], 'A_a': [1., 0, 1], 'A_b': [0., 1, 0], 'B_b': [1., 1, 0], 'B_c': [0., 0, 1], 'cat_x': [1., 0, 0], @@ -322,6 +325,11 @@ 
def test_dataframe_dummies_with_categorical(self): 'cat_x', 'cat_y']] assert_frame_equal(result, expected) + +class TestGetDummiesSparse(TestGetDummies): + sparse = True + + class TestLreshape(tm.TestCase): def test_pairs(self): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index c021bb1bf2fd6..bbe942e607faf 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -83,7 +83,7 @@ def test_dt_namespace_accessor(self): ok_for_period = ok_for_base + ['qyear'] ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz'] - ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert'] + ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize'] ok_for_td = ['days','seconds','microseconds','nanoseconds'] ok_for_td_methods = ['components','to_pytimedelta'] @@ -165,6 +165,7 @@ def compare(s, name): tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index)) tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index)) tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index)) + tm.assert_series_equal(s.dt.normalize(), pd.Series([s[0]] * 3, index=index)) # periodindex for s in [Series(period_range('20130101',periods=5,freq='D'))]: @@ -242,11 +243,32 @@ def test_dt_accessor_api(self): s.dt self.assertFalse(hasattr(s, 'dt')) - def test_binop_maybe_preserve_name(self): + def test_tab_completion(self): + # GH 9910 + s = Series(list('abcd')) + # Series of str values should have .str but not .dt/.cat in __dir__ + self.assertTrue('str' in dir(s)) + self.assertTrue('dt' not in dir(s)) + self.assertTrue('cat' not in dir(s)) + + # similiarly for .dt + s = Series(date_range('1/1/2015', periods=5)) + self.assertTrue('dt' in dir(s)) + self.assertTrue('str' not in dir(s)) + self.assertTrue('cat' 
not in dir(s)) + + # similiarly for .cat + s = Series(list('abbcd'), dtype="category") + self.assertTrue('cat' in dir(s)) + self.assertTrue('str' not in dir(s)) + self.assertTrue('dt' not in dir(s)) + def test_binop_maybe_preserve_name(self): # names match, preserve result = self.ts * self.ts self.assertEqual(result.name, self.ts.name) + result = self.ts.mul(self.ts) + self.assertEqual(result.name, self.ts.name) result = self.ts * self.ts[:-2] self.assertEqual(result.name, self.ts.name) @@ -256,6 +278,22 @@ def test_binop_maybe_preserve_name(self): cp.name = 'something else' result = self.ts + cp self.assertIsNone(result.name) + result = self.ts.add(cp) + self.assertIsNone(result.name) + + ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow'] + ops = ops + ['r' + op for op in ops] + for op in ops: + # names match, preserve + s = self.ts.copy() + result = getattr(s, op)(s) + self.assertEqual(result.name, self.ts.name) + + # names don't match, don't preserve + cp = self.ts.copy() + cp.name = 'changed' + result = getattr(s, op)(cp) + self.assertIsNone(result.name) def test_combine_first_name(self): result = self.ts.combine_first(self.ts[:5]) @@ -1859,6 +1897,48 @@ def test_where_dups(self): expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2]) assert_series_equal(comb, expected) + def test_where_datetime(self): + s = Series(date_range('20130102', periods=2)) + expected = Series([10, 10], dtype='datetime64[ns]') + mask = np.array([False, False]) + + rs = s.where(mask, [10, 10]) + assert_series_equal(rs, expected) + + rs = s.where(mask, 10) + assert_series_equal(rs, expected) + + rs = s.where(mask, 10.0) + assert_series_equal(rs, expected) + + rs = s.where(mask, [10.0, 10.0]) + assert_series_equal(rs, expected) + + rs = s.where(mask, [10.0, np.nan]) + expected = Series([10, None], dtype='datetime64[ns]') + assert_series_equal(rs, expected) + + def test_where_timedelta(self): + s = Series([1, 2], dtype='timedelta64[ns]') + expected = Series([10, 10], 
dtype='timedelta64[ns]') + mask = np.array([False, False]) + + rs = s.where(mask, [10, 10]) + assert_series_equal(rs, expected) + + rs = s.where(mask, 10) + assert_series_equal(rs, expected) + + rs = s.where(mask, 10.0) + assert_series_equal(rs, expected) + + rs = s.where(mask, [10.0, 10.0]) + assert_series_equal(rs, expected) + + rs = s.where(mask, [10.0, np.nan]) + expected = Series([10, None], dtype='timedelta64[ns]') + assert_series_equal(rs, expected) + def test_mask(self): # compare with tested results in test_where s = Series(np.random.randn(5)) @@ -1954,6 +2034,14 @@ def test_drop(self): self.assertRaises(ValueError, s.drop, 'bc') self.assertRaises(ValueError, s.drop, ('a',)) + # errors='ignore' + s = Series(range(3),index=list('abc')) + result = s.drop('bc', errors='ignore') + assert_series_equal(result, s) + result = s.drop(['a', 'd'], errors='ignore') + expected = s.ix[1:] + assert_series_equal(result, expected) + # bad axis self.assertRaises(ValueError, s.drop, 'one', axis='columns') @@ -3583,6 +3671,16 @@ def test_fillna(self): expected = Series([999,999,np.nan],index=[0,1,2]) assert_series_equal(result,expected) + # GH 9043 + # make sure a string representation of int/float values can be filled + # correctly without raising errors or being converted + vals = ['0', '1.5', '-0.3'] + for val in vals: + s = Series([0, 1, np.nan, np.nan, 4], dtype='float64') + result = s.fillna(val) + expected = Series([0, 1, val, val, 4], dtype='object') + assert_series_equal(result, expected) + def test_fillna_bug(self): x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd']) filled = x.fillna(method='ffill') @@ -4925,6 +5023,19 @@ def test_to_csv_path_is_none(self): csv_str = s.to_csv(path=None) self.assertIsInstance(csv_str, str) + def test_str_attribute(self): + # GH9068 + methods = ['strip', 'rstrip', 'lstrip'] + s = Series([' jack', 'jill ', ' jesse ', 'frank']) + for method in methods: + expected = Series([getattr(str, method)(x) for x in s.values]) + 
assert_series_equal(getattr(Series.str, method)(s.str), expected) + + # str accessor only valid with string values + s = Series(range(5)) + with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'): + s.str.repeat(2) + def test_clip(self): val = self.ts.median() @@ -4954,6 +5065,20 @@ def test_clip_types_and_nulls(self): self.assertEqual(list(isnull(s)), list(isnull(l))) self.assertEqual(list(isnull(s)), list(isnull(u))) + def test_clip_against_series(self): + # GH #6966 + + s = Series([1.0, 1.0, 4.0]) + threshold = Series([1.0, 2.0, 3.0]) + + assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0])) + assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0])) + + lower = Series([1.0, 2.0, 3.0]) + upper = Series([1.5, 2.5, 3.5]) + assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5])) + assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5])) + def test_valid(self): ts = self.ts.copy() ts[::2] = np.NaN @@ -5290,7 +5415,8 @@ def test_getitem_setitem_datetime_tz_pytz(self): def test_getitem_setitem_datetime_tz_dateutil(self): tm._skip_if_no_dateutil(); from dateutil.tz import tzutc - from dateutil.zoneinfo import gettz + from pandas.tslib import _dateutil_gettz as gettz + tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil from pandas import date_range @@ -5503,6 +5629,24 @@ def test_astype_str(self): expec = s.map(compat.text_type) assert_series_equal(res, expec) + # GH9757 + # Test str and unicode on python 2.x and just str on python 3.x + for tt in set([str, compat.text_type]): + ts = Series([Timestamp('2010-01-04 00:00:00')]) + s = ts.astype(tt) + expected = Series([tt(ts.values[0])]) + assert_series_equal(s, expected) + + ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')]) + s = ts.astype(tt) + expected = Series([tt(ts.values[0])]) + assert_series_equal(s, expected) + + td = Series([Timedelta(1, unit='d')]) + s = td.astype(tt) + expected = 
Series([tt(td.values[0])]) + assert_series_equal(s, expected) + def test_astype_unicode(self): # GH7758 @@ -5578,6 +5722,22 @@ def test_map_type_inference(self): s2 = s.map(lambda x: np.where(x == 0, 0, 1)) self.assertTrue(issubclass(s2.dtype.type, np.integer)) + def test_divide_decimal(self): + ''' resolves issue #9787 ''' + from decimal import Decimal + + expected = Series([Decimal(5)]) + + s = Series([Decimal(10)]) + s = s/Decimal(2) + + tm.assert_series_equal(expected, s) + + s = Series([Decimal(10)]) + s = s//Decimal(2) + + tm.assert_series_equal(expected, s) + def test_map_decimal(self): from decimal import Decimal @@ -5789,6 +5949,10 @@ def _check_align(a, b, how='left', fill=None): assert_series_equal(aa, ea) assert_series_equal(ab, eb) + self.assertEqual(aa.name, 'ts') + self.assertEqual(ea.name, 'ts') + self.assertEqual(ab.name, 'ts') + self.assertEqual(eb.name, 'ts') for kind in JOIN_TYPES: _check_align(self.ts[2:], self.ts[:-5], how=kind) @@ -5796,12 +5960,15 @@ def _check_align(a, b, how='left', fill=None): # empty left _check_align(self.ts[:0], self.ts[:-5], how=kind) + _check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1) # empty right _check_align(self.ts[:-5], self.ts[:0], how=kind) + _check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1) # both empty _check_align(self.ts[:0], self.ts[:0], how=kind) + _check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1) def test_align_fill_method(self): def _check_align(a, b, how='left', method='pad', limit=None): @@ -6754,6 +6921,22 @@ def test_searchsorted_sorter(self): e = np.array([0, 2]) tm.assert_array_equal(r, e) + def test_to_frame_expanddim(self): + # GH 9762 + + class SubclassedSeries(Series): + @property + def _constructor_expanddim(self): + return SubclassedFrame + + class SubclassedFrame(DataFrame): + pass + + s = SubclassedSeries([1, 2, 3], name='X') + result = s.to_frame() + self.assertTrue(isinstance(result, SubclassedFrame)) + expected = SubclassedFrame({'X': [1, 2, 3]}) + 
assert_frame_equal(result, expected) class TestSeriesNonUnique(tm.TestCase): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 9283be566bd8f..b0d8d89d65cf2 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -516,7 +516,6 @@ def test_match(self): def test_extract(self): # Contains tests like those in test_match and some others. - values = Series(['fooBAD__barBAD', NA, 'foo']) er = [NA, NA] # empty row @@ -540,15 +539,31 @@ def test_extract(self): exp = DataFrame([[u('BAD__'), u('BAD')], er, er]) tm.assert_frame_equal(result, exp) - # no groups - s = Series(['A1', 'B2', 'C3']) - f = lambda: s.str.extract('[ABC][123]') - self.assertRaises(ValueError, f) - - # only non-capturing groups - f = lambda: s.str.extract('(?:[AB]).*') - self.assertRaises(ValueError, f) + # GH9980 + # Index only works with one regex group since + # multi-group would expand to a frame + idx = Index(['A1', 'A2', 'A3', 'A4', 'B5']) + with tm.assertRaisesRegexp(ValueError, "supported"): + idx.str.extract('([AB])([123])') + + # these should work for both Series and Index + for klass in [Series, Index]: + # no groups + s_or_idx = klass(['A1', 'B2', 'C3']) + f = lambda: s_or_idx.str.extract('[ABC][123]') + self.assertRaises(ValueError, f) + + # only non-capturing groups + f = lambda: s_or_idx.str.extract('(?:[AB]).*') + self.assertRaises(ValueError, f) + + # single group renames series/index properly + s_or_idx = klass(['A1', 'A2']) + result = s_or_idx.str.extract(r'(?P<uno>A)\d') + tm.assert_equal(result.name, 'uno') + tm.assert_array_equal(result, klass(['A', 'A'])) + s = Series(['A1', 'B2', 'C3']) # one group, no matches result = s.str.extract('(_)') exp = Series([NA, NA, NA], dtype=object) @@ -569,14 +584,16 @@ def test_extract(self): exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]]) tm.assert_frame_equal(result, exp) - # named group/groups - result = s.str.extract('(?P<letter>[AB])(?P<number>[123])') - exp = DataFrame([['A', '1'], ['B', 
'2'], [NA, NA]], columns=['letter', 'number']) - tm.assert_frame_equal(result, exp) + # one named group result = s.str.extract('(?P<letter>[AB])') exp = Series(['A', 'B', NA], name='letter') tm.assert_series_equal(result, exp) + # two named groups + result = s.str.extract('(?P<letter>[AB])(?P<number>[123])') + exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]], columns=['letter', 'number']) + tm.assert_frame_equal(result, exp) + # mix named and unnamed groups result = s.str.extract('([AB])(?P<number>[123])') exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]], columns=[0, 'number']) @@ -602,11 +619,6 @@ def test_extract(self): exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]], columns=['letter', 'number']) tm.assert_frame_equal(result, exp) - # single group renames series properly - s = Series(['A1', 'A2']) - result = s.str.extract(r'(?P<uno>A)\d') - tm.assert_equal(result.name, 'uno') - # GH6348 # not passing index to the extractor def check_index(index): @@ -664,6 +676,8 @@ def test_empty_str_methods(self): tm.assert_series_equal(empty_str, empty.str.pad(42)) tm.assert_series_equal(empty_str, empty.str.center(42)) tm.assert_series_equal(empty_list, empty.str.split('a')) + tm.assert_series_equal(empty_list, empty.str.partition('a', expand=False)) + tm.assert_series_equal(empty_list, empty.str.rpartition('a', expand=False)) tm.assert_series_equal(empty_str, empty.str.slice(stop=1)) tm.assert_series_equal(empty_str, empty.str.slice(step=1)) tm.assert_series_equal(empty_str, empty.str.strip()) @@ -685,6 +699,19 @@ def test_empty_str_methods(self): tm.assert_series_equal(empty_str, empty.str.isdecimal()) tm.assert_series_equal(empty_str, empty.str.capitalize()) tm.assert_series_equal(empty_str, empty.str.swapcase()) + tm.assert_series_equal(empty_str, empty.str.normalize('NFC')) + if compat.PY3: + table = str.maketrans('a', 'b') + else: + import string + table = string.maketrans('a', 'b') + tm.assert_series_equal(empty_str, empty.str.translate(table)) + + def 
test_empty_str_methods_to_frame(self): + empty_str = empty = Series(dtype=str) + empty_df = DataFrame([]) + tm.assert_frame_equal(empty_df, empty.str.partition('a')) + tm.assert_frame_equal(empty_df, empty.str.rpartition('a')) def test_ismethods(self): values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' '] @@ -752,6 +779,12 @@ def test_get_dummies(self): columns=list('7ab')) tm.assert_frame_equal(result, expected) + # GH9980 + # Index.str does not support get_dummies() as it returns a frame + with tm.assertRaisesRegexp(TypeError, "not supported"): + idx = Index(['a|b', 'a|c', 'b|c']) + idx.str.get_dummies('|') + def test_join(self): values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h']) result = values.str.split('_').str.join('_') @@ -881,6 +914,53 @@ def test_find_nan(self): result = values.str.rfind('EF', 3, 6) tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1])) + def test_index(self): + for klass in [Series, Index]: + s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF']) + + result = s.str.index('EF') + tm.assert_array_equal(result, klass([4, 3, 1, 0])) + expected = np.array([v.index('EF') for v in s.values]) + tm.assert_array_equal(result.values, expected) + + result = s.str.rindex('EF') + tm.assert_array_equal(result, klass([4, 5, 7, 4])) + expected = np.array([v.rindex('EF') for v in s.values]) + tm.assert_array_equal(result.values, expected) + + result = s.str.index('EF', 3) + tm.assert_array_equal(result, klass([4, 3, 7, 4])) + expected = np.array([v.index('EF', 3) for v in s.values]) + tm.assert_array_equal(result.values, expected) + + result = s.str.rindex('EF', 3) + tm.assert_array_equal(result, klass([4, 5, 7, 4])) + expected = np.array([v.rindex('EF', 3) for v in s.values]) + tm.assert_array_equal(result.values, expected) + + result = s.str.index('E', 4, 8) + tm.assert_array_equal(result, klass([4, 5, 7, 4])) + expected = np.array([v.index('E', 4, 8) for v in s.values]) + tm.assert_array_equal(result.values, expected) + + 
result = s.str.rindex('E', 0, 5) + tm.assert_array_equal(result, klass([4, 3, 1, 4])) + expected = np.array([v.rindex('E', 0, 5) for v in s.values]) + tm.assert_array_equal(result.values, expected) + + with tm.assertRaisesRegexp(ValueError, "substring not found"): + result = s.str.index('DE') + + with tm.assertRaisesRegexp(TypeError, "expected a string object, not int"): + result = s.str.index(0) + + # test with nan + s = Series(['abcb', 'ab', 'bcbe', np.nan]) + result = s.str.index('b') + tm.assert_array_equal(result, Series([1, 1, 0, np.nan])) + result = s.str.rindex('b') + tm.assert_array_equal(result, Series([3, 1, 2, np.nan])) + def test_pad(self): values = Series(['a', 'b', NA, 'c', NA, 'eeeeee']) @@ -965,6 +1045,37 @@ def test_pad_fillchar(self): with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"): result = values.str.pad(5, fillchar=5) + def test_translate(self): + for klass in [Series, Index]: + s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg']) + if not compat.PY3: + import string + table = string.maketrans('abc', 'cde') + else: + table = str.maketrans('abc', 'cde') + result = s.str.translate(table) + expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg']) + tm.assert_array_equal(result, expected) + + # use of deletechars is python 2 only + if not compat.PY3: + result = s.str.translate(table, deletechars='fg') + expected = klass(['cdede', 'cdee', 'eddd', 'ede']) + tm.assert_array_equal(result, expected) + + result = s.str.translate(None, deletechars='fg') + expected = klass(['abcde', 'abcc', 'cddd', 'cde']) + tm.assert_array_equal(result, expected) + else: + with tm.assertRaisesRegexp(ValueError, "deletechars is not a valid argument"): + result = s.str.translate(table, deletechars='fg') + + # Series with non-string values + s = Series(['a', 'b', 'c', 1.2]) + expected = Series(['c', 'd', 'e', np.nan]) + result = s.str.translate(table) + tm.assert_array_equal(result, expected) + def test_center_ljust_rjust(self): values = 
Series(['a', 'b', NA, 'c', NA, 'eeeeee']) @@ -1095,14 +1206,19 @@ def test_split(self): result = values.str.split('__') tm.assert_series_equal(result, exp) + result = values.str.split('__', expand=False) + tm.assert_series_equal(result, exp) + # mixed mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1, 2.]) - - rs = Series(mixed).str.split('_') + rs = mixed.str.split('_') xp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA]) + tm.assert_isinstance(rs, Series) + tm.assert_almost_equal(rs, xp) + rs = mixed.str.split('_', expand=False) tm.assert_isinstance(rs, Series) tm.assert_almost_equal(rs, xp) @@ -1115,6 +1231,9 @@ def test_split(self): [u('f'), u('g'), u('h')]]) tm.assert_series_equal(result, exp) + result = values.str.split('_', expand=False) + tm.assert_series_equal(result, exp) + def test_split_noargs(self): # #1859 s = Series(['Wes McKinney', 'Travis Oliphant']) @@ -1148,7 +1267,10 @@ def test_split_no_pat_with_nonzero_n(self): def test_split_to_dataframe(self): s = Series(['nosplit', 'alsonosplit']) - result = s.str.split('_', return_type='frame') + + with tm.assert_produces_warning(): + result = s.str.split('_', return_type='frame') + exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])}) tm.assert_frame_equal(result, exp) @@ -1171,9 +1293,174 @@ def test_split_to_dataframe(self): index=['preserve', 'me']) tm.assert_frame_equal(result, exp) - with tm.assertRaisesRegexp(ValueError, "return_type must be"): + with tm.assertRaisesRegexp(ValueError, "expand must be"): s.str.split('_', return_type="some_invalid_type") + def test_split_to_dataframe_expand(self): + s = Series(['nosplit', 'alsonosplit']) + result = s.str.split('_', expand=True) + exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])}) + tm.assert_frame_equal(result, exp) + + s = Series(['some_equal_splits', 'with_no_nans']) + result = s.str.split('_', expand=True) + exp = DataFrame({0: ['some', 'with'], 1: ['equal', 'no'], + 2: ['splits', 'nans']}) + 
tm.assert_frame_equal(result, exp) + + s = Series(['some_unequal_splits', 'one_of_these_things_is_not']) + result = s.str.split('_', expand=True) + exp = DataFrame({0: ['some', 'one'], 1: ['unequal', 'of'], + 2: ['splits', 'these'], 3: [NA, 'things'], + 4: [NA, 'is'], 5: [NA, 'not']}) + tm.assert_frame_equal(result, exp) + + s = Series(['some_splits', 'with_index'], index=['preserve', 'me']) + result = s.str.split('_', expand=True) + exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']}, + index=['preserve', 'me']) + tm.assert_frame_equal(result, exp) + + with tm.assertRaisesRegexp(ValueError, "expand must be"): + s.str.split('_', return_type="some_invalid_type") + + def test_split_to_multiindex_expand(self): + idx = Index(['nosplit', 'alsonosplit']) + result = idx.str.split('_', expand=True) + exp = Index([np.array(['nosplit']), np.array(['alsonosplit'])]) + tm.assert_index_equal(result, exp) + self.assertEqual(result.nlevels, 1) + + idx = Index(['some_equal_splits', 'with_no_nans']) + result = idx.str.split('_', expand=True) + exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), + ('with', 'no', 'nans')]) + tm.assert_index_equal(result, exp) + self.assertEqual(result.nlevels, 3) + + idx = Index(['some_unequal_splits', 'one_of_these_things_is_not']) + result = idx.str.split('_', expand=True) + exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA), + ('one', 'of', 'these', 'things', 'is', 'not')]) + tm.assert_index_equal(result, exp) + self.assertEqual(result.nlevels, 6) + + with tm.assertRaisesRegexp(ValueError, "expand must be"): + idx.str.split('_', return_type="some_invalid_type") + + def test_partition_series(self): + values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h']) + + result = values.str.partition('_', expand=False) + exp = Series([['a', '_', 'b_c'], ['c', '_', 'd_e'], NA, ['f', '_', 'g_h']]) + tm.assert_series_equal(result, exp) + + result = values.str.rpartition('_', expand=False) + exp = Series([['a_b', '_', 'c'], ['c_d', 
'_', 'e'], NA, ['f_g', '_', 'h']]) + tm.assert_series_equal(result, exp) + + # more than one char + values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h']) + result = values.str.partition('__', expand=False) + exp = Series([['a', '__', 'b__c'], ['c', '__', 'd__e'], NA, ['f', '__', 'g__h']]) + tm.assert_series_equal(result, exp) + + result = values.str.rpartition('__', expand=False) + exp = Series([['a__b', '__', 'c'], ['c__d', '__', 'e'], NA, ['f__g', '__', 'h']]) + tm.assert_series_equal(result, exp) + + # None + values = Series(['a b c', 'c d e', NA, 'f g h']) + result = values.str.partition(expand=False) + exp = Series([['a', ' ', 'b c'], ['c', ' ', 'd e'], NA, ['f', ' ', 'g h']]) + tm.assert_series_equal(result, exp) + + result = values.str.rpartition(expand=False) + exp = Series([['a b', ' ', 'c'], ['c d', ' ', 'e'], NA, ['f g', ' ', 'h']]) + tm.assert_series_equal(result, exp) + + # Not splited + values = Series(['abc', 'cde', NA, 'fgh']) + result = values.str.partition('_', expand=False) + exp = Series([['abc', '', ''], ['cde', '', ''], NA, ['fgh', '', '']]) + tm.assert_series_equal(result, exp) + + result = values.str.rpartition('_', expand=False) + exp = Series([['', '', 'abc'], ['', '', 'cde'], NA, ['', '', 'fgh']]) + tm.assert_series_equal(result, exp) + + # unicode + values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')]) + + result = values.str.partition('_', expand=False) + exp = Series([[u('a'), u('_'), u('b_c')], [u('c'), u('_'), u('d_e')], + NA, [u('f'), u('_'), u('g_h')]]) + tm.assert_series_equal(result, exp) + + result = values.str.rpartition('_', expand=False) + exp = Series([[u('a_b'), u('_'), u('c')], [u('c_d'), u('_'), u('e')], + NA, [u('f_g'), u('_'), u('h')]]) + tm.assert_series_equal(result, exp) + + # compare to standard lib + values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF']) + result = values.str.partition('_', expand=False).tolist() + self.assertEqual(result, [v.partition('_') for v in values]) + result = 
values.str.rpartition('_', expand=False).tolist() + self.assertEqual(result, [v.rpartition('_') for v in values]) + + def test_partition_index(self): + values = Index(['a_b_c', 'c_d_e', 'f_g_h']) + + result = values.str.partition('_', expand=False) + exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])) + tm.assert_index_equal(result, exp) + self.assertEqual(result.nlevels, 1) + + result = values.str.rpartition('_', expand=False) + exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])) + tm.assert_index_equal(result, exp) + self.assertEqual(result.nlevels, 1) + + result = values.str.partition('_') + exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')]) + tm.assert_index_equal(result, exp) + self.assertTrue(isinstance(result, MultiIndex)) + self.assertEqual(result.nlevels, 3) + + result = values.str.rpartition('_') + exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')]) + tm.assert_index_equal(result, exp) + self.assertTrue(isinstance(result, MultiIndex)) + self.assertEqual(result.nlevels, 3) + + def test_partition_to_dataframe(self): + values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h']) + result = values.str.partition('_') + exp = DataFrame({0: ['a', 'c', np.nan, 'f'], + 1: ['_', '_', np.nan, '_'], + 2: ['b_c', 'd_e', np.nan, 'g_h']}) + tm.assert_frame_equal(result, exp) + + result = values.str.rpartition('_') + exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'], + 1: ['_', '_', np.nan, '_'], + 2: ['c', 'e', np.nan, 'h']}) + tm.assert_frame_equal(result, exp) + + values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h']) + result = values.str.partition('_', expand=True) + exp = DataFrame({0: ['a', 'c', np.nan, 'f'], + 1: ['_', '_', np.nan, '_'], + 2: ['b_c', 'd_e', np.nan, 'g_h']}) + tm.assert_frame_equal(result, exp) + + result = values.str.rpartition('_', expand=True) + exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'], + 1: ['_', '_', np.nan, '_'], + 2: ['c', 'e', np.nan, 'h']}) + 
tm.assert_frame_equal(result, exp) + def test_pipe_failures(self): # #2119 s = Series(['A|B|C']) @@ -1549,6 +1836,51 @@ def test_encode_decode_errors(self): tm.assert_series_equal(result, exp) + def test_normalize(self): + def unistr(codes): + # build unicode string from unichr + # we cannot use six.u() here because it escapes unicode + return ''.join([unichr(c) for c in codes]) + + values = ['ABC', # ASCII + unistr([0xFF21, 0xFF22, 0xFF23]), # ABC + unistr([0xFF11, 0xFF12, 0xFF13]), # 123 + np.nan, + unistr([0xFF71, 0xFF72, 0xFF74])] # アイエ + s = Series(values, index=['a', 'b', 'c', 'd', 'e']) + + normed = [compat.u_safe('ABC'), + compat.u_safe('ABC'), + compat.u_safe('123'), + np.nan, + unistr([0x30A2, 0x30A4, 0x30A8])] # アイエ + expected = Series(normed, index=['a', 'b', 'c', 'd', 'e']) + + result = s.str.normalize('NFKC') + tm.assert_series_equal(result, expected) + + expected = Series([compat.u_safe('ABC'), + unistr([0xFF21, 0xFF22, 0xFF23]), # ABC + unistr([0xFF11, 0xFF12, 0xFF13]), # 123 + np.nan, + unistr([0xFF71, 0xFF72, 0xFF74])], # アイエ + index=['a', 'b', 'c', 'd', 'e']) + + result = s.str.normalize('NFC') + tm.assert_series_equal(result, expected) + + with tm.assertRaisesRegexp(ValueError, "invalid normalization form"): + s.str.normalize('xxx') + + s = Index([unistr([0xFF21, 0xFF22, 0xFF23]), # ABC + unistr([0xFF11, 0xFF12, 0xFF13]), # 123 + unistr([0xFF71, 0xFF72, 0xFF74])]) # アイエ + expected = Index([compat.u_safe('ABC'), + compat.u_safe('123'), + unistr([0x30A2, 0x30A4, 0x30A8])]) + result = s.str.normalize('NFKC') + tm.assert_index_equal(result, expected) + def test_cat_on_filtered_index(self): df = DataFrame(index=MultiIndex.from_product([[2011, 2012], [1,2,3]], names=['year', 'month'])) @@ -1567,6 +1899,68 @@ def test_cat_on_filtered_index(self): self.assertEqual(str_multiple.loc[1], '2011 2 2') + def test_index_str_accessor_visibility(self): + from pandas.core.strings import StringMethods + + if not compat.PY3: + cases = [(['a', 'b'], 'string'), + 
(['a', u('b')], 'mixed'), + ([u('a'), u('b')], 'unicode'), + (['a', 'b', 1], 'mixed-integer'), + (['a', 'b', 1.3], 'mixed'), + (['a', 'b', 1.3, 1], 'mixed-integer'), + (['aa', datetime(2011, 1, 1)], 'mixed')] + else: + cases = [(['a', 'b'], 'string'), + (['a', u('b')], 'string'), + ([u('a'), u('b')], 'string'), + (['a', 'b', 1], 'mixed-integer'), + (['a', 'b', 1.3], 'mixed'), + (['a', 'b', 1.3, 1], 'mixed-integer'), + (['aa', datetime(2011, 1, 1)], 'mixed')] + for values, tp in cases: + idx = Index(values) + self.assertTrue(isinstance(Series(values).str, StringMethods)) + self.assertTrue(isinstance(idx.str, StringMethods)) + self.assertEqual(idx.inferred_type, tp) + + for values, tp in cases: + idx = Index(values) + self.assertTrue(isinstance(Series(values).str, StringMethods)) + self.assertTrue(isinstance(idx.str, StringMethods)) + self.assertEqual(idx.inferred_type, tp) + + cases = [([1, np.nan], 'floating'), + ([datetime(2011, 1, 1)], 'datetime64'), + ([timedelta(1)], 'timedelta64')] + for values, tp in cases: + idx = Index(values) + message = 'Can only use .str accessor with string values' + with self.assertRaisesRegexp(AttributeError, message): + Series(values).str + with self.assertRaisesRegexp(AttributeError, message): + idx.str + self.assertEqual(idx.inferred_type, tp) + + # MultiIndex has mixed dtype, but not allow to use accessor + idx = MultiIndex.from_tuples([('a', 'b'), ('a', 'b')]) + self.assertEqual(idx.inferred_type, 'mixed') + message = 'Can only use .str accessor with Index, not MultiIndex' + with self.assertRaisesRegexp(AttributeError, message): + idx.str + + def test_method_on_bytes(self): + lhs = Series(np.array(list('abc'), 'S1').astype(object)) + rhs = Series(np.array(list('def'), 'S1').astype(object)) + if compat.PY3: + self.assertRaises(TypeError, lhs.str.cat, rhs) + else: + result = lhs.str.cat(rhs) + expected = Series(np.array(['ad', 'be', 'cf'], + 'S2').astype(object)) + tm.assert_series_equal(result, expected) + + if __name__ == 
'__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py index 642e50c37874d..cc0a0ea5662db 100644 --- a/pandas/tests/test_testing.py +++ b/pandas/tests/test_testing.py @@ -215,6 +215,14 @@ def test_multiindex_dtype(self): {'a':[1.0,2.0],'b':[2.1,1.5],'c':['l1','l2']}, index=['a','b']) self._assert_not_equal(df1, df2, check_index_type=True) + def test_empty_dtypes(self): + df1=pd.DataFrame(columns=["col1","col2"]) + df1["col1"] = df1["col1"].astype('int64') + df2=pd.DataFrame(columns=["col1","col2"]) + self._assert_equal(df1, df2, check_dtype=False) + self._assert_not_equal(df1, df2, check_dtype=True) + + class TestRNGContext(unittest.TestCase): def test_RNGContext(self): diff --git a/pandas/tests/test_tseries.py b/pandas/tests/test_tseries.py index 1b796ed2d83d1..035b3ac07342d 100644 --- a/pandas/tests/test_tseries.py +++ b/pandas/tests/test_tseries.py @@ -9,6 +9,8 @@ import pandas.lib as lib import pandas._period as period import pandas.algos as algos +from pandas.tseries.holiday import Holiday, SA, next_monday +from pandas import DateOffset class TestTseriesUtil(tm.TestCase): @@ -737,6 +739,17 @@ def test_get_period_field_raises_on_out_of_range(self): def test_get_period_field_array_raises_on_out_of_range(self): self.assertRaises(ValueError, period.get_period_field_arr, -1, np.empty(1), 0) + +class TestHolidayConflictingArguments(tm.TestCase): + + # GH 10217 + + def test_both_offset_observance_raises(self): + + with self.assertRaises(NotImplementedError) as cm: + h = Holiday("Cyber Monday", month=11, day=1, + offset=[DateOffset(weekday=SA(4))], observance=next_monday) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py index 2e22b33dc769a..38f058358b37f 100644 --- a/pandas/tests/test_util.py +++ b/pandas/tests/test_util.py @@ 
-3,6 +3,7 @@ import nose +import sys import pandas.util from pandas.util.decorators import deprecate_kwarg import pandas.util.testing as tm @@ -79,6 +80,13 @@ def test_warning(self): with tm.assert_produces_warning(FutureWarning): self.assertNotAlmostEquals(1, 2) + def test_locale(self): + if sys.platform == 'win32': + raise nose.SkipTest("skipping on win platforms as locale not available") + + #GH9744 + locales = pandas.util.testing.get_locales() + self.assertTrue(len(locales) >= 1) def test_rands(): r = tm.rands(10) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 0be030d7c2c8e..76685e2589012 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -12,6 +12,7 @@ from pandas.util.decorators import cache_readonly, deprecate_kwarg import pandas.core.common as com +from pandas.core.common import AbstractMethodError from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex from pandas.core.series import Series, remove_na @@ -131,7 +132,7 @@ def random_color(column): colors = lmap(random_color, lrange(num_colors)) else: - raise NotImplementedError + raise ValueError("color_type must be either 'default' or 'random'") if len(colors) != num_colors: multiple = num_colors//len(colors) - 1 @@ -809,7 +810,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, self.rot = self._default_rot if grid is None: - grid = False if secondary_y else True + grid = False if secondary_y else self.plt.rcParams['axes.grid'] self.grid = grid self.legend = legend @@ -867,12 +868,17 @@ def _validate_color_args(self): "simultaneously. Using 'color'") if 'color' in self.kwds and self.style is not None: + if com.is_list_like(self.style): + styles = self.style + else: + styles = [self.style] # need only a single match - if re.match('^[a-z]+?', self.style) is not None: - raise ValueError("Cannot pass 'style' string with a color " - "symbol and 'color' keyword argument. 
Please" - " use one or the other or pass 'style' " - "without a color symbol") + for s in styles: + if re.match('^[a-z]+?', s) is not None: + raise ValueError("Cannot pass 'style' string with a color " + "symbol and 'color' keyword argument. Please" + " use one or the other or pass 'style' " + "without a color symbol") def _iter_data(self, data=None, keep_index=False, fillna=None): if data is None: @@ -880,28 +886,16 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): if fillna is not None: data = data.fillna(fillna) - from pandas.core.frame import DataFrame - if isinstance(data, (Series, np.ndarray, Index)): - label = self.label if self.label is not None else data.name + if self.sort_columns: + columns = com._try_sort(data.columns) + else: + columns = data.columns + + for col in columns: if keep_index is True: - yield label, data + yield col, data[col] else: - yield label, np.asarray(data) - elif isinstance(data, DataFrame): - if self.sort_columns: - columns = com._try_sort(data.columns) - else: - columns = data.columns - - for col in columns: - # # is this right? 
- # empty = df[col].count() == 0 - # values = df[col].values if not empty else np.zeros(len(df)) - - if keep_index is True: - yield col, data[col] - else: - yield col, data[col].values + yield col, data[col].values @property def nseries(self): @@ -934,19 +928,21 @@ def _has_plotted_object(self, ax): def _maybe_right_yaxis(self, ax, axes_num): if not self.on_right(axes_num): - if hasattr(ax, 'left_ax'): - # secondary axes may be passed as axes - return ax.left_ax - return ax + # secondary axes may be passed via ax kw + return self._get_ax_layer(ax) if hasattr(ax, 'right_ax'): + # if it has right_ax proparty, ``ax`` must be left axes return ax.right_ax + elif hasattr(ax, 'left_ax'): + # if it has left_ax proparty, ``ax`` must be right axes + return ax else: + # otherwise, create twin axes orig_ax, new_ax = ax, ax.twinx() new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax - new_ax.right_ax = new_ax if not self._has_plotted_object(orig_ax): # no data on left y orig_ax.get_yaxis().set_visible(False) @@ -994,14 +990,21 @@ def result(self): all_sec = (com.is_list_like(self.secondary_y) and len(self.secondary_y) == self.nseries) if (sec_true or all_sec): - # if all data is plotted on secondary, - # return secondary axes - return self.axes[0].right_ax + # if all data is plotted on secondary, return right axes + return self._get_ax_layer(self.axes[0], primary=False) else: return self.axes[0] def _compute_plot_data(self): - numeric_data = self.data.convert_objects()._get_numeric_data() + data = self.data + + if isinstance(data, Series): + label = self.label + if label is None and data.name is None: + label = 'None' + data = data.to_frame(name=label) + + numeric_data = data.convert_objects()._get_numeric_data() try: is_empty = numeric_data.empty @@ -1016,18 +1019,13 @@ def _compute_plot_data(self): self.data = numeric_data def _make_plot(self): - raise NotImplementedError + raise AbstractMethodError(self) def 
_add_table(self): if self.table is False: return elif self.table is True: - from pandas.core.frame import DataFrame - if isinstance(self.data, Series): - data = DataFrame(self.data, columns=[self.data.name]) - elif isinstance(self.data, DataFrame): - data = self.data - data = data.transpose() + data = self.data.transpose() else: data = self.table ax = self._get_ax(0) @@ -1042,7 +1040,10 @@ def _adorn_subplots(self): if len(self.axes) > 0: all_axes = self._get_axes() nrows, ncols = self._get_axes_layout() - _handle_shared_axes(all_axes, len(all_axes), len(all_axes), nrows, ncols, self.sharex, self.sharey) + _handle_shared_axes(axarr=all_axes, nplots=len(all_axes), + naxes=nrows * ncols, nrows=nrows, + ncols=ncols, sharex=self.sharex, + sharey=self.sharey) for ax in to_adorn: if self.yticks is not None: @@ -1094,18 +1095,15 @@ def _apply_axis_properties(self, axis, rot=None, fontsize=None): @property def legend_title(self): - if hasattr(self.data, 'columns'): - if not isinstance(self.data.columns, MultiIndex): - name = self.data.columns.name - if name is not None: - name = com.pprint_thing(name) - return name - else: - stringified = map(com.pprint_thing, - self.data.columns.names) - return ','.join(stringified) + if not isinstance(self.data.columns, MultiIndex): + name = self.data.columns.name + if name is not None: + name = com.pprint_thing(name) + return name else: - return None + stringified = map(com.pprint_thing, + self.data.columns.names) + return ','.join(stringified) def _add_legend_handle(self, handle, label, index=None): if not label is None: @@ -1236,11 +1234,18 @@ def _get_index_name(self): return name + @classmethod + def _get_ax_layer(cls, ax, primary=True): + """get left (primary) or right (secondary) axes""" + if primary: + return getattr(ax, 'left_ax', ax) + else: + return getattr(ax, 'right_ax', ax) + def _get_ax(self, i): # get the twinx ax if appropriate if self.subplots: ax = self.axes[i] - ax = self._maybe_right_yaxis(ax, i) self.axes[i] = ax 
else: @@ -1251,12 +1256,10 @@ def _get_ax(self, i): return ax def on_right(self, i): - from pandas.core.frame import DataFrame if isinstance(self.secondary_y, bool): return self.secondary_y - if (isinstance(self.data, DataFrame) and - isinstance(self.secondary_y, (tuple, list, np.ndarray, Index))): + if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)): return self.data.columns[i] in self.secondary_y def _get_style(self, i, col_name): @@ -1548,16 +1551,14 @@ def __init__(self, data, **kwargs): self.x_compat = bool(self.kwds.pop('x_compat')) def _index_freq(self): - from pandas.core.frame import DataFrame - if isinstance(self.data, (Series, DataFrame)): - freq = getattr(self.data.index, 'freq', None) - if freq is None: - freq = getattr(self.data.index, 'inferred_freq', None) - if freq == 'B': - weekdays = np.unique(self.data.index.dayofweek) - if (5 in weekdays) or (6 in weekdays): - freq = None - return freq + freq = getattr(self.data.index, 'freq', None) + if freq is None: + freq = getattr(self.data.index, 'inferred_freq', None) + if freq == 'B': + weekdays = np.unique(self.data.index.dayofweek) + if (5 in weekdays) or (6 in weekdays): + freq = None + return freq def _is_dynamic_freq(self, freq): if isinstance(freq, DateOffset): @@ -1569,9 +1570,7 @@ def _is_dynamic_freq(self, freq): def _no_base(self, freq): # hack this for 0.10.1, creating more technical debt...sigh - from pandas.core.frame import DataFrame - if (isinstance(self.data, (Series, DataFrame)) - and isinstance(self.data.index, DatetimeIndex)): + if isinstance(self.data.index, DatetimeIndex): base = frequencies.get_freq(freq) x = self.data.index if (base <= frequencies.FreqGroup.FR_DAY): @@ -1681,17 +1680,13 @@ def _update_prior(self, y): def _maybe_convert_index(self, data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames - from pandas.core.frame import DataFrame - if (isinstance(data.index, DatetimeIndex) and - isinstance(data, 
DataFrame)): + if isinstance(data.index, DatetimeIndex): freq = getattr(data.index, 'freq', None) if freq is None: freq = getattr(data.index, 'inferred_freq', None) if isinstance(freq, DateOffset): freq = freq.rule_code - freq = frequencies.get_base_alias(freq) - freq = frequencies.get_period_alias(freq) if freq is None: ax = self._get_ax(0) @@ -1700,9 +1695,10 @@ def _maybe_convert_index(self, data): if freq is None: raise ValueError('Could not get frequency alias for plotting') - data = DataFrame(data.values, - index=data.index.to_period(freq=freq), - columns=data.columns) + freq = frequencies.get_base_alias(freq) + freq = frequencies.get_period_alias(freq) + + data.index = data.index.to_period(freq=freq) return data def _post_plot_logic(self): @@ -1831,21 +1827,19 @@ def _get_plot_function(self): if self.kind == 'bar': def f(ax, x, y, w, start=None, **kwds): start = start + self.bottom - return ax.bar(x, y, w, bottom=start,log=self.log, **kwds) + return ax.bar(x, y, w, bottom=start, log=self.log, **kwds) elif self.kind == 'barh': + def f(ax, x, y, w, start=None, log=self.log, **kwds): start = start + self.left - return ax.barh(x, y, w, left=start, **kwds) + return ax.barh(x, y, w, left=start, log=self.log, **kwds) else: - raise NotImplementedError + raise ValueError("BarPlot kind must be either 'bar' or 'barh'") return f def _make_plot(self): import matplotlib as mpl - # mpl decided to make their version string unicode across all Python - # versions for mpl >= 1.3 so we have to call str here for python 2 - mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1') colors = self._get_colors() ncolors = len(colors) @@ -1869,11 +1863,8 @@ def _make_plot(self): kwds['ecolor'] = mpl.rcParams['xtick.color'] start = 0 - if self.log: + if self.log and (y >= 1).all(): start = 1 - if any(y < 1): - # GH3254 - start = 0 if mpl_le_1_2_1 else None if self.subplots: w = self.bar_width / 2 @@ -1943,7 +1934,8 @@ def __init__(self, data, bins=10, bottom=0, **kwargs): def 
_args_adjust(self): if com.is_integer(self.bins): # create common bin edge - values = np.ravel(self.data.values) + values = self.data.convert_objects()._get_numeric_data() + values = np.ravel(values) values = values[~com.isnull(values)] hist, self.bins = np.histogram(values, bins=self.bins, @@ -2515,10 +2507,7 @@ def plot_series(data, kind='line', ax=None, # Series unique """ if ax is None and len(plt.get_fignums()) > 0: ax = _gca() - ax = getattr(ax, 'left_ax', ax) - # is there harm in this? - if label is None: - label = data.name + ax = MPLPlot._get_ax_layer(ax) return _plot(data, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, @@ -3020,7 +3009,7 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, if columns is None: if not isinstance(by, (list, tuple)): by = [by] - columns = data._get_numeric_data().columns - by + columns = data._get_numeric_data().columns.difference(by) naxes = len(columns) fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True, figsize=figsize, ax=ax, layout=layout) @@ -3365,11 +3354,9 @@ def _flatten(axes): def _get_all_lines(ax): lines = ax.get_lines() - # check for right_ax, which can oddly sometimes point back to ax - if hasattr(ax, 'right_ax') and ax.right_ax != ax: + if hasattr(ax, 'right_ax'): lines += ax.right_ax.get_lines() - # no such risk with left_ax if hasattr(ax, 'left_ax'): lines += ax.left_ax.get_lines() diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index ed11b12871ce5..88b4117d4807c 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -3,21 +3,20 @@ """ import warnings -from datetime import datetime, time, timedelta +from datetime import datetime, timedelta from pandas import compat import numpy as np from pandas.core import common as com -from pandas.core.common import is_integer, is_float +from pandas.core.common import is_integer, is_float, AbstractMethodError import pandas.tslib as tslib import pandas.lib as lib from 
pandas.core.index import Index from pandas.util.decorators import Appender, cache_readonly -from pandas.tseries.frequencies import ( - infer_freq, to_offset, get_period_alias, - Resolution) +from pandas.tseries.frequencies import infer_freq, to_offset, Resolution import pandas.algos as _algos + class DatetimeIndexOpsMixin(object): """ common ops mixin to support a unified inteface datetimelike Index """ @@ -48,7 +47,7 @@ def _box_func(self): """ box function to get object from internal representation """ - raise NotImplementedError + raise AbstractMethodError(self) def _box_values(self, values): """ @@ -61,13 +60,13 @@ def groupby(self, f): return _algos.groupby_object(objs, f) def _format_with_header(self, header, **kwargs): - return header + self._format_native_types(**kwargs) + return header + list(self._format_native_types(**kwargs)) def __contains__(self, key): try: res = self.get_loc(key) return np.isscalar(res) or type(res) == slice or np.any(res) - except (KeyError, TypeError): + except (KeyError, TypeError, ValueError): return False @property @@ -79,6 +78,11 @@ def freqstr(self): @cache_readonly def inferred_freq(self): + """ + Trys to return a string representing a frequency guess, + generated by infer_freq. Returns None if it can't autodetect the + frequency. 
+ """ try: return infer_freq(self) except ValueError: @@ -255,35 +259,25 @@ def argmax(self, axis=None): @property def _formatter_func(self): - """ - Format function to convert value to representation - """ - return str - - def _format_footer(self): - raise NotImplementedError - - def __unicode__(self): - formatter = self._formatter_func - summary = str(self.__class__) + '\n' - - n = len(self) - if n == 0: - pass - elif n == 1: - first = formatter(self[0]) - summary += '[%s]\n' % first - elif n == 2: - first = formatter(self[0]) - last = formatter(self[-1]) - summary += '[%s, %s]\n' % (first, last) - else: - first = formatter(self[0]) - last = formatter(self[-1]) - summary += '[%s, ..., %s]\n' % (first, last) - - summary += self._format_footer() - return summary + raise AbstractMethodError(self) + + def _format_attrs(self): + """ + Return a list of tuples of the (attr,formatted_value) + """ + attrs = super(DatetimeIndexOpsMixin, self)._format_attrs() + for attrib in self._attributes: + if attrib == 'freq': + freq = self.freqstr + if freq is not None: + freq = "'%s'" % freq + attrs.append(('freq',freq)) + elif attrib == 'tz': + tz = self.tz + if tz is not None: + tz = "'%s'" % tz + attrs.append(('tz',tz)) + return attrs @cache_readonly def _resolution(self): @@ -314,10 +308,10 @@ def _convert_scalar_indexer(self, key, kind=None): return super(DatetimeIndexOpsMixin, self)._convert_scalar_indexer(key, kind=kind) def _add_datelike(self, other): - raise NotImplementedError + raise AbstractMethodError(self) def _sub_datelike(self, other): - raise NotImplementedError + raise AbstractMethodError(self) @classmethod def _add_datetimelike_methods(cls): @@ -505,4 +499,6 @@ def summary(self, name=None): if self.freq: result += '\nFreq: %s' % self.freqstr + # display as values, not quoted + result = result.replace("'","") return result diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py index 2ceece087387e..c273906ef3d05 100644 --- a/pandas/tseries/common.py +++ 
b/pandas/tseries/common.py @@ -6,7 +6,7 @@ from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex from pandas.tseries.tdi import TimedeltaIndex -from pandas import lib, tslib +from pandas import tslib from pandas.core.common import (_NS_DTYPE, _TD_DTYPE, is_period_arraylike, is_datetime_arraylike, is_integer_dtype, is_list_like, get_dtype_kinds) @@ -125,7 +125,7 @@ def to_pydatetime(self): accessors=DatetimeIndex._datetimelike_ops, typ='property') DatetimeProperties._add_delegate_accessors(delegate=DatetimeIndex, - accessors=["to_period","tz_localize","tz_convert"], + accessors=["to_period","tz_localize","tz_convert","normalize"], typ='method') class TimedeltaProperties(Properties): diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index b220e03fdb327..4af8c68110978 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -671,11 +671,11 @@ def _period_str_to_code(freqstr): def infer_freq(index, warn=True): """ Infer the most likely frequency given the input index. If the frequency is - uncertain, a warning will be printed + uncertain, a warning will be printed. Parameters ---------- - index : DatetimeIndex + index : DatetimeIndex or TimedeltaIndex if passed a Series will use the values of the series (NOT THE INDEX) warn : boolean, default True @@ -684,6 +684,7 @@ def infer_freq(index, warn=True): freq : string or None None if no discernible frequency TypeError if the index is not datetime-like + ValueError if there are less than three values. 
""" import pandas as pd @@ -742,7 +743,7 @@ def __init__(self, index, warn=True): @cache_readonly def deltas(self): return tslib.unique_deltas(self.values) - + @cache_readonly def deltas_asi8(self): return tslib.unique_deltas(self.index.asi8) @@ -750,7 +751,7 @@ def deltas_asi8(self): @cache_readonly def is_unique(self): return len(self.deltas) == 1 - + @cache_readonly def is_unique_asi8(self): return len(self.deltas_asi8) == 1 @@ -763,10 +764,13 @@ def get_freq(self): if _is_multiple(delta, _ONE_DAY): return self._infer_daily_rule() else: - # Possibly intraday frequency. Here we use the + # Business hourly, maybe. 17: one day / 65: one weekend + if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): + return 'BH' + # Possibly intraday frequency. Here we use the # original .asi8 values as the modified values # will not work around DST transitions. See #8772 - if not self.is_unique_asi8: + elif not self.is_unique_asi8: return None delta = self.deltas_asi8[0] if _is_multiple(delta, _ONE_HOUR): @@ -792,6 +796,10 @@ def get_freq(self): def day_deltas(self): return [x / _ONE_DAY for x in self.deltas] + @cache_readonly + def hour_deltas(self): + return [x / _ONE_HOUR for x in self.deltas] + @cache_readonly def fields(self): return tslib.build_field_sarray(self.values) @@ -927,7 +935,9 @@ def _get_wom_rule(self): return None week_of_months = unique((self.index.day - 1) // 7) - if len(week_of_months) > 1: + # Only attempt to infer up to WOM-4. 
See #9425 + week_of_months = week_of_months[week_of_months < 4] + if len(week_of_months) == 0 or len(week_of_months) > 1: return None # get which week @@ -989,7 +999,7 @@ def is_subperiod(source, target): return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_quarterly(target): return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] - elif target == 'M': + elif _is_monthly(target): return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_weekly(target): return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] @@ -1048,7 +1058,7 @@ def is_superperiod(source, target): return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_quarterly(source): return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N'] - elif source == 'M': + elif _is_monthly(source): return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] elif _is_weekly(source): return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N'] @@ -1093,7 +1103,12 @@ def _quarter_months_conform(source, target): def _is_quarterly(rule): rule = rule.upper() - return rule == 'Q' or rule.startswith('Q-') + return rule == 'Q' or rule.startswith('Q-') or rule.startswith('BQ') + + +def _is_monthly(rule): + rule = rule.upper() + return rule == 'M' or rule == 'BM' def _is_weekly(rule): diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 3b3542b760d6f..f55569302ca05 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -148,6 +148,9 @@ class from pandas.tseries.offsets >>> July3rd = Holiday('July 3rd', month=7, day=3, days_of_week=(0, 1, 2, 3)) """ + if offset is not None and observance is not None: + raise NotImplementedError("Cannot use both offset and observance.") + self.name = name self.year = year self.month = month @@ -203,7 +206,10 @@ def dates(self, start_date, end_date, return_name=False): end_date = Timestamp(end_date) year_offset = DateOffset(years=1) - base_date = 
Timestamp(datetime(start_date.year, self.month, self.day)) + base_date = Timestamp( + datetime(start_date.year, self.month, self.day), + tz=start_date.tz, + ) dates = DatetimeIndex(start=base_date, end=end_date, freq=year_offset) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: @@ -276,7 +282,7 @@ class AbstractHolidayCalendar(object): rules = [] start_date = Timestamp(datetime(1970, 1, 1)) end_date = Timestamp(datetime(2030, 12, 31)) - _holiday_cache = None + _cache = None def __init__(self, name=None, rules=None): """ @@ -348,14 +354,6 @@ def holidays(self, start=None, end=None, return_name=False): else: return holidays.index - @property - def _cache(self): - return self.__class__._holiday_cache - - @_cache.setter - def _cache(self, values): - self.__class__._holiday_cache = values - @staticmethod def merge_class(base, other): """ diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index ca5119acc8b99..745c536914e47 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1,13 +1,8 @@ # pylint: disable=E1101 import operator - from datetime import time, datetime from datetime import timedelta - import numpy as np - -import warnings - from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE, _values_from_object, _maybe_box, ABCSeries, is_integer, is_float) @@ -597,7 +592,7 @@ def _is_dates_only(self): def _formatter_func(self): from pandas.core.format import _get_format_datetime64 formatter = _get_format_datetime64(is_dates_only=self._is_dates_only) - return lambda x: formatter(x, tz=self.tz) + return lambda x: "'%s'" % formatter(x, tz=self.tz) def __reduce__(self): @@ -658,14 +653,18 @@ def _sub_datelike(self, other): def _add_delta(self, delta): from pandas import TimedeltaIndex + name = self.name + if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) elif isinstance(delta, TimedeltaIndex): new_values = self._add_delta_tdi(delta) + # update name when delta is Index + name 
= com._maybe_match_name(self, delta) else: new_values = self.astype('O') + delta tz = 'UTC' if self.tz is not None else None - result = DatetimeIndex(new_values, tz=tz, freq='infer') + result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer') utc = _utc() if self.tz is not None and self.tz is not utc: result = result.tz_convert(self.tz) @@ -673,20 +672,17 @@ def _add_delta(self, delta): def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs): - data = self.asobject - from pandas.core.format import Datetime64Formatter - return Datetime64Formatter(values=data, - nat_rep=na_rep, - date_format=date_format, - justify='all').get_result() + from pandas.core.format import _get_format_datetime64_from_values + format = _get_format_datetime64_from_values(self, date_format) + + return tslib.format_array_from_datetime(self.asi8, + tz=self.tz, + format=format, + na_rep=na_rep) def to_datetime(self, dayfirst=False): return self.copy() - def _format_footer(self): - tagline = 'Length: %d, Freq: %s, Timezone: %s' - return tagline % (len(self), self.freqstr, self.tz) - def astype(self, dtype): dtype = np.dtype(dtype) @@ -808,6 +804,7 @@ def union(self, other): ------- y : Index or DatetimeIndex """ + self._assert_can_do_setop(other) if not isinstance(other, DatetimeIndex): try: other = DatetimeIndex(other) @@ -1043,6 +1040,7 @@ def intersection(self, other): ------- y : Index or DatetimeIndex """ + self._assert_can_do_setop(other) if not isinstance(other, DatetimeIndex): try: other = DatetimeIndex(other) @@ -1589,6 +1587,11 @@ def tz_convert(self, tz): Returns ------- normalized : DatetimeIndex + + Raises + ------ + TypeError + If DatetimeIndex is tz-naive. """ tz = tslib.maybe_get_tz(tz) @@ -1625,6 +1628,11 @@ def tz_localize(self, tz, ambiguous='raise'): Returns ------- localized : DatetimeIndex + + Raises + ------ + TypeError + If the DatetimeIndex is tz-aware and tz is not None. 
""" if self.tz is not None: if tz is None: @@ -1655,14 +1663,15 @@ def indexer_at_time(self, time, asof=False): from dateutil.parser import parse if asof: - raise NotImplementedError + raise NotImplementedError("'asof' argument is not supported") if isinstance(time, compat.string_types): time = parse(time).time() if time.tzinfo: # TODO - raise NotImplementedError + raise NotImplementedError("argument 'time' with timezone info is " + "not supported") time_micros = self._get_time_micros() micros = _time_to_micros(time) @@ -1694,7 +1703,8 @@ def indexer_between_time(self, start_time, end_time, include_start=True, end_time = parse(end_time).time() if start_time.tzinfo or end_time.tzinfo: - raise NotImplementedError + raise NotImplementedError("argument 'time' with timezone info is " + "not supported") time_micros = self._get_time_micros() start_micros = _time_to_micros(start_time) @@ -1773,7 +1783,8 @@ def _generate_regular_range(start, end, periods, offset): b = e - np.int64(periods) * stride tz = end.tz else: - raise NotImplementedError + raise ValueError("at least 'start' or 'end' should be specified " + "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) data = DatetimeIndex._simple_new(data, None, tz=tz) diff --git a/pandas/tseries/interval.py b/pandas/tseries/interval.py index 104e088ee4e84..bcce64c3a71bf 100644 --- a/pandas/tseries/interval.py +++ b/pandas/tseries/interval.py @@ -1,4 +1,3 @@ -import numpy as np from pandas.core.index import Index diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index cb6bd2fb2b250..67e27bbffbf73 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -16,6 +16,7 @@ __all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay', 'CBMonthEnd','CBMonthBegin', 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd', + 'BusinessHour', 'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd', 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd', 'LastWeekOfMonth', 
'FY5253Quarter', 'FY5253', @@ -404,10 +405,6 @@ def __repr__(self): if hasattr(self, '_named'): return self._named className = getattr(self, '_outputName', self.__class__.__name__) - attrs = [] - - if self.offset: - attrs = ['offset=%s' % repr(self.offset)] if abs(self.n) != 1: plural = 's' @@ -418,10 +415,17 @@ def __repr__(self): if self.n != 1: n_str = "%s * " % self.n - out = '<%s' % n_str + className + plural + out = '<%s' % n_str + className + plural + self._repr_attrs() + '>' + return out + + def _repr_attrs(self): + if self.offset: + attrs = ['offset=%s' % repr(self.offset)] + else: + attrs = None + out = '' if attrs: out += ': ' + ', '.join(attrs) - out += '>' return out class BusinessDay(BusinessMixin, SingleConstructorOffset): @@ -531,6 +535,234 @@ def onOffset(self, dt): return dt.weekday() < 5 +class BusinessHour(BusinessMixin, SingleConstructorOffset): + """ + DateOffset subclass representing possibly n business days + """ + _prefix = 'BH' + _anchor = 0 + + def __init__(self, n=1, normalize=False, **kwds): + self.n = int(n) + self.normalize = normalize + + # must be validated here to equality check + kwds['start'] = self._validate_time(kwds.get('start', '09:00')) + kwds['end'] = self._validate_time(kwds.get('end', '17:00')) + self.kwds = kwds + self.offset = kwds.get('offset', timedelta(0)) + self.start = kwds.get('start', '09:00') + self.end = kwds.get('end', '17:00') + + # used for moving to next businessday + if self.n >= 0: + self.next_bday = BusinessDay(n=1) + else: + self.next_bday = BusinessDay(n=-1) + + def _validate_time(self, t_input): + from datetime import time as dt_time + import time + if isinstance(t_input, compat.string_types): + try: + t = time.strptime(t_input, '%H:%M') + return dt_time(hour=t.tm_hour, minute=t.tm_min) + except ValueError: + raise ValueError("time data must match '%H:%M' format") + elif isinstance(t_input, dt_time): + if t_input.second != 0 or t_input.microsecond != 0: + raise ValueError("time data must be specified 
only with hour and minute") + return t_input + else: + raise ValueError("time data must be string or datetime.time") + + def _get_daytime_flag(self): + if self.start == self.end: + raise ValueError('start and end must not be the same') + elif self.start < self.end: + return True + else: + return False + + def _repr_attrs(self): + out = super(BusinessHour, self)._repr_attrs() + attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'), + self.end.strftime('%H:%M'))] + out += ': ' + ', '.join(attrs) + return out + + def _next_opening_time(self, other): + """ + If n is positive, return tomorrow's business day opening time. + Otherwise yesterday's business day's opening time. + + Opening time always locates on BusinessDay. + Otherwise, closing time may not if business hour extends over midnight. + """ + if not self.next_bday.onOffset(other): + other = other + self.next_bday + else: + if self.n >= 0 and self.start < other.time(): + other = other + self.next_bday + elif self.n < 0 and other.time() < self.start: + other = other + self.next_bday + return datetime(other.year, other.month, other.day, + self.start.hour, self.start.minute) + + def _prev_opening_time(self, other): + """ + If n is positive, return yesterday's business day opening time. + Otherwise yesterday business day's opening time. + """ + if not self.next_bday.onOffset(other): + other = other - self.next_bday + else: + if self.n >= 0 and other.time() < self.start: + other = other - self.next_bday + elif self.n < 0 and other.time() > self.start: + other = other - self.next_bday + return datetime(other.year, other.month, other.day, + self.start.hour, self.start.minute) + + def _get_business_hours_by_sec(self): + """ + Return business hours in a day by seconds. 
+ """ + if self._get_daytime_flag(): + # create dummy datetime to calcurate businesshours in a day + dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute) + until = datetime(2014, 4, 1, self.end.hour, self.end.minute) + return tslib.tot_seconds(until - dtstart) + else: + self.daytime = False + dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute) + until = datetime(2014, 4, 2, self.end.hour, self.end.minute) + return tslib.tot_seconds(until - dtstart) + + @apply_wraps + def rollback(self, dt): + """Roll provided date backward to next offset only if not on offset""" + if not self.onOffset(dt): + businesshours = self._get_business_hours_by_sec() + if self.n >= 0: + dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours) + else: + dt = self._next_opening_time(dt) + timedelta(seconds=businesshours) + return dt + + @apply_wraps + def rollforward(self, dt): + """Roll provided date forward to next offset only if not on offset""" + if not self.onOffset(dt): + if self.n >= 0: + return self._next_opening_time(dt) + else: + return self._prev_opening_time(dt) + return dt + + @apply_wraps + def apply(self, other): + # calcurate here because offset is not immutable + daytime = self._get_daytime_flag() + businesshours = self._get_business_hours_by_sec() + bhdelta = timedelta(seconds=businesshours) + + if isinstance(other, datetime): + # used for detecting edge condition + nanosecond = getattr(other, 'nanosecond', 0) + # reset timezone and nanosecond + # other may be a Timestamp, thus not use replace + other = datetime(other.year, other.month, other.day, + other.hour, other.minute, + other.second, other.microsecond) + n = self.n + if n >= 0: + if (other.time() == self.end or + not self._onOffset(other, businesshours)): + other = self._next_opening_time(other) + else: + if other.time() == self.start: + # adjustment to move to previous business day + other = other - timedelta(seconds=1) + if not self._onOffset(other, businesshours): + other = 
self._next_opening_time(other) + other = other + bhdelta + + bd, r = divmod(abs(n * 60), businesshours // 60) + if n < 0: + bd, r = -bd, -r + + if bd != 0: + skip_bd = BusinessDay(n=bd) + # midnight busienss hour may not on BusinessDay + if not self.next_bday.onOffset(other): + remain = other - self._prev_opening_time(other) + other = self._next_opening_time(other + skip_bd) + remain + else: + other = other + skip_bd + + hours, minutes = divmod(r, 60) + result = other + timedelta(hours=hours, minutes=minutes) + + # because of previous adjustment, time will be larger than start + if ((daytime and (result.time() < self.start or self.end < result.time())) or + not daytime and (self.end < result.time() < self.start)): + if n >= 0: + bday_edge = self._prev_opening_time(other) + bday_edge = bday_edge + bhdelta + # calcurate remainder + bday_remain = result - bday_edge + result = self._next_opening_time(other) + result += bday_remain + else: + bday_edge = self._next_opening_time(other) + bday_remain = result - bday_edge + result = self._next_opening_time(result) + bhdelta + result += bday_remain + # edge handling + if n >= 0: + if result.time() == self.end: + result = self._next_opening_time(result) + else: + if result.time() == self.start and nanosecond == 0: + # adjustment to move to previous business day + result = self._next_opening_time(result- timedelta(seconds=1)) +bhdelta + + return result + else: + raise ApplyTypeError('Only know how to combine business hour with ') + + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False + + if dt.tzinfo is not None: + dt = datetime(dt.year, dt.month, dt.day, dt.hour, + dt.minute, dt.second, dt.microsecond) + # Valid BH can be on the different BusinessDay during midnight + # Distinguish by the time spent from previous opening time + businesshours = self._get_business_hours_by_sec() + return self._onOffset(dt, businesshours) + + def _onOffset(self, dt, businesshours): + """ + Slight speedups 
using calcurated values + """ + # if self.normalize and not _is_normalized(dt): + # return False + # Valid BH can be on the different BusinessDay during midnight + # Distinguish by the time spent from previous opening time + if self.n >= 0: + op = self._prev_opening_time(dt) + else: + op = self._next_opening_time(dt) + span = tslib.tot_seconds(dt - op) + if span <= businesshours: + return True + else: + return False + + class CustomBusinessDay(BusinessDay): """ **EXPERIMENTAL** DateOffset subclass representing possibly n business days @@ -2250,6 +2482,7 @@ def generate_range(start=None, end=None, periods=None, BusinessMonthEnd, # 'BM' BQuarterEnd, # 'BQ' BQuarterBegin, # 'BQS' + BusinessHour, # 'BH' CustomBusinessDay, # 'C' CustomBusinessMonthEnd, # 'CBM' CustomBusinessMonthBegin, # 'CBMS' diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index b1f0ba1f127fa..6627047f0c335 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -1,10 +1,6 @@ # pylint: disable=E1101,E1103,W0232 -import operator - -from datetime import datetime, date, timedelta +from datetime import datetime, timedelta import numpy as np -from pandas.core.base import PandasObject - import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc from pandas.tseries.index import DatetimeIndex, Int64Index, Index @@ -114,20 +110,20 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index): Parameters ---------- - data : array-like (1-dimensional), optional + data : array-like (1-dimensional), optional Optional period-like data to construct index with dtype : NumPy dtype (default: i8) - copy : bool + copy : bool Make a copy of input ndarray freq : string or period object, optional One of pandas period strings or corresponding objects start : starting value, period-like, optional If data is None, used as the start point in generating regular period data. 
- periods : int, optional, > 0 + periods : int, optional, > 0 Number of periods to generate, if generating index. Takes precedence over end argument - end : end value, period-like, optional + end : end value, period-like, optional If periods is none, generated index will extend to first conforming period on or just past end argument year : int, array, or Series, default None @@ -293,6 +289,10 @@ def _to_embed(self, keep_tz=False): """ return an array repr of this object, potentially casting to object """ return self.asobject.values + @property + def _formatter_func(self): + return lambda x: "'%s'" % x + def asof_locs(self, where, mask): """ where : array of timestamps @@ -355,6 +355,44 @@ def freqstr(self): return self.freq def asfreq(self, freq=None, how='E'): + """ + Convert the PeriodIndex to the specified frequency `freq`. + + Parameters + ---------- + + freq : str + a frequency + how : str {'E', 'S'} + 'E', 'END', or 'FINISH' for end, + 'S', 'START', or 'BEGIN' for start. + Whether the elements should be aligned to the end + or start within pa period. January 31st ('END') vs. + Janury 1st ('START') for example. 
+ + Returns + ------- + + new : PeriodIndex with the new frequency + + Examples + -------- + >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') + >>> pidx + <class 'pandas.tseries.period.PeriodIndex'> + [2010, ..., 2015] + Length: 6, Freq: A-DEC + + >>> pidx.asfreq('M') + <class 'pandas.tseries.period.PeriodIndex'> + [2010-12, ..., 2015-12] + Length: 6, Freq: M + + >>> pidx.asfreq('M', how='S') + <class 'pandas.tseries.period.PeriodIndex'> + [2010-01, ..., 2015-01] + Length: 6, Freq: M + """ how = _validate_end_alias(how) freq = frequencies.get_standard_freq(freq) @@ -387,7 +425,7 @@ def to_datetime(self, dayfirst=False): qyear = _field_accessor('qyear', 1) days_in_month = _field_accessor('days_in_month', 11, "The number of days in the month") daysinmonth = days_in_month - + def _get_object_array(self): freq = self.freq return np.array([ Period._from_ordinal(ordinal=x, freq=freq) for x in self.values], copy=False) @@ -463,7 +501,6 @@ def shift(self, n): ---------- n : int Periods to shift by - freq : freq string Returns ------- @@ -642,6 +679,8 @@ def join(self, other, how='left', level=None, return_indexers=False): return self._apply_meta(result) def _assert_can_do_setop(self, other): + super(PeriodIndex, self)._assert_can_do_setop(other) + if not isinstance(other, PeriodIndex): raise ValueError('can only call with other PeriodIndex-ed objects') @@ -687,7 +726,7 @@ def _format_native_types(self, na_rep=u('NaT'), **kwargs): imask = ~mask values[imask] = np.array([u('%s') % dt for dt in values[imask]]) - return values.tolist() + return values def __array_finalize__(self, obj): if not self.ndim: # pragma: no cover @@ -697,10 +736,6 @@ def __array_finalize__(self, obj): self.name = getattr(obj, 'name', None) self._reset_identity() - def _format_footer(self): - tagline = 'Length: %d, Freq: %s' - return tagline % (len(self), self.freqstr) - def take(self, indices, axis=None): """ Analogous to ndarray.take @@ -936,8 +971,8 @@ def period_range(start=None, 
end=None, periods=None, freq='D', name=None): Parameters ---------- - start : - end : + start : starting value, period-like, optional + end : ending value, period-like, optional periods : int, default None Number of periods in the index freq : str/DateOffset, default 'D' diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index 899d2bfdc9c76..9d28fa11f646f 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -5,17 +5,13 @@ #!!! TODO: Use the fact that axis can have units to simplify the process from matplotlib import pylab - -import numpy as np - -from pandas import isnull from pandas.tseries.period import Period from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies from pandas.tseries.index import DatetimeIndex import pandas.core.common as com -from pandas.tseries.converter import (PeriodConverter, TimeSeries_DateLocator, +from pandas.tseries.converter import (TimeSeries_DateLocator, TimeSeries_DateFormatter) #---------------------------------------------------------------------- diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py index 7607bef0f1d71..53c1292204f71 100644 --- a/pandas/tseries/resample.py +++ b/pandas/tseries/resample.py @@ -1,14 +1,11 @@ from datetime import timedelta - import numpy as np - from pandas.core.groupby import BinGrouper, Grouper from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod from pandas.tseries.index import DatetimeIndex, date_range from pandas.tseries.tdi import TimedeltaIndex from pandas.tseries.offsets import DateOffset, Tick, Day, _delta_to_nanoseconds from pandas.tseries.period import PeriodIndex, period_range -import pandas.tseries.tools as tools import pandas.core.common as com import pandas.compat as compat @@ -373,11 +370,11 @@ def _take_new_index(obj, indexer, new_index, axis=0): return Series(new_values, index=new_index, name=obj.name) elif isinstance(obj, DataFrame): if axis == 1: - raise 
NotImplementedError + raise NotImplementedError("axis 1 is not supported") return DataFrame(obj._data.reindex_indexer( new_axis=new_index, indexer=indexer, axis=1)) else: - raise NotImplementedError + raise ValueError("'obj' should be either a Series or a DataFrame") def _get_range_edges(first, last, offset, closed='left', base=0): @@ -467,7 +464,7 @@ def asfreq(obj, freq, method=None, how=None, normalize=False): """ if isinstance(obj.index, PeriodIndex): if method is not None: - raise NotImplementedError + raise NotImplementedError("'method' argument is not supported") if how is None: how = 'E' @@ -480,6 +477,7 @@ def asfreq(obj, freq, method=None, how=None, normalize=False): if len(obj.index) == 0: return obj.copy() dti = date_range(obj.index[0], obj.index[-1], freq=freq) + dti.name = obj.index.name rs = obj.reindex(dti, method=method) if normalize: rs.index = rs.index.normalize() diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index e01ff54feab57..de68dd763d68c 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -1,17 +1,13 @@ """ implement the TimedeltaIndex """ -import operator -import datetime from datetime import timedelta import numpy as np - from pandas.core.common import (ABCSeries, _TD_DTYPE, _INT64_DTYPE, is_timedelta64_dtype, _maybe_box, _values_from_object, isnull, is_integer, is_float) from pandas.core.index import Index, Int64Index import pandas.compat as compat from pandas.compat import u -from pandas.core.base import PandasObject from pandas.util.decorators import cache_readonly from pandas.tseries.frequencies import to_offset import pandas.core.common as com @@ -140,7 +136,7 @@ def __new__(cls, data=None, unit=None, copy=False, name=None, closed=None, verify_integrity=True, **kwargs): - if isinstance(data, TimedeltaIndex) and freq is None: + if isinstance(data, TimedeltaIndex) and freq is None and name is None: if copy: data = data.copy() return data @@ -274,10 +270,6 @@ def _formatter_func(self): from pandas.core.format 
import _get_format_timedelta64 return _get_format_timedelta64(self, box=True) - def _format_footer(self): - tagline = 'Length: %d, Freq: %s' - return tagline % (len(self), self.freqstr) - def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): @@ -289,12 +281,15 @@ def __setstate__(self, state): def _add_delta(self, delta): if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) + name = self.name elif isinstance(delta, TimedeltaIndex): new_values = self._add_delta_tdi(delta) + # update name when delta is index + name = com._maybe_match_name(self, delta) else: raise ValueError("cannot add the type {0} to a TimedeltaIndex".format(type(delta))) - result = TimedeltaIndex(new_values, freq='infer') + result = TimedeltaIndex(new_values, freq='infer', name=name) return result def _evaluate_with_timedelta_like(self, other, op, opstr): @@ -441,12 +436,12 @@ def union(self, other): ------- y : Index or TimedeltaIndex """ - if _is_convertible_to_index(other): + self._assert_can_do_setop(other) + if not isinstance(other, TimedeltaIndex): try: other = TimedeltaIndex(other) - except TypeError: + except (TypeError, ValueError): pass - this, other = self, other if this._can_fast_union(other): @@ -586,6 +581,7 @@ def intersection(self, other): ------- y : Index or TimedeltaIndex """ + self._assert_can_do_setop(other) if not isinstance(other, TimedeltaIndex): try: other = TimedeltaIndex(other) @@ -927,7 +923,8 @@ def _generate_regular_range(start, end, periods, offset): e = Timedelta(end).value + stride b = e - periods * stride else: - raise NotImplementedError + raise ValueError("at least 'start' or 'end' should be specified " + "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) data = TimedeltaIndex._simple_new(data, None) diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py index c42802bdb31ad..55482401a20f4 100644 --- 
a/pandas/tseries/tests/test_base.py +++ b/pandas/tseries/tests/test_base.py @@ -119,29 +119,24 @@ def test_representation(self): idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern') - exp1 = """<class 'pandas.tseries.index.DatetimeIndex'> -Length: 0, Freq: D, Timezone: None""" - exp2 = """<class 'pandas.tseries.index.DatetimeIndex'> -[2011-01-01] -Length: 1, Freq: D, Timezone: None""" - exp3 = """<class 'pandas.tseries.index.DatetimeIndex'> -[2011-01-01, 2011-01-02] -Length: 2, Freq: D, Timezone: None""" - exp4 = """<class 'pandas.tseries.index.DatetimeIndex'> -[2011-01-01, ..., 2011-01-03] -Length: 3, Freq: D, Timezone: None""" - exp5 = """<class 'pandas.tseries.index.DatetimeIndex'> -[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00] -Length: 3, Freq: H, Timezone: Asia/Tokyo""" - exp6 = """<class 'pandas.tseries.index.DatetimeIndex'> -[2011-01-01 09:00:00-05:00, ..., NaT] -Length: 3, Freq: None, Timezone: US/Eastern""" + exp1 = """DatetimeIndex([], dtype='datetime64[ns]', freq='D', tz=None)""" - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], - [exp1, exp2, exp3, exp4, exp5, exp6]): - for func in ['__repr__', '__unicode__', '__str__']: - result = getattr(idx, func)() - self.assertEqual(result, expected) + exp2 = """DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D', tz=None)""" + + exp3 = """DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D', tz=None)""" + + exp4 = """DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D', tz=None)""" + + exp5 = """DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns]', freq='H', tz='Asia/Tokyo')""" + + exp6 = """DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns]', freq=None, tz='US/Eastern')""" + + with pd.option_context('display.width', 300): + for idx, expected in zip([idx1, 
idx2, idx3, idx4, idx5, idx6], + [exp1, exp2, exp3, exp4, exp5, exp6]): + for func in ['__repr__', '__unicode__', '__str__']: + result = getattr(idx, func)() + self.assertEqual(result, expected) def test_summary(self): # GH9116 @@ -372,27 +367,22 @@ def test_representation(self): idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) + exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" - exp1 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> -Length: 0, Freq: D""" - exp2 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> -['1 days'] -Length: 1, Freq: D""" - exp3 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> -['1 days', '2 days'] -Length: 2, Freq: D""" - exp4 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> -['1 days', ..., '3 days'] -Length: 3, Freq: D""" - exp5 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> -['1 days 00:00:01', ..., '3 days 00:00:00'] -Length: 3, Freq: None""" + exp2 = """TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')""" - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], - [exp1, exp2, exp3, exp4, exp5]): - for func in ['__repr__', '__unicode__', '__str__']: - result = getattr(idx, func)() - self.assertEqual(result, expected) + exp3 = """TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')""" + + exp4 = """TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq='D')""" + + exp5 = """TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', '3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)""" + + with pd.option_context('display.width',300): + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], + [exp1, exp2, exp3, exp4, exp5]): + for func in ['__repr__', '__unicode__', '__str__']: + result = getattr(idx, func)() + self.assertEqual(result, expected) def test_summary(self): # GH9116 @@ -404,13 +394,13 @@ def test_summary(self): exp1 = """TimedeltaIndex: 0 entries Freq: D""" - exp2 = 
"""TimedeltaIndex: 1 entries, '1 days' to '1 days' + exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days Freq: D""" - exp3 = """TimedeltaIndex: 2 entries, '1 days' to '2 days' + exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days Freq: D""" - exp4 = """TimedeltaIndex: 3 entries, '1 days' to '3 days' + exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days Freq: D""" - exp5 = """TimedeltaIndex: 3 entries, '1 days 00:00:01' to '3 days 00:00:00'""" + exp5 = """TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): @@ -483,8 +473,8 @@ def test_ops_compat(self): tm.assert_index_equal(result,expected) # divide with nats - rng = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo') - expected = Float64Index([12,np.nan,24]) + rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') + expected = Float64Index([12, np.nan, 24], name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result,expected) @@ -495,8 +485,8 @@ def test_ops_compat(self): def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti - tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo') - dti = date_range('20130101',periods=3) + tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') + dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') @@ -505,29 +495,29 @@ def test_subtraction_ops(self): self.assertRaises(TypeError, lambda : td - dt) self.assertRaises(TypeError, lambda : td - dti) - result = dt-dti - expected = TimedeltaIndex(['0 days','-1 days','-2 days']) - tm.assert_index_equal(result,expected) + result = dt - dti + expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar') + tm.assert_index_equal(result, expected) - result = dti-dt - expected = TimedeltaIndex(['0 days','1 days','2 days']) - tm.assert_index_equal(result,expected) + result = dti - dt + expected = TimedeltaIndex(['0 
days', '1 days', '2 days'], name='bar') + tm.assert_index_equal(result, expected) - result = tdi-td - expected = TimedeltaIndex(['0 days',pd.NaT,'1 days']) - tm.assert_index_equal(result,expected) + result = tdi - td + expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo') + tm.assert_index_equal(result, expected, check_names=False) - result = td-tdi - expected = TimedeltaIndex(['0 days',pd.NaT,'-1 days']) - tm.assert_index_equal(result,expected) + result = td - tdi + expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo') + tm.assert_index_equal(result, expected, check_names=False) - result = dti-td - expected = DatetimeIndex(['20121231','20130101','20130102']) - tm.assert_index_equal(result,expected) + result = dti - td + expected = DatetimeIndex(['20121231', '20130101', '20130102'], name='bar') + tm.assert_index_equal(result, expected, check_names=False) - result = dt-tdi - expected = DatetimeIndex(['20121231',pd.NaT,'20121230']) - tm.assert_index_equal(result,expected) + result = dt - tdi + expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo') + tm.assert_index_equal(result, expected) def test_subtraction_ops_with_tz(self): @@ -644,46 +634,46 @@ def test_dti_dti_deprecated_ops(self): def test_dti_tdi_numeric_ops(self): # These are normally union/diff set-like ops - tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo') - dti = date_range('20130101',periods=3) + tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') + dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') - result = tdi-tdi - expected = TimedeltaIndex(['0 days',pd.NaT,'0 days']) - tm.assert_index_equal(result,expected) + result = tdi - tdi + expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo') + tm.assert_index_equal(result, expected) - result = tdi+tdi - expected = TimedeltaIndex(['2 days',pd.NaT,'4 days']) - tm.assert_index_equal(result,expected) + result = tdi + tdi + 
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo') + tm.assert_index_equal(result, expected) - result = dti-tdi - expected = DatetimeIndex(['20121231',pd.NaT,'20130101']) - tm.assert_index_equal(result,expected) + result = dti - tdi # name will be reset + expected = DatetimeIndex(['20121231', pd.NaT, '20130101']) + tm.assert_index_equal(result, expected) def test_addition_ops(self): # with datetimes/timedelta and tdi/dti - tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo') - dti = date_range('20130101',periods=3) + tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') + dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') result = tdi + dt - expected = DatetimeIndex(['20130102',pd.NaT,'20130103']) - tm.assert_index_equal(result,expected) + expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') + tm.assert_index_equal(result, expected) result = dt + tdi - expected = DatetimeIndex(['20130102',pd.NaT,'20130103']) - tm.assert_index_equal(result,expected) + expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') + tm.assert_index_equal(result, expected) result = td + tdi - expected = TimedeltaIndex(['2 days',pd.NaT,'3 days']) - tm.assert_index_equal(result,expected) + expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') + tm.assert_index_equal(result, expected) result = tdi + td - expected = TimedeltaIndex(['2 days',pd.NaT,'3 days']) - tm.assert_index_equal(result,expected) + expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') + tm.assert_index_equal(result, expected) # unequal length self.assertRaises(ValueError, lambda : tdi + dti[0:1]) @@ -695,21 +685,21 @@ def test_addition_ops(self): # this is a union! 
#self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi) - result = tdi + dti - expected = DatetimeIndex(['20130102',pd.NaT,'20130105']) - tm.assert_index_equal(result,expected) + result = tdi + dti # name will be reset + expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) + tm.assert_index_equal(result, expected) - result = dti + tdi - expected = DatetimeIndex(['20130102',pd.NaT,'20130105']) - tm.assert_index_equal(result,expected) + result = dti + tdi # name will be reset + expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) + tm.assert_index_equal(result, expected) result = dt + td expected = Timestamp('20130102') - self.assertEqual(result,expected) + self.assertEqual(result, expected) result = td + dt expected = Timestamp('20130102') - self.assertEqual(result,expected) + self.assertEqual(result, expected) def test_value_counts_unique(self): # GH 7735 @@ -745,6 +735,13 @@ def test_nonunique_contains(self): ['00:01:00', '00:01:00', '00:00:01'])): tm.assertIn(idx[0], idx) + def test_unknown_attribute(self): + #GH 9680 + tdi = pd.timedelta_range(start=0,periods=10,freq='1s') + ts = pd.Series(np.random.normal(size=10),index=tdi) + self.assertNotIn('foo',ts.__dict__.keys()) + self.assertRaises(AttributeError,lambda : ts.foo) + class TestPeriodIndexOps(Ops): @@ -835,32 +832,23 @@ def test_representation(self): idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', periods=3, freq="Q") - exp1 = """<class 'pandas.tseries.period.PeriodIndex'> -Length: 0, Freq: D""" - exp2 = """<class 'pandas.tseries.period.PeriodIndex'> -[2011-01-01] -Length: 1, Freq: D""" - exp3 = """<class 'pandas.tseries.period.PeriodIndex'> -[2011-01-01, 2011-01-02] -Length: 2, Freq: D""" - exp4 = """<class 'pandas.tseries.period.PeriodIndex'> -[2011-01-01, ..., 2011-01-03] -Length: 3, Freq: D""" - exp5 = """<class 'pandas.tseries.period.PeriodIndex'> -[2011, ..., 2013] -Length: 3, Freq: A-DEC""" - exp6 = """<class 
'pandas.tseries.period.PeriodIndex'> -[2011-01-01 09:00, ..., NaT] -Length: 3, Freq: H""" - exp7 = """<class 'pandas.tseries.period.PeriodIndex'> -[2013Q1] -Length: 1, Freq: Q-DEC""" - exp8 = """<class 'pandas.tseries.period.PeriodIndex'> -[2013Q1, 2013Q2] -Length: 2, Freq: Q-DEC""" - exp9 = """<class 'pandas.tseries.period.PeriodIndex'> -[2013Q1, ..., 2013Q3] -Length: 3, Freq: Q-DEC""" + exp1 = """PeriodIndex([], dtype='int64', freq='D')""" + + exp2 = """PeriodIndex(['2011-01-01'], dtype='int64', freq='D')""" + + exp3 = """PeriodIndex(['2011-01-01', '2011-01-02'], dtype='int64', freq='D')""" + + exp4 = """PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='int64', freq='D')""" + + exp5 = """PeriodIndex(['2011', '2012', '2013'], dtype='int64', freq='A-DEC')""" + + exp6 = """PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], dtype='int64', freq='H')""" + + exp7 = """PeriodIndex(['2013Q1'], dtype='int64', freq='Q-DEC')""" + + exp8 = """PeriodIndex(['2013Q1', '2013Q2'], dtype='int64', freq='Q-DEC')""" + + exp9 = """PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='int64', freq='Q-DEC')""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 841d81c15b4e9..69b1d84670d45 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -441,7 +441,7 @@ def test_month_range_union_tz_pytz(self): def test_month_range_union_tz_dateutil(self): _skip_if_windows_python_3() tm._skip_if_no_dateutil() - from dateutil.zoneinfo import gettz as timezone + from pandas.tslib import _dateutil_gettz as timezone tz = timezone('US/Eastern') early_start = datetime(2011, 1, 1) diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index 965c198eb7c95..823c762c692e5 100644 --- 
a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -196,6 +196,7 @@ def _check_tick(self, base_delta, code): index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta * 7]) + self.assertIsNone(frequencies.infer_freq(index)) def test_weekly(self): @@ -211,6 +212,16 @@ def test_week_of_month(self): for i in range(1, 5): self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day)) + def test_fifth_week_of_month(self): + # Only supports freq up to WOM-4. See #9425 + func = lambda: date_range('2014-01-01', freq='WOM-5MON') + self.assertRaises(ValueError, func) + + def test_fifth_week_of_month_infer(self): + # Only attempts to infer up to WOM-4. See #9425 + index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"]) + assert frequencies.infer_freq(index) is None + def test_week_of_month_fake(self): #All of these dates are on same day of week and are 4 or 5 weeks apart index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"]) @@ -324,10 +335,40 @@ def test_infer_freq_tz_transition(self): idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz) print(idx) self.assertEqual(idx.inferred_freq, freq) - + index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago") self.assertIsNone(index.inferred_freq) + def test_infer_freq_businesshour(self): + # GH 7905 + idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00', + '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00']) + # hourly freq in a day must result in 'H' + self.assertEqual(idx.inferred_freq, 'H') + + idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00', + '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00', + '2014-07-01 15:00', '2014-07-01 16:00', + '2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00']) + self.assertEqual(idx.inferred_freq, 'BH') + + idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', + 
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', + '2014-07-04 15:00', '2014-07-04 16:00', + '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00']) + self.assertEqual(idx.inferred_freq, 'BH') + + idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', + '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', + '2014-07-04 15:00', '2014-07-04 16:00', + '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00', + '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00', + '2014-07-07 15:00', '2014-07-07 16:00', + '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00', + '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00', + '2014-07-08 15:00', '2014-07-08 16:00']) + self.assertEqual(idx.inferred_freq, 'BH') + def test_not_monotonic(self): rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) rng = rng[::-1] diff --git a/pandas/tseries/tests/test_holiday.py b/pandas/tseries/tests/test_holiday.py index c2300481eca43..7d233ba78e7b6 100644 --- a/pandas/tseries/tests/test_holiday.py +++ b/pandas/tseries/tests/test_holiday.py @@ -1,6 +1,7 @@ from datetime import datetime import pandas.util.testing as tm +from pandas import DatetimeIndex from pandas.tseries.holiday import ( USFederalHolidayCalendar, USMemorialDay, USThanksgivingDay, nearest_workday, next_monday_or_tuesday, next_monday, @@ -9,6 +10,7 @@ HolidayCalendarFactory, next_workday, previous_workday, before_nearest_workday, EasterMonday, GoodFriday, after_nearest_workday, weekend_to_monday) +from pytz import utc import nose class TestCalendar(tm.TestCase): @@ -49,93 +51,148 @@ def test_calendar(self): self.assertEqual(list(holidays_2.to_pydatetime()), self.holiday_list) + def test_calendar_caching(self): + # Test for issue #9552 + + class TestCalendar(AbstractHolidayCalendar): + def __init__(self, name=None, rules=None): + super(TestCalendar, self).__init__( + name=name, + rules=rules + ) + + jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, 
day=1)]) + jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)]) + + tm.assert_index_equal( + jan1.holidays(), + DatetimeIndex(['01-Jan-2015']) + ) + tm.assert_index_equal( + jan2.holidays(), + DatetimeIndex(['02-Jan-2015']) + ) + + class TestHoliday(tm.TestCase): def setUp(self): self.start_date = datetime(2011, 1, 1) self.end_date = datetime(2020, 12, 31) + def check_results(self, holiday, start, end, expected): + self.assertEqual(list(holiday.dates(start, end)), expected) + # Verify that timezone info is preserved. + self.assertEqual( + list( + holiday.dates( + utc.localize(Timestamp(start)), + utc.localize(Timestamp(end)), + ) + ), + [utc.localize(dt) for dt in expected], + ) + def test_usmemorialday(self): - holidays = USMemorialDay.dates(self.start_date, - self.end_date) - holidayList = [ - datetime(2011, 5, 30), - datetime(2012, 5, 28), - datetime(2013, 5, 27), - datetime(2014, 5, 26), - datetime(2015, 5, 25), - datetime(2016, 5, 30), - datetime(2017, 5, 29), - datetime(2018, 5, 28), - datetime(2019, 5, 27), - datetime(2020, 5, 25), - ] - self.assertEqual(list(holidays), holidayList) + self.check_results( + holiday=USMemorialDay, + start=self.start_date, + end=self.end_date, + expected=[ + datetime(2011, 5, 30), + datetime(2012, 5, 28), + datetime(2013, 5, 27), + datetime(2014, 5, 26), + datetime(2015, 5, 25), + datetime(2016, 5, 30), + datetime(2017, 5, 29), + datetime(2018, 5, 28), + datetime(2019, 5, 27), + datetime(2020, 5, 25), + ], + ) def test_non_observed_holiday(self): - july_3rd = Holiday('July 4th Eve', month=7, day=3) - result = july_3rd.dates("2001-01-01", "2003-03-03") - expected = [Timestamp('2001-07-03 00:00:00'), - Timestamp('2002-07-03 00:00:00')] - self.assertEqual(list(result), expected) - july_3rd = Holiday('July 4th Eve', month=7, day=3, - days_of_week=(0, 1, 2, 3)) - result = july_3rd.dates("2001-01-01", "2008-03-03") - expected = [Timestamp('2001-07-03 00:00:00'), - Timestamp('2002-07-03 00:00:00'), - 
Timestamp('2003-07-03 00:00:00'), - Timestamp('2006-07-03 00:00:00'), - Timestamp('2007-07-03 00:00:00')] - self.assertEqual(list(result), expected) + + self.check_results( + Holiday('July 4th Eve', month=7, day=3), + start="2001-01-01", + end="2003-03-03", + expected=[ + Timestamp('2001-07-03 00:00:00'), + Timestamp('2002-07-03 00:00:00') + ] + ) + + self.check_results( + Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)), + start="2001-01-01", + end="2008-03-03", + expected=[ + Timestamp('2001-07-03 00:00:00'), + Timestamp('2002-07-03 00:00:00'), + Timestamp('2003-07-03 00:00:00'), + Timestamp('2006-07-03 00:00:00'), + Timestamp('2007-07-03 00:00:00'), + ] + ) def test_easter(self): - holidays = EasterMonday.dates(self.start_date, - self.end_date) - holidayList = [Timestamp('2011-04-25 00:00:00'), - Timestamp('2012-04-09 00:00:00'), - Timestamp('2013-04-01 00:00:00'), - Timestamp('2014-04-21 00:00:00'), - Timestamp('2015-04-06 00:00:00'), - Timestamp('2016-03-28 00:00:00'), - Timestamp('2017-04-17 00:00:00'), - Timestamp('2018-04-02 00:00:00'), - Timestamp('2019-04-22 00:00:00'), - Timestamp('2020-04-13 00:00:00')] - - - self.assertEqual(list(holidays), holidayList) - holidays = GoodFriday.dates(self.start_date, - self.end_date) - holidayList = [Timestamp('2011-04-22 00:00:00'), - Timestamp('2012-04-06 00:00:00'), - Timestamp('2013-03-29 00:00:00'), - Timestamp('2014-04-18 00:00:00'), - Timestamp('2015-04-03 00:00:00'), - Timestamp('2016-03-25 00:00:00'), - Timestamp('2017-04-14 00:00:00'), - Timestamp('2018-03-30 00:00:00'), - Timestamp('2019-04-19 00:00:00'), - Timestamp('2020-04-10 00:00:00')] - self.assertEqual(list(holidays), holidayList) - + + self.check_results( + EasterMonday, + start=self.start_date, + end=self.end_date, + expected=[ + Timestamp('2011-04-25 00:00:00'), + Timestamp('2012-04-09 00:00:00'), + Timestamp('2013-04-01 00:00:00'), + Timestamp('2014-04-21 00:00:00'), + Timestamp('2015-04-06 00:00:00'), + Timestamp('2016-03-28 
00:00:00'), + Timestamp('2017-04-17 00:00:00'), + Timestamp('2018-04-02 00:00:00'), + Timestamp('2019-04-22 00:00:00'), + Timestamp('2020-04-13 00:00:00'), + ], + ) + self.check_results( + GoodFriday, + start=self.start_date, + end=self.end_date, + expected=[ + Timestamp('2011-04-22 00:00:00'), + Timestamp('2012-04-06 00:00:00'), + Timestamp('2013-03-29 00:00:00'), + Timestamp('2014-04-18 00:00:00'), + Timestamp('2015-04-03 00:00:00'), + Timestamp('2016-03-25 00:00:00'), + Timestamp('2017-04-14 00:00:00'), + Timestamp('2018-03-30 00:00:00'), + Timestamp('2019-04-19 00:00:00'), + Timestamp('2020-04-10 00:00:00'), + ], + ) def test_usthanksgivingday(self): - holidays = USThanksgivingDay.dates(self.start_date, - self.end_date) - holidayList = [ - datetime(2011, 11, 24), - datetime(2012, 11, 22), - datetime(2013, 11, 28), - datetime(2014, 11, 27), - datetime(2015, 11, 26), - datetime(2016, 11, 24), - datetime(2017, 11, 23), - datetime(2018, 11, 22), - datetime(2019, 11, 28), - datetime(2020, 11, 26), - ] - - self.assertEqual(list(holidays), holidayList) + + self.check_results( + USThanksgivingDay, + start=self.start_date, + end=self.end_date, + expected=[ + datetime(2011, 11, 24), + datetime(2012, 11, 22), + datetime(2013, 11, 28), + datetime(2014, 11, 27), + datetime(2015, 11, 26), + datetime(2016, 11, 24), + datetime(2017, 11, 23), + datetime(2018, 11, 22), + datetime(2019, 11, 28), + datetime(2020, 11, 26), + ], + ) def test_argument_types(self): holidays = USThanksgivingDay.dates(self.start_date, diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index 0793508b4912c..a051560617604 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -10,7 +10,7 @@ import numpy as np from pandas.core.datetools import ( - bday, BDay, CDay, BQuarterEnd, BMonthEnd, + bday, BDay, CDay, BQuarterEnd, BMonthEnd, BusinessHour, CBMonthEnd, CBMonthBegin, BYearEnd, MonthEnd, MonthBegin, BYearBegin, 
CustomBusinessDay, QuarterBegin, BQuarterBegin, BMonthBegin, DateOffset, Week, @@ -23,7 +23,6 @@ from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache, date_range from pandas.tseries.tools import parse_time_string, DateParseError import pandas.tseries.offsets as offsets - from pandas.io.pickle import read_pickle from pandas.tslib import NaT, Timestamp, Timedelta import pandas.tslib as tslib @@ -133,7 +132,11 @@ def test_apply_out_of_range(self): # try to create an out-of-bounds result timestamp; if we can't create the offset # skip try: - offset = self._get_offset(self._offset, value=10000) + if self._offset is BusinessHour: + # Using 10000 in BusinessHour fails in tz check because of DST difference + offset = self._get_offset(self._offset, value=100000) + else: + offset = self._get_offset(self._offset, value=10000) result = Timestamp('20080101') + offset self.assertIsInstance(result, datetime) @@ -179,6 +182,7 @@ def setUp(self): 'BQuarterBegin': Timestamp('2011-03-01 09:00:00'), 'QuarterEnd': Timestamp('2011-03-31 09:00:00'), 'BQuarterEnd': Timestamp('2011-03-31 09:00:00'), + 'BusinessHour': Timestamp('2011-01-03 10:00:00'), 'WeekOfMonth': Timestamp('2011-01-08 09:00:00'), 'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'), 'FY5253Quarter': Timestamp('2011-01-25 09:00:00'), @@ -278,6 +282,8 @@ def test_rollforward(self): for n in no_changes: expecteds[n] = Timestamp('2011/01/01 09:00') + expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00') + # but be changed when normalize=True norm_expected = expecteds.copy() for k in norm_expected: @@ -321,6 +327,7 @@ def test_rollback(self): 'BQuarterBegin': Timestamp('2010-12-01 09:00:00'), 'QuarterEnd': Timestamp('2010-12-31 09:00:00'), 'BQuarterEnd': Timestamp('2010-12-31 09:00:00'), + 'BusinessHour': Timestamp('2010-12-31 17:00:00'), 'WeekOfMonth': Timestamp('2010-12-11 09:00:00'), 'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'), 'FY5253Quarter': Timestamp('2010-10-26 09:00:00'), @@ -371,6 
+378,10 @@ def test_onOffset(self): offset_n = self._get_offset(offset, normalize=True) self.assertFalse(offset_n.onOffset(dt)) + if offset is BusinessHour: + # In default BusinessHour (9:00-17:00), normalized time + # cannot be in business hour range + continue date = datetime(dt.year, dt.month, dt.day) self.assertTrue(offset_n.onOffset(date)) @@ -642,6 +653,593 @@ def test_offsets_compare_equal(self): self.assertFalse(offset1 != offset2) +class TestBusinessHour(Base): + _multiprocess_can_split_ = True + _offset = BusinessHour + + def setUp(self): + self.d = datetime(2014, 7, 1, 10, 00) + + self.offset1 = BusinessHour() + self.offset2 = BusinessHour(n=3) + + self.offset3 = BusinessHour(n=-1) + self.offset4 = BusinessHour(n=-4) + + from datetime import time as dt_time + self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30)) + self.offset6 = BusinessHour(start='20:00', end='05:00') + self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30)) + + def test_constructor_errors(self): + from datetime import time as dt_time + with tm.assertRaises(ValueError): + BusinessHour(start=dt_time(11, 0, 5)) + with tm.assertRaises(ValueError): + BusinessHour(start='AAA') + with tm.assertRaises(ValueError): + BusinessHour(start='14:00:05') + + def test_different_normalize_equals(self): + # equivalent in this special case + offset = self._offset() + offset2 = self._offset() + offset2.normalize = True + self.assertEqual(offset, offset2) + + def test_repr(self): + self.assertEqual(repr(self.offset1), '<BusinessHour: BH=09:00-17:00>') + self.assertEqual(repr(self.offset2), '<3 * BusinessHours: BH=09:00-17:00>') + self.assertEqual(repr(self.offset3), '<-1 * BusinessHour: BH=09:00-17:00>') + self.assertEqual(repr(self.offset4), '<-4 * BusinessHours: BH=09:00-17:00>') + + self.assertEqual(repr(self.offset5), '<BusinessHour: BH=11:00-14:30>') + self.assertEqual(repr(self.offset6), '<BusinessHour: BH=20:00-05:00>') + self.assertEqual(repr(self.offset7), '<-2 
* BusinessHours: BH=21:30-06:30>') + + def test_with_offset(self): + expected = Timestamp('2014-07-01 13:00') + + self.assertEqual(self.d + BusinessHour() * 3, expected) + self.assertEqual(self.d + BusinessHour(n=3), expected) + + def testEQ(self): + for offset in [self.offset1, self.offset2, self.offset3, self.offset4]: + self.assertEqual(offset, offset) + + self.assertNotEqual(BusinessHour(), BusinessHour(-1)) + self.assertEqual(BusinessHour(start='09:00'), BusinessHour()) + self.assertNotEqual(BusinessHour(start='09:00'), BusinessHour(start='09:01')) + self.assertNotEqual(BusinessHour(start='09:00', end='17:00'), + BusinessHour(start='17:00', end='09:01')) + + def test_hash(self): + self.assertEqual(hash(self.offset2), hash(self.offset2)) + + def testCall(self): + self.assertEqual(self.offset1(self.d), datetime(2014, 7, 1, 11)) + self.assertEqual(self.offset2(self.d), datetime(2014, 7, 1, 13)) + self.assertEqual(self.offset3(self.d), datetime(2014, 6, 30, 17)) + self.assertEqual(self.offset4(self.d), datetime(2014, 6, 30, 14)) + + def testRAdd(self): + self.assertEqual(self.d + self.offset2, self.offset2 + self.d) + + def testSub(self): + off = self.offset2 + self.assertRaises(Exception, off.__sub__, self.d) + self.assertEqual(2 * off - off, off) + + self.assertEqual(self.d - self.offset2, self.d + self._offset(-3)) + + def testRSub(self): + self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d)) + + def testMult1(self): + self.assertEqual(self.d + 5 * self.offset1, self.d + self._offset(5)) + + def testMult2(self): + self.assertEqual(self.d + (-3 * self._offset(-2)), + self.d + self._offset(6)) + + def testRollback1(self): + self.assertEqual(self.offset1.rollback(self.d), self.d) + self.assertEqual(self.offset2.rollback(self.d), self.d) + self.assertEqual(self.offset3.rollback(self.d), self.d) + self.assertEqual(self.offset4.rollback(self.d), self.d) + self.assertEqual(self.offset5.rollback(self.d), datetime(2014, 6, 30, 14, 30)) + 
self.assertEqual(self.offset6.rollback(self.d), datetime(2014, 7, 1, 5, 0)) + self.assertEqual(self.offset7.rollback(self.d), datetime(2014, 7, 1, 6, 30)) + + d = datetime(2014, 7, 1, 0) + self.assertEqual(self.offset1.rollback(d), datetime(2014, 6, 30, 17)) + self.assertEqual(self.offset2.rollback(d), datetime(2014, 6, 30, 17)) + self.assertEqual(self.offset3.rollback(d), datetime(2014, 6, 30, 17)) + self.assertEqual(self.offset4.rollback(d), datetime(2014, 6, 30, 17)) + self.assertEqual(self.offset5.rollback(d), datetime(2014, 6, 30, 14, 30)) + self.assertEqual(self.offset6.rollback(d), d) + self.assertEqual(self.offset7.rollback(d), d) + + self.assertEqual(self._offset(5).rollback(self.d), self.d) + + def testRollback2(self): + self.assertEqual(self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)), + datetime(2014, 7, 4, 17, 0)) + + def testRollforward1(self): + self.assertEqual(self.offset1.rollforward(self.d), self.d) + self.assertEqual(self.offset2.rollforward(self.d), self.d) + self.assertEqual(self.offset3.rollforward(self.d), self.d) + self.assertEqual(self.offset4.rollforward(self.d), self.d) + self.assertEqual(self.offset5.rollforward(self.d), datetime(2014, 7, 1, 11, 0)) + self.assertEqual(self.offset6.rollforward(self.d), datetime(2014, 7, 1, 20, 0)) + self.assertEqual(self.offset7.rollforward(self.d), datetime(2014, 7, 1, 21, 30)) + + d = datetime(2014, 7, 1, 0) + self.assertEqual(self.offset1.rollforward(d), datetime(2014, 7, 1, 9)) + self.assertEqual(self.offset2.rollforward(d), datetime(2014, 7, 1, 9)) + self.assertEqual(self.offset3.rollforward(d), datetime(2014, 7, 1, 9)) + self.assertEqual(self.offset4.rollforward(d), datetime(2014, 7, 1, 9)) + self.assertEqual(self.offset5.rollforward(d), datetime(2014, 7, 1, 11)) + self.assertEqual(self.offset6.rollforward(d), d) + self.assertEqual(self.offset7.rollforward(d), d) + + self.assertEqual(self._offset(5).rollforward(self.d), self.d) + + def testRollforward2(self): + 
self.assertEqual(self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)), + datetime(2014, 7, 7, 9)) + + def test_roll_date_object(self): + offset = BusinessHour() + + dt = datetime(2014, 7, 6, 15, 0) + + result = offset.rollback(dt) + self.assertEqual(result, datetime(2014, 7, 4, 17)) + + result = offset.rollforward(dt) + self.assertEqual(result, datetime(2014, 7, 7, 9)) + + def test_normalize(self): + tests = [] + + tests.append((BusinessHour(normalize=True), + {datetime(2014, 7, 1, 8): datetime(2014, 7, 1), + datetime(2014, 7, 1, 17): datetime(2014, 7, 2), + datetime(2014, 7, 1, 16): datetime(2014, 7, 2), + datetime(2014, 7, 1, 23): datetime(2014, 7, 2), + datetime(2014, 7, 1, 0): datetime(2014, 7, 1), + datetime(2014, 7, 4, 15): datetime(2014, 7, 4), + datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4), + datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7), + datetime(2014, 7, 5, 23): datetime(2014, 7, 7), + datetime(2014, 7, 6, 10): datetime(2014, 7, 7)})) + + tests.append((BusinessHour(-1, normalize=True), + {datetime(2014, 7, 1, 8): datetime(2014, 6, 30), + datetime(2014, 7, 1, 17): datetime(2014, 7, 1), + datetime(2014, 7, 1, 16): datetime(2014, 7, 1), + datetime(2014, 7, 1, 10): datetime(2014, 6, 30), + datetime(2014, 7, 1, 0): datetime(2014, 6, 30), + datetime(2014, 7, 7, 10): datetime(2014, 7, 4), + datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7), + datetime(2014, 7, 5, 23): datetime(2014, 7, 4), + datetime(2014, 7, 6, 10): datetime(2014, 7, 4)})) + + tests.append((BusinessHour(1, normalize=True, start='17:00', end='04:00'), + {datetime(2014, 7, 1, 8): datetime(2014, 7, 1), + datetime(2014, 7, 1, 17): datetime(2014, 7, 1), + datetime(2014, 7, 1, 23): datetime(2014, 7, 2), + datetime(2014, 7, 2, 2): datetime(2014, 7, 2), + datetime(2014, 7, 2, 3): datetime(2014, 7, 2), + datetime(2014, 7, 4, 23): datetime(2014, 7, 5), + datetime(2014, 7, 5, 2): datetime(2014, 7, 5), + datetime(2014, 7, 7, 2): datetime(2014, 7, 7), + datetime(2014, 7, 7, 17): 
datetime(2014, 7, 7)})) + + for offset, cases in tests: + for dt, expected in compat.iteritems(cases): + self.assertEqual(offset.apply(dt), expected) + + def test_onOffset(self): + tests = [] + + tests.append((BusinessHour(), + {datetime(2014, 7, 1, 9): True, + datetime(2014, 7, 1, 8, 59): False, + datetime(2014, 7, 1, 8): False, + datetime(2014, 7, 1, 17): True, + datetime(2014, 7, 1, 17, 1): False, + datetime(2014, 7, 1, 18): False, + datetime(2014, 7, 5, 9): False, + datetime(2014, 7, 6, 12): False})) + + tests.append((BusinessHour(start='10:00', end='15:00'), + {datetime(2014, 7, 1, 9): False, + datetime(2014, 7, 1, 10): True, + datetime(2014, 7, 1, 15): True, + datetime(2014, 7, 1, 15, 1): False, + datetime(2014, 7, 5, 12): False, + datetime(2014, 7, 6, 12): False})) + + tests.append((BusinessHour(start='19:00', end='05:00'), + {datetime(2014, 7, 1, 9, 0): False, + datetime(2014, 7, 1, 10, 0): False, + datetime(2014, 7, 1, 15): False, + datetime(2014, 7, 1, 15, 1): False, + datetime(2014, 7, 5, 12, 0): False, + datetime(2014, 7, 6, 12, 0): False, + datetime(2014, 7, 1, 19, 0): True, + datetime(2014, 7, 2, 0, 0): True, + datetime(2014, 7, 4, 23): True, + datetime(2014, 7, 5, 1): True, + datetime(2014, 7, 5, 5, 0): True, + datetime(2014, 7, 6, 23, 0): False, + datetime(2014, 7, 7, 3, 0): False})) + + for offset, cases in tests: + for dt, expected in compat.iteritems(cases): + self.assertEqual(offset.onOffset(dt), expected) + + def test_opening_time(self): + tests = [] + + # opening time should be affected by sign of n, not by n's value and end + tests.append(([BusinessHour(), BusinessHour(n=2), BusinessHour(n=4), + BusinessHour(end='10:00'), BusinessHour(n=2, end='4:00'), + BusinessHour(n=4, end='15:00')], + {datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)), + datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)), + datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)), + datetime(2014, 7, 
2, 8): (datetime(2014, 7, 2, 9), datetime(2014, 7, 1, 9)), + # if timestamp is on opening time, next opening time is as it is + datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(2014, 7, 2, 9)), + datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9), datetime(2014, 7, 2, 9)), + # 2014-07-05 is saturday + datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)), + datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)), + datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)), + datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)), + datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9), datetime(2014, 7, 4, 9)), + datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9), datetime(2014, 7, 7, 9))})) + + tests.append(([BusinessHour(start='11:15'), BusinessHour(n=2, start='11:15'), + BusinessHour(n=3, start='11:15'), + BusinessHour(start='11:15', end='10:00'), + BusinessHour(n=2, start='11:15', end='4:00'), + BusinessHour(n=3, start='11:15', end='15:00')], + {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15), datetime(2014, 6, 30, 11, 15)), + datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)), + datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)), + datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)), + datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)), + datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)), + datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15), datetime(2014, 7, 2, 11, 15)), + datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15), datetime(2014, 7, 2, 11, 15)), + datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)), + datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15), datetime(2014, 7, 3, 11, 15)), + datetime(2014, 7, 4, 23): 
(datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)), + datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)), + datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)), + datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15))})) + + tests.append(([BusinessHour(-1), BusinessHour(n=-2), BusinessHour(n=-4), + BusinessHour(n=-1, end='10:00'), BusinessHour(n=-2, end='4:00'), + BusinessHour(n=-4, end='15:00')], + {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)), + datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)), + datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)), + datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9), datetime(2014, 7, 2, 9)), + datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(2014, 7, 2, 9)), + datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9), datetime(2014, 7, 3, 9)), + datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)), + datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)), + datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)), + datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)), + datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9), datetime(2014, 7, 7, 9)), + datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9), datetime(2014, 7, 7, 9)), + datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9), datetime(2014, 7, 8, 9))})) + + tests.append(([BusinessHour(start='17:00', end='05:00'), + BusinessHour(n=3, start='17:00', end='03:00')], + {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17), datetime(2014, 6, 30, 17)), + datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)), + datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)), + datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)), + 
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17), datetime(2014, 7, 1, 17)), + datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17), datetime(2014, 7, 4, 17)), + datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)), + datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 3, 17)), + datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)), + datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)), + datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17), datetime(2014, 7, 4, 17)), + datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17), datetime(2014, 7, 7, 17)),})) + + tests.append(([BusinessHour(-1, start='17:00', end='05:00'), + BusinessHour(n=-2, start='17:00', end='03:00')], + {datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17), datetime(2014, 7, 1, 17)), + datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)), + datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)), + datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)), + datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)), + datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17), datetime(2014, 7, 2, 17)), + datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)), + datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17), datetime(2014, 7, 4, 17)), + datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)), + datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)), + datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17), datetime(2014, 7, 7, 17)), + datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17), datetime(2014, 7, 8, 17))})) + + for offsets, cases in tests: + for offset in offsets: + for dt, (exp_next, exp_prev) in compat.iteritems(cases): + self.assertEqual(offset._next_opening_time(dt), exp_next) + self.assertEqual(offset._prev_opening_time(dt), exp_prev) 
+ + def test_apply(self): + tests = [] + + tests.append((BusinessHour(), + {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12), + datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14), + datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16), + datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10), + datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9), + datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15), + datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10), + datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12), + # out of business hours + datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10), + datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10), + datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10), + datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10), + # saturday + datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10), + datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10), + datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30), + datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)})) + + tests.append((BusinessHour(4), + {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15), + datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9), + datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11), + datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12), + datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13), + datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15), + datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13), + datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13), + datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13), + datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13), + datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13), + datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13), + datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30), + datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)})) + + tests.append((BusinessHour(-1), + {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10), + datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12), + datetime(2014, 7, 1, 
15): datetime(2014, 7, 1, 14), + datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15), + datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17), + datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15), + datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15), + datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16), + datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16), + datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10), + # out of business hours + datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16), + datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16), + datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16), + datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16), + # saturday + datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16), + datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16), + datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30), + datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)})) + + tests.append((BusinessHour(-4), + {datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15), + datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17), + datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11), + datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12), + datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13), + datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15), + datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13), + datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13), + datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13), + datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13), + datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13), + datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13), + datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30), + datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)})) + + tests.append((BusinessHour(start='13:00', end='16:00'), + {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14), + datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14), + datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13), + datetime(2014, 7, 1, 
19): datetime(2014, 7, 2, 14), + datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14), + datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15), + datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14), + datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)})) + + tests.append((BusinessHour(n=2, start='13:00', end='16:00'), + {datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15), + datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13), + datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15), + datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15), + datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30), + datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15), + datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15), + datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15), + datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30), + datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)})) + + tests.append((BusinessHour(n=-1, start='13:00', end='16:00'), + {datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15), + datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15), + datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16), + datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14), + datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15), + datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15), + datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15), + datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15), + datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)})) + + tests.append((BusinessHour(n=-3, start='10:00', end='16:00'), + {datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13), + datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11), + datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13), + datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16), + datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13), + datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30), + datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13), + datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13), + datetime(2014, 7, 
5, 15): datetime(2014, 7, 4, 13), + datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13), + datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30), + datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)})) + + tests.append((BusinessHour(start='19:00', end='05:00'), + {datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20), + datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20), + datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20), + datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20), + datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20), + datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30), + datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1), + datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20), + datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0), + datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1), + datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19), + datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30), + datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)})) + + tests.append((BusinessHour(n=-1, start='19:00', end='05:00'), + {datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4), + datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4), + datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4), + datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4), + datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5), + datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4), + datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30), + datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23), + datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4), + datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22), + datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23), + datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3), + datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30), + datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)})) + + for offset, cases in tests: + for base, expected in compat.iteritems(cases): + assertEq(offset, base, expected) + + def test_apply_large_n(self): 
+ tests = [] + + tests.append((BusinessHour(40), # A week later + {datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11), + datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13), + datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15), + datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16), + datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9), + datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11), + datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9), + datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9), + datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9), + datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9), + datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9), + datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9), + datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30), + datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)})) + + tests.append((BusinessHour(-25), # 3 days and 1 hour before + {datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10), + datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12), + datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16), + datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17), + datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10), + datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16), + datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16), + datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16), + datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16), + datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16), + datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16), + datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30), + datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)})) + + tests.append((BusinessHour(28, start='21:00', end='02:00'), # 5 days and 3 hours later + {datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0), + datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1), + datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21), + datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0), + datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0), + 
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23), + datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0), + datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0), + datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23), + datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0), + datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0), + datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0), + datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)})) + + for offset, cases in tests: + for base, expected in compat.iteritems(cases): + assertEq(offset, base, expected) + + def test_apply_nanoseconds(self): + tests = [] + + tests.append((BusinessHour(), + {Timestamp('2014-07-04 15:00') + Nano(5): Timestamp('2014-07-04 16:00') + Nano(5), + Timestamp('2014-07-04 16:00') + Nano(5): Timestamp('2014-07-07 09:00') + Nano(5), + Timestamp('2014-07-04 16:00') - Nano(5): Timestamp('2014-07-04 17:00') - Nano(5) + })) + + tests.append((BusinessHour(-1), + {Timestamp('2014-07-04 15:00') + Nano(5): Timestamp('2014-07-04 14:00') + Nano(5), + Timestamp('2014-07-04 10:00') + Nano(5): Timestamp('2014-07-04 09:00') + Nano(5), + Timestamp('2014-07-04 10:00') - Nano(5): Timestamp('2014-07-03 17:00') - Nano(5), + })) + + for offset, cases in tests: + for base, expected in compat.iteritems(cases): + assertEq(offset, base, expected) + + def test_offsets_compare_equal(self): + # root cause of #456 + offset1 = self._offset() + offset2 = self._offset() + self.assertFalse(offset1 != offset2) + + def test_datetimeindex(self): + idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00', freq='BH') + idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH') + idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH') + expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00', + '2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00', + '2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00', + '2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 
10:00'], + freq='BH') + for idx in [idx1, idx2, idx3]: + tm.assert_index_equal(idx, expected) + + idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45', freq='BH') + idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH') + idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH') + + expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45', '2014-07-07 09:45', + '2014-07-07 10:45', '2014-07-07 11:45', '2014-07-07 12:45', + '2014-07-07 13:45', '2014-07-07 14:45', '2014-07-07 15:45', + '2014-07-07 16:45', '2014-07-08 09:45', '2014-07-08 10:45'], + freq='BH') + expected = idx1 + for idx in [idx1, idx2, idx3]: + tm.assert_index_equal(idx, expected) + + class TestCustomBusinessDay(Base): _multiprocess_can_split_ = True _offset = CDay diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 17edcd7504102..0218af63ca7d6 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -101,15 +101,15 @@ def test_timestamp_tz_arg(self): pytz.timezone('Europe/Brussels').normalize(p).tzinfo) def test_timestamp_tz_arg_dateutil(self): - import dateutil + from pandas.tslib import _dateutil_gettz as gettz from pandas.tslib import maybe_get_tz p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels')) - self.assertEqual(p.tz, dateutil.zoneinfo.gettz('Europe/Brussels')) + self.assertEqual(p.tz, gettz('Europe/Brussels')) def test_timestamp_tz_arg_dateutil_from_string(self): - import dateutil + from pandas.tslib import _dateutil_gettz as gettz p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels') - self.assertEqual(p.tz, dateutil.zoneinfo.gettz('Europe/Brussels')) + self.assertEqual(p.tz, gettz('Europe/Brussels')) def test_timestamp_nat_tz(self): t = Period('NaT', freq='M').to_timestamp() @@ -226,16 +226,29 @@ def test_period_constructor(self): i1 = Period(date(2007, 1, 1), freq='M') i2 = Period(datetime(2007, 1, 1), 
freq='M') + i3 = Period(np.datetime64('2007-01-01'), freq='M') + i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M') + i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M') self.assertEqual(i1, i2) + self.assertEqual(i1, i3) + self.assertEqual(i1, i4) + self.assertEqual(i1, i5) i1 = Period('2007-01-01 09:00:00.001') expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L') self.assertEqual(i1, expected) + expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L') + self.assertEqual(i1, expected) + i1 = Period('2007-01-01 09:00:00.00101') expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U') self.assertEqual(i1, expected) + expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'), + freq='U') + self.assertEqual(i1, expected) + self.assertRaises(ValueError, Period, ordinal=200701) self.assertRaises(ValueError, Period, '2007-1-1', freq='X') @@ -434,7 +447,7 @@ def test_properties_weekly(self): assert_equal((w_date - 1).week, 52) assert_equal(w_date.days_in_month, 31) assert_equal(Period(freq='WK', year=2012, month=2, day=1).days_in_month, 29) - + def test_properties_daily(self): # Test properties on Periods with daily frequency. 
b_date = Period(freq='B', year=2007, month=1, day=1) @@ -2105,6 +2118,7 @@ def test_range_slice_outofbounds(self): for idx in [didx, pidx]: df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx) empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units']) + empty['units'] = empty['units'].astype('int64') tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty) tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2]) diff --git a/pandas/tseries/tests/test_plotting.py b/pandas/tseries/tests/test_plotting.py index c4e642ffe43b0..c5ed8a1ac3e31 100644 --- a/pandas/tseries/tests/test_plotting.py +++ b/pandas/tseries/tests/test_plotting.py @@ -528,7 +528,9 @@ def test_secondary_y(self): ser = Series(np.random.randn(10)) ser2 = Series(np.random.randn(10)) - ax = ser.plot(secondary_y=True).right_ax + ax = ser.plot(secondary_y=True) + self.assertTrue(hasattr(ax, 'left_ax')) + self.assertFalse(hasattr(ax, 'right_ax')) fig = ax.get_figure() axes = fig.get_axes() l = ax.get_lines()[0] @@ -543,8 +545,12 @@ def test_secondary_y(self): plt.close(ax2.get_figure()) ax = ser2.plot() - ax2 = ser.plot(secondary_y=True).right_ax + ax2 = ser.plot(secondary_y=True) self.assertTrue(ax.get_yaxis().get_visible()) + self.assertFalse(hasattr(ax, 'left_ax')) + self.assertTrue(hasattr(ax, 'right_ax')) + self.assertTrue(hasattr(ax2, 'left_ax')) + self.assertFalse(hasattr(ax2, 'right_ax')) @slow def test_secondary_y_ts(self): @@ -552,7 +558,9 @@ def test_secondary_y_ts(self): idx = date_range('1/1/2000', periods=10) ser = Series(np.random.randn(10), idx) ser2 = Series(np.random.randn(10), idx) - ax = ser.plot(secondary_y=True).right_ax + ax = ser.plot(secondary_y=True) + self.assertTrue(hasattr(ax, 'left_ax')) + self.assertFalse(hasattr(ax, 'right_ax')) fig = ax.get_figure() axes = fig.get_axes() l = ax.get_lines()[0] @@ -577,7 +585,9 @@ def test_secondary_kde(self): import matplotlib.pyplot as plt ser = Series(np.random.randn(10)) - ax = 
ser.plot(secondary_y=True, kind='density').right_ax + ax = ser.plot(secondary_y=True, kind='density') + self.assertTrue(hasattr(ax, 'left_ax')) + self.assertFalse(hasattr(ax, 'right_ax')) fig = ax.get_figure() axes = fig.get_axes() self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right') @@ -636,6 +646,38 @@ def test_mixed_freq_irregular_first(self): x2 = lines[1].get_xdata() assert_array_equal(x2, s1.index.asobject.values) + def test_mixed_freq_regular_first_df(self): + # GH 9852 + import matplotlib.pyplot as plt + s1 = tm.makeTimeSeries().to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + ax = s1.plot() + ax2 = s2.plot(style='g', ax=ax) + lines = ax2.get_lines() + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + self.assertTrue(idx1.equals(s1.index.to_period('B'))) + self.assertTrue(idx2.equals(s2.index.to_period('B'))) + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + self.assertEqual(left, pidx[0].ordinal) + self.assertEqual(right, pidx[-1].ordinal) + + @slow + def test_mixed_freq_irregular_first_df(self): + # GH 9852 + import matplotlib.pyplot as plt + s1 = tm.makeTimeSeries().to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + ax = s2.plot(style='g') + ax = s1.plot(ax=ax) + self.assertFalse(hasattr(ax, 'freq')) + lines = ax.get_lines() + x1 = lines[0].get_xdata() + assert_array_equal(x1, s2.index.asobject.values) + x2 = lines[1].get_xdata() + assert_array_equal(x2, s1.index.asobject.values) + def test_mixed_freq_hf_first(self): idxh = date_range('1/1/1999', periods=365, freq='D') idxl = date_range('1/1/1999', periods=12, freq='M') @@ -890,7 +932,9 @@ def test_secondary_upsample(self): ax = high.plot(secondary_y=True) for l in ax.get_lines(): self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D') - for l in ax.right_ax.get_lines(): + self.assertTrue(hasattr(ax, 'left_ax')) + self.assertFalse(hasattr(ax, 'right_ax')) + for l in ax.left_ax.get_lines(): 
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D') @slow diff --git a/pandas/tseries/tests/test_resample.py b/pandas/tseries/tests/test_resample.py index c338bbeae79c7..d7b1256329cc3 100644 --- a/pandas/tseries/tests/test_resample.py +++ b/pandas/tseries/tests/test_resample.py @@ -82,15 +82,17 @@ def test_resample_basic(self): name='index') s = Series(np.random.randn(14), index=rng) result = s.resample('5min', how='mean', closed='right', label='right') + + exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index') expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()], - index=date_range('1/1/2000', periods=4, freq='5min')) + index=exp_idx) assert_series_equal(result, expected) self.assertEqual(result.index.name, 'index') result = s.resample('5min', how='mean', closed='left', label='right') - expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()], - index=date_range('1/1/2000 00:05', periods=3, - freq='5min')) + + exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min', name='index') + expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()], index=exp_idx) assert_series_equal(result, expected) s = self.series @@ -115,7 +117,7 @@ def _ohlc(group): if isnull(group).all(): return np.repeat(np.nan, 4) return [group[0], group.max(), group.min(), group[-1]] - inds = date_range('1/1/2000', periods=4, freq='5min') + inds = date_range('1/1/2000', periods=4, freq='5min', name='index') for arg in args: if arg == 'ohlc': @@ -376,6 +378,16 @@ def test_resample_upsample(self): self.assertEqual(result.index.name, 'index') + def test_resample_extra_index_point(self): + # GH 9756 + index = DatetimeIndex(start='20150101', end='20150331', freq='BM') + expected = DataFrame({'A' : Series([21,41,63], index=index)}) + + index = DatetimeIndex(start='20150101', end='20150331', freq='B') + df = DataFrame({'A' : Series(range(len(index)),index=index)},dtype='int64') + result = df.resample('BM', how='last') + assert_frame_equal(result, 
expected) + def test_upsample_with_limit(self): rng = date_range('1/1/2000', periods=3, freq='5t') ts = Series(np.random.randn(len(rng)), rng) @@ -875,23 +887,23 @@ def test_resmaple_dst_anchor(self): # 5172 dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern') df = DataFrame([5], index=dti) - assert_frame_equal(df.resample(rule='D', how='sum'), + assert_frame_equal(df.resample(rule='D', how='sum'), DataFrame([5], index=df.index.normalize())) df.resample(rule='MS', how='sum') assert_frame_equal(df.resample(rule='MS', how='sum'), - DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)], + DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)], tz='US/Eastern'))) dti = date_range('2013-09-30', '2013-11-02', freq='30Min', tz='Europe/Paris') values = range(dti.size) df = DataFrame({"a": values, "b": values, "c": values}, index=dti, dtype='int64') how = {"a": "min", "b": "max", "c": "count"} - + assert_frame_equal(df.resample("W-MON", how=how)[["a", "b", "c"]], DataFrame({"a": [0, 48, 384, 720, 1056, 1394], "b": [47, 383, 719, 1055, 1393, 1586], "c": [48, 336, 336, 336, 338, 193]}, - index=date_range('9/30/2013', '11/4/2013', + index=date_range('9/30/2013', '11/4/2013', freq='W-MON', tz='Europe/Paris')), 'W-MON Frequency') @@ -899,7 +911,7 @@ def test_resmaple_dst_anchor(self): DataFrame({"a": [0, 48, 720, 1394], "b": [47, 719, 1393, 1586], "c": [48, 672, 674, 193]}, - index=date_range('9/30/2013', '11/11/2013', + index=date_range('9/30/2013', '11/11/2013', freq='2W-MON', tz='Europe/Paris')), '2W-MON Frequency') @@ -907,7 +919,7 @@ def test_resmaple_dst_anchor(self): DataFrame({"a": [0, 48, 1538], "b": [47, 1537, 1586], "c": [48, 1490, 49]}, - index=date_range('9/1/2013', '11/1/2013', + index=date_range('9/1/2013', '11/1/2013', freq='MS', tz='Europe/Paris')), 'MS Frequency') @@ -915,7 +927,7 @@ def test_resmaple_dst_anchor(self): DataFrame({"a": [0, 1538], "b": [1537, 1586], "c": [1538, 49]}, - index=date_range('9/1/2013', '11/1/2013', + 
index=date_range('9/1/2013', '11/1/2013', freq='2MS', tz='Europe/Paris')), '2MS Frequency') @@ -1553,6 +1565,8 @@ def test_aggregate_with_nat(self): expected.index = date_range(start='2013-01-01', freq='D', periods=5, name='key') dt_result = getattr(dt_grouped, func)() assert_series_equal(expected, dt_result) + # GH 9925 + self.assertEqual(dt_result.index.name, 'key') # if NaT is included, 'var', 'std', 'mean', 'first','last' and 'nth' doesn't work yet diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index b74a3a59d3bca..948a0be91b276 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -23,6 +23,8 @@ import pandas.util.testing as tm from numpy.random import rand, randn from pandas import _np_version_under1p8 +import pandas.compat as compat + iNaT = tslib.iNaT @@ -64,6 +66,13 @@ def test_construction(self): self.assertEqual(Timedelta(123072001000000).value, 123072001000000) self.assertTrue('1 days 10:11:12.001' in str(Timedelta(123072001000000))) + # string conversion with/without leading zero + # GH 9570 + self.assertEqual(Timedelta('0:00:00'), timedelta(hours=0)) + self.assertEqual(Timedelta('00:00:00'), timedelta(hours=0)) + self.assertEqual(Timedelta('-1:00:00'), -timedelta(hours=1)) + self.assertEqual(Timedelta('-01:00:00'), -timedelta(hours=1)) + # more strings # GH 8190 self.assertEqual(Timedelta('1 h'), timedelta(hours=1)) @@ -302,51 +311,70 @@ class Other: def test_fields(self): + def check(value): + # that we are int/long like + self.assertTrue(isinstance(value, (int, compat.long))) + # compat to datetime.timedelta rng = to_timedelta('1 days, 10:11:12') - self.assertEqual(rng.days,1) - self.assertEqual(rng.seconds,10*3600+11*60+12) - self.assertEqual(rng.microseconds,0) - self.assertEqual(rng.nanoseconds,0) + self.assertEqual(rng.days, 1) + self.assertEqual(rng.seconds, 10*3600+11*60+12) + self.assertEqual(rng.microseconds, 0) + self.assertEqual(rng.nanoseconds, 
0) self.assertRaises(AttributeError, lambda : rng.hours) self.assertRaises(AttributeError, lambda : rng.minutes) self.assertRaises(AttributeError, lambda : rng.milliseconds) + # GH 10050 + check(rng.days) + check(rng.seconds) + check(rng.microseconds) + check(rng.nanoseconds) + td = Timedelta('-1 days, 10:11:12') - self.assertEqual(abs(td),Timedelta('13:48:48')) + self.assertEqual(abs(td), Timedelta('13:48:48')) self.assertTrue(str(td) == "-1 days +10:11:12") - self.assertEqual(-td,Timedelta('0 days 13:48:48')) - self.assertEqual(-Timedelta('-1 days, 10:11:12').value,49728000000000) - self.assertEqual(Timedelta('-1 days, 10:11:12').value,-49728000000000) + self.assertEqual(-td, Timedelta('0 days 13:48:48')) + self.assertEqual(-Timedelta('-1 days, 10:11:12').value, 49728000000000) + self.assertEqual(Timedelta('-1 days, 10:11:12').value, -49728000000000) rng = to_timedelta('-1 days, 10:11:12.100123456') - self.assertEqual(rng.days,-1) - self.assertEqual(rng.seconds,10*3600+11*60+12) - self.assertEqual(rng.microseconds,100*1000+123) - self.assertEqual(rng.nanoseconds,456) + self.assertEqual(rng.days, -1) + self.assertEqual(rng.seconds, 10*3600+11*60+12) + self.assertEqual(rng.microseconds, 100*1000+123) + self.assertEqual(rng.nanoseconds, 456) self.assertRaises(AttributeError, lambda : rng.hours) self.assertRaises(AttributeError, lambda : rng.minutes) self.assertRaises(AttributeError, lambda : rng.milliseconds) # components tup = pd.to_timedelta(-1, 'us').components - self.assertEqual(tup.days,-1) - self.assertEqual(tup.hours,23) - self.assertEqual(tup.minutes,59) - self.assertEqual(tup.seconds,59) - self.assertEqual(tup.milliseconds,999) - self.assertEqual(tup.microseconds,999) - self.assertEqual(tup.nanoseconds,0) + self.assertEqual(tup.days, -1) + self.assertEqual(tup.hours, 23) + self.assertEqual(tup.minutes, 59) + self.assertEqual(tup.seconds, 59) + self.assertEqual(tup.milliseconds, 999) + self.assertEqual(tup.microseconds, 999) + 
self.assertEqual(tup.nanoseconds, 0) + + # GH 10050 + check(tup.days) + check(tup.hours) + check(tup.minutes) + check(tup.seconds) + check(tup.milliseconds) + check(tup.microseconds) + check(tup.nanoseconds) tup = Timedelta('-1 days 1 us').components - self.assertEqual(tup.days,-2) - self.assertEqual(tup.hours,23) - self.assertEqual(tup.minutes,59) - self.assertEqual(tup.seconds,59) - self.assertEqual(tup.milliseconds,999) - self.assertEqual(tup.microseconds,999) - self.assertEqual(tup.nanoseconds,0) + self.assertEqual(tup.days, -2) + self.assertEqual(tup.hours, 23) + self.assertEqual(tup.minutes, 59) + self.assertEqual(tup.seconds, 59) + self.assertEqual(tup.milliseconds, 999) + self.assertEqual(tup.microseconds, 999) + self.assertEqual(tup.nanoseconds, 0) def test_timedelta_range(self): @@ -607,7 +635,7 @@ def test_timedelta_ops(self): self.assertEqual(result, expected) result = td.median() - expected = to_timedelta('00:00:08') + expected = to_timedelta('00:00:09') self.assertEqual(result, expected) result = td.to_frame().median() @@ -634,6 +662,14 @@ def test_timedelta_ops(self): for op in ['skew','kurt','sem','var','prod']: self.assertRaises(TypeError, lambda : getattr(td,op)()) + # GH 10040 + # make sure NaT is properly handled by median() + s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')]) + self.assertEqual(s.diff().median(), timedelta(days=4)) + + s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), Timestamp('2015-02-15')]) + self.assertEqual(s.diff().median(), timedelta(days=6)) + def test_timedelta_ops_scalar(self): # GH 6808 base = pd.to_datetime('20130101 09:01:12.123456') @@ -942,6 +978,10 @@ def test_constructor_name(self): name='TEST') self.assertEqual(idx.name, 'TEST') + # GH10025 + idx2 = TimedeltaIndex(idx, name='something else') + self.assertEqual(idx2.name, 'something else') + def test_freq_conversion(self): # doc example diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 
436a976c72e7e..8412ba8d4aad1 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -288,7 +288,7 @@ def test_indexing(self): self.assertRaises(KeyError, df.__getitem__, df.index[2],) def test_recreate_from_data(self): - freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C'] + freqs = ['M', 'Q', 'A', 'D', 'B', 'BH', 'T', 'S', 'L', 'U', 'H', 'N', 'C'] for f in freqs: org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1) @@ -417,9 +417,9 @@ def test_timestamp_to_datetime_explicit_pytz(self): def test_timestamp_to_datetime_explicit_dateutil(self): _skip_if_windows_python_3() tm._skip_if_no_dateutil() - import dateutil + from pandas.tslib import _dateutil_gettz as gettz rng = date_range('20090415', '20090519', - tz=dateutil.zoneinfo.gettz('US/Eastern')) + tz=gettz('US/Eastern')) stamp = rng[0] dtval = stamp.to_pydatetime() @@ -791,7 +791,7 @@ def test_series_repr_nat(self): series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]') result = repr(series) - expected = ('0 1970-01-01 00:00:00\n' + expected = ('0 1970-01-01 00:00:00.000000\n' '1 1970-01-01 00:00:00.000001\n' '2 1970-01-01 00:00:00.000002\n' '3 NaT\n' @@ -1131,6 +1131,15 @@ def test_reindex_with_datetimes(self): result = ts[list(ts.index[5:10])] tm.assert_series_equal(result, expected) + def test_asfreq_keep_index_name(self): + # GH #9854 + index_name = 'bar' + index = pd.date_range('20130101',periods=20,name=index_name) + df = pd.DataFrame([x for x in range(20)],columns=['foo'],index=index) + + tm.assert_equal(index_name, df.index.name) + tm.assert_equal(index_name, df.asfreq('10D').index.name) + def test_promote_datetime_date(self): rng = date_range('1/1/2000', periods=20) ts = Series(np.random.randn(20), index=rng) @@ -1798,7 +1807,7 @@ def test_append_concat_tz_explicit_pytz(self): def test_append_concat_tz_dateutil(self): # GH 2938 tm._skip_if_no_dateutil() - from dateutil.zoneinfo import gettz as timezone + from pandas.tslib import 
_dateutil_gettz as timezone rng = date_range('5/8/2012 1:45', periods=10, freq='5T', tz='dateutil/US/Eastern') @@ -3338,6 +3347,29 @@ def test_date_range_bms_bug(self): ex_first = Timestamp('2000-01-03') self.assertEqual(rng[0], ex_first) + def test_date_range_businesshour(self): + idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', + '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', + '2014-07-04 15:00', '2014-07-04 16:00'], freq='BH') + rng = date_range('2014-07-04 09:00', '2014-07-04 16:00', freq='BH') + tm.assert_index_equal(idx, rng) + + idx = DatetimeIndex(['2014-07-04 16:00', '2014-07-07 09:00'], freq='BH') + rng = date_range('2014-07-04 16:00', '2014-07-07 09:00', freq='BH') + tm.assert_index_equal(idx, rng) + + idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', + '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', + '2014-07-04 15:00', '2014-07-04 16:00', + '2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00', + '2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00', + '2014-07-07 15:00', '2014-07-07 16:00', + '2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00', + '2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00', + '2014-07-08 15:00', '2014-07-08 16:00'], freq='BH') + rng = date_range('2014-07-04 09:00', '2014-07-08 16:00', freq='BH') + tm.assert_index_equal(idx, rng) + def test_string_index_series_name_converted(self): # #1644 df = DataFrame(np.random.randn(10, 4), diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index e452ddee9d8db..341450f504e2a 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -14,6 +14,8 @@ import pandas.tseries.offsets as offsets import pandas.util.testing as tm from pandas.util.testing import assert_series_equal +import pandas.compat as compat + class TestTimestamp(tm.TestCase): @@ -369,6 +371,50 @@ def test_today(self): 
self.assertTrue(abs(ts_from_string_tz.tz_localize(None) - ts_from_method_tz.tz_localize(None)) < delta) + def test_fields(self): + + def check(value, equal): + # that we are int/long like + self.assertTrue(isinstance(value, (int, compat.long))) + self.assertEqual(value, equal) + + # GH 10050 + ts = Timestamp('2015-05-10 09:06:03.000100001') + check(ts.year, 2015) + check(ts.month, 5) + check(ts.day, 10) + check(ts.hour, 9) + check(ts.minute, 6) + check(ts.second, 3) + self.assertRaises(AttributeError, lambda : ts.millisecond) + check(ts.microsecond, 100) + check(ts.nanosecond, 1) + check(ts.dayofweek, 6) + check(ts.quarter, 2) + check(ts.dayofyear, 130) + check(ts.week, 19) + check(ts.daysinmonth, 31) + check(ts.daysinmonth, 31) + + def test_nat_fields(self): + # GH 10050 + ts = Timestamp('NaT') + self.assertTrue(np.isnan(ts.year)) + self.assertTrue(np.isnan(ts.month)) + self.assertTrue(np.isnan(ts.day)) + self.assertTrue(np.isnan(ts.hour)) + self.assertTrue(np.isnan(ts.minute)) + self.assertTrue(np.isnan(ts.second)) + self.assertTrue(np.isnan(ts.microsecond)) + self.assertTrue(np.isnan(ts.nanosecond)) + self.assertTrue(np.isnan(ts.dayofweek)) + self.assertTrue(np.isnan(ts.quarter)) + self.assertTrue(np.isnan(ts.dayofyear)) + self.assertTrue(np.isnan(ts.week)) + self.assertTrue(np.isnan(ts.daysinmonth)) + self.assertTrue(np.isnan(ts.days_in_month)) + + class TestDatetimeParsingWrappers(tm.TestCase): def test_does_not_convert_mixed_integer(self): bad_date_strings = ( diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index 91e75da1b551c..624981c5536f5 100644 --- a/pandas/tseries/timedeltas.py +++ b/pandas/tseries/timedeltas.py @@ -3,14 +3,12 @@ """ import re -from datetime import timedelta - import numpy as np import pandas.tslib as tslib from pandas import compat -from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype, - is_timedelta64_dtype, _values_from_object, - is_list_like, isnull, _ensure_object) +from pandas.core.common 
import (ABCSeries, is_integer_dtype, + is_timedelta64_dtype, is_list_like, + isnull, _ensure_object) def to_timedelta(arg, unit='ns', box=True, coerce=False): """ @@ -119,7 +117,7 @@ def _validate_timedelta_unit(arg): _short_search = re.compile( "^\s*(?P<neg>-?)\s*(?P<value>\d*\.?\d*)\s*(?P<unit>d|s|ms|us|ns)?\s*$",re.IGNORECASE) _full_search = re.compile( - "^\s*(?P<neg>-?)\s*(?P<days>\d*\.?\d*)?\s*(days|d|day)?,?\s*\+?(?P<time>\d{2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE) + "^\s*(?P<neg>-?)\s*(?P<days>\d*?\.?\d*?)?\s*(days|d|day)?,?\s*\+?(?P<time>\d{1,2}:\d{2}:\d{2})?(?P<frac>\.\d+)?\s*$",re.IGNORECASE) _nat_search = re.compile( "^\s*(nat|nan)\s*$",re.IGNORECASE) _whitespace = re.compile('^\s*$') @@ -209,13 +207,12 @@ def convert(r=None, unit=None, m=m): is_neg = gd['neg'] if gd['days']: days = int((float(gd['days'] or 0) * 86400)*1e9) - if gd['neg']: + if is_neg: days *= -1 value += days else: - if gd['neg']: + if is_neg: value *= -1 - return tslib.cast_from_unit(value, 'ns') return convert diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 8430e0209fd78..ef37e003ab67f 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -210,10 +210,13 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, Returns ------- - ret : datetime if parsing succeeded. Return type depends on input: + ret : datetime if parsing succeeded. + Return type depends on input: + - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp + In case when it is not possible to return designated types (e.g. when any element of input is before Timestamp.min or after Timestamp.max) return will have datetime.datetime type (or correspoding array/Series). 
diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py index 72b12ea495ba0..6c534de0a7aaa 100644 --- a/pandas/tseries/util.py +++ b/pandas/tseries/util.py @@ -1,8 +1,5 @@ from pandas.compat import range, lrange import numpy as np - -import pandas as pd - import pandas.core.common as com from pandas.core.frame import DataFrame import pandas.core.nanops as nanops diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 3f04f80406fca..59eb432844ee3 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -41,7 +41,11 @@ from datetime import time as datetime_time # dateutil compat from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile, tzutc as _dateutil_tzutc) -from dateutil.zoneinfo import gettz as _dateutil_gettz +from pandas.compat import is_platform_windows +if is_platform_windows(): + from dateutil.zoneinfo import gettz as _dateutil_gettz +else: + from dateutil.tz import gettz as _dateutil_gettz from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo from pandas.compat import parse_date, string_types, PY3, iteritems @@ -447,6 +451,11 @@ class Timestamp(_Timestamp): Returns ------- localized : Timestamp + + Raises + ------ + TypeError + If the Timestamp is tz-aware and tz is not None. """ if ambiguous == 'infer': raise ValueError('Cannot infer offset with only one time.') @@ -471,8 +480,7 @@ class Timestamp(_Timestamp): def tz_convert(self, tz): """ - Convert Timestamp to another time zone or localize to requested time - zone + Convert tz-aware Timestamp to another time zone. Parameters ---------- @@ -483,6 +491,11 @@ class Timestamp(_Timestamp): Returns ------- converted : Timestamp + + Raises + ------ + TypeError + If Timestamp is tz-naive. 
""" if self.tzinfo is None: # tz naive, use tz_localize @@ -618,7 +631,7 @@ class NaTType(_NaT): fields = ['year', 'quarter', 'month', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond', 'nanosecond', - 'week', 'dayofyear', 'days_in_month'] + 'week', 'dayofyear', 'days_in_month', 'daysinmonth', 'dayofweek'] for field in fields: prop = property(fget=lambda self: np.nan) setattr(NaTType, field, prop) @@ -943,7 +956,7 @@ cdef class _Timestamp(datetime): cpdef _get_field(self, field): out = get_date_field(np.array([self.value], dtype=np.int64), field) - return out[0] + return int(out[0]) cpdef _get_start_end_field(self, field): month_kw = self.freq.kwds.get('startingMonth', self.freq.kwds.get('month', 12)) if self.freq else 12 @@ -1389,6 +1402,87 @@ def parse_datetime_string(date_string, **kwargs): dt = parse_date(date_string, **kwargs) return dt +def format_array_from_datetime(ndarray[int64_t] values, object tz=None, object format=None, object na_rep=None): + """ + return a np object array of the string formatted values + + Parameters + ---------- + values : a 1-d i8 array + tz : the timezone (or None) + format : optional, default is None + a strftime capable string + na_rep : optional, default is None + a nat format + + """ + cdef: + int64_t val, ns, N = len(values) + ndarray[int64_t] consider_values + bint show_ms = 0, show_us = 0, show_ns = 0, basic_format = 0 + ndarray[object] result = np.empty(N, dtype=object) + object ts, res + pandas_datetimestruct dts + + if na_rep is None: + na_rep = 'NaT' + + # if we don't have a format nor tz, then choose + # a format based on precision + basic_format = format is None and tz is None + if basic_format: + consider_values = values[values != iNaT] + show_ns = (consider_values%1000).any() + + if not show_ns: + consider_values //= 1000 + show_us = (consider_values%1000).any() + + if not show_ms: + consider_values //= 1000 + show_ms = (consider_values%1000).any() + + for i in range(N): + val = values[i] + + if val == 
iNaT: + result[i] = na_rep + elif basic_format: + + pandas_datetime_to_datetimestruct(val, PANDAS_FR_ns, &dts) + res = '%d-%.2d-%.2d %.2d:%.2d:%.2d' % (dts.year, + dts.month, + dts.day, + dts.hour, + dts.min, + dts.sec) + + if show_ns: + ns = dts.ps / 1000 + res += '.%.9d' % (ns + 1000 * dts.us) + elif show_us: + res += '.%.6d' % dts.us + elif show_ms: + res += '.%.3d' % (dts.us/1000) + + result[i] = res + + else: + + ts = Timestamp(val, tz=tz) + if format is None: + result[i] = str(ts) + else: + + # invalid format string + # requires dates > 1900 + try: + result[i] = ts.strftime(format) + except ValueError: + result[i] = str(ts) + + return result + def array_to_datetime(ndarray[object] values, raise_=False, dayfirst=False, format=None, utc=None, coerce=False, unit=None): cdef: diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py index d839437a6fe33..9cd538511e946 100644 --- a/pandas/util/decorators.py +++ b/pandas/util/decorators.py @@ -26,7 +26,7 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None): Name of prefered argument in function mapping : dict or callable If mapping is present, use it to translate old arguments to - new arguments. A callable must do its own value checking; + new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. Examples @@ -45,7 +45,7 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None): should raise warning >>> f(cols='should error', columns="can't pass do both") TypeError: Can only specify 'cols' or 'columns', not both - >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no', False}) + >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... 
diff --git a/pandas/util/doctools.py b/pandas/util/doctools.py new file mode 100644 index 0000000000000..20a2a68ce6b03 --- /dev/null +++ b/pandas/util/doctools.py @@ -0,0 +1,184 @@ +import numpy as np +import pandas as pd +import pandas.compat as compat + + +class TablePlotter(object): + """ + Layout some DataFrames in vertical/horizontal layout for explanation. + Used in merging.rst + """ + + def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5): + self.cell_width = cell_width + self.cell_height = cell_height + self.font_size = font_size + + def _shape(self, df): + """Calcurate table chape considering index levels""" + row, col = df.shape + return row + df.columns.nlevels, col + df.index.nlevels + + def _get_cells(self, left, right, vertical): + """Calcurate appropriate figure size based on left and right data""" + if vertical: + # calcurate required number of cells + vcells = max(sum([self._shape(l)[0] for l in left]), self._shape(right)[0]) + hcells = max([self._shape(l)[1] for l in left]) + self._shape(right)[1] + else: + vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]]) + hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]]) + return hcells, vcells + + def plot(self, left, right, labels=None, vertical=True): + """ + Plot left / right DataFrames in specified layout. + + Parameters + ---------- + left : list of DataFrames before operation is applied + right : DataFrame of operation result + labels : list of str to be drawn as titles of left DataFrames + vertical : bool + If True, use vertical layout. If False, use horizontal layout. 
+ """ + import matplotlib.pyplot as plt + import matplotlib.gridspec as gridspec + + if not isinstance(left, list): + left = [left] + left = [self._conv(l) for l in left] + right = self._conv(right) + + hcells, vcells = self._get_cells(left, right, vertical) + + if vertical: + figsize = self.cell_width * hcells, self.cell_height * vcells + else: + # include margin for titles + figsize = self.cell_width * hcells, self.cell_height * vcells + fig = plt.figure(figsize=figsize) + + if vertical: + gs = gridspec.GridSpec(len(left), hcells) + # left + max_left_cols = max([self._shape(l)[1] for l in left]) + max_left_rows = max([self._shape(l)[0] for l in left]) + for i, (l, label) in enumerate(zip(left, labels)): + ax = fig.add_subplot(gs[i, 0:max_left_cols]) + self._make_table(ax, l, title=label, height=1.0/max_left_rows) + # right + ax = plt.subplot(gs[:, max_left_cols:]) + self._make_table(ax, right, title='Result', height=1.05/vcells) + fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) + else: + max_rows = max([self._shape(df)[0] for df in left + [right]]) + height = 1.0 / np.max(max_rows) + gs = gridspec.GridSpec(1, hcells) + # left + i = 0 + for l, label in zip(left, labels): + sp = self._shape(l) + ax = fig.add_subplot(gs[0, i:i+sp[1]]) + self._make_table(ax, l, title=label, height=height) + i += sp[1] + # right + ax = plt.subplot(gs[0, i:]) + self._make_table(ax, right, title='Result', height=height) + fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) + + return fig + + def _conv(self, data): + """Convert each input to appropriate for table outplot""" + if isinstance(data, pd.Series): + if data.name is None: + data = data.to_frame(name='') + else: + data = data.to_frame() + data = data.fillna('NaN') + return data + + def _insert_index(self, data): + # insert is destructive + data = data.copy() + idx_nlevels = data.index.nlevels + if idx_nlevels == 1: + data.insert(0, 'Index', data.index) + else: + for i in range(idx_nlevels): + 
data.insert(i, 'Index{0}'.format(i), data.index.get_level_values(i)) + + col_nlevels = data.columns.nlevels + if col_nlevels > 1: + col = data.columns.get_level_values(0) + values = [data.columns.get_level_values(i).values for i in range(1, col_nlevels)] + col_df = pd.DataFrame(values) + data.columns = col_df.columns + data = pd.concat([col_df, data]) + data.columns = col + return data + + def _make_table(self, ax, df, title, height=None): + if df is None: + ax.set_visible(False) + return + + import pandas.tools.plotting as plotting + + idx_nlevels = df.index.nlevels + col_nlevels = df.columns.nlevels + # must be convert here to get index levels for colorization + df = self._insert_index(df) + tb = plotting.table(ax, df, loc=9) + tb.set_fontsize(self.font_size) + + if height is None: + height = 1.0 / (len(df) + 1) + + props = tb.properties() + for (r, c), cell in compat.iteritems(props['celld']): + if c == -1: + cell.set_visible(False) + elif r < col_nlevels and c < idx_nlevels: + cell.set_visible(False) + elif r < col_nlevels or c < idx_nlevels: + cell.set_facecolor('#AAAAAA') + cell.set_height(height) + + ax.set_title(title, size=self.font_size) + ax.axis('off') + + +if __name__ == "__main__": + import pandas as pd + import matplotlib.pyplot as plt + + p = TablePlotter() + + df1 = pd.DataFrame({'A': [10, 11, 12], + 'B': [20, 21, 22], + 'C': [30, 31, 32]}) + df2 = pd.DataFrame({'A': [10, 12], + 'C': [30, 32]}) + + p.plot([df1, df2], pd.concat([df1, df2]), + labels=['df1', 'df2'], vertical=True) + plt.show() + + df3 = pd.DataFrame({'X': [10, 12], + 'Z': [30, 32]}) + + p.plot([df1, df3], pd.concat([df1, df3], axis=1), + labels=['df1', 'df2'], vertical=False) + plt.show() + + idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'), + (2, 'A'), (2, 'B'), (2, 'C')]) + col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')]) + df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6], + 'v2': [5, 6, 7, 8, 9, 10]}, + index=idx) + df3.columns = col + p.plot(df3, df3, 
labels=['df3']) + plt.show() diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 3d9a0e7b43634..55f95b602779f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -25,11 +25,6 @@ import pandas as pd from pandas.core.common import is_sequence, array_equivalent, is_list_like -import pandas.core.index as index -import pandas.core.series as series -import pandas.core.frame as frame -import pandas.core.panel as panel -import pandas.core.panel4d as panel4d import pandas.compat as compat from pandas.compat import( filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter, @@ -38,24 +33,12 @@ from pandas.computation import expressions as expr -from pandas import bdate_range -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.tdi import TimedeltaIndex -from pandas.tseries.period import PeriodIndex +from pandas import (bdate_range, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, + Index, MultiIndex, Series, DataFrame, Panel, Panel4D) from pandas.util.decorators import deprecate - from pandas import _testing - - from pandas.io.common import urlopen -Index = index.Index -MultiIndex = index.MultiIndex -Series = series.Series -DataFrame = frame.DataFrame -Panel = panel.Panel -Panel4D = panel4d.Panel4D - N = 30 K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False @@ -331,19 +314,21 @@ def get_locales(prefix=None, normalize=True, # raw_locales is "\n" seperated list of locales # it may contain non-decodable parts, so split # extract what we can and then rejoin. 
- raw_locales = [] + raw_locales = raw_locales.split(b'\n') + out_locales = [] for x in raw_locales: - try: - raw_locales.append(str(x, encoding=pd.options.display.encoding)) - except: - pass + if compat.PY3: + out_locales.append(str(x, encoding=pd.options.display.encoding)) + else: + out_locales.append(str(x)) + except TypeError: pass if prefix is None: - return _valid_locales(raw_locales, normalize) + return _valid_locales(out_locales, normalize) - found = re.compile('%s.*' % prefix).findall('\n'.join(raw_locales)) + found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales)) return _valid_locales(found, normalize) @@ -548,14 +533,16 @@ def assert_equal(a, b, msg=""): assert a == b, "%s: %r != %r" % (msg.format(a,b), a, b) -def assert_index_equal(left, right): +def assert_index_equal(left, right, exact=False, check_names=True): assert_isinstance(left, Index, '[index] ') assert_isinstance(right, Index, '[index] ') - if not left.equals(right): + if not left.equals(right) or (exact and type(left) != type(right)): raise AssertionError("[index] left [{0} {1}], right [{2} {3}]".format(left.dtype, left, right, right.dtype)) + if check_names: + assert_attr_equal('names', left, right) def assert_attr_equal(attr, left, right): @@ -625,6 +612,7 @@ def assertNotIsInstance(obj, cls, msg=''): def assert_categorical_equal(res, exp): + if not array_equivalent(res.categories, exp.categories): raise AssertionError( 'categories not equivalent: {0} vs {1}.'.format(res.categories, @@ -679,7 +667,8 @@ def assert_series_equal(left, right, check_dtype=True, check_index_type=False, check_series_type=False, check_less_precise=False, - check_exact=False): + check_exact=False, + check_names=True): if check_series_type: assert_isinstance(left, type(right)) if check_dtype: @@ -694,7 +683,7 @@ def assert_series_equal(left, right, check_dtype=True, assert_almost_equal( left.index.values, right.index.values, check_less_precise) else: - assert_index_equal(left.index, right.index) + 
assert_index_equal(left.index, right.index, check_names=check_names) if check_index_type: for level in range(left.index.nlevels): lindex = left.index.get_level_values(level) @@ -703,6 +692,7 @@ def assert_series_equal(left, right, check_dtype=True, assert_attr_equal('dtype', lindex, rindex) assert_attr_equal('inferred_type', lindex, rindex) + # This could be refactored to use the NDFrame.equals method def assert_frame_equal(left, right, check_dtype=True, check_index_type=False, @@ -723,8 +713,7 @@ def assert_frame_equal(left, right, check_dtype=True, assert_almost_equal(left.index, right.index) else: if not by_blocks: - assert_index_equal(left.columns, right.columns) - assert_index_equal(left.index, right.index) + assert_index_equal(left.columns, right.columns, check_names=check_names) # compare by blocks if by_blocks: @@ -733,7 +722,7 @@ def assert_frame_equal(left, right, check_dtype=True, for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): assert dtype in lblocks assert dtype in rblocks - assert_frame_equal(lblocks[dtype],rblocks[dtype],check_dtype=check_dtype) + assert_frame_equal(lblocks[dtype],rblocks[dtype], check_dtype=check_dtype) # compare by columns else: @@ -745,7 +734,8 @@ def assert_frame_equal(left, right, check_dtype=True, check_dtype=check_dtype, check_index_type=check_index_type, check_less_precise=check_less_precise, - check_exact=check_exact) + check_exact=check_exact, + check_names=check_names) if check_index_type: for level in range(left.index.nlevels): @@ -766,14 +756,15 @@ def assert_frame_equal(left, right, check_dtype=True, def assert_panelnd_equal(left, right, check_panel_type=False, check_less_precise=False, - assert_func=assert_frame_equal): + assert_func=assert_frame_equal, + check_names=False): if check_panel_type: assert_isinstance(left, type(right)) for axis in ['items', 'major_axis', 'minor_axis']: left_ind = getattr(left, axis) right_ind = getattr(right, axis) - assert_index_equal(left_ind, right_ind) + 
assert_index_equal(left_ind, right_ind, check_names=check_names) for i, item in enumerate(left._get_axis(0)): assert item in right, "non-matching item (right) '%s'" % item @@ -825,6 +816,11 @@ def makeStringIndex(k=10): def makeUnicodeIndex(k=10): return Index(randu_array(nchars=10, size=k)) +def makeCategoricalIndex(k=10, n=3): + """ make a length k index or n categories """ + x = rands_array(nchars=4, size=n) + return CategoricalIndex(np.random.choice(x,k)) + def makeBoolIndex(k=10): if k == 1: return Index([True]) @@ -1631,7 +1627,7 @@ class _AssertRaisesContextmanager(object): def __init__(self, exception, regexp=None, *args, **kwargs): self.exception = exception if regexp is not None and not hasattr(regexp, "search"): - regexp = re.compile(regexp) + regexp = re.compile(regexp, re.DOTALL) self.regexp = regexp def __enter__(self): diff --git a/setup.py b/setup.py index d4ef6a1d896d8..4375aa550f020 100755 --- a/setup.py +++ b/setup.py @@ -11,13 +11,17 @@ import shutil import warnings import re +from distutils.version import LooseVersion # may need to work around setuptools bug by providing a fake Pyrex +min_cython_ver = '0.19.1' try: import Cython sys.path.insert(0, os.path.join(os.path.dirname(__file__), "fake_pyrex")) + ver = Cython.__version__ + _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) except ImportError: - pass + _CYTHON_INSTALLED = False # try bootstrapping setuptools if it doesn't exist try: @@ -74,6 +78,8 @@ from distutils.command.build_ext import build_ext as _build_ext try: + if not _CYTHON_INSTALLED: + raise ImportError('No supported version of Cython installed.') from Cython.Distutils import build_ext as _build_ext # from Cython.Distutils import Extension # to get pyrex debugging symbols cython = True @@ -188,7 +194,7 @@ def build_extensions(self): MAJOR = 0 MINOR = 16 -MICRO = 0 +MICRO = 1 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) QUALIFIER = '' @@ -596,7 +602,7 @@ def pxd(name): ], package_data={'pandas.io': 
['tests/data/legacy_hdf/*.h5', 'tests/data/legacy_pickle/*/*.pickle', - 'tests/data/*.csv', + 'tests/data/*.csv*', 'tests/data/*.dta', 'tests/data/*.txt', 'tests/data/*.xls', diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py index f0c3961ae0277..57fb1ada78691 100644 --- a/vb_suite/timeseries.py +++ b/vb_suite/timeseries.py @@ -135,6 +135,16 @@ def date_range(start=None, end=None, periods=None, freq=None): Benchmark("ts.resample('D', how='mean')", setup, start_date=datetime(2012, 4, 25)) +# GH 7754 +setup = common_setup + """ +rng = date_range(start='2000-01-01 00:00:00', + end='2000-01-01 10:00:00', freq='555000U') +int_ts = Series(5, rng, dtype='int64') +ts = int_ts.astype('datetime64[ns]') +""" + +timeseries_resample_datetime64 = Benchmark("ts.resample('1S', how='last')", setup) + #---------------------------------------------------------------------- # to_datetime
closes #9266
https://api.github.com/repos/pandas-dev/pandas/pulls/9350
2015-01-24T21:16:23Z
2015-07-15T12:42:57Z
null
2015-07-15T12:42:57Z
Fix gbq client to only return results when jobCompleted is True.
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 572a8be5c65e8..1b39bceed30ad 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -185,7 +185,7 @@ def run_query(self, query): job_reference = query_reply['jobReference'] - while(not 'jobComplete' in query_reply): + while(not 'jobComplete' in query_reply and not query_reply['jobComplete']): print('Job not yet complete...') query_reply = job_collection.getQueryResults( projectId=job_reference['projectId'],
When polling for a long-running gbq job to determine if it is complete, we should only return results once `query_results['jobCompleted']` is `True`, not just when the `jobCompleted` key exists. Otherwise, the gbq client thinks results exist and it will start attempting to parse the results, leading to a weird KeyError: ``` /Library/Python/2.7/site-packages/pandas/io/gbq.pyc in read_gbq(query, project_id, index_col, col_order, reauth) 368▓ 369 connector = GbqConnector(project_id, reauth = reauth) --> 370 schema, pages = connector.run_query(query) 371 dataframe_list = [] 372 while len(pages) > 0: /Library/Python/2.7/site-packages/pandas/io/gbq.pyc in run_query(self, query) 192 jobId=job_reference['jobId']).execute() 193▓ --> 194 total_rows = int(query_reply['totalRows']) 195 result_pages = list() 196 seen_page_tokens = list() KeyError: 'totalRows' ``` This simple patch accounts for the case where `query_results['jobCompleted']` is `False`.
https://api.github.com/repos/pandas-dev/pandas/pulls/9348
2015-01-23T23:15:05Z
2015-01-23T23:40:28Z
null
2015-01-23T23:40:41Z
Categorical: don't sort the categoricals if Categorical(..., ordered=False)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index fe8b1079f0942..8d7d8e2dbb947 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -268,7 +268,7 @@ def __init__(self, values, categories=None, ordered=None, name=None, fastpath=Fa if categories is None: try: - codes, categories = factorize(values, sort=True) + codes, categories = factorize(values, sort=ordered if not ordered is None else True) # If the underlying data structure was sortable, and the user doesn't want to # "forget" this order, the categorical also is sorted/ordered if ordered is None: diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 4852e142d2f29..9e61fa5c27cb8 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -169,6 +169,20 @@ def f(): c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) cat = Categorical([1,2], categories=[1,2,3]) + # if the categorical is constructed without ordering, use the "order of appearance" in + # the categories instead of sorting the lexiographicaly. + # see https://github.com/mwaskom/seaborn/issues/361 for a discussion on this topic + c1 = Categorical(["a", "c", "b", "a"], ordered=False) + self.assert_numpy_array_equal(c1.categories, np.array(["a","c","b"])) + # mae sure that construction with (implicit) ordered=True sorts the categories + c2 = Categorical(["a", "c", "b", "a"]) + self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"])) + c2 = Categorical(["a", "c", "b", "a"], ordered=True) + self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"])) + # ensure that the order in the categories is preserved when setting ordered=False + c2.ordered = False + self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"])) + def test_constructor_with_generator(self): # This was raising an Error in isnull(single_val).any() because isnull returned a scalar # for a generator
In https://github.com/mwaskom/seaborn/issues/361 it was discussed that lexicographical sorting the categories is only appropiate if an order is specified/implied. If this is explicitly not done, e.g. with `Categorical(..., ordered=False)` then the order should be taken from the order of appearance, similar to the current `Series.unique()` implementation.
https://api.github.com/repos/pandas-dev/pandas/pulls/9347
2015-01-23T21:58:53Z
2015-03-07T23:14:32Z
null
2016-04-03T18:40:32Z
BUG: Fixes GH9311 groupby on datetime64
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 9e1546f5e50a9..cd7cdbb645686 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -187,6 +187,8 @@ Bug Fixes - Bug in the returned ``Series.dt.components`` index was reset to the default index (:issue:`9247`) - Bug in ``Categorical.__getitem__/__setitem__`` with listlike input getting incorrect results from indexer coercion (:issue:`9469`) - Bug in partial setting with a DatetimeIndex (:issue:`9478`) +- Bug in groupby for integer and datetime64 columns when applying an aggregator that caused the value to be + changed when the number was sufficiently large (:issue:`9311`, :issue:`6620`) - Fixed bug in ``to_sql`` when mapping a ``Timestamp`` object column (datetime column with timezone info) to the according sqlalchemy type (:issue:`9085`). - Fixed bug in ``to_sql`` ``dtype`` argument not accepting an instantiated diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 440c0966ac066..9d5fde5600be3 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -24,7 +24,8 @@ from pandas.core.common import(_possibly_downcast_to_dtype, isnull, notnull, _DATELIKE_DTYPES, is_numeric_dtype, is_timedelta64_dtype, is_datetime64_dtype, - is_categorical_dtype, _values_from_object) + is_categorical_dtype, _values_from_object, + _is_datetime_or_timedelta_dtype, is_bool_dtype) from pandas.core.config import option_context import pandas.lib as lib from pandas.lib import Timestamp @@ -1444,7 +1445,9 @@ def get_func(fname): f = getattr(_algos, "%s_%s" % (fname, dtype_str), None) if f is not None: return f - return getattr(_algos, fname, None) + + if dtype_str == 'float64': + return getattr(_algos, fname, None) ftype = self._cython_functions[how] @@ -1471,7 +1474,6 @@ def wrapper(*args, **kwargs): return func, dtype_str def aggregate(self, values, how, axis=0): - arity = self._cython_arity.get(how, 1) vdim = values.ndim @@ -1487,27 +1489,44 @@ def 
aggregate(self, values, how, axis=0): raise NotImplementedError out_shape = (self.ngroups,) + values.shape[1:] - if is_numeric_dtype(values.dtype): - values = com.ensure_float(values) - is_numeric = True - out_dtype = 'f%d' % values.dtype.itemsize + is_numeric = is_numeric_dtype(values.dtype) + + if _is_datetime_or_timedelta_dtype(values.dtype): + values = values.view('int64') + elif is_bool_dtype(values.dtype): + values = _algos.ensure_float64(values) + elif com.is_integer_dtype(values): + values = values.astype('int64', copy=False) + elif is_numeric: + values = _algos.ensure_float64(values) else: - is_numeric = issubclass(values.dtype.type, (np.datetime64, - np.timedelta64)) + values = values.astype(object) + + try: + agg_func, dtype_str = self._get_aggregate_function(how, values) + except NotImplementedError: if is_numeric: - out_dtype = 'float64' - values = values.view('int64') + values = _algos.ensure_float64(values) + agg_func, dtype_str = self._get_aggregate_function(how, values) else: - out_dtype = 'object' - values = values.astype(object) + raise + + if is_numeric: + out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize) + else: + out_dtype = 'object' # will be filled in Cython function result = np.empty(out_shape, dtype=out_dtype) - result.fill(np.nan) counts = np.zeros(self.ngroups, dtype=np.int64) - result = self._aggregate(result, counts, values, how, is_numeric) + result = self._aggregate(result, counts, values, agg_func, is_numeric) + + if com.is_integer_dtype(result): + if len(result[result == tslib.iNaT]) > 0: + result = result.astype('float64') + result[result == tslib.iNaT] = np.nan if self._filter_empty_groups and not counts.all(): if result.ndim == 2: @@ -1535,9 +1554,7 @@ def aggregate(self, values, how, axis=0): return result, names - def _aggregate(self, result, counts, values, how, is_numeric): - agg_func, dtype = self._get_aggregate_function(how, values) - + def _aggregate(self, result, counts, values, agg_func, is_numeric): 
comp_ids, _, ngroups = self.group_info if values.ndim > 3: # punting for now @@ -1796,9 +1813,7 @@ def size(self): 'ohlc': lambda *args: ['open', 'high', 'low', 'close'] } - def _aggregate(self, result, counts, values, how, is_numeric=True): - - agg_func, dtype = self._get_aggregate_function(how, values) + def _aggregate(self, result, counts, values, agg_func, is_numeric=True): if values.ndim > 3: # punting for now @@ -2535,9 +2550,6 @@ def _cython_agg_blocks(self, how, numeric_only=True): values = block._try_operate(block.values) - if block.is_numeric: - values = _algos.ensure_float64(values) - result, _ = self.grouper.aggregate(values, how, axis=agg_axis) # see if we can cast the block back to the original dtype diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f4abe05097cff..6cf7fa5888539 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1811,10 +1811,7 @@ def _try_coerce_args(self, values, other): def _try_coerce_result(self, result): """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): - if result.dtype == 'i8': - result = tslib.array_to_datetime( - result.astype(object).ravel()).reshape(result.shape) - elif result.dtype.kind in ['i', 'f', 'O']: + if result.dtype.kind in ['i', 'f', 'O']: result = result.astype('M8[ns]') elif isinstance(result, (np.integer, np.datetime64)): result = lib.Timestamp(result) diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py index d04f55bb19fff..575fcf386f570 100644 --- a/pandas/src/generate_code.py +++ b/pandas/src/generate_code.py @@ -3,6 +3,9 @@ # don't introduce a pandas/pandas.compat import # or we get a bootstrapping problem from StringIO import StringIO +import numpy as np + +_int64_max = np.iinfo(np.int64).max header = """ cimport numpy as np @@ -680,7 +683,7 @@ def group_last_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, 
j] = resx[i, j] """ @@ -726,7 +729,7 @@ def group_last_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = resx[i, j] """ @@ -773,7 +776,7 @@ def group_nth_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = resx[i, j] """ @@ -819,7 +822,7 @@ def group_nth_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = resx[i, j] """ @@ -1278,7 +1281,7 @@ def group_min_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, nobs = np.zeros_like(out) minx = np.empty_like(out) - minx.fill(np.inf) + minx.fill(%(inf_val)s) if bins[len(bins) - 1] == len(values): ngroups = len(bins) @@ -1319,7 +1322,7 @@ def group_min_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = minx[i, j] """ @@ -1344,7 +1347,7 @@ def group_max_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, nobs = np.zeros_like(out) maxx = np.empty_like(out) - maxx.fill(-np.inf) + maxx.fill(-%(inf_val)s) N, K = (<object> values).shape @@ -1381,7 +1384,7 @@ def group_max_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = maxx[i, j] """ @@ -1402,7 +1405,7 @@ def group_max_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, nobs = np.zeros_like(out) maxx = np.empty_like(out) - maxx.fill(-np.inf) + maxx.fill(-%(inf_val)s) if bins[len(bins) - 1] == len(values): ngroups = len(bins) @@ -1443,7 +1446,7 @@ def group_max_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: - 
out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = maxx[i, j] """ @@ -1469,7 +1472,7 @@ def group_min_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, nobs = np.zeros_like(out) minx = np.empty_like(out) - minx.fill(np.inf) + minx.fill(%(inf_val)s) N, K = (<object> values).shape @@ -1506,7 +1509,7 @@ def group_min_%(name)s(ndarray[%(dest_type2)s, ndim=2] out, for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = %(nan_val)s else: out[i, j] = minx[i, j] """ @@ -2286,6 +2289,70 @@ def generate_put_template(template, use_ints=True, use_floats=True, output.write(func) return output.getvalue() +def generate_put_min_max_template(template, use_ints=True, use_floats=True, + use_objects=False, use_datelikes=False): + floats_list = [ + ('float64', 'float64_t', 'nan', 'np.inf'), + ('float32', 'float32_t', 'nan', 'np.inf'), + ] + ints_list = [ + ('int64', 'int64_t', 'iNaT', _int64_max), + ] + date_like_list = [ + ('int64', 'int64_t', 'iNaT', _int64_max), + ] + object_list = [('object', 'object', 'nan', 'np.inf')] + function_list = [] + if use_floats: + function_list.extend(floats_list) + if use_ints: + function_list.extend(ints_list) + if use_objects: + function_list.extend(object_list) + if use_datelikes: + function_list.extend(date_like_list) + + output = StringIO() + for name, dest_type, nan_val, inf_val in function_list: + func = template % {'name': name, + 'dest_type2': dest_type, + 'nan_val': nan_val, + 'inf_val': inf_val} + output.write(func) + return output.getvalue() + +def generate_put_selection_template(template, use_ints=True, use_floats=True, + use_objects=False, use_datelikes=False): + floats_list = [ + ('float64', 'float64_t', 'float64_t', 'nan'), + ('float32', 'float32_t', 'float32_t', 'nan'), + ] + ints_list = [ + ('int64', 'int64_t', 'int64_t', 'iNaT'), + ] + date_like_list = [ + ('int64', 'int64_t', 'int64_t', 'iNaT'), + ] + object_list = [('object', 'object', 'object', 'nan')] + function_list = [] + if 
use_floats: + function_list.extend(floats_list) + if use_ints: + function_list.extend(ints_list) + if use_objects: + function_list.extend(object_list) + if use_datelikes: + function_list.extend(date_like_list) + + output = StringIO() + for name, c_type, dest_type, nan_val in function_list: + func = template % {'name': name, + 'c_type': c_type, + 'dest_type2': dest_type, + 'nan_val': nan_val} + output.write(func) + return output.getvalue() + def generate_take_template(template, exclude=None): # name, dest, ctypein, ctypeout, preval, postval, cancopy function_list = [ @@ -2347,11 +2414,8 @@ def generate_from_template(template, exclude=None): return output.getvalue() put_2d = [diff_2d_template] -groupbys = [group_last_template, - group_last_bin_template, - group_nth_template, - group_nth_bin_template, - group_add_template, + +groupbys = [group_add_template, group_add_bin_template, group_prod_template, group_prod_bin_template, @@ -2359,12 +2423,18 @@ def generate_from_template(template, exclude=None): group_var_bin_template, group_mean_template, group_mean_bin_template, - group_min_template, - group_min_bin_template, - group_max_template, - group_max_bin_template, group_ohlc_template] +groupby_selection = [group_last_template, + group_last_bin_template, + group_nth_template, + group_nth_bin_template] + +groupby_min_max = [group_min_template, + group_min_bin_template, + group_max_template, + group_max_bin_template] + groupby_count = [group_count_template, group_count_bin_template] templates_1d = [map_indices_template, @@ -2407,9 +2477,18 @@ def generate_take_cython_file(path='generated.pyx'): for template in groupbys: print(generate_put_template(template, use_ints=False), file=f) + for template in groupby_selection: + print(generate_put_selection_template(template, use_ints=True), + file=f) + + for template in groupby_min_max: + print(generate_put_min_max_template(template, use_ints=True), + file=f) + for template in groupby_count: - 
print(generate_put_template(template, use_ints=False, - use_datelikes=True, use_objects=True), + print(generate_put_selection_template(template, use_ints=True, + use_datelikes=True, + use_objects=True), file=f) # for template in templates_1d_datetime: diff --git a/pandas/src/generated.pyx b/pandas/src/generated.pyx index 01c80518ca21a..cab3a84f6ffe8 100644 --- a/pandas/src/generated.pyx +++ b/pandas/src/generated.pyx @@ -4845,391 +4845,487 @@ def diff_2d_int64(ndarray[int64_t, ndim=2] arr, for j in range(start, stop): out[i, j] = arr[i, j] - arr[i, j - periods] +@cython.boundscheck(False) @cython.wraparound(False) -@cython.wraparound(False) -def group_last_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): +def group_add_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, lab float64_t val, count - ndarray[float64_t, ndim=2] resx - ndarray[int64_t, ndim=2] nobs + ndarray[float64_t, ndim=2] sumx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty_like(out) + nobs = np.zeros_like(out) + sumx = np.zeros_like(out) N, K = (<object> values).shape - for i in range(N): - lab = labels[i] - if lab < 0: - continue + if K > 1: + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - for j in range(K): - val = values[i, j] + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + sumx[lab, j] += val + else: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + val = values[i, 0] # not nan if val == val: - nobs[lab, j] += 1 - resx[lab, j] = val + nobs[lab, 0] += 1 + sumx[lab, 0] += val for i in range(len(counts)): for j in range(K): 
if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] -@cython.wraparound(False) + out[i, j] = sumx[i, j] +@cython.boundscheck(False) @cython.wraparound(False) -def group_last_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels): +def group_add_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, lab float32_t val, count - ndarray[float32_t, ndim=2] resx - ndarray[int64_t, ndim=2] nobs + ndarray[float32_t, ndim=2] sumx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty_like(out) + nobs = np.zeros_like(out) + sumx = np.zeros_like(out) N, K = (<object> values).shape - for i in range(N): - lab = labels[i] - if lab < 0: - continue + if K > 1: + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - for j in range(K): - val = values[i, j] + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + sumx[lab, j] += val + else: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + val = values[i, 0] # not nan if val == val: - nobs[lab, j] += 1 - resx[lab, j] = val + nobs[lab, 0] += 1 + sumx[lab, 0] += val for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] + out[i, j] = sumx[i, j] +@cython.boundscheck(False) @cython.wraparound(False) -@cython.wraparound(False) -def group_last_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): +def group_add_bin_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] bins): ''' 
Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, ngroups, b, nbins float64_t val, count - ndarray[float64_t, ndim=2] resx, nobs + ndarray[float64_t, ndim=2] sumx, nobs nobs = np.zeros_like(out) - resx = np.empty_like(out) + sumx = np.zeros_like(out) if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 - N, K = (<object> values).shape b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + if K > 1: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[b] += 1 - for j in range(K): - val = values[i, j] + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[b, j] += 1 + sumx[b, j] += val + else: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + val = values[i, 0] # not nan if val == val: - nobs[b, j] += 1 - resx[b, j] = val + nobs[b, 0] += 1 + sumx[b, 0] += val for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] -@cython.wraparound(False) + out[i, j] = sumx[i, j] +@cython.boundscheck(False) @cython.wraparound(False) -def group_last_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): +def group_add_bin_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, ngroups, b + Py_ssize_t i, j, N, K, ngroups, b, nbins float32_t val, count - ndarray[float32_t, ndim=2] resx, nobs + ndarray[float32_t, ndim=2] sumx, nobs nobs = np.zeros_like(out) - resx = np.empty_like(out) + sumx = np.zeros_like(out) if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 - N, K = (<object> values).shape b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - 
b += 1 + if K > 1: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[b] += 1 - for j in range(K): - val = values[i, j] + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[b, j] += 1 + sumx[b, j] += val + else: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + val = values[i, 0] # not nan if val == val: - nobs[b, j] += 1 - resx[b, j] = val + nobs[b, 0] += 1 + sumx[b, 0] += val for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] + out[i, j] = sumx[i, j] @cython.boundscheck(False) @cython.wraparound(False) -def group_nth_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels, int64_t rank): +def group_prod_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, lab float64_t val, count - ndarray[float64_t, ndim=2] resx - ndarray[int64_t, ndim=2] nobs + ndarray[float64_t, ndim=2] prodx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty_like(out) + nobs = np.zeros_like(out) + prodx = np.ones_like(out) N, K = (<object> values).shape - for i in range(N): - lab = labels[i] - if lab < 0: - continue + if K > 1: + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - for j in range(K): - val = values[i, j] + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + prodx[lab, j] *= val + else: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + val = values[i, 0] # not nan if val == val: - nobs[lab, j] += 1 - if nobs[lab, j] == rank: - resx[lab, j] = val + 
nobs[lab, 0] += 1 + prodx[lab, 0] *= val for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] + out[i, j] = prodx[i, j] @cython.boundscheck(False) @cython.wraparound(False) -def group_nth_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels, int64_t rank): +def group_prod_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, lab float32_t val, count - ndarray[float32_t, ndim=2] resx - ndarray[int64_t, ndim=2] nobs + ndarray[float32_t, ndim=2] prodx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") - nobs = np.zeros((<object> out).shape, dtype=np.int64) - resx = np.empty_like(out) + nobs = np.zeros_like(out) + prodx = np.ones_like(out) N, K = (<object> values).shape - for i in range(N): - lab = labels[i] - if lab < 0: - continue + if K > 1: + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - for j in range(K): - val = values[i, j] + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + prodx[lab, j] *= val + else: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + val = values[i, 0] # not nan if val == val: - nobs[lab, j] += 1 - if nobs[lab, j] == rank: - resx[lab, j] = val + nobs[lab, 0] += 1 + prodx[lab, 0] *= val for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] + out[i, j] = prodx[i, j] @cython.boundscheck(False) @cython.wraparound(False) -def group_nth_bin_float64(ndarray[float64_t, ndim=2] out, +def group_prod_bin_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins, int64_t rank): + 
ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, ngroups, b float64_t val, count - ndarray[float64_t, ndim=2] resx, nobs + ndarray[float64_t, ndim=2] prodx, nobs nobs = np.zeros_like(out) - resx = np.empty_like(out) + prodx = np.ones_like(out) if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 - N, K = (<object> values).shape b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + if K > 1: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[b] += 1 - for j in range(K): - val = values[i, j] + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[b, j] += 1 + prodx[b, j] *= val + else: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + val = values[i, 0] # not nan if val == val: - nobs[b, j] += 1 - if nobs[b, j] == rank: - resx[b, j] = val + nobs[b, 0] += 1 + prodx[b, 0] *= val for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] + out[i, j] = prodx[i, j] @cython.boundscheck(False) @cython.wraparound(False) -def group_nth_bin_float32(ndarray[float32_t, ndim=2] out, +def group_prod_bin_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins, int64_t rank): + ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, ngroups, b float32_t val, count - ndarray[float32_t, ndim=2] resx, nobs + ndarray[float32_t, ndim=2] prodx, nobs nobs = np.zeros_like(out) - resx = np.empty_like(out) + prodx = np.ones_like(out) if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 - N, K = (<object> values).shape b = 0 - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + if K > 1: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[b] += 1 - for 
j in range(K): - val = values[i, j] + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[b, j] += 1 + prodx[b, j] *= val + else: + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + val = values[i, 0] # not nan if val == val: - nobs[b, j] += 1 - if nobs[b, j] == rank: - resx[b, j] = val + nobs[b, 0] += 1 + prodx[b, 0] *= val for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = resx[i, j] + out[i, j] = prodx[i, j] -@cython.boundscheck(False) @cython.wraparound(False) -def group_add_float64(ndarray[float64_t, ndim=2] out, +@cython.boundscheck(False) +def group_var_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' cdef: Py_ssize_t i, j, N, K, lab - float64_t val, count - ndarray[float64_t, ndim=2] sumx, nobs + float64_t val, ct + ndarray[float64_t, ndim=2] nobs, sumx, sumxx if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) sumx = np.zeros_like(out) + sumxx = np.zeros_like(out) N, K = (<object> values).shape if K > 1: for i in range(N): + lab = labels[i] if lab < 0: continue counts[lab] += 1 + for j in range(K): val = values[i, j] @@ -5237,55 +5333,60 @@ def group_add_float64(ndarray[float64_t, ndim=2] out, if val == val: nobs[lab, j] += 1 sumx[lab, j] += val + sumxx[lab, j] += val * val else: for i in range(N): + lab = labels[i] if lab < 0: continue counts[lab] += 1 val = values[i, 0] - # not nan if val == val: nobs[lab, 0] += 1 sumx[lab, 0] += val + sumxx[lab, 0] += val * val + for i in range(len(counts)): for j in range(K): - if nobs[i, j] == 0: + ct = nobs[i, j] + if ct < 2: out[i, j] = nan else: - out[i, j] = sumx[i, j] -@cython.boundscheck(False) + out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / + (ct * ct - ct)) @cython.wraparound(False) -def 
group_add_float32(ndarray[float32_t, ndim=2] out, +@cython.boundscheck(False) +def group_var_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' cdef: Py_ssize_t i, j, N, K, lab - float32_t val, count - ndarray[float32_t, ndim=2] sumx, nobs + float32_t val, ct + ndarray[float32_t, ndim=2] nobs, sumx, sumxx if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) sumx = np.zeros_like(out) + sumxx = np.zeros_like(out) N, K = (<object> values).shape if K > 1: for i in range(N): + lab = labels[i] if lab < 0: continue counts[lab] += 1 + for j in range(K): val = values[i, j] @@ -5293,48 +5394,53 @@ def group_add_float32(ndarray[float32_t, ndim=2] out, if val == val: nobs[lab, j] += 1 sumx[lab, j] += val + sumxx[lab, j] += val * val else: for i in range(N): + lab = labels[i] if lab < 0: continue counts[lab] += 1 val = values[i, 0] - # not nan if val == val: nobs[lab, 0] += 1 sumx[lab, 0] += val + sumxx[lab, 0] += val * val + for i in range(len(counts)): for j in range(K): - if nobs[i, j] == 0: + ct = nobs[i, j] + if ct < 2: out[i, j] = nan else: - out[i, j] = sumx[i, j] + out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / + (ct * ct - ct)) -@cython.boundscheck(False) @cython.wraparound(False) -def group_add_bin_float64(ndarray[float64_t, ndim=2] out, +@cython.boundscheck(False) +def group_var_bin_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' + cdef: - Py_ssize_t i, j, N, K, ngroups, b, nbins - float64_t val, count - ndarray[float64_t, ndim=2] sumx, nobs + Py_ssize_t i, j, N, K, ngroups, b + float64_t val, ct + ndarray[float64_t, ndim=2] nobs, sumx, sumxx nobs = np.zeros_like(out) sumx = np.zeros_like(out) + sumxx = np.zeros_like(out) if bins[len(bins) - 1] == len(values): ngroups = 
len(bins) else: ngroups = len(bins) + 1 + N, K = (<object> values).shape b = 0 @@ -5344,6 +5450,7 @@ def group_add_bin_float64(ndarray[float64_t, ndim=2] out, b += 1 counts[b] += 1 + for j in range(K): val = values[i, j] @@ -5351,6 +5458,7 @@ def group_add_bin_float64(ndarray[float64_t, ndim=2] out, if val == val: nobs[b, j] += 1 sumx[b, j] += val + sumxx[b, j] += val * val else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: @@ -5363,34 +5471,37 @@ def group_add_bin_float64(ndarray[float64_t, ndim=2] out, if val == val: nobs[b, 0] += 1 sumx[b, 0] += val + sumxx[b, 0] += val * val for i in range(ngroups): for j in range(K): - if nobs[i, j] == 0: + ct = nobs[i, j] + if ct < 2: out[i, j] = nan else: - out[i, j] = sumx[i, j] -@cython.boundscheck(False) + out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / + (ct * ct - ct)) @cython.wraparound(False) -def group_add_bin_float32(ndarray[float32_t, ndim=2] out, +@cython.boundscheck(False) +def group_var_bin_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' + cdef: - Py_ssize_t i, j, N, K, ngroups, b, nbins - float32_t val, count - ndarray[float32_t, ndim=2] sumx, nobs + Py_ssize_t i, j, N, K, ngroups, b + float32_t val, ct + ndarray[float32_t, ndim=2] nobs, sumx, sumxx nobs = np.zeros_like(out) sumx = np.zeros_like(out) + sumxx = np.zeros_like(out) if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 + N, K = (<object> values).shape b = 0 @@ -5400,6 +5511,7 @@ def group_add_bin_float32(ndarray[float32_t, ndim=2] out, b += 1 counts[b] += 1 + for j in range(K): val = values[i, j] @@ -5407,6 +5519,7 @@ def group_add_bin_float32(ndarray[float32_t, ndim=2] out, if val == val: nobs[b, j] += 1 sumx[b, j] += val + sumxx[b, j] += val * val else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: @@ -5419,33 +5532,33 @@ def 
group_add_bin_float32(ndarray[float32_t, ndim=2] out, if val == val: nobs[b, 0] += 1 sumx[b, 0] += val + sumxx[b, 0] += val * val for i in range(ngroups): for j in range(K): - if nobs[i, j] == 0: + ct = nobs[i, j] + if ct < 2: out[i, j] = nan else: - out[i, j] = sumx[i, j] + out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / + (ct * ct - ct)) -@cython.boundscheck(False) @cython.wraparound(False) -def group_prod_float64(ndarray[float64_t, ndim=2] out, +@cython.boundscheck(False) +def group_mean_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' cdef: Py_ssize_t i, j, N, K, lab float64_t val, count - ndarray[float64_t, ndim=2] prodx, nobs + ndarray[float64_t, ndim=2] sumx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - prodx = np.ones_like(out) + sumx = np.zeros_like(out) N, K = (<object> values).shape @@ -5458,11 +5571,10 @@ def group_prod_float64(ndarray[float64_t, ndim=2] out, counts[lab] += 1 for j in range(K): val = values[i, j] - # not nan if val == val: nobs[lab, j] += 1 - prodx[lab, j] *= val + sumx[lab, j] += val else: for i in range(N): lab = labels[i] @@ -5471,37 +5583,34 @@ def group_prod_float64(ndarray[float64_t, ndim=2] out, counts[lab] += 1 val = values[i, 0] - # not nan if val == val: nobs[lab, 0] += 1 - prodx[lab, 0] *= val + sumx[lab, 0] += val for i in range(len(counts)): for j in range(K): + count = nobs[i, j] if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = prodx[i, j] -@cython.boundscheck(False) + out[i, j] = sumx[i, j] / count @cython.wraparound(False) -def group_prod_float32(ndarray[float32_t, ndim=2] out, +@cython.boundscheck(False) +def group_mean_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, ndarray[int64_t] labels): - ''' - Only aggregates on axis=0 - ''' cdef: Py_ssize_t i, j, N, K, lab 
float32_t val, count - ndarray[float32_t, ndim=2] prodx, nobs + ndarray[float32_t, ndim=2] sumx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - prodx = np.ones_like(out) + sumx = np.zeros_like(out) N, K = (<object> values).shape @@ -5514,11 +5623,10 @@ def group_prod_float32(ndarray[float32_t, ndim=2] out, counts[lab] += 1 for j in range(K): val = values[i, j] - # not nan if val == val: nobs[lab, j] += 1 - prodx[lab, j] *= val + sumx[lab, j] += val else: for i in range(N): lab = labels[i] @@ -5527,41 +5635,37 @@ def group_prod_float32(ndarray[float32_t, ndim=2] out, counts[lab] += 1 val = values[i, 0] - # not nan if val == val: nobs[lab, 0] += 1 - prodx[lab, 0] *= val + sumx[lab, 0] += val for i in range(len(counts)): for j in range(K): + count = nobs[i, j] if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = prodx[i, j] + out[i, j] = sumx[i, j] / count -@cython.boundscheck(False) -@cython.wraparound(False) -def group_prod_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' + +def group_mean_bin_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] bins): cdef: Py_ssize_t i, j, N, K, ngroups, b float64_t val, count - ndarray[float64_t, ndim=2] prodx, nobs + ndarray[float64_t, ndim=2] sumx, nobs nobs = np.zeros_like(out) - prodx = np.ones_like(out) + sumx = np.zeros_like(out) + N, K = (<object> values).shape if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 - N, K = (<object> values).shape b = 0 if K > 1: @@ -5576,7 +5680,7 @@ def group_prod_bin_float64(ndarray[float64_t, ndim=2] out, # not nan if val == val: nobs[b, j] += 1 - prodx[b, j] *= val + sumx[b, j] += val else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: @@ -5588,36 +5692,33 @@ def 
group_prod_bin_float64(ndarray[float64_t, ndim=2] out, # not nan if val == val: nobs[b, 0] += 1 - prodx[b, 0] *= val + sumx[b, 0] += val for i in range(ngroups): for j in range(K): - if nobs[i, j] == 0: + count = nobs[i, j] + if count == 0: out[i, j] = nan else: - out[i, j] = prodx[i, j] -@cython.boundscheck(False) -@cython.wraparound(False) -def group_prod_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' + out[i, j] = sumx[i, j] / count + +def group_mean_bin_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] bins): cdef: Py_ssize_t i, j, N, K, ngroups, b float32_t val, count - ndarray[float32_t, ndim=2] prodx, nobs + ndarray[float32_t, ndim=2] sumx, nobs nobs = np.zeros_like(out) - prodx = np.ones_like(out) + sumx = np.zeros_like(out) + N, K = (<object> values).shape if bins[len(bins) - 1] == len(values): ngroups = len(bins) else: ngroups = len(bins) + 1 - N, K = (<object> values).shape b = 0 if K > 1: @@ -5632,7 +5733,7 @@ def group_prod_bin_float32(ndarray[float32_t, ndim=2] out, # not nan if val == val: nobs[b, j] += 1 - prodx[b, j] *= val + sumx[b, j] += val else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: @@ -5644,153 +5745,103 @@ def group_prod_bin_float32(ndarray[float32_t, ndim=2] out, # not nan if val == val: nobs[b, 0] += 1 - prodx[b, 0] *= val + sumx[b, 0] += val for i in range(ngroups): for j in range(K): - if nobs[i, j] == 0: + count = nobs[i, j] + if count == 0: out[i, j] = nan else: - out[i, j] = prodx[i, j] + out[i, j] = sumx[i, j] / count @cython.wraparound(False) @cython.boundscheck(False) -def group_var_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): +def group_ohlc_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, 
ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' cdef: - Py_ssize_t i, j, N, K, lab - float64_t val, ct - ndarray[float64_t, ndim=2] nobs, sumx, sumxx - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) - - N, K = (<object> values).shape - - if K > 1: - for i in range(N): - - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - - for j in range(K): - val = values[i, j] + Py_ssize_t i, j, N, K, ngroups, b + float64_t val, count + float64_t vopen, vhigh, vlow, vclose, NA + bint got_first = 0 - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - sumxx[lab, j] += val * val + if bins[len(bins) - 1] == len(values): + ngroups = len(bins) else: - for i in range(N): - - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - val = values[i, 0] - # not nan - if val == val: - nobs[lab, 0] += 1 - sumx[lab, 0] += val - sumxx[lab, 0] += val * val - - - for i in range(len(counts)): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = nan - else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) -@cython.wraparound(False) -@cython.boundscheck(False) -def group_var_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels): - cdef: - Py_ssize_t i, j, N, K, lab - float32_t val, ct - ndarray[float32_t, ndim=2] nobs, sumx, sumxx - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) + ngroups = len(bins) + 1 N, K = (<object> values).shape - if K > 1: - for i in range(N): - - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 + if out.shape[1] != 4: + raise ValueError('Output array must have 4 columns') - for j in range(K): - val = values[i, j] + 
NA = np.nan - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - sumxx[lab, j] += val * val + b = 0 + if K > 1: + raise NotImplementedError else: for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + if not got_first: + out[b, 0] = NA + out[b, 1] = NA + out[b, 2] = NA + out[b, 3] = NA + else: + out[b, 0] = vopen + out[b, 1] = vhigh + out[b, 2] = vlow + out[b, 3] = vclose + b += 1 + got_first = 0 - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 + counts[b] += 1 val = values[i, 0] + # not nan if val == val: - nobs[lab, 0] += 1 - sumx[lab, 0] += val - sumxx[lab, 0] += val * val - - - for i in range(len(counts)): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = nan - else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) + if not got_first: + got_first = 1 + vopen = val + vlow = val + vhigh = val + else: + if val < vlow: + vlow = val + if val > vhigh: + vhigh = val + vclose = val + if not got_first: + out[b, 0] = NA + out[b, 1] = NA + out[b, 2] = NA + out[b, 3] = NA + else: + out[b, 0] = vopen + out[b, 1] = vhigh + out[b, 2] = vlow + out[b, 3] = vclose @cython.wraparound(False) @cython.boundscheck(False) -def group_var_bin_float64(ndarray[float64_t, ndim=2] out, +def group_ohlc_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, + ndarray[float32_t, ndim=2] values, ndarray[int64_t] bins): - + ''' + Only aggregates on axis=0 + ''' cdef: Py_ssize_t i, j, N, K, ngroups, b - float64_t val, ct - ndarray[float64_t, ndim=2] nobs, sumx, sumxx - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) + float32_t val, count + float32_t vopen, vhigh, vlow, vclose, NA + bint got_first = 0 if bins[len(bins) - 1] == len(values): ngroups = len(bins) @@ -5799,59 +5850,515 @@ def group_var_bin_float64(ndarray[float64_t, ndim=2] out, N, K = (<object> values).shape + if out.shape[1] != 4: + raise ValueError('Output 
array must have 4 columns') + + NA = np.nan + b = 0 if K > 1: + raise NotImplementedError + else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - - for j in range(K): - val = values[i, j] + if not got_first: + out[b, 0] = NA + out[b, 1] = NA + out[b, 2] = NA + out[b, 3] = NA + else: + out[b, 0] = vopen + out[b, 1] = vhigh + out[b, 2] = vlow + out[b, 3] = vclose + b += 1 + got_first = 0 + + counts[b] += 1 + val = values[i, 0] + + # not nan + if val == val: + if not got_first: + got_first = 1 + vopen = val + vlow = val + vhigh = val + else: + if val < vlow: + vlow = val + if val > vhigh: + vhigh = val + vclose = val + + if not got_first: + out[b, 0] = NA + out[b, 1] = NA + out[b, 2] = NA + out[b, 3] = NA + else: + out[b, 0] = vopen + out[b, 1] = vhigh + out[b, 2] = vlow + out[b, 3] = vclose + +@cython.wraparound(False) +@cython.wraparound(False) +def group_last_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, lab + float64_t val, count + ndarray[float64_t, ndim=2] resx + ndarray[int64_t, ndim=2] nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros((<object> out).shape, dtype=np.int64) + resx = np.empty_like(out) + + N, K = (<object> values).shape + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + resx[lab, j] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.wraparound(False) +@cython.wraparound(False) +def group_last_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + 
cdef: + Py_ssize_t i, j, N, K, lab + float32_t val, count + ndarray[float32_t, ndim=2] resx + ndarray[int64_t, ndim=2] nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros((<object> out).shape, dtype=np.int64) + resx = np.empty_like(out) + + N, K = (<object> values).shape + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + resx[lab, j] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.wraparound(False) +@cython.wraparound(False) +def group_last_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, lab + int64_t val, count + ndarray[int64_t, ndim=2] resx + ndarray[int64_t, ndim=2] nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros((<object> out).shape, dtype=np.int64) + resx = np.empty_like(out) + + N, K = (<object> values).shape + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + resx[lab, j] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = iNaT + else: + out[i, j] = resx[i, j] + +@cython.wraparound(False) +@cython.wraparound(False) +def group_last_bin_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, ngroups, b + float64_t val, count + ndarray[float64_t, ndim=2] resx, nobs + + nobs = np.zeros_like(out) + resx = np.empty_like(out) + + if bins[len(bins) - 
1] == len(values): + ngroups = len(bins) + else: + ngroups = len(bins) + 1 + + N, K = (<object> values).shape + + b = 0 + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[b, j] += 1 + resx[b, j] = val + + for i in range(ngroups): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.wraparound(False) +@cython.wraparound(False) +def group_last_bin_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, ngroups, b + float32_t val, count + ndarray[float32_t, ndim=2] resx, nobs + + nobs = np.zeros_like(out) + resx = np.empty_like(out) + + if bins[len(bins) - 1] == len(values): + ngroups = len(bins) + else: + ngroups = len(bins) + 1 + + N, K = (<object> values).shape + + b = 0 + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[b, j] += 1 + resx[b, j] = val + + for i in range(ngroups): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.wraparound(False) +@cython.wraparound(False) +def group_last_bin_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, ngroups, b + int64_t val, count + ndarray[int64_t, ndim=2] resx, nobs + + nobs = np.zeros_like(out) + resx = np.empty_like(out) + + if bins[len(bins) - 1] == len(values): + ngroups = len(bins) + else: + ngroups = len(bins) + 1 + + N, K = (<object> values).shape + + b = 0 + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + 
+ # not nan + if val == val: + nobs[b, j] += 1 + resx[b, j] = val - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - sumxx[b, j] += val * val + for i in range(ngroups): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = iNaT + else: + out[i, j] = resx[i, j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_nth_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels, int64_t rank): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, lab + float64_t val, count + ndarray[float64_t, ndim=2] resx + ndarray[int64_t, ndim=2] nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros((<object> out).shape, dtype=np.int64) + resx = np.empty_like(out) + + N, K = (<object> values).shape + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + if nobs[lab, j] == rank: + resx[lab, j] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.boundscheck(False) +@cython.wraparound(False) +def group_nth_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] labels, int64_t rank): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, lab + float32_t val, count + ndarray[float32_t, ndim=2] resx + ndarray[int64_t, ndim=2] nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros((<object> out).shape, dtype=np.int64) + resx = np.empty_like(out) + + N, K = (<object> values).shape + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, 
j] += 1 + if nobs[lab, j] == rank: + resx[lab, j] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.boundscheck(False) +@cython.wraparound(False) +def group_nth_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] labels, int64_t rank): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, lab + int64_t val, count + ndarray[int64_t, ndim=2] resx + ndarray[int64_t, ndim=2] nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros((<object> out).shape, dtype=np.int64) + resx = np.empty_like(out) + + N, K = (<object> values).shape + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + if nobs[lab, j] == rank: + resx[lab, j] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = iNaT + else: + out[i, j] = resx[i, j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_nth_bin_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] bins, int64_t rank): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, ngroups, b + float64_t val, count + ndarray[float64_t, ndim=2] resx, nobs + + nobs = np.zeros_like(out) + resx = np.empty_like(out) + + if bins[len(bins) - 1] == len(values): + ngroups = len(bins) else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + ngroups = len(bins) + 1 - counts[b] += 1 - val = values[i, 0] + N, K = (<object> values).shape + + b = 0 + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] # not nan if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - 
sumxx[b, 0] += val * val + nobs[b, j] += 1 + if nobs[b, j] == rank: + resx[b, j] = val for i in range(ngroups): for j in range(K): - ct = nobs[i, j] - if ct < 2: + if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) -@cython.wraparound(False) + out[i, j] = resx[i, j] @cython.boundscheck(False) -def group_var_bin_float32(ndarray[float32_t, ndim=2] out, +@cython.wraparound(False) +def group_nth_bin_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - + ndarray[int64_t] bins, int64_t rank): + ''' + Only aggregates on axis=0 + ''' cdef: Py_ssize_t i, j, N, K, ngroups, b - float32_t val, ct - ndarray[float32_t, ndim=2] nobs, sumx, sumxx + float32_t val, count + ndarray[float32_t, ndim=2] resx, nobs nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - sumxx = np.zeros_like(out) + resx = np.empty_like(out) if bins[len(bins) - 1] == len(values): ngroups = len(bins) @@ -5861,60 +6368,93 @@ def group_var_bin_float32(ndarray[float32_t, ndim=2] out, N, K = (<object> values).shape b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[b] += 1 + counts[b] += 1 + for j in range(K): + val = values[i, j] - for j in range(K): - val = values[i, j] + # not nan + if val == val: + nobs[b, j] += 1 + if nobs[b, j] == rank: + resx[b, j] = val - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - sumxx[b, j] += val * val + for i in range(ngroups): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = nan + else: + out[i, j] = resx[i, j] +@cython.boundscheck(False) +@cython.wraparound(False) +def group_nth_bin_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] bins, int64_t rank): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, 
K, ngroups, b + int64_t val, count + ndarray[int64_t, ndim=2] resx, nobs + + nobs = np.zeros_like(out) + resx = np.empty_like(out) + + if bins[len(bins) - 1] == len(values): + ngroups = len(bins) else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 + ngroups = len(bins) + 1 + + N, K = (<object> values).shape - counts[b] += 1 - val = values[i, 0] + b = 0 + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] # not nan if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - sumxx[b, 0] += val * val + nobs[b, j] += 1 + if nobs[b, j] == rank: + resx[b, j] = val for i in range(ngroups): for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = nan + if nobs[i, j] == 0: + out[i, j] = iNaT else: - out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) / - (ct * ct - ct)) + out[i, j] = resx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def group_mean_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): +def group_min_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' cdef: Py_ssize_t i, j, N, K, lab float64_t val, count - ndarray[float64_t, ndim=2] sumx, nobs + ndarray[float64_t, ndim=2] minx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - sumx = np.zeros_like(out) + + minx = np.empty_like(out) + minx.fill(np.inf) N, K = (<object> values).shape @@ -5927,10 +6467,12 @@ def group_mean_float64(ndarray[float64_t, ndim=2] out, counts[lab] += 1 for j in range(K): val = values[i, j] + # not nan if val == val: nobs[lab, j] += 1 - sumx[lab, j] += val + if val < minx[lab, j]: + minx[lab, j] = val else: for i in range(N): lab = labels[i] @@ -5939,34 +6481,40 @@ def 
group_mean_float64(ndarray[float64_t, ndim=2] out, counts[lab] += 1 val = values[i, 0] + # not nan if val == val: nobs[lab, 0] += 1 - sumx[lab, 0] += val + if val < minx[lab, 0]: + minx[lab, 0] = val for i in range(len(counts)): for j in range(K): - count = nobs[i, j] if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = sumx[i, j] / count + out[i, j] = minx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def group_mean_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels): +def group_min_float32(ndarray[float32_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float32_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' cdef: Py_ssize_t i, j, N, K, lab float32_t val, count - ndarray[float32_t, ndim=2] sumx, nobs + ndarray[float32_t, ndim=2] minx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) - sumx = np.zeros_like(out) + + minx = np.empty_like(out) + minx.fill(np.inf) N, K = (<object> values).shape @@ -5979,10 +6527,12 @@ def group_mean_float32(ndarray[float32_t, ndim=2] out, counts[lab] += 1 for j in range(K): val = values[i, j] + # not nan if val == val: nobs[lab, j] += 1 - sumx[lab, j] += val + if val < minx[lab, j]: + minx[lab, j] = val else: for i in range(N): lab = labels[i] @@ -5991,139 +6541,32 @@ def group_mean_float32(ndarray[float32_t, ndim=2] out, counts[lab] += 1 val = values[i, 0] + # not nan if val == val: nobs[lab, 0] += 1 - sumx[lab, 0] += val + if val < minx[lab, 0]: + minx[lab, 0] = val for i in range(len(counts)): for j in range(K): - count = nobs[i, j] if nobs[i, j] == 0: out[i, j] = nan else: - out[i, j] = sumx[i, j] / count - - -def group_mean_bin_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] bins): - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - 
ndarray[float64_t, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object> values).shape - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - count = nobs[i, j] - if count == 0: - out[i, j] = nan - else: - out[i, j] = sumx[i, j] / count - -def group_mean_bin_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] sumx, nobs - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object> values).shape - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - b = 0 - if K > 1: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[b, j] += 1 - sumx[b, j] += val - else: - for i in range(N): - while b < ngroups - 1 and i >= bins[b]: - b += 1 - - counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - nobs[b, 0] += 1 - sumx[b, 0] += val - - for i in range(ngroups): - for j in range(K): - count = nobs[i, j] - if count == 0: - out[i, j] = nan - else: - out[i, j] = sumx[i, j] / count - + out[i, j] = minx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def group_min_float64(ndarray[float64_t, ndim=2] out, +def group_min_int64(ndarray[int64_t, ndim=2] out, ndarray[int64_t] 
counts, - ndarray[float64_t, ndim=2] values, + ndarray[int64_t, ndim=2] values, ndarray[int64_t] labels): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, lab - float64_t val, count - ndarray[float64_t, ndim=2] minx, nobs + int64_t val, count + ndarray[int64_t, ndim=2] minx, nobs if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -6131,7 +6574,7 @@ def group_min_float64(ndarray[float64_t, ndim=2] out, nobs = np.zeros_like(out) minx = np.empty_like(out) - minx.fill(np.inf) + minx.fill(9223372036854775807) N, K = (<object> values).shape @@ -6168,83 +6611,84 @@ def group_min_float64(ndarray[float64_t, ndim=2] out, for i in range(len(counts)): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = iNaT else: out[i, j] = minx[i, j] + @cython.wraparound(False) @cython.boundscheck(False) -def group_min_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] labels): +def group_min_bin_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: - Py_ssize_t i, j, N, K, lab - float32_t val, count - ndarray[float32_t, ndim=2] minx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") + Py_ssize_t i, j, N, K, ngroups, b + float64_t val, count + ndarray[float64_t, ndim=2] minx, nobs nobs = np.zeros_like(out) minx = np.empty_like(out) minx.fill(np.inf) + if bins[len(bins) - 1] == len(values): + ngroups = len(bins) + else: + ngroups = len(bins) + 1 + N, K = (<object> values).shape + b = 0 if K > 1: for i in range(N): - lab = labels[i] - if lab < 0: - continue + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[lab] += 1 + counts[b] += 1 for j in range(K): val = values[i, j] # not nan if val == val: - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val + nobs[b, j] += 1 + if val 
< minx[b, j]: + minx[b, j] = val else: for i in range(N): - lab = labels[i] - if lab < 0: - continue + while b < ngroups - 1 and i >= bins[b]: + b += 1 - counts[lab] += 1 + counts[b] += 1 val = values[i, 0] # not nan if val == val: - nobs[lab, 0] += 1 - if val < minx[lab, 0]: - minx[lab, 0] = val + nobs[b, 0] += 1 + if val < minx[b, 0]: + minx[b, 0] = val - for i in range(len(counts)): + for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: out[i, j] = nan else: out[i, j] = minx[i, j] - @cython.wraparound(False) @cython.boundscheck(False) -def group_min_bin_float64(ndarray[float64_t, ndim=2] out, +def group_min_bin_float32(ndarray[float32_t, ndim=2] out, ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, + ndarray[float32_t, ndim=2] values, ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - ndarray[float64_t, ndim=2] minx, nobs + float32_t val, count + ndarray[float32_t, ndim=2] minx, nobs nobs = np.zeros_like(out) @@ -6295,22 +6739,22 @@ def group_min_bin_float64(ndarray[float64_t, ndim=2] out, out[i, j] = minx[i, j] @cython.wraparound(False) @cython.boundscheck(False) -def group_min_bin_float32(ndarray[float32_t, ndim=2] out, +def group_min_bin_int64(ndarray[int64_t, ndim=2] out, ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, + ndarray[int64_t, ndim=2] values, ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - ndarray[float32_t, ndim=2] minx, nobs + int64_t val, count + ndarray[int64_t, ndim=2] minx, nobs nobs = np.zeros_like(out) minx = np.empty_like(out) - minx.fill(np.inf) + minx.fill(9223372036854775807) if bins[len(bins) - 1] == len(values): ngroups = len(bins) @@ -6351,7 +6795,7 @@ def group_min_bin_float32(ndarray[float32_t, ndim=2] out, for i in range(ngroups): for j in range(K): if nobs[i, j] == 0: - out[i, j] = nan + out[i, j] = iNaT else: out[i, j] = minx[i, j] @@ -6475,6 
+6919,66 @@ def group_max_float32(ndarray[float32_t, ndim=2] out, out[i, j] = nan else: out[i, j] = maxx[i, j] +@cython.wraparound(False) +@cython.boundscheck(False) +def group_max_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, N, K, lab + int64_t val, count + ndarray[int64_t, ndim=2] maxx, nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + + maxx = np.empty_like(out) + maxx.fill(-9223372036854775807) + + N, K = (<object> values).shape + + if K > 1: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val + else: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + val = values[i, 0] + + # not nan + if val == val: + nobs[lab, 0] += 1 + if val > maxx[lab, 0]: + maxx[lab, 0] = val + + for i in range(len(counts)): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = iNaT + else: + out[i, j] = maxx[i, j] @cython.wraparound(False) @cython.boundscheck(False) @@ -6596,21 +7100,23 @@ def group_max_bin_float32(ndarray[float32_t, ndim=2] out, out[i, j] = nan else: out[i, j] = maxx[i, j] - @cython.wraparound(False) @cython.boundscheck(False) -def group_ohlc_float64(ndarray[float64_t, ndim=2] out, +def group_max_bin_int64(ndarray[int64_t, ndim=2] out, ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, + ndarray[int64_t, ndim=2] values, ndarray[int64_t] bins): ''' Only aggregates on axis=0 ''' cdef: Py_ssize_t i, j, N, K, ngroups, b - float64_t val, count - float64_t vopen, vhigh, vlow, vclose, NA - bint got_first = 0 + int64_t val, count + ndarray[int64_t, ndim=2] maxx, nobs + + nobs = np.zeros_like(out) + maxx = np.empty_like(out) + 
maxx.fill(-9223372036854775807) if bins[len(bins) - 1] == len(values): ngroups = len(bins) @@ -6619,130 +7125,41 @@ def group_ohlc_float64(ndarray[float64_t, ndim=2] out, N, K = (<object> values).shape - if out.shape[1] != 4: - raise ValueError('Output array must have 4 columns') - - NA = np.nan - b = 0 if K > 1: - raise NotImplementedError - else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: - if not got_first: - out[b, 0] = NA - out[b, 1] = NA - out[b, 2] = NA - out[b, 3] = NA - else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose b += 1 - got_first = 0 counts[b] += 1 - val = values[i, 0] - - # not nan - if val == val: - if not got_first: - got_first = 1 - vopen = val - vlow = val - vhigh = val - else: - if val < vlow: - vlow = val - if val > vhigh: - vhigh = val - vclose = val - - if not got_first: - out[b, 0] = NA - out[b, 1] = NA - out[b, 2] = NA - out[b, 3] = NA - else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose -@cython.wraparound(False) -@cython.boundscheck(False) -def group_ohlc_float32(ndarray[float32_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float32_t, ndim=2] values, - ndarray[int64_t] bins): - ''' - Only aggregates on axis=0 - ''' - cdef: - Py_ssize_t i, j, N, K, ngroups, b - float32_t val, count - float32_t vopen, vhigh, vlow, vclose, NA - bint got_first = 0 - - if bins[len(bins) - 1] == len(values): - ngroups = len(bins) - else: - ngroups = len(bins) + 1 - - N, K = (<object> values).shape - - if out.shape[1] != 4: - raise ValueError('Output array must have 4 columns') - - NA = np.nan + for j in range(K): + val = values[i, j] - b = 0 - if K > 1: - raise NotImplementedError + # not nan + if val == val: + nobs[b, j] += 1 + if val > maxx[b, j]: + maxx[b, j] = val else: for i in range(N): while b < ngroups - 1 and i >= bins[b]: - if not got_first: - out[b, 0] = NA - out[b, 1] = NA - out[b, 2] = NA - out[b, 3] = NA - else: - out[b, 0] = vopen - out[b, 1] = vhigh 
- out[b, 2] = vlow - out[b, 3] = vclose b += 1 - got_first = 0 counts[b] += 1 val = values[i, 0] # not nan if val == val: - if not got_first: - got_first = 1 - vopen = val - vlow = val - vhigh = val - else: - if val < vlow: - vlow = val - if val > vhigh: - vhigh = val - vclose = val + nobs[b, 0] += 1 + if val > maxx[b, 0]: + maxx[b, 0] = val - if not got_first: - out[b, 0] = NA - out[b, 1] = NA - out[b, 2] = NA - out[b, 3] = NA - else: - out[b, 0] = vopen - out[b, 1] = vhigh - out[b, 2] = vlow - out[b, 3] = vclose + for i in range(ngroups): + for j in range(K): + if nobs[i, j] == 0: + out[i, j] = iNaT + else: + out[i, j] = maxx[i, j] @cython.boundscheck(False) @cython.wraparound(False) @@ -6816,6 +7233,42 @@ def group_count_float32(ndarray[float32_t, ndim=2] out, out[i, j] = nobs[i, j] +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] labels): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, lab + Py_ssize_t N = values.shape[0], K = values.shape[1] + int64_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[lab, j] += val == val and val != iNaT + + for i in range(len(counts)): + for j in range(K): + out[i, j] = nobs[i, j] + + @cython.boundscheck(False) @cython.wraparound(False) def group_count_object(ndarray[object, ndim=2] out, @@ -6854,7 +7307,7 @@ def group_count_object(ndarray[object, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_count_int64(ndarray[float64_t, ndim=2] out, +def group_count_int64(ndarray[int64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[int64_t, ndim=2] values, ndarray[int64_t] 
labels): @@ -6957,6 +7410,40 @@ def group_count_bin_float32(ndarray[float32_t, ndim=2] out, out[i, j] = nobs[i, j] +@cython.boundscheck(False) +@cython.wraparound(False) +def group_count_bin_int64(ndarray[int64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[int64_t, ndim=2] values, + ndarray[int64_t] bins): + ''' + Only aggregates on axis=0 + ''' + cdef: + Py_ssize_t i, j, ngroups + Py_ssize_t N = values.shape[0], K = values.shape[1], b = 0 + int64_t val + ndarray[int64_t, ndim=2] nobs = np.zeros((out.shape[0], out.shape[1]), + dtype=np.int64) + + ngroups = len(bins) + (bins[len(bins) - 1] != N) + + for i in range(N): + while b < ngroups - 1 and i >= bins[b]: + b += 1 + + counts[b] += 1 + for j in range(K): + val = values[i, j] + + # not nan + nobs[b, j] += val == val and val != iNaT + + for i in range(ngroups): + for j in range(K): + out[i, j] = nobs[i, j] + + @cython.boundscheck(False) @cython.wraparound(False) def group_count_bin_object(ndarray[object, ndim=2] out, @@ -6993,7 +7480,7 @@ def group_count_bin_object(ndarray[object, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_count_bin_int64(ndarray[float64_t, ndim=2] out, +def group_count_bin_int64(ndarray[int64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[int64_t, ndim=2] values, ndarray[int64_t] bins): diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index d4fcaaec9eb6e..f2ea17db44211 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -3483,6 +3483,77 @@ def test_groupby_categorical_no_compress(self): exp = np.array([1,2,4,np.nan]) self.assert_numpy_array_equivalent(result, exp) + def test_groupby_non_arithmetic_agg_types(self): + # GH9311, GH6620 + df = pd.DataFrame([{'a': 1, 'b': 1}, + {'a': 1, 'b': 2}, + {'a': 2, 'b': 3}, + {'a': 2, 'b': 4}]) + + dtypes = ['int8', 'int16', 'int32', 'int64', + 'float32', 'float64'] + + grp_exp = {'first': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}, + 'last': {'df': [{'a': 1, 'b': 
2}, {'a': 2, 'b': 4}]}, + 'min': {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}, + 'max': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}, + 'nth': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], + 'args': [1]}, + 'count': {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], + 'out_type': 'int64'}} + + for dtype in dtypes: + df_in = df.copy() + df_in['b'] = df_in.b.astype(dtype) + + for method, data in compat.iteritems(grp_exp): + if 'args' not in data: + data['args'] = [] + + if 'out_type' in data: + out_type = data['out_type'] + else: + out_type = dtype + + exp = data['df'] + df_out = pd.DataFrame(exp) + + df_out['b'] = df_out.b.astype(out_type) + df_out.set_index('a', inplace=True) + + grpd = df_in.groupby('a') + t = getattr(grpd, method)(*data['args']) + assert_frame_equal(t, df_out) + + def test_groupby_non_arithmetic_agg_intlike_precision(self): + # GH9311, GH6620 + c = 24650000000000000 + + inputs = ((Timestamp('2011-01-15 12:50:28.502376'), + Timestamp('2011-01-20 12:50:28.593448')), + (1 + c, 2 + c)) + + for i in inputs: + df = pd.DataFrame([{'a': 1, + 'b': i[0]}, + {'a': 1, + 'b': i[1]}]) + + grp_exp = {'first': {'expected': i[0]}, + 'last': {'expected': i[1]}, + 'min': {'expected': i[0]}, + 'max': {'expected': i[1]}, + 'nth': {'expected': i[1], 'args': [1]}, + 'count': {'expected': 2}} + + for method, data in compat.iteritems(grp_exp): + if 'args' not in data: + data['args'] = [] + + grpd = df.groupby('a') + res = getattr(grpd, method)(*data['args']) + self.assertEqual(res.iloc[0].b, data['expected']) + def test_groupby_first_datetime64(self): df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)]) df[1] = df[1].view('M8[ns]')
datetime64 columns were changing at the nano-second scale when applying a groupby aggregator. closes #9311 closes #6620
https://api.github.com/repos/pandas-dev/pandas/pulls/9345
2015-01-23T16:36:04Z
2015-02-14T03:10:28Z
2015-02-14T03:10:28Z
2015-02-14T04:48:51Z
Add lag parameter to autocorrelation
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b3ac58a9fb84a..74a37d90bc35f 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -104,7 +104,7 @@ Enhancements - Added ``Series.str.slice_replace()``, which previously raised NotImplementedError (:issue:`8888`) - Added ``Timestamp.to_datetime64()`` to complement ``Timedelta.to_timedelta64()`` (:issue:`9255`) - ``tseries.frequencies.to_offset()`` now accepts ``Timedelta`` as input (:issue:`9064`) - +- Lag parameter was added to the autocorrelation method of Series, defaults to lag-1 autocorrelation (:issue:`9192`) - ``Timedelta`` will now accept nanoseconds keyword in constructor (:issue:`9273`) Performance diff --git a/pandas/core/series.py b/pandas/core/series.py index 60b601a462520..c6d40cc52ae8e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1333,15 +1333,20 @@ def diff(self, periods=1): result = com.diff(_values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) - def autocorr(self): + def autocorr(self, lag=1): """ - Lag-1 autocorrelation + Lag-N autocorrelation + + Parameters + ---------- + lag : int, default 1 + Number of lags to apply before performing autocorrelation. 
Returns ------- autocorr : float """ - return self.corr(self.shift(1)) + return self.corr(self.shift(lag)) def dot(self, other): """ diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index a5de26da1606a..977a30b14fede 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -6304,7 +6304,30 @@ def test_pct_change_shift_over_nas(self): def test_autocorr(self): # Just run the function - self.ts.autocorr() + corr1 = self.ts.autocorr() + + # Now run it with the lag parameter + corr2 = self.ts.autocorr(lag=1) + + # corr() with lag needs Series of at least length 2 + if len(self.ts) <= 2: + self.assertTrue(np.isnan(corr1)) + self.assertTrue(np.isnan(corr2)) + else: + self.assertEqual(corr1, corr2) + + # Choose a random lag between 1 and length of Series - 2 + # and compare the result with the Series corr() function + n = 1 + np.random.randint(max(1, len(self.ts) - 2)) + corr1 = self.ts.corr(self.ts.shift(n)) + corr2 = self.ts.autocorr(lag=n) + + # corr() with lag needs Series of at least length 2 + if len(self.ts) <= 2: + self.assertTrue(np.isnan(corr1)) + self.assertTrue(np.isnan(corr2)) + else: + self.assertEqual(corr1, corr2) def test_first_last_valid(self): ts = self.ts.copy()
Add lag parameter to autocorrelation, default to lag-1 autocorrelation so existing code will work unchanged. This is effectively reopening issue #9192
https://api.github.com/repos/pandas-dev/pandas/pulls/9339
2015-01-22T18:44:10Z
2015-01-29T11:17:51Z
null
2015-01-29T11:17:51Z
GH 9016: Bitwise operation weirdness
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 9485ef18dbd6f..d9a4e3895c8b7 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -88,6 +88,35 @@ Backwards incompatible API changes The prior style can be achieved with matplotlib's `axhline` or `axvline` methods (:issue:`9088`). + +- ``Series`` now supports bitwise operation for integral types (:issue:`9016`) + + Previously even if the input dtypes where integral, the output dtype was coerced to bool. + + .. code-block:: python + In [2]: pd.Series([0,1,2,3], list('abcd')) | pd.Series([4,4,4,4], list('abcd')) + Out[2]: + a True + b True + c True + d True + dtype: bool + + Now if the input dtypes are integral, the output dtype is also integral and the output + values are the result of the bitwise operation. + + .. code-block:: python + + In [2]: pd.Series([0,1,2,3], list('abcd')) | pd.Series([4,4,4,4], list('abcd')) + Out[2]: + a 4 + b 5 + c 6 + d 7 + dtype: int64 + + + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/ops.py b/pandas/core/ops.py index a3154ff9df9a1..64672a9e75244 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -654,20 +654,31 @@ def na_op(x, y): return result def wrapper(self, other): + is_self_int_dtype = com.is_integer_dtype(self.dtype) + + fill_int = lambda x: x.fillna(0) + fill_bool = lambda x: x.fillna(False).astype(bool) + if isinstance(other, pd.Series): name = _maybe_match_name(self, other) + other = other.reindex_like(self) + is_other_int_dtype = com.is_integer_dtype(other.dtype) + other = fill_int(other) if is_other_int_dtype else fill_bool(other) - other = other.reindex_like(self).fillna(False).astype(bool) - return self._constructor(na_op(self.values, other.values), + filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool + return filler(self._constructor(na_op(self.values, other.values), index=self.index, - name=name).fillna(False).astype(bool) + name=name)) + elif isinstance(other, 
pd.DataFrame): return NotImplemented + else: - # scalars - res = self._constructor(na_op(self.values, other), - index=self.index).fillna(False) - return res.astype(bool).__finalize__(self) + # scalars, list, tuple, np.array + filler = fill_int if is_self_int_dtype and com.is_integer_dtype(np.asarray(other)) else fill_bool + return filler(self._constructor(na_op(self.values, other), + index=self.index)).__finalize__(self) + return wrapper diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 75da99ef05884..ee92f95b37b55 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3790,6 +3790,96 @@ def test_comparison_label_based(self): for v in [np.nan]: self.assertRaises(TypeError, lambda : t & v) + def test_operators_bitwise(self): + # GH 9016: support bitwise op for integer types + index = list('bca') + + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + s_tff = Series([True, False, False], index=index) + s_empty = Series([]) + s_0101 = Series([0,1,0,1]) + s_0123 = Series(range(4)) + s_3333 = Series([3] * 4) + s_4444 = Series([4] * 4) + + res = s_tft & s_empty + expected = s_fff + assert_series_equal(res, expected) + + res = s_tft | s_empty + expected = s_tft + assert_series_equal(res, expected) + + res = s_0123 & s_3333 + expected = Series(range(4)) + assert_series_equal(res, expected) + + res = s_0123 | s_4444 + expected = Series(range(4, 8)) + assert_series_equal(res, expected) + + s_a0b1c0 = Series([1], list('b')) + + res = s_tft & s_a0b1c0 + expected = s_tff + assert_series_equal(res, expected) + + res = s_tft | s_a0b1c0 + expected = s_tft + assert_series_equal(res, expected) + + n0 = 0 + res = s_tft & n0 + expected = s_fff + assert_series_equal(res, expected) + + res = s_0123 & n0 + expected = Series([0] * 4) + assert_series_equal(res, expected) + + n1 = 1 + res = s_tft & n1 + expected = s_tft + assert_series_equal(res, expected) + + res = s_0123 & n1 + expected 
= Series([0, 1, 0, 1]) + assert_series_equal(res, expected) + + s_1111 = Series([1]*4, dtype='int8') + res = s_0123 & s_1111 + expected = Series([0, 1, 0, 1], dtype='int64') + assert_series_equal(res, expected) + + res = s_0123.astype(np.int16) | s_1111.astype(np.int32) + expected = Series([1, 1, 3, 3], dtype='int32') + assert_series_equal(res, expected) + + self.assertRaises(TypeError, lambda: s_1111 & 'a') + self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d']) + self.assertRaises(TypeError, lambda: s_0123 & np.NaN) + self.assertRaises(TypeError, lambda: s_0123 & 3.14) + self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2]) + + # s_0123 will be all false now because of reindexing like s_tft + assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca'))) + # s_tft will be all false now because of reindexing like s_0123 + assert_series_equal(s_0123 & s_tft, Series([False] * 4)) + assert_series_equal(s_0123 & False, Series([False] * 4)) + assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) + assert_series_equal(s_0123 & [False], Series([False] * 4)) + assert_series_equal(s_0123 & (False), Series([False] * 4)) + assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4)) + + s_ftft = Series([False, True, False, True]) + assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft) + + s_abNd = Series(['a','b',np.NaN,'d']) + res = s_0123 & s_abNd + expected = s_ftft + assert_series_equal(res, expected) + def test_between(self): s = Series(bdate_range('1/1/2000', periods=20).asobject) s[::2] = np.nan
Series now supports bitwise op for integral types. I have made the changes in wrapper() itself instead of na_op() because it looked to me like wrapper is controlling the input and output fill and dtype. Once that is taken care of, na_op() seems to be doing the right thing by itself and so I did not have to change anything to get back the expected behavior. Happy to change if things need to be altered toward better design etc.
https://api.github.com/repos/pandas-dev/pandas/pulls/9338
2015-01-22T18:30:00Z
2015-02-05T11:37:26Z
2015-02-05T11:37:26Z
2015-02-05T16:54:48Z
Fix missing text on Index documentation page.
diff --git a/pandas/core/index.py b/pandas/core/index.py index 1b4a691851a8a..9464e6de92721 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1583,7 +1583,7 @@ def map(self, mapper): def isin(self, values, level=None): """ Compute boolean array of whether each index value is found in the - passed set of values + passed set of values. Parameters ----------
https://api.github.com/repos/pandas-dev/pandas/pulls/9335
2015-01-22T15:16:03Z
2015-03-05T23:29:43Z
2015-03-05T23:29:43Z
2022-03-15T10:04:21Z
Fix docs in groupby.tail
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index cb5dedc887bca..fe130dbd5736b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -997,16 +997,16 @@ def tail(self, n=5): Examples -------- - >>> df = DataFrame([[1, 2], [1, 4], [5, 6]], + >>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], columns=['A', 'B']) - >>> df.groupby('A', as_index=False).tail(1) + >>> df.groupby('A').tail(1) A B - 0 1 2 - 2 5 6 + 1 a 2 + 3 b 2 >>> df.groupby('A').head(1) A B - 0 1 2 - 2 5 6 + 0 a 1 + 2 b 1 """ obj = self._selected_obj
The old docs is wrong where head() & tail() return the same result. Change input of the example to see group data clearer.
https://api.github.com/repos/pandas-dev/pandas/pulls/9333
2015-01-22T11:06:37Z
2015-02-04T07:34:03Z
2015-02-04T07:34:03Z
2015-02-04T14:57:51Z
FIX: Fixed some issues around vb_suite
diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index 26311920ec861..27a442ece1281 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -559,3 +559,6 @@ def inject_bmark_into_globals(bmark): for func_name in no_arg_func_list: bmark = make_large_ngroups_bmark(ngroups, func_name) inject_bmark_into_globals(bmark) + +# avoid bmark to be collected as Benchmark object +del bmark diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py index f05ebc47d2e25..012eb462fcc48 100644 --- a/vb_suite/indexing.py +++ b/vb_suite/indexing.py @@ -15,9 +15,8 @@ dt = ts.index[500] """ statement = "ts[dt]" - bm_getitem = Benchmark(statement, setup, ncalls=100000, - name='series_getitem_scalar') + name='time_series_getitem_scalar') setup = common_setup + """ index = tm.makeStringIndex(1000) @@ -25,9 +24,9 @@ idx = index[100] """ statement = "s.get_value(idx)" -bm_df_getitem3 = Benchmark(statement, setup, - name='series_get_value', - start_date=datetime(2011, 11, 12)) +bm_get_value = Benchmark(statement, setup, + name='series_get_value', + start_date=datetime(2011, 11, 12)) setup = common_setup + """ @@ -227,10 +226,10 @@ series_loc_list_like = Benchmark("s.loc[[800000]]", setup) series_loc_array = Benchmark("s.loc[np.arange(10000)]", setup) -series_iloc_scalar = Benchmark("s.loc[800000]", setup) -series_iloc_slice = Benchmark("s.loc[:800000]", setup) -series_iloc_list_like = Benchmark("s.loc[[800000]]", setup) -series_iloc_array = Benchmark("s.loc[np.arange(10000)]", setup) +series_iloc_scalar = Benchmark("s.iloc[800000]", setup) +series_iloc_slice = Benchmark("s.iloc[:800000]", setup) +series_iloc_list_like = Benchmark("s.iloc[[800000]]", setup) +series_iloc_array = Benchmark("s.iloc[np.arange(10000)]", setup) series_ix_scalar = Benchmark("s.ix[800000]", setup) series_ix_slice = Benchmark("s.ix[:800000]", setup) diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py index 8cf832ade2813..d7ab014453f2e 100755 --- a/vb_suite/test_perf.py +++ b/vb_suite/test_perf.py @@ 
-56,7 +56,7 @@ from pandas import DataFrame, Series from suite import REPO_PATH -VB_DIR = os.path.dirname(os.path.abspath(__file__)) +VB_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) DEFAULT_MIN_DURATION = 0.01 HEAD_COL="head[ms]" BASE_COL="base[ms]" @@ -505,7 +505,7 @@ def main(): print("\n") - # move away from the pandas root dit, to avoid possible import + # move away from the pandas root dir, to avoid possible import # surprises os.chdir(os.path.dirname(os.path.abspath(__file__)))
I'm new to vb_suite, and ran following command according to [this document](https://github.com/pydata/pandas/wiki/Performance-Testing). ``` ./test_perf.sh -b master -t HEAD ``` But I got some errors (`InvalidGitRepositoryError` and Benchmark duplication errors), and finally I got tests run by fixing following issues. - VB_DIR pointed wrong directory - temporary variable `bmark` in groupby was collected as Benchmark - indexing module had same name ('series_getitem_scalar') Benchmarks I'm not so familier with vb_suite, and any feedbacks are welcome ! Thanks.
https://api.github.com/repos/pandas-dev/pandas/pulls/9332
2015-01-22T06:29:13Z
2015-01-22T23:54:43Z
2015-01-22T23:54:43Z
2015-01-22T23:55:04Z
BUG: don't sort unique values from categoricals
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 238a838cf727e..d1bfb21769ab1 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -193,6 +193,8 @@ Bug Fixes SQLAlchemy type (:issue:`9083`). +- Items in ``Categorical.unique()`` (and ``s.unique()`` if ``s`` is of dtype ``category``) now appear in the order in which they are originally found, not in sorted order (:issue:`9331`). This is now consistent with the behavior for other dtypes in pandas. + - Fixed bug on bug endian platforms which produced incorrect results in ``StataReader`` (:issue:`8688`). diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 28c9d096e06d3..960cc0dea361b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1386,17 +1386,16 @@ def unique(self): """ Return the unique values. - Unused categories are NOT returned. + Unused categories are NOT returned. Unique values are returned in order + of appearance. 
Returns ------- unique values : array """ - unique_codes = np.unique(self.codes) - # for compatibility with normal unique, which has nan last - if unique_codes[0] == -1: - unique_codes[0:-1] = unique_codes[1:] - unique_codes[-1] = -1 + from pandas.core.nanops import unique1d + # unlike np.unique, unique1d does not sort + unique_codes = unique1d(self.codes) return take_1d(self.categories.values, unique_codes) def equals(self, other): diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 7a8d5e0ac0032..de59b84f1fbc7 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -774,12 +774,15 @@ def test_unique(self): exp = np.asarray(["a","b"]) res = cat.unique() self.assert_numpy_array_equal(res, exp) + cat = Categorical(["a","b","a","a"], categories=["a","b","c"]) res = cat.unique() self.assert_numpy_array_equal(res, exp) - cat = Categorical(["a","b","a", np.nan], categories=["a","b","c"]) + + # unique should not sort + cat = Categorical(["b", "b", np.nan, "a"], categories=["a","b","c"]) res = cat.unique() - exp = np.asarray(["a","b", np.nan], dtype=object) + exp = np.asarray(["b", np.nan, "a"], dtype=object) self.assert_numpy_array_equal(res, exp) def test_mode(self):
This should resolve the inconsistency @mwaskom reported in #9148. CC @jreback @TomAugspurger @JanSchulz
https://api.github.com/repos/pandas-dev/pandas/pulls/9331
2015-01-22T02:36:03Z
2015-02-13T00:42:02Z
2015-02-13T00:42:02Z
2015-02-13T00:42:40Z
ENH/DOC: reimplement Series delegates/accessors using descriptors
diff --git a/doc/_templates/autosummary/accessor_attribute.rst b/doc/_templates/autosummary/accessor_attribute.rst new file mode 100644 index 0000000000000..e38a9f22f9d99 --- /dev/null +++ b/doc/_templates/autosummary/accessor_attribute.rst @@ -0,0 +1,6 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module.split('.')[0] }} + +.. autoaccessorattribute:: {{ [module.split('.')[1], objname]|join('.') }} \ No newline at end of file diff --git a/doc/_templates/autosummary/accessor_method.rst b/doc/_templates/autosummary/accessor_method.rst new file mode 100644 index 0000000000000..8175d8615ceb2 --- /dev/null +++ b/doc/_templates/autosummary/accessor_method.rst @@ -0,0 +1,6 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module.split('.')[0] }} + +.. autoaccessormethod:: {{ [module.split('.')[1], objname]|join('.') }} \ No newline at end of file diff --git a/doc/source/api.rst b/doc/source/api.rst index 9d40fe9114f97..a8097f2648c4b 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -449,114 +449,113 @@ Datetimelike Properties ``Series.dt`` can be used to access the values of the series as datetimelike and return several properties. -Due to implementation details the methods show up here as methods of the -``DatetimeProperties/PeriodProperties/TimedeltaProperties`` classes. These can be accessed like ``Series.dt.<property>``. - -.. currentmodule:: pandas.tseries.common +These can be accessed like ``Series.dt.<property>``. **Datetime Properties** .. 
autosummary:: :toctree: generated/ - - DatetimeProperties.date - DatetimeProperties.time - DatetimeProperties.year - DatetimeProperties.month - DatetimeProperties.day - DatetimeProperties.hour - DatetimeProperties.minute - DatetimeProperties.second - DatetimeProperties.microsecond - DatetimeProperties.nanosecond - DatetimeProperties.second - DatetimeProperties.weekofyear - DatetimeProperties.dayofweek - DatetimeProperties.weekday - DatetimeProperties.dayofyear - DatetimeProperties.quarter - DatetimeProperties.is_month_start - DatetimeProperties.is_month_end - DatetimeProperties.is_quarter_start - DatetimeProperties.is_quarter_end - DatetimeProperties.is_year_start - DatetimeProperties.is_year_end + :template: autosummary/accessor_attribute.rst + + Series.dt.date + Series.dt.time + Series.dt.year + Series.dt.month + Series.dt.day + Series.dt.hour + Series.dt.minute + Series.dt.second + Series.dt.microsecond + Series.dt.nanosecond + Series.dt.second + Series.dt.weekofyear + Series.dt.dayofweek + Series.dt.weekday + Series.dt.dayofyear + Series.dt.quarter + Series.dt.is_month_start + Series.dt.is_month_end + Series.dt.is_quarter_start + Series.dt.is_quarter_end + Series.dt.is_year_start + Series.dt.is_year_end **Datetime Methods** .. autosummary:: :toctree: generated/ + :template: autosummary/accessor_method.rst - DatetimeProperties.to_period - DatetimeProperties.to_pydatetime - DatetimeProperties.tz_localize - DatetimeProperties.tz_convert + Series.dt.to_period + Series.dt.to_pydatetime + Series.dt.tz_localize + Series.dt.tz_convert **Timedelta Properties** .. autosummary:: :toctree: generated/ + :template: autosummary/accessor_attribute.rst - TimedeltaProperties.days - TimedeltaProperties.seconds - TimedeltaProperties.microseconds - TimedeltaProperties.nanoseconds - TimedeltaProperties.components + Series.dt.days + Series.dt.seconds + Series.dt.microseconds + Series.dt.nanoseconds + Series.dt.components **Timedelta Methods** .. 
autosummary:: :toctree: generated/ + :template: autosummary/accessor_method.rst - TimedeltaProperties.to_pytimedelta + Series.dt.to_pytimedelta String handling ~~~~~~~~~~~~~~~ ``Series.str`` can be used to access the values of the series as -strings and apply several methods to it. Due to implementation -details the methods show up here as methods of the -``StringMethods`` class. These can be acccessed like ``Series.str.<function/property>``. +strings and apply several methods to it. These can be acccessed like +``Series.str.<function/property>``. .. currentmodule:: pandas.core.strings .. autosummary:: :toctree: generated/ - - StringMethods.cat - StringMethods.center - StringMethods.contains - StringMethods.count - StringMethods.decode - StringMethods.encode - StringMethods.endswith - StringMethods.extract - StringMethods.findall - StringMethods.get - StringMethods.join - StringMethods.len - StringMethods.lower - StringMethods.lstrip - StringMethods.match - StringMethods.pad - StringMethods.repeat - StringMethods.replace - StringMethods.rstrip - StringMethods.slice - StringMethods.slice_replace - StringMethods.split - StringMethods.startswith - StringMethods.strip - StringMethods.title - StringMethods.upper - StringMethods.get_dummies + :template: autosummary/accessor_method.rst + + Series.str.cat + Series.str.center + Series.str.contains + Series.str.count + Series.str.decode + Series.str.encode + Series.str.endswith + Series.str.extract + Series.str.findall + Series.str.get + Series.str.join + Series.str.len + Series.str.lower + Series.str.lstrip + Series.str.match + Series.str.pad + Series.str.repeat + Series.str.replace + Series.str.rstrip + Series.str.slice + Series.str.slice_replace + Series.str.split + Series.str.startswith + Series.str.strip + Series.str.title + Series.str.upper + Series.str.get_dummies .. _api.categorical: Categorical ~~~~~~~~~~~ -.. 
currentmodule:: pandas.core.categorical - If the Series is of dtype ``category``, ``Series.cat`` can be used to change the the categorical data. This accessor is similar to the ``Series.dt`` or ``Series.str`` and has the following usable methods and properties (all available as ``Series.cat.<method_or_property>``). @@ -595,7 +594,6 @@ the Categorical back to a numpy array, so levels and order information is not pr Plotting ~~~~~~~~ -.. currentmodule:: pandas .. autosummary:: :toctree: generated/ diff --git a/doc/source/conf.py b/doc/source/conf.py index dd225dba7079a..fcb9c3fdd0016 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -297,6 +297,73 @@ 'pd.options.display.encoding="utf8"' ] + +# Add custom Documenter to handle attributes/methods of an AccessorProperty +# eg pandas.Series.str and pandas.Series.dt (see GH9322) + +from sphinx.util import rpartition +from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter + + +class AccessorLevelDocumenter(Documenter): + """ + Specialized Documenter subclass for objects on accessor level (methods, + attributes). + """ + + # This is the simple straightforward version + # modname is None, base the last elements (eg 'hour') + # and path the part before (eg 'Series.dt') + # def resolve_name(self, modname, parents, path, base): + # modname = 'pandas' + # mod_cls = path.rstrip('.') + # mod_cls = mod_cls.split('.') + # + # return modname, mod_cls + [base] + + def resolve_name(self, modname, parents, path, base): + if modname is None: + if path: + mod_cls = path.rstrip('.') + else: + mod_cls = None + # if documenting a class-level object without path, + # there must be a current class, either from a parent + # auto directive ... + mod_cls = self.env.temp_data.get('autodoc:class') + # ... or from a class directive + if mod_cls is None: + mod_cls = self.env.temp_data.get('py:class') + # ... 
if still None, there's no way to know + if mod_cls is None: + return None, [] + # HACK: this is added in comparison to ClassLevelDocumenter + # mod_cls still exists of class.accessor, so an extra + # rpartition is needed + modname, accessor = rpartition(mod_cls, '.') + modname, cls = rpartition(modname, '.') + parents = [cls, accessor] + # if the module name is still missing, get it like above + if not modname: + modname = self.env.temp_data.get('autodoc:module') + if not modname: + modname = self.env.temp_data.get('py:module') + # ... else, it stays None, which means invalid + return modname, parents + [base] + + +class AccessorAttributeDocumenter(AccessorLevelDocumenter, AttributeDocumenter): + + objtype = 'accessorattribute' + directivetype = 'attribute' + + +class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter): + + objtype = 'accessormethod' + directivetype = 'method' + + # remove the docstring of the flags attribute (inherited from numpy ndarray) # because these give doc build errors (see GH issue 5331) def remove_flags_docstring(app, what, name, obj, options, lines): @@ -305,3 +372,5 @@ def remove_flags_docstring(app, what, name, obj, options, lines): def setup(app): app.connect("autodoc-process-docstring", remove_flags_docstring) + app.add_autodocumenter(AccessorAttributeDocumenter) + app.add_autodocumenter(AccessorMethodDocumenter) diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index ddbfc60a5dfe7..dc13ce3e5c4da 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -478,7 +478,7 @@ This function is often used along with discretization functions like ``cut``: get_dummies(cut(values, bins)) -See also :func:`Series.str.get_dummies <pandas.core.strings.StringMethods.get_dummies>`. +See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`. .. 
versionadded:: 0.15.0 diff --git a/doc/source/text.rst b/doc/source/text.rst index 7032f5ff648a7..eb11cfb1248a9 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -204,27 +204,27 @@ Method Summary :header: "Method", "Description" :widths: 20, 80 - :meth:`~core.strings.StringMethods.cat`,Concatenate strings - :meth:`~core.strings.StringMethods.split`,Split strings on delimiter - :meth:`~core.strings.StringMethods.get`,Index into each element (retrieve i-th element) - :meth:`~core.strings.StringMethods.join`,Join strings in each element of the Series with passed separator - :meth:`~core.strings.StringMethods.contains`,Return boolean array if each string contains pattern/regex - :meth:`~core.strings.StringMethods.replace`,Replace occurrences of pattern/regex with some other string - :meth:`~core.strings.StringMethods.repeat`,Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) - :meth:`~core.strings.StringMethods.pad`,"Add whitespace to left, right, or both sides of strings" - :meth:`~core.strings.StringMethods.center`,Equivalent to ``pad(side='both')`` - :meth:`~core.strings.StringMethods.wrap`,Split long strings into lines with length less than a given width - :meth:`~core.strings.StringMethods.slice`,Slice each string in the Series - :meth:`~core.strings.StringMethods.slice_replace`,Replace slice in each string with passed value - :meth:`~core.strings.StringMethods.count`,Count occurrences of pattern - :meth:`~core.strings.StringMethods.startswith`,Equivalent to ``str.startswith(pat)`` for each element - :meth:`~core.strings.StringMethods.endswith`,Equivalent to ``str.endswith(pat)`` for each element - :meth:`~core.strings.StringMethods.findall`,Compute list of all occurrences of pattern/regex for each string - :meth:`~core.strings.StringMethods.match`,"Call ``re.match`` on each element, returning matched groups as list" - :meth:`~core.strings.StringMethods.extract`,"Call ``re.match`` on each element, as ``match`` does, but return matched 
groups as strings for convenience." - :meth:`~core.strings.StringMethods.len`,Compute string lengths - :meth:`~core.strings.StringMethods.strip`,Equivalent to ``str.strip`` - :meth:`~core.strings.StringMethods.rstrip`,Equivalent to ``str.rstrip`` - :meth:`~core.strings.StringMethods.lstrip`,Equivalent to ``str.lstrip`` - :meth:`~core.strings.StringMethods.lower`,Equivalent to ``str.lower`` - :meth:`~core.strings.StringMethods.upper`,Equivalent to ``str.upper`` + :meth:`~Series.str.cat`,Concatenate strings + :meth:`~Series.str.split`,Split strings on delimiter + :meth:`~Series.str.get`,Index into each element (retrieve i-th element) + :meth:`~Series.str.join`,Join strings in each element of the Series with passed separator + :meth:`~Series.str.contains`,Return boolean array if each string contains pattern/regex + :meth:`~Series.str.replace`,Replace occurrences of pattern/regex with some other string + :meth:`~Series.str.repeat`,Duplicate values (``s.str.repeat(3)`` equivalent to ``x * 3``) + :meth:`~Series.str.pad`,"Add whitespace to left, right, or both sides of strings" + :meth:`~Series.str.center`,Equivalent to ``pad(side='both')`` + :meth:`~Series.str.wrap`,Split long strings into lines with length less than a given width + :meth:`~Series.str.slice`,Slice each string in the Series + :meth:`~Series.str.slice_replace`,Replace slice in each string with passed value + :meth:`~Series.str.count`,Count occurrences of pattern + :meth:`~Series.str.startswith`,Equivalent to ``str.startswith(pat)`` for each element + :meth:`~Series.str.endswith`,Equivalent to ``str.endswith(pat)`` for each element + :meth:`~Series.str.findall`,Compute list of all occurrences of pattern/regex for each string + :meth:`~Series.str.match`,"Call ``re.match`` on each element, returning matched groups as list" + :meth:`~Series.str.extract`,"Call ``re.match`` on each element, as ``match`` does, but return matched groups as strings for convenience." 
+ :meth:`~Series.str.len`,Compute string lengths + :meth:`~Series.str.strip`,Equivalent to ``str.strip`` + :meth:`~Series.str.rstrip`,Equivalent to ``str.rstrip`` + :meth:`~Series.str.lstrip`,Equivalent to ``str.lstrip`` + :meth:`~Series.str.lower`,Equivalent to ``str.lower`` + :meth:`~Series.str.upper`,Equivalent to ``str.upper`` diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b3ac58a9fb84a..abd9d5850ff7e 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -107,6 +107,8 @@ Enhancements - ``Timedelta`` will now accept nanoseconds keyword in constructor (:issue:`9273`) +- Added auto-complete for ``Series.str.<tab>``, ``Series.dt.<tab>`` and ``Series.cat.<tab>`` (:issue:`9322`) + Performance ~~~~~~~~~~~ @@ -195,6 +197,7 @@ Bug Fixes - Bug in groupby ``.nth()`` with a multiple column groupby (:issue:`8979`) - Bug in ``DataFrame.where`` and ``Series.where`` coerce numerics to string incorrectly (:issue:`9280`) - Bug in ``DataFrame.where`` and ``Series.where`` raise ``ValueError`` when string list-like is passed. 
(:issue:`9280`) +- Accessing ``Series.str`` methods on with non-string values now raises ``TypeError`` instead of producing incorrect results (:issue:`9184`) - Fixed division by zero error for ``Series.kurt()`` when all values are equal (:issue:`9197`) diff --git a/pandas/core/base.py b/pandas/core/base.py index c3b3024a16d0c..dde2e74132c4b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -166,6 +166,28 @@ def f(self, *args, **kwargs): if not hasattr(cls, name): setattr(cls,name,f) + +class AccessorProperty(object): + """Descriptor for implementing accessor properties like Series.str + """ + def __init__(self, accessor_cls, construct_accessor): + self.accessor_cls = accessor_cls + self.construct_accessor = construct_accessor + self.__doc__ = accessor_cls.__doc__ + + def __get__(self, instance, owner=None): + if instance is None: + # this ensures that Series.str.<method> is well defined + return self.accessor_cls + return self.construct_accessor(instance) + + def __set__(self, instance, value): + raise AttributeError("can't set attribute") + + def __delete__(self, instance): + raise AttributeError("can't delete attribute") + + class FrozenList(PandasObject, list): """ diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index fe8b1079f0942..28c9d096e06d3 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -829,7 +829,7 @@ def searchsorted(self, v, side='left', sorter=None): array([3, 4]) # eggs before milk >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ]) >>> x.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4]) - array([3, 5]) # eggs after donuts, after switching milk and donuts + array([3, 5]) # eggs after donuts, after switching milk and donuts """ if not self.ordered: raise ValueError("searchsorted requires an ordered Categorical.") diff --git a/pandas/core/series.py b/pandas/core/series.py index 60b601a462520..ca401518af66d 100644 --- a/pandas/core/series.py 
+++ b/pandas/core/series.py @@ -27,7 +27,10 @@ from pandas.core.indexing import _check_bool_indexer, _maybe_convert_indices from pandas.core import generic, base from pandas.core.internals import SingleBlockManager -from pandas.core.categorical import Categorical +from pandas.core.categorical import Categorical, CategoricalAccessor +from pandas.core.strings import StringMethods +from pandas.tseries.common import (maybe_to_datetimelike, + CombinedDatetimelikeProperties) from pandas.tseries.index import DatetimeIndex from pandas.tseries.tdi import TimedeltaIndex from pandas.tseries.period import PeriodIndex, Period @@ -2042,7 +2045,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): y : Series or DataFrame if func returns a Series """ if len(self) == 0: - return Series() + return self._constructor(dtype=self.dtype, + index=self.index).__finalize__(self) if kwds or args and not isinstance(func, np.ufunc): f = lambda x: func(x, *args, **kwds) @@ -2452,11 +2456,6 @@ def asof(self, where): new_values = com.take_1d(values, locs) return self._constructor(new_values, index=where).__finalize__(self) - @cache_readonly - def str(self): - from pandas.core.strings import StringMethods - return StringMethods(self) - def to_timestamp(self, freq=None, how='start', copy=True): """ Cast to datetimeindex of timestamps, at *beginning* of period @@ -2502,27 +2501,41 @@ def to_period(self, freq=None, copy=True): return self._constructor(new_values, index=new_index).__finalize__(self) + #------------------------------------------------------------------------------ + # string methods + + def _make_str_accessor(self): + if not com.is_object_dtype(self.dtype): + # this really should exclude all series with any non-string values, + # but that isn't practical for performance reasons until we have a + # str dtype (GH 9343) + raise TypeError("Can only use .str accessor with string values, " + "which use np.object_ dtype in pandas") + return StringMethods(self) + + str = 
base.AccessorProperty(StringMethods, _make_str_accessor) + #------------------------------------------------------------------------------ # Datetimelike delegation methods - @cache_readonly - def dt(self): - from pandas.tseries.common import maybe_to_datetimelike + def _make_dt_accessor(self): try: return maybe_to_datetimelike(self) except (Exception): raise TypeError("Can only use .dt accessor with datetimelike values") + dt = base.AccessorProperty(CombinedDatetimelikeProperties, _make_dt_accessor) + #------------------------------------------------------------------------------ # Categorical methods - @cache_readonly - def cat(self): - from pandas.core.categorical import CategoricalAccessor + def _make_cat_accessor(self): if not com.is_categorical_dtype(self.dtype): raise TypeError("Can only use .cat accessor with a 'category' dtype") return CategoricalAccessor(self.values, self.index) + cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor) + Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0}) Series._add_numeric_operations() diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 9d4994e0f2de9..75d10654977cd 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2,8 +2,6 @@ from pandas.compat import zip from pandas.core.common import isnull, _values_from_object -from pandas.core.series import Series -from pandas.core.frame import DataFrame import pandas.compat as compat import re import pandas.lib as lib @@ -12,6 +10,8 @@ def _get_array_list(arr, others): + from pandas.core.series import Series + if len(others) and isinstance(_values_from_object(others)[0], (list, np.ndarray, Series)): arrays = [arr] + list(others) @@ -95,6 +95,8 @@ def _na_map(f, arr, na_result=np.nan, dtype=object): def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object): + from pandas.core.series import Series + if not len(arr): return np.ndarray(0, dtype=dtype) @@ -459,6 +461,9 @@ def str_extract(arr, pat, flags=0): 2 
NaN NaN """ + from pandas.core.series import Series + from pandas.core.frame import DataFrame + regex = re.compile(pat, flags=flags) # just to be safe, check this if regex.groups == 0: @@ -510,6 +515,8 @@ def str_get_dummies(arr, sep='|'): See also ``pd.get_dummies``. """ + from pandas.core.frame import DataFrame + # TODO remove this hack? arr = arr.fillna('') try: @@ -643,6 +650,9 @@ def str_split(arr, pat=None, n=None, return_type='series'): ------- split : array """ + from pandas.core.series import Series + from pandas.core.frame import DataFrame + if return_type not in ('series', 'frame'): raise ValueError("return_type must be {'series', 'frame'}") if pat is None: @@ -949,6 +959,9 @@ def __iter__(self): g = self.get(i) def _wrap_result(self, result): + from pandas.core.series import Series + from pandas.core.frame import DataFrame + if not hasattr(result, 'ndim'): return result elif result.ndim == 1: diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 4852e142d2f29..7a8d5e0ac0032 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -909,8 +909,8 @@ def test_searchsorted(self): exp = np.array([1]) self.assert_numpy_array_equal(res, exp) self.assert_numpy_array_equal(res, chk) - - # Searching for a value that is not present in the Categorical + + # Searching for a value that is not present in the Categorical res = c1.searchsorted(['bread', 'eggs']) chk = s1.searchsorted(['bread', 'eggs']) exp = np.array([1, 4]) @@ -927,7 +927,7 @@ def test_searchsorted(self): # As above, but with a sorter array to reorder an unsorted array res = c2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4]) chk = s2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4]) - exp = np.array([3, 5]) # eggs after donuts, after switching milk and donuts + exp = np.array([3, 5]) # eggs after donuts, after switching milk and donuts self.assert_numpy_array_equal(res, exp) 
self.assert_numpy_array_equal(res, chk) @@ -2516,6 +2516,15 @@ def get_dir(s): results = get_dir(s) tm.assert_almost_equal(results,list(sorted(set(ok_for_cat)))) + def test_cat_accessor_api(self): + # GH 9322 + from pandas.core.categorical import CategoricalAccessor + self.assertIs(Series.cat, CategoricalAccessor) + s = Series(list('aabbcde')).astype('category') + self.assertIsInstance(s.cat, CategoricalAccessor) + with tm.assertRaisesRegexp(TypeError, "only use .cat accessor"): + Series([1]).cat + def test_pickle_v0_14_1(self): cat = pd.Categorical(values=['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'], diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index a5de26da1606a..3f5f14a46d3c3 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -231,6 +231,18 @@ def test_valid_dt_with_missing_values(self): expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object') tm.assert_series_equal(result, expected) + def test_dt_accessor_api(self): + # GH 9322 + from pandas.tseries.common import (CombinedDatetimelikeProperties, + DatetimeProperties) + self.assertIs(Series.dt, CombinedDatetimelikeProperties) + + s = Series(date_range('2000-01-01', periods=3)) + self.assertIsInstance(s.dt, DatetimeProperties) + + with tm.assertRaisesRegexp(TypeError, "only use .dt accessor"): + Series([1]).dt + def test_binop_maybe_preserve_name(self): # names match, preserve @@ -5402,9 +5414,14 @@ def test_apply(self): tm.assert_frame_equal(result, expected) # empty series - s = Series() + s = Series(dtype=object, name='foo', index=pd.Index([], name='bar')) rs = s.apply(lambda x: x) tm.assert_series_equal(s, rs) + # check all metadata (GH 9322) + self.assertIsNot(s, rs) + self.assertIs(s.index, rs.index) + self.assertEqual(s.dtype, rs.dtype) + self.assertEqual(s.name, rs.name) # index but no data s = Series(index=[1, 2, 3]) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 50dba3bc7218a..b8f1a6ac342af 100644 
--- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -32,8 +32,13 @@ class TestStringMethods(tm.TestCase): def test_api(self): - # GH 6106 - self.assertIsNone(Series.str) + # GH 6106, GH 9322 + self.assertIs(Series.str, strings.StringMethods) + self.assertIsInstance(Series(['']).str, strings.StringMethods) + + # GH 9184 + with tm.assertRaisesRegexp(TypeError, "only use .str accessor"): + Series([1]).str def test_iter(self): # GH3638 @@ -79,26 +84,6 @@ def test_iter_single_element(self): self.assertFalse(i) assert_series_equal(ds, s) - def test_iter_numeric_try_string(self): - # behavior identical to empty series - dsi = Series(lrange(4)) - - i, s = 100, 'h' - - for i, s in enumerate(dsi.str): - pass - - self.assertEqual(i, 100) - self.assertEqual(s, 'h') - - dsf = Series(np.arange(4.)) - - for i, s in enumerate(dsf.str): - pass - - self.assertEqual(i, 100) - self.assertEqual(s, 'h') - def test_iter_object_try_string(self): ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(4)]) diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py index 7f6a0bc60dd57..2ceece087387e 100644 --- a/pandas/tseries/common.py +++ b/pandas/tseries/common.py @@ -3,7 +3,9 @@ import numpy as np from pandas.core.base import PandasDelegate from pandas.core import common as com -from pandas import Series, DatetimeIndex, PeriodIndex, TimedeltaIndex +from pandas.tseries.index import DatetimeIndex +from pandas.tseries.period import PeriodIndex +from pandas.tseries.tdi import TimedeltaIndex from pandas import lib, tslib from pandas.core.common import (_NS_DTYPE, _TD_DTYPE, is_period_arraylike, is_datetime_arraylike, is_integer_dtype, is_list_like, @@ -35,6 +37,7 @@ def maybe_to_datetimelike(data, copy=False): DelegatedClass """ + from pandas import Series if not isinstance(data, Series): raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data))) @@ -59,6 +62,8 @@ def __init__(self, values, index): self.index = 
index def _delegate_property_get(self, name): + from pandas import Series + result = getattr(self.values,name) # maybe need to upcast (ints) @@ -82,6 +87,8 @@ def _delegate_property_set(self, name, value, *args, **kwargs): "supported. Change values on the original.") def _delegate_method(self, name, *args, **kwargs): + from pandas import Series + method = getattr(self.values, name) result = method(*args, **kwargs) @@ -175,6 +182,14 @@ class PeriodProperties(Properties): accessors=PeriodIndex._datetimelike_ops, typ='property') + +class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties): + # This class is never instantiated, and exists solely for the benefit of + # the Series.dt class property. For Series objects, .dt will always be one + # of the more specific classes above. + __doc__ = DatetimeProperties.__doc__ + + def _concat_compat(to_concat, axis=0): """ provide concatenation of an datetimelike array of arrays each of which is a single
Fixes #9184 This PR fixes the API docs to use `Series.str` and `Series.dt` instead of `StringMethods` and `DatetimeProperties`. It will need a rebase once #9318 is merged. CC @jorisvandenbossche @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/9322
2015-01-21T07:51:49Z
2015-01-25T20:06:58Z
2015-01-25T20:06:58Z
2015-03-10T06:47:48Z
ENH: plot method accessors
diff --git a/doc/_templates/autosummary/accessor_callable.rst b/doc/_templates/autosummary/accessor_callable.rst new file mode 100644 index 0000000000000..6f45e0fd01e16 --- /dev/null +++ b/doc/_templates/autosummary/accessor_callable.rst @@ -0,0 +1,6 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module.split('.')[0] }} + +.. autoaccessorcallable:: {{ [module.split('.')[1], objname]|join('.') }}.__call__ diff --git a/doc/source/api.rst b/doc/source/api.rst index 38c2c1091469b..1fe1d8751bf12 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -672,12 +672,34 @@ the Categorical back to a numpy array, so levels and order information is not pr Plotting ~~~~~~~~ +``Series.plot`` is both a callable method and a namespace attribute for +specific plotting methods of the form ``Series.plot.<kind>``. + .. autosummary:: :toctree: generated/ + :template: autosummary/accessor_callable.rst - Series.hist Series.plot +.. autosummary:: + :toctree: generated/ + :template: autosummary/accessor_method.rst + + Series.plot.area + Series.plot.bar + Series.plot.barh + Series.plot.box + Series.plot.density + Series.plot.hist + Series.plot.kde + Series.plot.line + Series.plot.pie + +.. autosummary:: + :toctree: generated/ + + Series.hist + Serialization / IO / Conversion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: @@ -946,14 +968,41 @@ Time series-related DataFrame.tz_convert DataFrame.tz_localize +.. _api.dataframe.plotting: + Plotting ~~~~~~~~ + +``DataFrame.plot`` is both a callable method and a namespace attribute for +specific plotting methods of the form ``DataFrame.plot.<kind>``. + +.. autosummary:: + :toctree: generated/ + :template: autosummary/accessor_plot.rst + + DataFrame.plot + +.. 
autosummary:: + :toctree: generated/ + :template: autosummary/accessor_method.rst + + DataFrame.plot.area + DataFrame.plot.bar + DataFrame.plot.barh + DataFrame.plot.box + DataFrame.plot.density + DataFrame.plot.hexbin + DataFrame.plot.hist + DataFrame.plot.kde + DataFrame.plot.line + DataFrame.plot.pie + DataFrame.plot.scatter + .. autosummary:: :toctree: generated/ DataFrame.boxplot DataFrame.hist - DataFrame.plot Serialization / IO / Conversion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/conf.py b/doc/source/conf.py index 57c1667dca0c3..f2a033eb82d9c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -214,7 +214,7 @@ # template names. # Add redirect for previously existing API pages (which are now included in -# the API pages as top-level functions) based on a template (GH9911) +# the API pages as top-level functions) based on a template (GH9911) moved_api_pages = [ 'pandas.core.common.isnull', 'pandas.core.common.notnull', 'pandas.core.reshape.get_dummies', 'pandas.tools.merge.concat', 'pandas.tools.merge.merge', 'pandas.tools.pivot.pivot_table', @@ -327,6 +327,7 @@ from sphinx.util import rpartition from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter +from sphinx.ext.autosummary import Autosummary class AccessorLevelDocumenter(Documenter): @@ -388,6 +389,44 @@ class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter): directivetype = 'method' +class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter): + """ + This documenter lets us removes .__call__ from the method signature for + callable accessors like Series.plot + """ + objtype = 'accessorcallable' + directivetype = 'method' + + # lower than MethodDocumenter; otherwise the doc build prints warnings + priority = 0.5 + + def format_name(self): + return MethodDocumenter.format_name(self).rstrip('.__call__') + + +class PandasAutosummary(Autosummary): + """ + This alternative autosummary class lets us override the table summary 
for + Series.plot and DataFrame.plot in the API docs. + """ + + def _replace_pandas_items(self, display_name, sig, summary, real_name): + # this a hack: ideally we should extract the signature from the + # .__call__ method instead of hard coding this + if display_name == 'DataFrame.plot': + sig = '([x, y, kind, ax, ....])' + summary = 'DataFrame plotting accessor and method' + elif display_name == 'Series.plot': + sig = '([kind, ax, figsize, ....])' + summary = 'Series plotting accessor and method' + return (display_name, sig, summary, real_name) + + def get_items(self, names): + items = Autosummary.get_items(self, names) + items = [self._replace_pandas_items(*item) for item in items] + return items + + # remove the docstring of the flags attribute (inherited from numpy ndarray) # because these give doc build errors (see GH issue 5331) def remove_flags_docstring(app, what, name, obj, options, lines): @@ -398,3 +437,5 @@ def setup(app): app.connect("autodoc-process-docstring", remove_flags_docstring) app.add_autodocumenter(AccessorAttributeDocumenter) app.add_autodocumenter(AccessorMethodDocumenter) + app.add_autodocumenter(AccessorCallableDocumenter) + app.add_directive('autosummary', PandasAutosummary) diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 8785a8d092d48..b6ee2d83fd131 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -121,8 +121,9 @@ You can plot one column versus another using the `x` and `y` keywords in Other Plots ----------- -The ``kind`` keyword argument of :meth:`~DataFrame.plot` accepts -a handful of values for plots other than the default Line plot. +Plotting methods allow for a handful of plot styles other than the +default Line plot. These methods can be provided as the ``kind`` +keyword argument to :meth:`~DataFrame.plot`. 
These include: * :ref:`'bar' <visualization.barplot>` or :ref:`'barh' <visualization.barplot>` for bar plots @@ -134,6 +135,19 @@ These include: * :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots * :ref:`'pie' <visualization.pie>` for pie plots +.. versionadded:: 0.17 + +You can also create these other plots using the methods ``DataFrame.plot.<kind>`` instead of providing the ``kind`` keyword argument. This makes it easier to discover plot methods and the specific arguments they use: + +.. ipython:: + :verbatim: + + In [14]: df = pd.DataFrame() + + In [15]: df.plot.<TAB> + df.plot.area df.plot.barh df.plot.density df.plot.hist df.plot.line df.plot.scatter + df.plot.bar df.plot.box df.plot.hexbin df.plot.kde df.plot.pie + In addition to these ``kind`` s, there are the :ref:`DataFrame.hist() <visualization.hist>`, and :ref:`DataFrame.boxplot() <visualization.box>` methods, which use a separate interface. diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 0b6f6522dfde0..a29e7127f5f88 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -29,6 +29,7 @@ users upgrade to this version. Highlights include: - Release the Global Interpreter Lock (GIL) on some cython operations, see :ref:`here <whatsnew_0170.gil>` +- Plotting methods are now available as attributes of the ``.plot`` accessor, see :ref:`here <whatsnew_0170.plot>` - The sorting API has been revamped to remove some long-time inconsistencies, see :ref:`here <whatsnew_0170.api_breaking.sorting>` - Support for a ``datetime64[ns]`` with timezones as a first-class dtype, see :ref:`here <whatsnew_0170.tz>` - The default for ``to_datetime`` will now be to ``raise`` when presented with unparseable formats, @@ -116,6 +117,35 @@ Releasing of the GIL could benefit an application that uses threads for user int .. _dask: https://dask.readthedocs.org/en/latest/ .. _QT: https://wiki.python.org/moin/PyQt +.. 
_whatsnew_0170.plot: + +Plot submethods +^^^^^^^^^^^^^^^ + +The Series and DataFrame ``.plot()`` method allows for customizing :ref:`plot types<visualization.other>` by supplying the ``kind`` keyword arguments. Unfortunately, many of these kinds of plots use different required and optional keyword arguments, which makes it difficult to discover what any given plot kind uses out of the dozens of possible arguments. + +To alleviate this issue, we have added a new, optional plotting interface, which exposes each kind of plot as a method of the ``.plot`` attribute. Instead of writing ``series.plot(kind=<kind>, ...)``, you can now also use ``series.plot.<kind>(...)``: + +.. ipython:: + :verbatim: + + In [13]: df = pd.DataFrame(np.random.rand(10, 2), columns=['a', 'b']) + + In [14]: df.plot.bar() + +.. image:: _static/whatsnew_plot_submethods.png + +As a result of this change, these methods are now all discoverable via tab-completion: + +.. ipython:: + :verbatim: + + In [15]: df.plot.<TAB> + df.plot.area df.plot.barh df.plot.density df.plot.hist df.plot.line df.plot.scatter + df.plot.bar df.plot.box df.plot.hexbin df.plot.kde df.plot.pie + +Each method signature only includes relevant arguments. Currently, these are limited to required arguments, but in the future these will include optional arguments, as well. For an overview, see the new :ref:`api.dataframe.plotting` API documentation. + .. _whatsnew_0170.strftime: Support strftime for Datetimelikes @@ -251,7 +281,6 @@ has been changed to make this keyword unnecessary - the change is shown below. Excel files saved in version 0.16.2 or prior that had index names will still able to be read in, but the ``has_index_names`` argument must specified to ``True``. - .. 
_whatsnew_0170.enhancements.other: Other enhancements diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5ab75f7d2658a..77b8c4cf35aad 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -47,13 +47,14 @@ OrderedDict, raise_with_traceback) from pandas import compat from pandas.sparse.array import SparseArray -from pandas.util.decorators import deprecate, Appender, Substitution, \ - deprecate_kwarg +from pandas.util.decorators import (cache_readonly, deprecate, Appender, + Substitution, deprecate_kwarg) from pandas.tseries.period import PeriodIndex from pandas.tseries.index import DatetimeIndex import pandas.core.algorithms as algos +import pandas.core.base as base import pandas.core.common as com import pandas.core.format as fmt import pandas.core.nanops as nanops @@ -5432,7 +5433,7 @@ def _put_str(s, space): import pandas.tools.plotting as gfx -DataFrame.plot = gfx.plot_frame +DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods, gfx.FramePlotMethods) DataFrame.hist = gfx.hist_frame diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 1d5a92d43d680..6b4b8be1430fe 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -296,6 +296,28 @@ def _get_binner_for_grouping(self, obj): def groups(self): return self.grouper.groups + +class GroupByPlot(PandasObject): + """ + Class implementing the .plot attribute for groupby objects + """ + def __init__(self, groupby): + self._groupby = groupby + + def __call__(self, *args, **kwargs): + def f(self, *args, **kwargs): + return self.plot(*args, **kwargs) + f.__name__ = 'plot' + return self._groupby.apply(f) + + def __getattr__(self, name): + def attr(*args, **kwargs): + def f(self): + return getattr(self.plot, name)(*args, **kwargs) + return self._groupby.apply(f) + return attr + + class GroupBy(PandasObject): """ @@ -538,6 +560,8 @@ def __getattr__(self, attr): def __getitem__(self, key): raise NotImplementedError('Not implemented: %s' % key) + plot = property(GroupByPlot) 
+ def _make_wrapper(self, name): if name not in self._apply_whitelist: is_callable = callable(getattr(self._selected_obj, name, None)) diff --git a/pandas/core/series.py b/pandas/core/series.py index 0c44f79febdda..067c514fa37cd 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2882,7 +2882,7 @@ def __init__(self, *args, **kwargs): import pandas.tools.plotting as _gfx -Series.plot = _gfx.plot_series +Series.plot = base.AccessorProperty(_gfx.SeriesPlotMethods, _gfx.SeriesPlotMethods) Series.hist = _gfx.hist_series # Add arithmetic! diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index d1f1f2196558a..ad9518116a066 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -505,8 +505,8 @@ def test_plot(self): ax = _check_plot_works(self.ts.plot, style='.', loglog=True) self._check_ax_scales(ax, xaxis='log', yaxis='log') - _check_plot_works(self.ts[:10].plot, kind='bar') - _check_plot_works(self.ts.plot, kind='area', stacked=False) + _check_plot_works(self.ts[:10].plot.bar) + _check_plot_works(self.ts.plot.area, stacked=False) _check_plot_works(self.iseries.plot) for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']: @@ -514,8 +514,8 @@ def test_plot(self): continue _check_plot_works(self.series[:5].plot, kind=kind) - _check_plot_works(self.series[:10].plot, kind='barh') - ax = _check_plot_works(Series(randn(10)).plot, kind='bar', color='black') + _check_plot_works(self.series[:10].plot.barh) + ax = _check_plot_works(Series(randn(10)).plot.bar, color='black') self._check_colors([ax.patches[0]], facecolors=['black']) # GH 6951 @@ -555,7 +555,7 @@ def test_ts_line_lim(self): self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1]) def test_ts_area_lim(self): - ax = self.ts.plot(kind='area', stacked=False) + ax = self.ts.plot.area(stacked=False) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) @@ -563,7 +563,7 @@ def 
test_ts_area_lim(self): tm.close() # GH 7471 - ax = self.ts.plot(kind='area', stacked=False, x_compat=True) + ax = self.ts.plot.area(stacked=False, x_compat=True) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) @@ -572,14 +572,14 @@ def test_ts_area_lim(self): tz_ts = self.ts.copy() tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET') - ax = tz_ts.plot(kind='area', stacked=False, x_compat=True) + ax = tz_ts.plot.area(stacked=False, x_compat=True) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) self.assertEqual(xmax, line[-1]) tm.close() - ax = tz_ts.plot(kind='area', stacked=False, secondary_y=True) + ax = tz_ts.plot.area(stacked=False, secondary_y=True) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] self.assertEqual(xmin, line[0]) @@ -623,9 +623,9 @@ def test_line_area_nan_series(self): expected = np.array([1, 2, 0, 3]) ax = _check_plot_works(d.plot, stacked=True) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) - ax = _check_plot_works(d.plot, kind='area') + ax = _check_plot_works(d.plot.area) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) - ax = _check_plot_works(d.plot, kind='area', stacked=False) + ax = _check_plot_works(d.plot.area, stacked=False) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) def test_line_use_index_false(self): @@ -634,7 +634,7 @@ def test_line_use_index_false(self): ax = s.plot(use_index=False) label = ax.get_xlabel() self.assertEqual(label, '') - ax2 = s.plot(kind='bar', use_index=False) + ax2 = s.plot.bar(use_index=False) label2 = ax2.get_xlabel() self.assertEqual(label2, '') @@ -645,11 +645,11 @@ def test_bar_log(self): if not self.mpl_le_1_2_1: expected = np.hstack((.1, expected, 1e4)) - ax = Series([200, 500]).plot(log=True, kind='bar') + ax = Series([200, 500]).plot.bar(log=True) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), 
expected) tm.close() - ax = Series([200, 500]).plot(log=True, kind='barh') + ax = Series([200, 500]).plot.barh(log=True) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) tm.close() @@ -671,7 +671,7 @@ def test_bar_log(self): @slow def test_bar_ignore_index(self): df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) - ax = df.plot(kind='bar', use_index=False) + ax = df.plot.bar(use_index=False) self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3']) def test_rotation(self): @@ -697,17 +697,17 @@ def test_pie_series(self): # if sum of values is less than 1.0, pie handle them as rate and draw semicircle. series = Series(np.random.randint(1, 5), index=['a', 'b', 'c', 'd', 'e'], name='YLABEL') - ax = _check_plot_works(series.plot, kind='pie') + ax = _check_plot_works(series.plot.pie) self._check_text_labels(ax.texts, series.index) self.assertEqual(ax.get_ylabel(), 'YLABEL') # without wedge labels - ax = _check_plot_works(series.plot, kind='pie', labels=None) + ax = _check_plot_works(series.plot.pie, labels=None) self._check_text_labels(ax.texts, [''] * 5) # with less colors than elements color_args = ['r', 'g', 'b'] - ax = _check_plot_works(series.plot, kind='pie', colors=color_args) + ax = _check_plot_works(series.plot.pie, colors=color_args) color_expected = ['r', 'g', 'b', 'r', 'g'] self._check_colors(ax.patches, facecolors=color_expected) @@ -715,12 +715,12 @@ def test_pie_series(self): # with labels and colors labels = ['A', 'B', 'C', 'D', 'E'] color_args = ['r', 'g', 'b', 'c', 'm'] - ax = _check_plot_works(series.plot, kind='pie', labels=labels, colors=color_args) + ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args) self._check_text_labels(ax.texts, labels) self._check_colors(ax.patches, facecolors=color_args) # with autopct and fontsize - ax = _check_plot_works(series.plot, kind='pie', colors=color_args, + ax = _check_plot_works(series.plot.pie, colors=color_args, autopct='%.2f', fontsize=7) pcts = 
['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())] iters = [iter(series.index), iter(pcts)] @@ -732,17 +732,17 @@ def test_pie_series(self): # includes negative value with tm.assertRaises(ValueError): series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e']) - series.plot(kind='pie') + series.plot.pie() # includes nan series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'], name='YLABEL') - ax = _check_plot_works(series.plot, kind='pie') + ax = _check_plot_works(series.plot.pie) self._check_text_labels(ax.texts, ['a', 'b', '', 'd']) def test_pie_nan(self): s = Series([1, np.nan, 1, 1]) - ax = s.plot(kind='pie', legend=True) + ax = s.plot.pie(legend=True) expected = ['0', '', '2', '3'] result = [x.get_text() for x in ax.texts] self.assertEqual(result, expected) @@ -750,7 +750,7 @@ def test_pie_nan(self): @slow def test_hist_df_kwargs(self): df = DataFrame(np.random.randn(10, 2)) - ax = df.plot(kind='hist', bins=5) + ax = df.plot.hist(bins=5) self.assertEqual(len(ax.patches), 10) @slow @@ -759,10 +759,10 @@ def test_hist_df_with_nonnumerics(self): with tm.RNGContext(1): df = DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D']) df['E'] = ['x', 'y'] * 5 - ax = df.plot(kind='hist', bins=5) + ax = df.plot.hist(bins=5) self.assertEqual(len(ax.patches), 20) - ax = df.plot(kind='hist') # bins=10 + ax = df.plot.hist() # bins=10 self.assertEqual(len(ax.patches), 40) @slow @@ -848,8 +848,8 @@ def test_hist_secondary_legend(self): df = DataFrame(np.random.randn(30, 4), columns=list('abcd')) # primary -> secondary - ax = df['a'].plot(kind='hist', legend=True) - df['b'].plot(kind='hist', ax=ax, legend=True, secondary_y=True) + ax = df['a'].plot.hist(legend=True) + df['b'].plot.hist(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b (right)']) @@ -858,8 +858,8 @@ def test_hist_secondary_legend(self): tm.close() # secondary -> 
secondary - ax = df['a'].plot(kind='hist', legend=True, secondary_y=True) - df['b'].plot(kind='hist', ax=ax, legend=True, secondary_y=True) + ax = df['a'].plot.hist(legend=True, secondary_y=True) + df['b'].plot.hist(ax=ax, legend=True, secondary_y=True) # both legends are draw on left ax # left axis must be invisible, right axis must be visible self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b (right)']) @@ -868,9 +868,9 @@ def test_hist_secondary_legend(self): tm.close() # secondary -> primary - ax = df['a'].plot(kind='hist', legend=True, secondary_y=True) + ax = df['a'].plot.hist(legend=True, secondary_y=True) # right axes is returned - df['b'].plot(kind='hist', ax=ax, legend=True) + df['b'].plot.hist(ax=ax, legend=True) # both legends are draw on left ax # left and right axis must be visible self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b']) @@ -945,7 +945,7 @@ def test_plot_fails_with_dupe_color_and_style(self): @slow def test_hist_kde(self): - ax = self.ts.plot(kind='hist', logy=True) + ax = self.ts.plot.hist(logy=True) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() # ticks are values, thus ticklabels are blank @@ -955,9 +955,9 @@ def test_hist_kde(self): tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() - _check_plot_works(self.ts.plot, kind='kde') - _check_plot_works(self.ts.plot, kind='density') - ax = self.ts.plot(kind='kde', logy=True) + _check_plot_works(self.ts.plot.kde) + _check_plot_works(self.ts.plot.density) + ax = self.ts.plot.kde(logy=True) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() self._check_text_labels(xlabels, [''] * len(xlabels)) @@ -969,9 +969,9 @@ def test_kde_kwargs(self): tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() from numpy import linspace - _check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20)) - _check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20)) - ax = 
self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20)) + _check_plot_works(self.ts.plot.kde, bw_method=.5, ind=linspace(-100,100,20)) + _check_plot_works(self.ts.plot.density, bw_method=.5, ind=linspace(-100,100,20)) + ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=linspace(-100,100,20)) self._check_ax_scales(ax, yaxis='log') self._check_text_labels(ax.yaxis.get_label(), 'Density') @@ -981,33 +981,33 @@ def test_kde_missing_vals(self): _skip_if_no_scipy_gaussian_kde() s = Series(np.random.uniform(size=50)) s[0] = np.nan - ax = _check_plot_works(s.plot, kind='kde') + ax = _check_plot_works(s.plot.kde) @slow def test_hist_kwargs(self): - ax = self.ts.plot(kind='hist', bins=5) + ax = self.ts.plot.hist(bins=5) self.assertEqual(len(ax.patches), 5) self._check_text_labels(ax.yaxis.get_label(), 'Frequency') tm.close() if self.mpl_ge_1_3_1: - ax = self.ts.plot(kind='hist', orientation='horizontal') + ax = self.ts.plot.hist(orientation='horizontal') self._check_text_labels(ax.xaxis.get_label(), 'Frequency') tm.close() - ax = self.ts.plot(kind='hist', align='left', stacked=True) + ax = self.ts.plot.hist(align='left', stacked=True) tm.close() @slow def test_hist_kde_color(self): - ax = self.ts.plot(kind='hist', logy=True, bins=10, color='b') + ax = self.ts.plot.hist(logy=True, bins=10, color='b') self._check_ax_scales(ax, yaxis='log') self.assertEqual(len(ax.patches), 10) self._check_colors(ax.patches, facecolors=['b'] * 10) tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() - ax = self.ts.plot(kind='kde', logy=True, color='r') + ax = self.ts.plot.kde(logy=True, color='r') self._check_ax_scales(ax, yaxis='log') lines = ax.get_lines() self.assertEqual(len(lines), 1) @@ -1015,13 +1015,22 @@ def test_hist_kde_color(self): @slow def test_boxplot_series(self): - ax = self.ts.plot(kind='box', logy=True) + ax = self.ts.plot.box(logy=True) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() self._check_text_labels(xlabels, 
[self.ts.name]) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) + @slow + def test_kind_both_ways(self): + s = Series(range(3)) + for kind in plotting._common_kinds + plotting._series_kinds: + if not _ok_for_gaussian_kde(kind): + continue + s.plot(kind=kind) + getattr(s.plot, kind)() + @slow def test_invalid_plot_data(self): s = Series(list('abcd')) @@ -1216,7 +1225,7 @@ def test_plot(self): df = DataFrame({'x': [1, 2], 'y': [3, 4]}) with tm.assertRaises(TypeError): - df.plot(kind='line', blarg=True) + df.plot.line(blarg=True) df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10])) @@ -1272,16 +1281,16 @@ def test_plot(self): # GH 6951 # Test with single column df = DataFrame({'x': np.random.rand(10)}) - axes = _check_plot_works(df.plot, kind='bar', subplots=True) + axes = _check_plot_works(df.plot.bar, subplots=True) self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) - axes = _check_plot_works(df.plot, kind='bar', subplots=True, + axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1)) self._check_axes_shape(axes, axes_num=1, layout=(1, 1)) # When ax is supplied and required number of axes is 1, # passed ax should be used: fig, ax = self.plt.subplots() - axes = df.plot(kind='bar', subplots=True, ax=ax) + axes = df.plot.bar(subplots=True, ax=ax) self.assertEqual(len(axes), 1) self.assertIs(ax.get_axes(), axes[0]) @@ -1674,9 +1683,9 @@ def test_negative_log(self): columns=['x', 'y', 'z', 'four']) with tm.assertRaises(ValueError): - df.plot(kind='area', logy=True) + df.plot.area(logy=True) with tm.assertRaises(ValueError): - df.plot(kind='area', loglog=True) + df.plot.area(loglog=True) def _compare_stacked_y_cood(self, normal_lines, stacked_lines): base = np.zeros(len(normal_lines[0].get_data()[1])) @@ -1740,11 +1749,11 @@ def test_line_area_nan_df(self): self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + 
expected2) - ax = _check_plot_works(d.plot, kind='area') + ax = _check_plot_works(d.plot.area) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) - ax = _check_plot_works(d.plot, kind='area', stacked=False) + ax = _check_plot_works(d.plot.area, stacked=False) self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) @@ -1778,7 +1787,7 @@ def test_area_lim(self): neg_df = - df for stacked in [True, False]: - ax = _check_plot_works(df.plot, kind='area', stacked=stacked) + ax = _check_plot_works(df.plot.area, stacked=stacked) xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() lines = ax.get_lines() @@ -1786,7 +1795,7 @@ def test_area_lim(self): self.assertEqual(xmax, lines[0].get_data()[0][-1]) self.assertEqual(ymin, 0) - ax = _check_plot_works(neg_df.plot, kind='area', stacked=stacked) + ax = _check_plot_works(neg_df.plot.area, stacked=stacked) ymin, ymax = ax.get_ylim() self.assertEqual(ymax, 0) @@ -1797,29 +1806,29 @@ def test_bar_colors(self): default_colors = plt.rcParams.get('axes.color_cycle') df = DataFrame(randn(5, 5)) - ax = df.plot(kind='bar') + ax = df.plot.bar() self._check_colors(ax.patches[::5], facecolors=default_colors[:5]) tm.close() custom_colors = 'rgcby' - ax = df.plot(kind='bar', color=custom_colors) + ax = df.plot.bar(color=custom_colors) self._check_colors(ax.patches[::5], facecolors=custom_colors) tm.close() from matplotlib import cm # Test str -> colormap functionality - ax = df.plot(kind='bar', colormap='jet') + ax = df.plot.bar(colormap='jet') rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) self._check_colors(ax.patches[::5], facecolors=rgba_colors) tm.close() # Test colormap functionality - ax = df.plot(kind='bar', colormap=cm.jet) + ax = df.plot.bar(colormap=cm.jet) rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) self._check_colors(ax.patches[::5], 
facecolors=rgba_colors) tm.close() - ax = df.ix[:, [0]].plot(kind='bar', color='DodgerBlue') + ax = df.ix[:, [0]].plot.bar(color='DodgerBlue') self._check_colors([ax.patches[0]], facecolors=['DodgerBlue']) tm.close() @@ -1832,17 +1841,17 @@ def test_bar_linewidth(self): df = DataFrame(randn(5, 5)) # regular - ax = df.plot(kind='bar', linewidth=2) + ax = df.plot.bar(linewidth=2) for r in ax.patches: self.assertEqual(r.get_linewidth(), 2) # stacked - ax = df.plot(kind='bar', stacked=True, linewidth=2) + ax = df.plot.bar(stacked=True, linewidth=2) for r in ax.patches: self.assertEqual(r.get_linewidth(), 2) # subplots - axes = df.plot(kind='bar', linewidth=2, subplots=True) + axes = df.plot.bar(linewidth=2, subplots=True) self._check_axes_shape(axes, axes_num=5, layout=(5, 1)) for ax in axes: for r in ax.patches: @@ -1855,33 +1864,33 @@ def test_bar_barwidth(self): width = 0.9 # regular - ax = df.plot(kind='bar', width=width) + ax = df.plot.bar(width=width) for r in ax.patches: self.assertEqual(r.get_width(), width / len(df.columns)) # stacked - ax = df.plot(kind='bar', stacked=True, width=width) + ax = df.plot.bar(stacked=True, width=width) for r in ax.patches: self.assertEqual(r.get_width(), width) # horizontal regular - ax = df.plot(kind='barh', width=width) + ax = df.plot.barh(width=width) for r in ax.patches: self.assertEqual(r.get_height(), width / len(df.columns)) # horizontal stacked - ax = df.plot(kind='barh', stacked=True, width=width) + ax = df.plot.barh(stacked=True, width=width) for r in ax.patches: self.assertEqual(r.get_height(), width) # subplots - axes = df.plot(kind='bar', width=width, subplots=True) + axes = df.plot.bar(width=width, subplots=True) for ax in axes: for r in ax.patches: self.assertEqual(r.get_width(), width) # horizontal subplots - axes = df.plot(kind='barh', width=width, subplots=True) + axes = df.plot.barh(width=width, subplots=True) for ax in axes: for r in ax.patches: self.assertEqual(r.get_height(), width) @@ -1899,28 +1908,28 @@ 
def test_bar_barwidth_position(self): @slow def test_bar_bottom_left(self): df = DataFrame(rand(5, 5)) - ax = df.plot(kind='bar', stacked=False, bottom=1) + ax = df.plot.bar(stacked=False, bottom=1) result = [p.get_y() for p in ax.patches] self.assertEqual(result, [1] * 25) - ax = df.plot(kind='bar', stacked=True, bottom=[-1, -2, -3, -4, -5]) + ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5]) result = [p.get_y() for p in ax.patches[:5]] self.assertEqual(result, [-1, -2, -3, -4, -5]) - ax = df.plot(kind='barh', stacked=False, left=np.array([1, 1, 1, 1, 1])) + ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1])) result = [p.get_x() for p in ax.patches] self.assertEqual(result, [1] * 25) - ax = df.plot(kind='barh', stacked=True, left=[1, 2, 3, 4, 5]) + ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5]) result = [p.get_x() for p in ax.patches[:5]] self.assertEqual(result, [1, 2, 3, 4, 5]) - axes = df.plot(kind='bar', subplots=True, bottom=-1) + axes = df.plot.bar(subplots=True, bottom=-1) for ax in axes: result = [p.get_y() for p in ax.patches] self.assertEqual(result, [-1] * 5) - axes = df.plot(kind='barh', subplots=True, left=np.array([1, 1, 1, 1, 1])) + axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1])) for ax in axes: result = [p.get_x() for p in ax.patches] self.assertEqual(result, [1] * 5) @@ -1929,12 +1938,12 @@ def test_bar_bottom_left(self): def test_bar_nan(self): df = DataFrame({'A': [10, np.nan, 20], 'B': [5, 10, 20], 'C': [1, 2, 3]}) - ax = df.plot(kind='bar') + ax = df.plot.bar() expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] result = [p.get_height() for p in ax.patches] self.assertEqual(result, expected) - ax = df.plot(kind='bar', stacked=True) + ax = df.plot.bar(stacked=True) result = [p.get_height() for p in ax.patches] self.assertEqual(result, expected) @@ -1948,13 +1957,13 @@ def test_plot_scatter(self): index=list(string.ascii_letters[:6]), columns=['x', 'y', 'z', 'four']) - _check_plot_works(df.plot, x='x', 
y='y', kind='scatter') - _check_plot_works(df.plot, x=1, y=2, kind='scatter') + _check_plot_works(df.plot.scatter, x='x', y='y') + _check_plot_works(df.plot.scatter, x=1, y=2) - with tm.assertRaises(ValueError): - df.plot(x='x', kind='scatter') - with tm.assertRaises(ValueError): - df.plot(y='y', kind='scatter') + with tm.assertRaises(TypeError): + df.plot.scatter(x='x') + with tm.assertRaises(TypeError): + df.plot.scatter(y='y') # GH 6951 axes = df.plot(x='x', y='y', kind='scatter', subplots=True) @@ -1966,8 +1975,8 @@ def test_plot_scatter_with_c(self): index=list(string.ascii_letters[:6]), columns=['x', 'y', 'z', 'four']) - axes = [df.plot(kind='scatter', x='x', y='y', c='z'), - df.plot(kind='scatter', x=0, y=1, c=2)] + axes = [df.plot.scatter(x='x', y='y', c='z'), + df.plot.scatter(x=0, y=1, c=2)] for ax in axes: # default to Greys self.assertEqual(ax.collections[0].cmap.name, 'Greys') @@ -1979,15 +1988,15 @@ def test_plot_scatter_with_c(self): self.assertEqual(ax.collections[0].colorbar._label, 'z') cm = 'cubehelix' - ax = df.plot(kind='scatter', x='x', y='y', c='z', colormap=cm) + ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm) self.assertEqual(ax.collections[0].cmap.name, cm) # verify turning off colorbar works - ax = df.plot(kind='scatter', x='x', y='y', c='z', colorbar=False) + ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False) self.assertIs(ax.collections[0].colorbar, None) # verify that we can still plot a solid color - ax = df.plot(x=0, y=1, c='red', kind='scatter') + ax = df.plot.scatter(x=0, y=1, c='red') self.assertIs(ax.collections[0].colorbar, None) self._check_colors(ax.collections, facecolors=['r']) @@ -2001,7 +2010,7 @@ def test_plot_scatter_with_c(self): red_rgba = [1.0, 0.0, 0.0, 1.0] green_rgba = [0.0, 1.0, 0.0, 1.0] rgba_array = np.array([red_rgba, green_rgba]) - ax = df.plot(kind='scatter', x='A', y='B', c=rgba_array) + ax = df.plot.scatter(x='A', y='B', c=rgba_array) # expect the face colors of the points in the non-colormap 
path to be # identical to the values we supplied, normally we'd be on shaky ground # comparing floats for equality but here we expect them to be @@ -2014,7 +2023,7 @@ def test_plot_scatter_with_c(self): # are dependent on the spring colormap, which may change its colors # later. float_array = np.array([0.0, 1.0]) - df.plot(kind='scatter', x='A', y='B', c=float_array, cmap='spring') + df.plot.scatter(x='A', y='B', c=float_array, cmap='spring') @slow def test_plot_bar(self): @@ -2022,27 +2031,27 @@ def test_plot_bar(self): index=list(string.ascii_letters[:6]), columns=['one', 'two', 'three', 'four']) - _check_plot_works(df.plot, kind='bar') - _check_plot_works(df.plot, kind='bar', legend=False) - _check_plot_works(df.plot, kind='bar', subplots=True) - _check_plot_works(df.plot, kind='bar', stacked=True) + _check_plot_works(df.plot.bar) + _check_plot_works(df.plot.bar, legend=False) + _check_plot_works(df.plot.bar, subplots=True) + _check_plot_works(df.plot.bar, stacked=True) df = DataFrame(randn(10, 15), index=list(string.ascii_letters[:10]), columns=lrange(15)) - _check_plot_works(df.plot, kind='bar') + _check_plot_works(df.plot.bar) df = DataFrame({'a': [0, 1], 'b': [1, 0]}) - ax = _check_plot_works(df.plot, kind='bar') + ax = _check_plot_works(df.plot.bar) self._check_ticks_props(ax, xrot=90) - ax = df.plot(kind='bar', rot=35, fontsize=10) + ax = df.plot.bar(rot=35, fontsize=10) self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10) - ax = _check_plot_works(df.plot, kind='barh') + ax = _check_plot_works(df.plot.barh) self._check_ticks_props(ax, yrot=0) - ax = df.plot(kind='barh', rot=55, fontsize=11) + ax = df.plot.barh(rot=55, fontsize=11) self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11) def _check_bar_alignment(self, df, kind='bar', stacked=False, @@ -2175,7 +2184,7 @@ def test_bar_log_no_subplots(self): # no subplots df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5)) - ax = df.plot(kind='bar', grid=True, log=True) + 
ax = df.plot.bar(grid=True, log=True) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) @slow @@ -2185,8 +2194,7 @@ def test_bar_log_subplots(self): expected = np.hstack((.1, expected, 1e4)) ax = DataFrame([Series([200, 300]), - Series([300, 500])]).plot(log=True, kind='bar', - subplots=True) + Series([300, 500])]).plot.bar(log=True, subplots=True) tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected) tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected) @@ -2198,7 +2206,7 @@ def test_boxplot(self): numeric_cols = df._get_numeric_data().columns labels = [com.pprint_thing(c) for c in numeric_cols] - ax = _check_plot_works(df.plot, kind='box') + ax = _check_plot_works(df.plot.box) self._check_text_labels(ax.get_xticklabels(), labels) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)) @@ -2207,7 +2215,7 @@ def test_boxplot(self): # different warning on py3 if not PY3: - axes = _check_plot_works(df.plot, kind='box', + axes = _check_plot_works(df.plot.box, subplots=True, logy=True) self._check_axes_shape(axes, axes_num=3, layout=(1, 3)) @@ -2216,14 +2224,14 @@ def test_boxplot(self): self._check_text_labels(ax.get_xticklabels(), [label]) self.assertEqual(len(ax.lines), self.bp_n_objects) - axes = series.plot(kind='box', rot=40) + axes = series.plot.box(rot=40) self._check_ticks_props(axes, xrot=40, yrot=0) tm.close() - ax = _check_plot_works(series.plot, kind='box') + ax = _check_plot_works(series.plot.box) positions = np.array([1, 6, 7]) - ax = df.plot(kind='box', positions=positions) + ax = df.plot.box(positions=positions) numeric_cols = df._get_numeric_data().columns labels = [com.pprint_thing(c) for c in numeric_cols] self._check_text_labels(ax.get_xticklabels(), labels) @@ -2237,12 +2245,12 @@ def test_boxplot_vertical(self): labels = [com.pprint_thing(c) for c in numeric_cols] # if horizontal, yticklabels are rotated - ax = df.plot(kind='box', rot=50, fontsize=8, vert=False) + ax = 
df.plot.box(rot=50, fontsize=8, vert=False) self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8) self._check_text_labels(ax.get_yticklabels(), labels) self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols)) - axes = _check_plot_works(df.plot, kind='box', subplots=True, + axes = _check_plot_works(df.plot.box, subplots=True, vert=False, logx=True) self._check_axes_shape(axes, axes_num=3, layout=(1, 3)) self._check_ax_scales(axes, xaxis='log') @@ -2251,7 +2259,7 @@ def test_boxplot_vertical(self): self.assertEqual(len(ax.lines), self.bp_n_objects) positions = np.array([3, 2, 8]) - ax = df.plot(kind='box', positions=positions, vert=False) + ax = df.plot.box(positions=positions, vert=False) self._check_text_labels(ax.get_yticklabels(), labels) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions) self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols)) @@ -2262,15 +2270,15 @@ def test_boxplot_return_type(self): index=list(string.ascii_letters[:6]), columns=['one', 'two', 'three', 'four']) with tm.assertRaises(ValueError): - df.plot(kind='box', return_type='NOTATYPE') + df.plot.box(return_type='NOTATYPE') - result = df.plot(kind='box', return_type='dict') + result = df.plot.box(return_type='dict') self._check_box_return_type(result, 'dict') - result = df.plot(kind='box', return_type='axes') + result = df.plot.box(return_type='axes') self._check_box_return_type(result, 'axes') - result = df.plot(kind='box', return_type='both') + result = df.plot.box(return_type='both') self._check_box_return_type(result, 'both') @slow @@ -2278,13 +2286,13 @@ def test_boxplot_subplots_return_type(self): df = self.hist_df # normal style: return_type=None - result = df.plot(kind='box', subplots=True) + result = df.plot.box(subplots=True) self.assertIsInstance(result, np.ndarray) self._check_box_return_type(result, None, expected_keys=['height', 'weight', 'category']) for t in ['dict', 'axes', 'both']: - returned = df.plot(kind='box', 
return_type=t, subplots=True) + returned = df.plot.box(return_type=t, subplots=True) self._check_box_return_type(returned, t, expected_keys=['height', 'weight', 'category'], check_ax_title=False) @@ -2324,29 +2332,29 @@ def test_hist_df(self): df = DataFrame(randn(100, 4)) series = df[0] - ax = _check_plot_works(df.plot, kind='hist') + ax = _check_plot_works(df.plot.hist) expected = [com.pprint_thing(c) for c in df.columns] self._check_legend_labels(ax, labels=expected) - axes = _check_plot_works(df.plot, kind='hist', subplots=True, logy=True) + axes = _check_plot_works(df.plot.hist, subplots=True, logy=True) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) self._check_ax_scales(axes, yaxis='log') - axes = series.plot(kind='hist', rot=40) + axes = series.plot.hist(rot=40) self._check_ticks_props(axes, xrot=40, yrot=0) tm.close() - ax = series.plot(kind='hist', normed=True, cumulative=True, bins=4) + ax = series.plot.hist(normed=True, cumulative=True, bins=4) # height of last bin (index 5) must be 1.0 self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0) tm.close() - ax = series.plot(kind='hist', cumulative=True, bins=4) + ax = series.plot.hist(cumulative=True, bins=4) self.assertAlmostEqual(ax.get_children()[5].get_height(), 100.0) tm.close() # if horizontal, yticklabels are rotated - axes = df.plot(kind='hist', rot=50, fontsize=8, orientation='horizontal') + axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal') self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8) def _check_box_coord(self, patches, expected_y=None, expected_h=None, @@ -2384,7 +2392,7 @@ def test_hist_df_coord(self): columns=['A', 'B', 'C']) for df in [normal_df, nan_df]: - ax = df.plot(kind='hist', bins=5) + ax = df.plot.hist(bins=5) self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]), expected_h=np.array([10, 9, 8, 7, 6])) self._check_box_coord(ax.patches[5:10], expected_y=np.array([0, 0, 0, 0, 0]), @@ -2392,7 +2400,7 @@ def 
test_hist_df_coord(self): self._check_box_coord(ax.patches[10:], expected_y=np.array([0, 0, 0, 0, 0]), expected_h=np.array([6, 7, 8, 9, 10])) - ax = df.plot(kind='hist', bins=5, stacked=True) + ax = df.plot.hist(bins=5, stacked=True) self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]), expected_h=np.array([10, 9, 8, 7, 6])) self._check_box_coord(ax.patches[5:10], expected_y=np.array([10, 9, 8, 7, 6]), @@ -2400,7 +2408,7 @@ def test_hist_df_coord(self): self._check_box_coord(ax.patches[10:], expected_y=np.array([18, 17, 16, 15, 14]), expected_h=np.array([6, 7, 8, 9, 10])) - axes = df.plot(kind='hist', bins=5, stacked=True, subplots=True) + axes = df.plot.hist(bins=5, stacked=True, subplots=True) self._check_box_coord(axes[0].patches, expected_y=np.array([0, 0, 0, 0, 0]), expected_h=np.array([10, 9, 8, 7, 6])) self._check_box_coord(axes[1].patches, expected_y=np.array([0, 0, 0, 0, 0]), @@ -2411,7 +2419,7 @@ def test_hist_df_coord(self): if self.mpl_ge_1_3_1: # horizontal - ax = df.plot(kind='hist', bins=5, orientation='horizontal') + ax = df.plot.hist(bins=5, orientation='horizontal') self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]), expected_w=np.array([10, 9, 8, 7, 6])) self._check_box_coord(ax.patches[5:10], expected_x=np.array([0, 0, 0, 0, 0]), @@ -2419,7 +2427,7 @@ def test_hist_df_coord(self): self._check_box_coord(ax.patches[10:], expected_x=np.array([0, 0, 0, 0, 0]), expected_w=np.array([6, 7, 8, 9, 10])) - ax = df.plot(kind='hist', bins=5, stacked=True, orientation='horizontal') + ax = df.plot.hist(bins=5, stacked=True, orientation='horizontal') self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]), expected_w=np.array([10, 9, 8, 7, 6])) self._check_box_coord(ax.patches[5:10], expected_x=np.array([10, 9, 8, 7, 6]), @@ -2427,7 +2435,7 @@ def test_hist_df_coord(self): self._check_box_coord(ax.patches[10:], expected_x=np.array([18, 17, 16, 15, 14]), expected_w=np.array([6, 7, 8, 9, 10])) - 
axes = df.plot(kind='hist', bins=5, stacked=True, + axes = df.plot.hist(bins=5, stacked=True, subplots=True, orientation='horizontal') self._check_box_coord(axes[0].patches, expected_x=np.array([0, 0, 0, 0, 0]), expected_w=np.array([10, 9, 8, 7, 6])) @@ -2487,12 +2495,12 @@ def test_df_legend_labels(self): self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i']) # scatter - ax = df.plot(kind='scatter', x='a', y='b', label='data1') + ax = df.plot.scatter(x='a', y='b', label='data1') self._check_legend_labels(ax, labels=['data1']) - ax = df2.plot(kind='scatter', x='d', y='e', legend=False, - label='data2', ax=ax) + ax = df2.plot.scatter(x='d', y='e', legend=False, + label='data2', ax=ax) self._check_legend_labels(ax, labels=['data1']) - ax = df3.plot(kind='scatter', x='g', y='h', label='data3', ax=ax) + ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax) self._check_legend_labels(ax, labels=['data1', 'data3']) # ensure label args pass through and @@ -2683,7 +2691,7 @@ def test_area_colors(self): custom_colors = 'rgcby' df = DataFrame(rand(5, 5)) - ax = df.plot(kind='area', color=custom_colors) + ax = df.plot.area(color=custom_colors) self._check_colors(ax.get_lines(), linecolors=custom_colors) poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] self._check_colors(poly, facecolors=custom_colors) @@ -2695,7 +2703,7 @@ def test_area_colors(self): self.assertTrue(h.get_alpha() is None) tm.close() - ax = df.plot(kind='area', colormap='jet') + ax = df.plot.area(colormap='jet') jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) self._check_colors(ax.get_lines(), linecolors=jet_colors) poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] @@ -2708,7 +2716,7 @@ def test_area_colors(self): tm.close() # When stacked=True, alpha is set to 0.5 - ax = df.plot(kind='area', colormap=cm.jet, stacked=False) + ax = df.plot.area(colormap=cm.jet, stacked=False) self._check_colors(ax.get_lines(), linecolors=jet_colors) poly 
= [o for o in ax.get_children() if isinstance(o, PolyCollection)] jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors] @@ -2725,29 +2733,29 @@ def test_hist_colors(self): default_colors = self.plt.rcParams.get('axes.color_cycle') df = DataFrame(randn(5, 5)) - ax = df.plot(kind='hist') + ax = df.plot.hist() self._check_colors(ax.patches[::10], facecolors=default_colors[:5]) tm.close() custom_colors = 'rgcby' - ax = df.plot(kind='hist', color=custom_colors) + ax = df.plot.hist( color=custom_colors) self._check_colors(ax.patches[::10], facecolors=custom_colors) tm.close() from matplotlib import cm # Test str -> colormap functionality - ax = df.plot(kind='hist', colormap='jet') + ax = df.plot.hist( colormap='jet') rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) self._check_colors(ax.patches[::10], facecolors=rgba_colors) tm.close() # Test colormap functionality - ax = df.plot(kind='hist', colormap=cm.jet) + ax = df.plot.hist( colormap=cm.jet) rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5)) self._check_colors(ax.patches[::10], facecolors=rgba_colors) tm.close() - ax = df.ix[:, [0]].plot(kind='hist', color='DodgerBlue') + ax = df.ix[:, [0]].plot.hist(color='DodgerBlue') self._check_colors([ax.patches[0]], facecolors=['DodgerBlue']) ax = df.plot(kind='hist', color='green') @@ -2764,16 +2772,16 @@ def test_kde_colors(self): custom_colors = 'rgcby' df = DataFrame(rand(5, 5)) - ax = df.plot(kind='kde', color=custom_colors) + ax = df.plot.kde(color=custom_colors) self._check_colors(ax.get_lines(), linecolors=custom_colors) tm.close() - ax = df.plot(kind='kde', colormap='jet') + ax = df.plot.kde(colormap='jet') rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) self._check_colors(ax.get_lines(), linecolors=rgba_colors) tm.close() - ax = df.plot(kind='kde', colormap=cm.jet) + ax = df.plot.kde(colormap=cm.jet) rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df))) self._check_colors(ax.get_lines(), linecolors=rgba_colors) @@ -2848,47 +2856,47 @@ def 
_check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c='b'): default_colors = self.plt.rcParams.get('axes.color_cycle') df = DataFrame(randn(5, 5)) - bp = df.plot(kind='box', return_type='dict') + bp = df.plot.box(return_type='dict') _check_colors(bp, default_colors[0], default_colors[0], default_colors[2]) tm.close() dict_colors = dict(boxes='#572923', whiskers='#982042', medians='#804823', caps='#123456') - bp = df.plot(kind='box', color=dict_colors, sym='r+', return_type='dict') + bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict') _check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'], dict_colors['medians'], dict_colors['caps'], 'r') tm.close() # partial colors dict_colors = dict(whiskers='c', medians='m') - bp = df.plot(kind='box', color=dict_colors, return_type='dict') + bp = df.plot.box(color=dict_colors, return_type='dict') _check_colors(bp, default_colors[0], 'c', 'm') tm.close() from matplotlib import cm # Test str -> colormap functionality - bp = df.plot(kind='box', colormap='jet', return_type='dict') + bp = df.plot.box(colormap='jet', return_type='dict') jet_colors = lmap(cm.jet, np.linspace(0, 1, 3)) _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2]) tm.close() # Test colormap functionality - bp = df.plot(kind='box', colormap=cm.jet, return_type='dict') + bp = df.plot.box(colormap=cm.jet, return_type='dict') _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2]) tm.close() # string color is applied to all artists except fliers - bp = df.plot(kind='box', color='DodgerBlue', return_type='dict') + bp = df.plot.box(color='DodgerBlue', return_type='dict') _check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue', 'DodgerBlue') # tuple is also applied to all artists except fliers - bp = df.plot(kind='box', color=(0, 1, 0), sym='#123456', return_type='dict') + bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict') _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), '#123456') 
with tm.assertRaises(ValueError): # Color contains invalid key results in ValueError - df.plot(kind='box', color=dict(boxes='red', xxxx='blue')) + df.plot.box(color=dict(boxes='red', xxxx='blue')) def test_default_color_cycle(self): import matplotlib.pyplot as plt @@ -2912,6 +2920,17 @@ def test_unordered_ts(self): ydata = ax.lines[0].get_ydata() tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0])) + def test_kind_both_ways(self): + df = DataFrame({'x': [1, 2, 3]}) + for kind in plotting._common_kinds: + if not _ok_for_gaussian_kde(kind): + continue + df.plot(kind=kind) + getattr(df.plot, kind)() + for kind in ['scatter', 'hexbin']: + df.plot('x', 'x', kind=kind) + getattr(df.plot, kind)('x', 'x') + def test_all_invalid_plot_data(self): df = DataFrame(list('abcd')) for kind in plotting._common_kinds: @@ -2949,12 +2968,12 @@ def test_invalid_kind(self): def test_hexbin_basic(self): df = self.hexbin_df - ax = df.plot(kind='hexbin', x='A', y='B', gridsize=10) + ax = df.plot.hexbin(x='A', y='B', gridsize=10) # TODO: need better way to test. This just does existence. 
self.assertEqual(len(ax.collections), 1) # GH 6951 - axes = df.plot(x='A', y='B', kind='hexbin', subplots=True) + axes = df.plot.hexbin(x='A', y='B', subplots=True) # hexbin should have 2 axes in the figure, 1 for plotting and another is colorbar self.assertEqual(len(axes[0].figure.axes), 2) # return value is single axes @@ -2964,11 +2983,10 @@ def test_hexbin_basic(self): def test_hexbin_with_c(self): df = self.hexbin_df - ax = df.plot(kind='hexbin', x='A', y='B', C='C') + ax = df.plot.hexbin(x='A', y='B', C='C') self.assertEqual(len(ax.collections), 1) - ax = df.plot(kind='hexbin', x='A', y='B', C='C', - reduce_C_function=np.std) + ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std) self.assertEqual(len(ax.collections), 1) @slow @@ -2976,45 +2994,45 @@ def test_hexbin_cmap(self): df = self.hexbin_df # Default to BuGn - ax = df.plot(kind='hexbin', x='A', y='B') + ax = df.plot.hexbin(x='A', y='B') self.assertEqual(ax.collections[0].cmap.name, 'BuGn') cm = 'cubehelix' - ax = df.plot(kind='hexbin', x='A', y='B', colormap=cm) + ax = df.plot.hexbin(x='A', y='B', colormap=cm) self.assertEqual(ax.collections[0].cmap.name, cm) @slow def test_no_color_bar(self): df = self.hexbin_df - ax = df.plot(kind='hexbin', x='A', y='B', colorbar=None) + ax = df.plot.hexbin(x='A', y='B', colorbar=None) self.assertIs(ax.collections[0].colorbar, None) @slow def test_allow_cmap(self): df = self.hexbin_df - ax = df.plot(kind='hexbin', x='A', y='B', cmap='YlGn') + ax = df.plot.hexbin(x='A', y='B', cmap='YlGn') self.assertEqual(ax.collections[0].cmap.name, 'YlGn') with tm.assertRaises(TypeError): - df.plot(kind='hexbin', x='A', y='B', cmap='YlGn', - colormap='BuGn') + df.plot.hexbin(x='A', y='B', cmap='YlGn', + colormap='BuGn') @slow def test_pie_df(self): df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'], index=['a', 'b', 'c', 'd', 'e']) with tm.assertRaises(ValueError): - df.plot(kind='pie') + df.plot.pie() - ax = _check_plot_works(df.plot, kind='pie', y='Y') + ax 
= _check_plot_works(df.plot.pie, y='Y') self._check_text_labels(ax.texts, df.index) - ax = _check_plot_works(df.plot, kind='pie', y=2) + ax = _check_plot_works(df.plot.pie, y=2) self._check_text_labels(ax.texts, df.index) - axes = _check_plot_works(df.plot, kind='pie', subplots=True) + axes = _check_plot_works(df.plot.pie, subplots=True) self.assertEqual(len(axes), len(df.columns)) for ax in axes: self._check_text_labels(ax.texts, df.index) @@ -3023,7 +3041,7 @@ def test_pie_df(self): labels = ['A', 'B', 'C', 'D', 'E'] color_args = ['r', 'g', 'b', 'c', 'm'] - axes = _check_plot_works(df.plot, kind='pie', subplots=True, + axes = _check_plot_works(df.plot.pie, subplots=True, labels=labels, colors=color_args) self.assertEqual(len(axes), len(df.columns)) @@ -3036,7 +3054,7 @@ def test_pie_df_nan(self): for i in range(4): df.iloc[i, i] = np.nan fig, axes = self.plt.subplots(ncols=4) - df.plot(kind='pie', subplots=True, ax=axes, legend=True) + df.plot.pie(subplots=True, ax=axes, legend=True) base_expected = ['0', '1', '2', '3'] for i, ax in enumerate(axes): @@ -3206,14 +3224,14 @@ def test_errorbar_scatter(self): df_err = DataFrame(np.random.randn(5, 2) / 5, index=range(5), columns=['x', 'y']) - ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y') + ax = _check_plot_works(df.plot.scatter, x='x', y='y') self._check_has_errorbars(ax, xerr=0, yerr=0) - ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', xerr=df_err) + ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err) self._check_has_errorbars(ax, xerr=1, yerr=0) - ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', yerr=df_err) + ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err) self._check_has_errorbars(ax, xerr=0, yerr=1) - ax = _check_plot_works(df.plot, kind='scatter', x='x', y='y', + ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err, yerr=df_err) self._check_has_errorbars(ax, xerr=1, yerr=1) @@ -3223,12 +3241,12 @@ def 
_check_errorbar_color(containers, expected, has_err='has_xerr'): # GH 8081 df = DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e']) - ax = df.plot(kind='scatter', x='a', y='b', xerr='d', yerr='e', c='red') + ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red') self._check_has_errorbars(ax, xerr=1, yerr=1) _check_errorbar_color(ax.containers, 'red', has_err='has_xerr') _check_errorbar_color(ax.containers, 'red', has_err='has_yerr') - ax = df.plot(kind='scatter', x='a', y='b', yerr='e', color='green') + ax = df.plot.scatter(x='a', y='b', yerr='e', color='green') self._check_has_errorbars(ax, xerr=0, yerr=1) _check_errorbar_color(ax.containers, 'green', has_err='has_yerr') @@ -3586,6 +3604,15 @@ def test_hist_single_row(self): df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]}) df["Mark"].hist(by=df["ByCol"], bins=bins) + def test_plot_submethod_works(self): + df = DataFrame({'x': [1, 2, 3, 4, 5], + 'y': [1, 2, 3, 2, 1], + 'z': list('ababa')}) + df.groupby('z').plot.scatter('x', 'y') + tm.close() + df.groupby('z')['x'].plot.line() + tm.close() + def assert_is_valid_plot_return_object(objs): import matplotlib.pyplot as plt diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 9eab385a7a2a5..3337a978961c4 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -11,6 +11,7 @@ import numpy as np from pandas.util.decorators import cache_readonly, deprecate_kwarg +from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.common import AbstractMethodError from pandas.core.generic import _shared_docs, _shared_doc_kwargs @@ -1462,8 +1463,12 @@ def _post_plot_logic(self, ax, data): class ScatterPlot(PlanePlot): _kind = 'scatter' - def __init__(self, data, x, y, c=None, **kwargs): - super(ScatterPlot, self).__init__(data, x, y, **kwargs) + def __init__(self, data, x, y, s=None, c=None, **kwargs): + if s is None: + # hide the matplotlib default for size, in case we want to 
change + # the handling of this argument later + s = 20 + super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs) if com.is_integer(c) and not self.data.columns.holds_integer(): c = self.data.columns[c] self.c = c @@ -2367,10 +2372,12 @@ def _plot(data, x=None, y=None, subplots=False, a single number (e.g. `mean`, `max`, `sum`, `std`).""" series_note = "" -_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind, - klass_coord=df_coord, klass_ax=df_ax, - klass_unique=df_unique, klass_note=df_note) -_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind, +_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df', + klass_kind=df_kind, klass_coord=df_coord, + klass_ax=df_ax, klass_unique=df_unique, + klass_note=df_note) +_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s', + klass_kind=series_kind, klass_coord=series_coord, klass_ax=series_ax, klass_unique=series_unique, klass_note=series_note) @@ -2378,6 +2385,11 @@ def _plot(data, x=None, y=None, subplots=False, _shared_docs['plot'] = """ Make plots of %(klass)s using matplotlib / pylab. + *New in version 0.17.0:* Each plot kind has a corresponding method on the + ``%(klass)s.plot`` accessor: + ``%(klass_obj)s.plot(kind='line')`` is equivalent to + ``%(klass_obj)s.plot.line()``. 
+ Parameters ---------- data : %(klass)s @@ -2460,6 +2472,7 @@ def _plot(data, x=None, y=None, subplots=False, """ + @Appender(_shared_docs['plot'] % _shared_doc_df_kwargs) def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique subplots=False, sharex=None, sharey=False, layout=None, # Dataframe unique @@ -3384,6 +3397,428 @@ def _set_ticks_props(axes, xlabelsize=None, xrot=None, return axes +class BasePlotMethods(PandasObject): + def __init__(self, data): + self._data = data + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + +class SeriesPlotMethods(BasePlotMethods): + """Series plotting accessor and method + + Examples + -------- + >>> s.plot.line() + >>> s.plot.bar() + >>> s.plot.hist() + + Plotting methods can also be accessed by calling the accessor as a method + with the ``kind`` argument: + ``s.plot(kind='line')`` is equivalent to ``s.plot.line()`` + """ + def __call__(self, kind='line', ax=None, # Series unique + figsize=None, use_index=True, title=None, grid=None, + legend=False, style=None, logx=False, logy=False, loglog=False, + xticks=None, yticks=None, xlim=None, ylim=None, + rot=None, fontsize=None, colormap=None, table=False, + yerr=None, xerr=None, + label=None, secondary_y=False, # Series unique + **kwds): + return plot_series(self._data, kind=kind, ax=ax, figsize=figsize, + use_index=use_index, title=title, grid=grid, + legend=legend, style=style, logx=logx, logy=logy, + loglog=loglog, xticks=xticks, yticks=yticks, + xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, + colormap=colormap, table=table, yerr=yerr, + xerr=xerr, label=label, secondary_y=secondary_y, + **kwds) + __call__.__doc__ = plot_series.__doc__ + + def line(self, **kwds): + """ + Line plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
+ + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='line', **kwds) + + def bar(self, **kwds): + """ + Vertical bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='bar', **kwds) + + def barh(self, **kwds): + """ + Horizontal bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='barh', **kwds) + + def box(self, **kwds): + """ + Boxplot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='box', **kwds) + + def hist(self, bins=10, **kwds): + """ + Histogram + + .. versionadded:: 0.17.0 + + Parameters + ---------- + bins: integer, default 10 + Number of histogram bins to be used + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='hist', bins=bins, **kwds) + + def kde(self, **kwds): + """ + Kernel Density Estimate plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='kde', **kwds) + + density = kde + + def area(self, **kwds): + """ + Area plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
+ + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='area', **kwds) + + def pie(self, **kwds): + """ + Pie chart + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='pie', **kwds) + + +class FramePlotMethods(BasePlotMethods): + """DataFrame plotting accessor and method + + Examples + -------- + >>> df.plot.line() + >>> df.plot.scatter('x', 'y') + >>> df.plot.hexbin() + + These plotting methods can also be accessed by calling the accessor as a + method with the ``kind`` argument: + ``df.plot(kind='line')`` is equivalent to ``df.plot.line()`` + """ + def __call__(self, x=None, y=None, kind='line', ax=None, # Dataframe unique + subplots=False, sharex=None, sharey=False, layout=None, # Dataframe unique + figsize=None, use_index=True, title=None, grid=None, + legend=True, style=None, logx=False, logy=False, loglog=False, + xticks=None, yticks=None, xlim=None, ylim=None, + rot=None, fontsize=None, colormap=None, table=False, + yerr=None, xerr=None, + secondary_y=False, sort_columns=False, # Dataframe unique + **kwds): + return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax, + subplots=subplots, sharex=sharex, sharey=sharey, + layout=layout, figsize=figsize, use_index=use_index, + title=title, grid=grid, legend=legend, style=style, + logx=logx, logy=logy, loglog=loglog, xticks=xticks, + yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, + fontsize=fontsize, colormap=colormap, table=table, + yerr=yerr, xerr=xerr, secondary_y=secondary_y, + sort_columns=sort_columns, **kwds) + __call__.__doc__ = plot_frame.__doc__ + + def line(self, x=None, y=None, **kwds): + """ + Line plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. 
+ **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='line', x=x, y=y, **kwds) + + def bar(self, x=None, y=None, **kwds): + """ + Vertical bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='bar', x=x, y=y, **kwds) + + def barh(self, x=None, y=None, **kwds): + """ + Horizontal bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='barh', x=x, y=y, **kwds) + + def box(self, by=None, **kwds): + """ + Boxplot + + .. versionadded:: 0.17.0 + + Parameters + --------- + by : string or sequence + Column in the DataFrame to group by. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='box', by=by, **kwds) + + def hist(self, by=None, bins=10, **kwds): + """ + Histogram + + .. versionadded:: 0.17.0 + + Parameters + ---------- + by : string or sequence + Column in the DataFrame to group by. + bins: integer, default 10 + Number of histogram bins to be used + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='hist', by=by, bins=bins, **kwds) + + def kde(self, **kwds): + """ + Kernel Density Estimate plot + + .. 
versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='kde', **kwds) + + density = kde + + def area(self, x=None, y=None, **kwds): + """ + Area plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='area', x=x, y=y, **kwds) + + def pie(self, y=None, **kwds): + """ + Pie chart + + .. versionadded:: 0.17.0 + + Parameters + ---------- + y : label or position, optional + Column to plot. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='pie', y=y, **kwds) + + def scatter(self, x, y, s=None, c=None, **kwds): + """ + Scatter plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + s : scalar or array_like, optional + Size of each point. + c : label or position, optional + Color of each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds) + + def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, + **kwds): + """ + Hexbin plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + C : label or position, optional + The value at each `(x, y)` point. 
+ reduce_C_function : callable, optional + Function of one argument that reduces all the values in a bin to + a single number (e.g. `mean`, `max`, `sum`, `std`). + gridsize : int, optional + Number of bins. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + if reduce_C_function is not None: + kwds['reduce_C_function'] = reduce_C_function + if gridsize is not None: + kwds['gridsize'] = gridsize + return self(kind='hexbin', x=x, y=y, C=C, **kwds) + + if __name__ == '__main__': # import pandas.rpy.common as com # sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
Fixes #9124 This PR adds plotting sub-methods like `df.plot.scatter()` as an alternative to using `df.plot(kind='scatter')`. I've added meaningful function signatures and documentation for a few of these methods, but I would greatly appreciate help to fill in the rest -- this is a lot of documentation to assemble/reconstruct! The entire point of this PR, of course, is to have better introspection and docstrings. Todo list: - [x] Basic docstrings/signatures - [x] `area` - [x] `line` - [x] `bar` - [x] `barh` - [x] `box` - [x] `hexbin` - [x] `hist` - [x] `kde`/`density` - [x] `pie` - [x] `scatter` - [x] Write tests for the methods - [x] Fix groupby plots (tests currently failing) - [x] Plotting docs - [x] API docs - [x] Release notes
https://api.github.com/repos/pandas-dev/pandas/pulls/9321
2015-01-21T07:09:09Z
2015-09-11T04:42:33Z
2015-09-11T04:42:33Z
2015-09-11T19:53:48Z
DOC: delete removed Timedelta properties (see GH9257) from API overview
diff --git a/doc/source/api.rst b/doc/source/api.rst index b6fd14f425bd0..9d40fe9114f97 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -498,10 +498,7 @@ Due to implementation details the methods show up here as methods of the :toctree: generated/ TimedeltaProperties.days - TimedeltaProperties.hours - TimedeltaProperties.minutes TimedeltaProperties.seconds - TimedeltaProperties.milliseconds TimedeltaProperties.microseconds TimedeltaProperties.nanoseconds TimedeltaProperties.components @@ -1358,10 +1355,7 @@ Components :toctree: generated/ TimedeltaIndex.days - TimedeltaIndex.hours - TimedeltaIndex.minutes TimedeltaIndex.seconds - TimedeltaIndex.milliseconds TimedeltaIndex.microseconds TimedeltaIndex.nanoseconds TimedeltaIndex.components diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py index c953c0961a596..7f6a0bc60dd57 100644 --- a/pandas/tseries/common.py +++ b/pandas/tseries/common.py @@ -139,6 +139,15 @@ def to_pytimedelta(self): @property def components(self): + """ + Return a dataframe of the components (days, hours, minutes, + seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. + + Returns + ------- + a DataFrame + + """ return self.values.components.set_index(self.index) TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex, diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index 2afdff2982d8a..db23c42294fd5 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -370,7 +370,8 @@ def nanoseconds(self): @property def components(self): """ - Return a dataframe of the components of the Timedeltas + Return a dataframe of the components (days, hours, minutes, + seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. Returns -------
Related to #9257. The removed TimedeltaProperties were not removed from the api.rst overview.
https://api.github.com/repos/pandas-dev/pandas/pulls/9318
2015-01-20T20:20:35Z
2015-01-21T08:43:49Z
2015-01-21T08:43:49Z
2015-01-21T08:43:49Z
Doc: API docstrings for indexers (GH6920)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7fa64e0b4ca91..6101d11e98b37 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1034,7 +1034,7 @@ def _indexer(self): setattr(self, iname, i) return i - setattr(cls, name, property(_indexer)) + setattr(cls, name, property(_indexer, doc=indexer.__doc__)) # add to our internal names set cls._internal_names_set.add(iname) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e305eb828f410..f0543488fcd0d 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1166,7 +1166,25 @@ def _get_slice_axis(self, slice_obj, axis=0): class _IXIndexer(_NDFrameIndexer): - """ A primarily location based indexer, with integer fallback """ + """A primarily label-location based indexer, with integer position + fallback. + + ``.ix[]`` supports mixed integer and label based access. It is + primarily label based, but will fall back to integer positional + access unless the corresponding axis is of integer type. + + ``.ix`` is the most general indexer and will support any of the + inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating + point label schemes. ``.ix`` is exceptionally useful when dealing + with mixed positional and label based hierachical indexes. + + However, when an axis is integer based, ONLY label based access + and not positional access is supported. Thus, in such cases, it's + usually better to be explicit and use ``.iloc`` or ``.loc``. + + See more at :ref:`Advanced Indexing <advanced>`. + + """ def _has_valid_type(self, key, axis): if isinstance(key, slice): @@ -1224,7 +1242,27 @@ def _get_slice_axis(self, slice_obj, axis=0): class _LocIndexer(_LocationIndexer): - """ purely label based location based indexing """ + """Purely label-location based indexer for selection by label. + + ``.loc[]`` is primarily label based, but may also be used with a + boolean array. + + Allowed inputs are: + + - A single label, e.g. 
``5`` or ``'a'``, (note that ``5`` is + interpreted as a *label* of the index, and **never** as an + integer position along the index). + - A list or array of labels, e.g. ``['a', 'b', 'c']``. + - A slice object with labels, e.g. ``'a':'f'`` (note that contrary + to usual python slices, **both** the start and the stop are included!). + - A boolean array. + + ``.loc`` will raise a ``KeyError`` when the items are not found. + + See more at :ref:`Selection by Label <indexing.label>` + + """ + _valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! Can be slices of integers if the " "index is integers), listlike of labels, boolean") @@ -1340,7 +1378,27 @@ def _getitem_axis(self, key, axis=0): class _iLocIndexer(_LocationIndexer): - """ purely integer based location based indexing """ + """Purely integer-location based indexing for selection by position. + + ``.iloc[]`` is primarily integer position based (from ``0`` to + ``length-1`` of the axis), but may also be used with a boolean + array. + + Allowed inputs are: + + - An integer, e.g. ``5``. + - A list or array of integers, e.g. ``[4, 3, 0]``. + - A slice object with ints, e.g. ``1:7``. + - A boolean array. + + ``.iloc`` will raise ``IndexError`` if a requested indexer is + out-of-bounds, except *slice* indexers which allow out-of-bounds + indexing (this conforms with python/numpy *slice* semantics). + + See more at :ref:`Selection by Position <indexing.integer>` + + """ + _valid_types = ("integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array") _exception = IndexError @@ -1512,7 +1570,13 @@ def __setitem__(self, key, value): class _AtIndexer(_ScalarAccessIndexer): - """ label based scalar accessor """ + """Fast label-based scalar accessor + + Similarly to ``loc``, ``at`` provides **label** based scalar lookups. + You can also set using these indexers. 
+ + """ + _takeable = False def _convert_key(self, key, is_setter=False): @@ -1535,7 +1599,13 @@ def _convert_key(self, key, is_setter=False): class _iAtIndexer(_ScalarAccessIndexer): - """ integer based scalar accessor """ + """Fast integer location scalar accessor. + + Similarly to ``iloc``, ``iat`` provides **integer** based lookups. + You can also set using these indexers. + + """ + _takeable = True def _has_valid_setitem_indexer(self, indexer):
WIP Closes #6920 For now, I just copied the relevant parts of the tutorial docs (in indexing.rst). I should also include `__getitem__` for `[]`
https://api.github.com/repos/pandas-dev/pandas/pulls/9316
2015-01-20T19:35:04Z
2015-03-05T23:30:27Z
2015-03-05T23:30:26Z
2015-03-05T23:31:16Z
DOC: update install.rst, required version of dateutil is 1.5 or higher G...
diff --git a/doc/source/install.rst b/doc/source/install.rst index 0331e8a47903c..dd9021d0439dc 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -248,7 +248,7 @@ Dependencies ------------ * `NumPy <http://www.numpy.org>`__: 1.7.0 or higher -* `python-dateutil <http://labix.org/python-dateutil>`__ 1.5 +* `python-dateutil <http://labix.org/python-dateutil>`__ 1.5 or higher * `pytz <http://pytz.sourceforge.net/>`__ * Needed for time zone support
Just show in the docs that required version of dateutil is >= 1.5 (#9305)
https://api.github.com/repos/pandas-dev/pandas/pulls/9310
2015-01-20T15:12:16Z
2015-01-20T15:14:55Z
2015-01-20T15:14:55Z
2015-01-20T20:03:54Z
BUG: 0/frame numeric ops buggy (GH9144)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index cd7cdbb645686..3d82ba1f1b265 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -131,6 +131,37 @@ methods (:issue:`9088`). dtype: int64 +- During division involving a ``Series`` or ``DataFrame``, ``0/0`` and ``0//0`` now give ``np.nan`` instead of ``np.inf``. (:issue:`9144`, :issue:`8445`) + + Previous Behavior + + .. code-block:: python + + In [2]: p = pd.Series([0, 1]) + + In [3]: p / 0 + Out[3]: + 0 inf + 1 inf + dtype: float64 + + In [4]: p // 0 + Out[4]: + 0 inf + 1 inf + dtype: float64 + + + + New Behavior + + .. ipython:: python + + p = pd.Series([0, 1]) + p / 0 + p // 0 + + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/common.py b/pandas/core/common.py index b48e73ca7c85c..937dc421e3926 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1395,36 +1395,40 @@ def _fill_zeros(result, x, y, name, fill): mask the nan's from x """ - if fill is not None: + if fill is None or is_float_dtype(result): + return result + + if name.startswith(('r', '__r')): + x,y = y,x - if name.startswith('r'): - x,y = y,x + if np.isscalar(y): + y = np.array(y) + if is_integer_dtype(y): - if not isinstance(y, np.ndarray): - dtype, value = _infer_dtype_from_scalar(y) - y = np.empty(result.shape, dtype=dtype) - y.fill(value) + if (y == 0).any(): - if is_integer_dtype(y): + # GH 7325, mask and nans must be broadcastable (also: PR 9308) + # Raveling and then reshaping makes np.putmask faster + mask = ((y == 0) & ~np.isnan(result)).ravel() - if (y.ravel() == 0).any(): - shape = result.shape - result = result.ravel().astype('float64') + shape = result.shape + result = result.astype('float64', copy=False).ravel() - # GH 7325, mask and nans must be broadcastable - signs = np.sign(result) - mask = ((y == 0) & ~np.isnan(x)).ravel() + np.putmask(result, mask, fill) - np.putmask(result, mask, fill) + # if we have a fill of inf, then sign it correctly + # (GH 
6178 and PR 9308) + if np.isinf(fill): + signs = np.sign(y if name.startswith(('r', '__r')) else x) + negative_inf_mask = (signs.ravel() < 0) & mask + np.putmask(result, negative_inf_mask, -fill) - # if we have a fill of inf, then sign it - # correctly - # GH 6178 - if np.isinf(fill): - np.putmask(result,(signs<0) & mask, -fill) + if "floordiv" in name: # (PR 9308) + nan_mask = ((y == 0) & (x == 0)).ravel() + np.putmask(result, nan_mask, np.nan) - result = result.reshape(shape) + result = result.reshape(shape) return result diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 64672a9e75244..954d2c8a77326 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -81,7 +81,8 @@ def names(x): rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'), default_axis=default_axis, reversed=True), rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'), - default_axis=default_axis, reversed=True), + default_axis=default_axis, fill_zeros=np.nan, + reversed=True), ) new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 82e5d68187b1e..18e699695f330 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -20,7 +20,7 @@ ) from pandas import compat -from numpy import random, nan +from numpy import random, nan, inf from numpy.random import randn import numpy as np import numpy.ma as ma @@ -5138,23 +5138,26 @@ def test_modulo(self): def test_div(self): - # integer div, but deal with the 0's + # integer div, but deal with the 0's (GH 9144) p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] }) result = p / p - ### this is technically wrong as the integer portion is coerced to float ### - expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) }) + expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]), + 'second': Series([nan, nan, nan, 1])}) 
assert_frame_equal(result,expected) - result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf) + result2 = DataFrame(p.values.astype('float') / p.values, index=p.index, + columns=p.columns) assert_frame_equal(result2,expected) result = p / 0 - expected = DataFrame(np.inf,index=p.index,columns=p.columns) + expected = DataFrame(inf, index=p.index, columns=p.columns) + expected.iloc[0:3, 1] = nan assert_frame_equal(result,expected) # numpy has a slightly different (wrong) treatement - result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf) + result2 = DataFrame(p.values.astype('float64') / 0, index=p.index, + columns=p.columns) assert_frame_equal(result2,expected) p = DataFrame(np.random.randn(10, 5)) @@ -5604,7 +5607,7 @@ def test_arith_flex_series(self): # broadcasting issue in GH7325 df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64') - expected = DataFrame([[np.inf,np.inf],[1.0,1.5],[1.0,1.25]]) + expected = DataFrame([[nan, inf], [1.0, 1.5], [1.0, 1.25]]) result = df.div(df[0],axis='index') assert_frame_equal(result,expected) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index dd2dd4e6066b9..d54dae2aca6d2 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -11,7 +11,7 @@ import nose -from numpy import nan +from numpy import nan, inf import numpy as np import numpy.ma as ma import pandas as pd @@ -2689,6 +2689,17 @@ def test_modulo(self): result2 = p['second'] % p['first'] self.assertFalse(np.array_equal(result, result2)) + # GH 9144 + s = Series([0, 1]) + + result = s % 0 + expected = Series([nan, nan]) + assert_series_equal(result, expected) + + result = 0 % s + expected = Series([nan, 0.0]) + assert_series_equal(result, expected) + def test_div(self): # no longer do integer div for any ops, but deal with the 0's @@ -2730,6 +2741,21 @@ def test_div(self): result = p['second'] / p['first'] assert_series_equal(result, 
expected) + # GH 9144 + s = Series([-1, 0, 1]) + + result = 0 / s + expected = Series([0.0, nan, 0.0]) + assert_series_equal(result, expected) + + result = s / 0 + expected = Series([-inf, nan, inf]) + assert_series_equal(result, expected) + + result = s // 0 + expected = Series([-inf, nan, inf]) + assert_series_equal(result, expected) + def test_operators(self): def _check_op(series, other, op, pos_only=False): @@ -6414,17 +6440,17 @@ def test_pct_change_shift_over_nas(self): def test_autocorr(self): # Just run the function corr1 = self.ts.autocorr() - + # Now run it with the lag parameter corr2 = self.ts.autocorr(lag=1) - + # corr() with lag needs Series of at least length 2 if len(self.ts) <= 2: self.assertTrue(np.isnan(corr1)) self.assertTrue(np.isnan(corr2)) else: self.assertEqual(corr1, corr2) - + # Choose a random lag between 1 and length of Series - 2 # and compare the result with the Series corr() function n = 1 + np.random.randint(max(1, len(self.ts) - 2)) diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py index 5ec2d9fcfc2cf..db9a6b730064e 100644 --- a/vb_suite/binary_ops.py +++ b/vb_suite/binary_ops.py @@ -72,6 +72,51 @@ Benchmark("df * df2", setup, name='frame_mult_no_ne',cleanup="expr.set_use_numexpr(True)", start_date=datetime(2013, 2, 26)) +#---------------------------------------------------------------------- +# division + +setup = common_setup + """ +df = DataFrame(np.random.randn(1000, 1000)) +""" +frame_float_div_by_zero = \ + Benchmark("df / 0", setup, name='frame_float_div_by_zero') + +setup = common_setup + """ +df = DataFrame(np.random.randn(1000, 1000)) +""" +frame_float_floor_by_zero = \ + Benchmark("df // 0", setup, name='frame_float_floor_by_zero') + +setup = common_setup + """ +df = DataFrame(np.random.random_integers((1000, 1000))) +""" +frame_int_div_by_zero = \ + Benchmark("df / 0", setup, name='frame_int_div_by_zero') + +setup = common_setup + """ +df = DataFrame(np.random.randn(1000, 1000)) +df2 = 
DataFrame(np.random.randn(1000, 1000)) +""" +frame_float_div = \ + Benchmark("df // df2", setup, name='frame_float_div') + +#---------------------------------------------------------------------- +# modulo + +setup = common_setup + """ +df = DataFrame(np.random.randn(1000, 1000)) +df2 = DataFrame(np.random.randn(1000, 1000)) +""" +frame_float_mod = \ + Benchmark("df / df2", setup, name='frame_float_mod') + +setup = common_setup + """ +df = DataFrame(np.random.random_integers((1000, 1000))) +df2 = DataFrame(np.random.random_integers((1000, 1000))) +""" +frame_int_mod = \ + Benchmark("df / df2", setup, name='frame_int_mod') + #---------------------------------------------------------------------- # multi and
closes #9144 closes #8445 --- Here's the results from testing the vbenches related to DataFrames (I also added 6 vbenches). ``` Invoked with : --ncalls: 10 --repeats: 10 ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_float_div_by_zero | 1.5656 | 23.4006 | 0.0669 | frame_float_floor_by_zero | 3.1077 | 24.2786 | 0.1280 | groupby_frame_nth_none | 1.4918 | 2.1483 | 0.6944 | groupby_frame_nth_any | 4.0633 | 5.4162 | 0.7502 | dataframe_resample_max_string | 1.3970 | 1.5892 | 0.8791 | stat_ops_frame_mean_int_axis_1 | 3.4506 | 3.9006 | 0.8846 | stat_ops_frame_mean_float_axis_1 | 3.6501 | 4.1200 | 0.8859 | dataframe_resample_max_numpy | 1.4134 | 1.5861 | 0.8911 | frame_reindex_upcast | 5.6926 | 6.3748 | 0.8930 | stat_ops_frame_sum_int_axis_1 | 3.2424 | 3.6136 | 0.8973 | dataframe_resample_min_numpy | 1.4320 | 1.5937 | 0.8985 | stat_ops_frame_mean_float_axis_0 | 3.5722 | 3.9361 | 0.9075 | frame_ctor_dtindex_Nanox2 | 0.8544 | 0.9404 | 0.9086 | frame_dropna_axis1_any | 17.6609 | 19.4360 | 0.9087 | frame_ctor_dtindex_Hourx1 | 0.8695 | 0.9554 | 0.9101 | frame_ctor_dtindex_Secondx1 | 0.8586 | 0.9384 | 0.9150 | frame_ctor_dtindex_Microx2 | 0.8564 | 0.9341 | 0.9168 | dataframe_resample_mean_string | 1.8672 | 2.0302 | 0.9197 | frame_ctor_dtindex_Nanox1 | 0.8526 | 0.9242 | 0.9225 | stat_ops_level_frame_sum_multiple | 4.7174 | 5.0987 | 0.9252 | frame_ctor_dtindex_Hourx2 | 0.8645 | 0.9342 | 0.9254 | frame_xs_row | 0.0242 | 0.0261 | 0.9269 | frame_ctor_dtindex_BMonthBeginx1 | 1.0908 | 1.1755 | 0.9280 | stat_ops_frame_sum_float_axis_0 | 3.6003 | 3.8713 | 0.9300 | eval_frame_mult_python_one_thread | 12.8115 | 13.7715 | 0.9303 | stat_ops_frame_sum_int_axis_0 | 3.2616 | 3.4839 | 0.9362 | eval_frame_mult_python | 12.8203 | 13.6336 | 0.9403 | dataframe_resample_min_string | 1.4186 | 1.5066 | 0.9416 | frame_shift_axis0 | 7.3947 | 
7.8502 | 0.9420 | stat_ops_frame_sum_float_axis_1 | 3.5332 | 3.7481 | 0.9427 | frame_ctor_dtindex_BYearEndx2 | 1.0797 | 1.1452 | 0.9428 | frame_fillna_many_columns_pad | 4.3826 | 4.6457 | 0.9434 | frame_ctor_dtindex_Secondx2 | 0.8635 | 0.9153 | 0.9434 | frame_get_dtype_counts | 0.0601 | 0.0635 | 0.9467 | frame_dropna_axis1_all | 34.1749 | 36.0852 | 0.9471 | frame_float_equal | 2.1541 | 2.2727 | 0.9478 | dataframe_resample_mean_numpy | 1.8673 | 1.9664 | 0.9496 | append_frame_single_mixed | 1.2900 | 1.3544 | 0.9525 | eval_frame_add_python | 13.0182 | 13.6672 | 0.9525 | eval_frame_and_python_one_thread | 23.5453 | 24.5942 | 0.9574 | frame_drop_dup_inplace | 1.8290 | 1.9081 | 0.9585 | groupby_frame_cython_many_columns | 2.4763 | 2.5806 | 0.9596 | frame_get_numeric_data | 0.0697 | 0.0725 | 0.9612 | eval_frame_and_python | 23.6922 | 24.6444 | 0.9614 | frame_mask_bools | 8.5507 | 8.8782 | 0.9631 | frame_shift_axis_1 | 11.5263 | 11.9162 | 0.9673 | eval_frame_chained_cmp_python | 68.9274 | 71.2359 | 0.9676 | frame_multi_and_st | 20.8511 | 21.5107 | 0.9693 | eval_frame_add_python_one_thread | 11.9996 | 12.3749 | 0.9697 | frame_mask_floats | 5.7624 | 5.9394 | 0.9702 | frame_multi_and_no_ne | 21.2904 | 21.9065 | 0.9719 | frame_apply_axis_1 | 45.9807 | 47.2289 | 0.9736 | join_dataframe_integer_2key | 3.5381 | 3.6330 | 0.9739 | frame_ctor_nested_dict_int64 | 48.7418 | 50.0032 | 0.9748 | eval_frame_chained_cmp_python_one_thread | 67.2400 | 68.9405 | 0.9753 | frame_to_csv_mixed | 404.9059 | 414.6069 | 0.9766 | groupby_frame_singlekey_integer | 1.4465 | 1.4805 | 0.9770 | frame_dropna_axis1_all_mixed_dtypes | 137.6919 | 140.7975 | 0.9779 | frame_insert_500_columns_end | 65.0207 | 66.4810 | 0.9780 | frame_drop_dup_na_inplace | 1.6683 | 1.7045 | 0.9788 | frame_iteritems | 17.1694 | 17.5347 | 0.9792 | stat_ops_level_frame_sum | 2.1278 | 2.1730 | 0.9792 | reindex_frame_level_align | 0.6034 | 0.6153 | 0.9807 | frame_count_level_axis0_mixed_dtypes_multi | 73.6246 | 75.0608 | 0.9809 | 
dataframe_reindex | 0.2569 | 0.2615 | 0.9824 | frame_reindex_axis1 | 44.7410 | 45.5110 | 0.9831 | groupby_frame_median | 5.1782 | 5.2658 | 0.9834 | frame_to_csv_date_formatting | 6.7534 | 6.8611 | 0.9843 | frame_fancy_lookup_all | 11.3160 | 11.4933 | 0.9846 | frame_ctor_dtindex_DateOffsetx2 | 0.7447 | 0.7562 | 0.9848 | join_dataframe_integer_key | 1.2000 | 1.2166 | 0.9863 | frame_reindex_columns | 0.2247 | 0.2276 | 0.9872 | frame_from_records_generator | 41.6522 | 42.1751 | 0.9876 | frame_ctor_dtindex_QuarterBeginx2 | 0.9039 | 0.9145 | 0.9884 | frame_sort_index_by_columns | 24.9708 | 25.2553 | 0.9887 | join_dataframe_index_single_key_bigger | 8.9800 | 9.0787 | 0.9891 | frame_apply_ref_by_name | 8.7795 | 8.8757 | 0.9892 | stat_ops_frame_mean_int_axis_0 | 3.2407 | 3.2742 | 0.9898 | indexing_dataframe_boolean | 82.9152 | 83.7444 | 0.9901 | frame_add | 3.7720 | 3.8059 | 0.9911 | frame_ctor_dtindex_BYearBeginx2 | 1.0778 | 1.0870 | 0.9915 | frame_drop_duplicates_na | 14.2417 | 14.3591 | 0.9918 | frame_ctor_dtindex_CBMonthBeginx1 | 2.2563 | 2.2738 | 0.9923 | frame_ctor_dtindex_Weekx2 | 0.7799 | 0.7859 | 0.9924 | join_dataframe_index_single_key_small | 8.2365 | 8.2989 | 0.9925 | frame_dropna_axis1_any_mixed_dtypes | 124.3344 | 125.2386 | 0.9928 | frame_ctor_list_of_dict | 42.8818 | 43.1860 | 0.9930 | frame_ctor_dtindex_YearEndx2 | 0.8680 | 0.8737 | 0.9935 | append_frame_single_homogenous | 0.8964 | 0.9022 | 0.9936 | groupby_frame_apply | 22.1934 | 22.3310 | 0.9938 | frame_mult_no_ne | 3.8051 | 3.8274 | 0.9942 | frame_nonunique_equal | 7.3191 | 7.3515 | 0.9956 | frame_ctor_dtindex_BMonthBeginx2 | 1.0844 | 1.0890 | 0.9958 | frame_apply_lambda_mean | 3.8462 | 3.8609 | 0.9962 | frame_ctor_nested_dict | 46.5123 | 46.6852 | 0.9963 | frame_count_level_axis1_mixed_dtypes_multi | 61.2177 | 61.4444 | 0.9963 | frame_ctor_dtindex_BMonthEndx2 | 0.9260 | 0.9288 | 0.9970 | frame_html_repr_trunc_mi | 22.1488 | 22.2066 | 0.9974 | frame_multi_and | 21.2545 | 21.3085 | 0.9975 | frame_add_st 
| 3.7661 | 3.7741 | 0.9979 | frame_ctor_dtindex_BYearBeginx1 | 1.0904 | 1.0926 | 0.9980 | join_dataframe_index_multi | 13.3128 | 13.3311 | 0.9986 | frame_getitem_single_column | 12.7089 | 12.7029 | 1.0005 | frame_object_equal | 7.3267 | 7.3224 | 1.0006 | frame_from_records_generator_nrows | 0.5950 | 0.5940 | 1.0016 | frame_to_string_floats | 15.5732 | 15.5458 | 1.0018 | frame_add_no_ne | 3.8073 | 3.7999 | 1.0019 | frame_ctor_dtindex_CBMonthBeginx2 | 1.9428 | 1.9389 | 1.0020 | frame_ctor_dtindex_BusinessDayx2 | 0.8365 | 0.8348 | 1.0020 | frame_ctor_dtindex_BDayx2 | 0.8400 | 0.8381 | 1.0023 | frame_float_mod | 2.4775 | 2.4717 | 1.0024 | frame_reindex_axis0 | 42.1150 | 42.0129 | 1.0024 | frame_drop_duplicates | 13.3927 | 13.3401 | 1.0039 | sparse_frame_constructor | 3.7919 | 3.7743 | 1.0047 | frame_iloc_big | 0.1020 | 0.1015 | 1.0049 | frame_interpolate_some_good_infer | 1.8705 | 1.8610 | 1.0051 | indexing_dataframe_boolean_st | 85.3268 | 84.8837 | 1.0052 | frame_dropna_axis0_any | 18.0998 | 18.0034 | 1.0054 | frame_count_level_axis1_multi | 57.8269 | 57.5086 | 1.0055 | indexing_dataframe_boolean_rows | 0.2319 | 0.2306 | 1.0056 | indexing_dataframe_boolean_rows_object | 0.3911 | 0.3880 | 1.0080 | frame_dropna_axis0_any_mixed_dtypes | 125.2823 | 124.2290 | 1.0085 | frame_repr_wide | 8.4213 | 8.3428 | 1.0094 | frame_apply_pass_thru | 2.7872 | 2.7612 | 1.0094 | frame_dtypes | 0.0732 | 0.0725 | 1.0099 | frame_to_html_mixed | 120.9245 | 119.7213 | 1.0101 | frame_ctor_dtindex_DateOffsetx1 | 0.7530 | 0.7452 | 1.0105 | frame_ctor_dtindex_Dayx1 | 0.8844 | 0.8746 | 1.0112 | groupby_frame_apply_overhead | 5.2761 | 5.2173 | 1.0113 | frame_ctor_dtindex_Millix1 | 0.9011 | 0.8910 | 1.0113 | frame_count_level_axis0_multi | 43.2266 | 42.7371 | 1.0115 | reindex_frame_level_reindex | 0.6016 | 0.5946 | 1.0117 | frame_ctor_dtindex_BQuarterBeginx1 | 1.1092 | 1.0958 | 1.0122 | frame_reindex_both_axes | 13.9118 | 13.7180 | 1.0141 | join_dataframe_index_single_key_bigger_sort | 11.0337 | 
10.8757 | 1.0145 | frame_ctor_dtindex_Weekx1 | 0.7509 | 0.7395 | 1.0154 | frame_ctor_dtindex_BMonthEndx1 | 0.9711 | 0.9547 | 1.0172 | indexing_dataframe_boolean_no_ne | 87.0329 | 85.4171 | 1.0189 | frame_fancy_lookup | 2.0553 | 2.0135 | 1.0208 | frame_mult_st | 3.8758 | 3.7959 | 1.0210 | frame_repr_tall | 12.1078 | 11.8041 | 1.0257 | frame_insert_100_columns_begin | 24.3260 | 23.6877 | 1.0269 | frame_ctor_dtindex_QuarterEndx2 | 1.0204 | 0.9927 | 1.0279 | frame_iteritems_cached | 0.3542 | 0.3440 | 1.0297 | frame_ctor_dtindex_Easterx2 | 0.9328 | 0.9050 | 1.0307 | frame_interpolate | 64.3447 | 62.4263 | 1.0307 | frame_html_repr_trunc_si | 17.4936 | 16.9715 | 1.0308 | frame_mult | 3.9272 | 3.8050 | 1.0321 | frame_dropna_axis0_all_mixed_dtypes | 142.3560 | 137.7486 | 1.0334 | frame_from_series | 0.0670 | 0.0648 | 1.0338 | frame_apply_np_mean | 4.2445 | 4.0938 | 1.0368 | frame_interpolate_some_good | 1.0546 | 1.0159 | 1.0381 | frame_ctor_dtindex_MonthBeginx1 | 0.9428 | 0.9080 | 1.0383 | frame_ctor_dtindex_Minutex1 | 0.8931 | 0.8599 | 1.0386 | frame_constructor_ndarray | 0.0554 | 0.0532 | 1.0408 | frame_ctor_dtindex_BQuarterEndx2 | 1.0568 | 1.0139 | 1.0423 | frame_ctor_dtindex_QuarterEndx1 | 1.0460 | 1.0035 | 1.0423 | frame_getitem_single_column2 | 12.9310 | 12.3968 | 1.0431 | frame_ctor_dtindex_MonthBeginx2 | 0.9435 | 0.9042 | 1.0435 | frame_ctor_dtindex_Microx1 | 0.9086 | 0.8704 | 1.0439 | frame_ctor_dtindex_CustomBusinessDayx2 | 0.8865 | 0.8490 | 1.0441 | frame_ctor_dtindex_CustomBusinessDayx1 | 0.8898 | 0.8517 | 1.0447 | frame_to_csv2 | 82.5006 | 78.9100 | 1.0455 | frame_ctor_dtindex_BQuarterBeginx2 | 1.1443 | 1.0945 | 1.0455 | frame_ctor_dtindex_BQuarterEndx1 | 1.0723 | 1.0249 | 1.0462 | frame_ctor_dtindex_Dayx2 | 0.9089 | 0.8669 | 1.0485 | frame_ctor_dtindex_CDayx1 | 0.8952 | 0.8536 | 1.0487 | frame_ctor_dtindex_Easterx1 | 0.9384 | 0.8919 | 1.0521 | frame_ctor_dtindex_YearBeginx1 | 0.8962 | 0.8514 | 1.0526 | frame_float_div | 4.8441 | 4.6001 | 1.0530 | 
frame_reindex_both_axes_ix | 14.6171 | 13.8707 | 1.0538 | frame_ctor_dtindex_YearEndx1 | 0.9240 | 0.8754 | 1.0555 | frame_ctor_dtindex_QuarterBeginx1 | 0.9843 | 0.9323 | 1.0558 | frame_ctor_dtindex_BDayx1 | 0.8610 | 0.8153 | 1.0561 | frame_ctor_dtindex_MonthEndx2 | 0.9667 | 0.9151 | 1.0564 | frame_ctor_dtindex_Millix2 | 0.9122 | 0.8625 | 1.0576 | frame_to_csv | 96.1645 | 90.8887 | 1.0580 | frame_ctor_dtindex_CDayx2 | 0.9058 | 0.8557 | 1.0585 | frame_boolean_row_select | 0.1848 | 0.1741 | 1.0615 | frame_loc_dups | 0.7166 | 0.6750 | 1.0616 | frame_assign_timeseries_index | 0.5794 | 0.5451 | 1.0629 | frame_ctor_dtindex_Minutex2 | 0.9103 | 0.8556 | 1.0639 | frame_ctor_dtindex_CBMonthEndx1 | 3.0158 | 2.8304 | 1.0655 | frame_apply_user_func | 57.2865 | 53.6327 | 1.0681 | frame_ctor_dtindex_CBMonthEndx2 | 3.0658 | 2.8691 | 1.0686 | frame_ctor_dtindex_BusinessDayx1 | 0.8704 | 0.8101 | 1.0744 | frame_ctor_dtindex_YearBeginx2 | 0.9055 | 0.8417 | 1.0758 | frame_ctor_dtindex_MonthEndx1 | 0.9801 | 0.9096 | 1.0775 | frame_iloc_dups | 0.1877 | 0.1736 | 1.0813 | frame_dropna_axis0_all | 31.8504 | 29.3697 | 1.0845 | frame_fillna_inplace | 8.8447 | 8.0231 | 1.1024 | frame_ctor_dtindex_BYearEndx1 | 1.2137 | 1.0994 | 1.1040 | frame_isnull | 0.6357 | 0.5477 | 1.1606 | frame_xs_mi_ix | 2.3058 | 1.9489 | 1.1831 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [e33f3bc] : BUG: Fix #9144 #8445 Fix how core.common._fill_zeros handles div and mod by zero Base [76195fb] : Merge pull request #9498 from jreback/consist ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9308
2015-01-20T07:28:48Z
2015-02-16T12:35:24Z
2015-02-16T12:35:23Z
2018-01-21T09:03:00Z
Fix plotting memory leak
diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt index 7166801b3fbf0..d36f094ae00cd 100755 --- a/doc/source/whatsnew/v0.16.1.txt +++ b/doc/source/whatsnew/v0.16.1.txt @@ -141,7 +141,6 @@ Bug Fixes - - Bug in unequal comparisons between categorical data and a scalar, which was not in the categories (e.g. ``Series(Categorical(list("abc"), ordered=True)) > "d"``. This returned ``False`` for all elements, but now raises a ``TypeError``. Equality comparisons also now return ``False`` for ``==`` and ``True`` for ``!=``. (:issue:`9848`) - Bug in DataFrame ``__setitem__`` when right hand side is a dictionary (:issue:`9874`) - Bug in ``where`` when dtype is ``datetime64/timedelta64``, but dtype of other is not (:issue:`9804`) @@ -164,3 +163,4 @@ Bug Fixes - Fixed latex output for multi-indexed dataframes (:issue:`9778`) - Bug causing an exception when setting an empty range using ``DataFrame.loc`` (:issue:`9596`) +- Fixed memory leak in ``AreaPlot`` and ``LinePlot`` that prevented calls to ``plt.close()`` from having any effect. (:issue:`9003`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 7ec57c0304530..638d4bae8e7d5 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -28,6 +28,8 @@ from numpy.testing import assert_array_equal, assert_allclose from numpy.testing.decorators import slow import pandas.tools.plotting as plotting +import weakref +import gc def _skip_if_mpl_14_or_dev_boxplot(): @@ -3390,6 +3392,33 @@ def test_sharey_and_ax(self): "y label is invisible but shouldn't") + def test_memory_leak(self): + """ Check that every plot type gets properly collected. 
""" + import matplotlib.pyplot as plt + results = {} + for kind in plotting._plot_klass.keys(): + args = {} + if kind in ['hexbin', 'scatter', 'pie']: + df = self.hexbin_df + args = {'x': 'A', 'y': 'B'} + elif kind == 'area': + df = self.tdf.abs() + else: + df = self.tdf + + # Use a weakref so we can see if the object gets collected without + # also preventing it from being collected + results[kind] = weakref.proxy(df.plot(kind=kind, **args)) + + # have matplotlib delete all the figures + plt.close('all') + # force a garbage collection + gc.collect() + for key in results: + # check that every plot was collected + with tm.assertRaises(ReferenceError): + # need to actually access something to get an error + results[key].lines @tm.mplskip class TestDataFrameGroupByPlots(TestPlotBase): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 513f165af4686..f83e5cbd17368 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -738,6 +738,135 @@ def r(h): ax.grid() return ax +def _mplplot_plotf(errorbar=False): + import matplotlib.pyplot as plt + def plotf(ax, x, y, style=None, **kwds): + mask = com.isnull(y) + if mask.any(): + y = np.ma.array(y) + y = np.ma.masked_where(mask, y) + + if errorbar: + return plt.Axes.errorbar(ax, x, y, **kwds) + else: + # prevent style kwarg from going to errorbar, where it is unsupported + if style is not None: + args = (ax, x, y, style) + else: + args = (ax, x, y) + return plt.Axes.plot(*args, **kwds) + + return plotf + + +def _lineplot_plotf(f, stacked, subplots): + def plotf(ax, x, y, style=None, column_num=None, **kwds): + # column_num is used to get the target column from protf in line and area plots + if not hasattr(ax, '_pos_prior') or column_num == 0: + LinePlot._initialize_prior(ax, len(y)) + y_values = LinePlot._get_stacked_values(ax, y, kwds['label'], stacked) + lines = f(ax, x, y_values, style=style, **kwds) + LinePlot._update_prior(ax, y, stacked, subplots) + return lines + + return plotf + + +def 
_areaplot_plotf(f, stacked, subplots): + import matplotlib.pyplot as plt + def plotf(ax, x, y, style=None, column_num=None, **kwds): + if not hasattr(ax, '_pos_prior') or column_num == 0: + LinePlot._initialize_prior(ax, len(y)) + y_values = LinePlot._get_stacked_values(ax, y, kwds['label'], stacked) + lines = f(ax, x, y_values, style=style, **kwds) + + # get data from the line to get coordinates for fill_between + xdata, y_values = lines[0].get_data(orig=False) + + if (y >= 0).all(): + start = ax._pos_prior + elif (y <= 0).all(): + start = ax._neg_prior + else: + start = np.zeros(len(y)) + + if not 'color' in kwds: + kwds['color'] = lines[0].get_color() + + plt.Axes.fill_between(ax, xdata, start, y_values, **kwds) + LinePlot._update_prior(ax, y, stacked, subplots) + return lines + + return plotf + + +def _histplot_plotf(bins, bottom, stacked, subplots): + import matplotlib.pyplot as plt + def plotf(ax, y, style=None, column_num=None, **kwds): + if not hasattr(ax, '_pos_prior') or column_num == 0: + LinePlot._initialize_prior(ax, len(bins) - 1) + y = y[~com.isnull(y)] + new_bottom = ax._pos_prior + bottom + # ignore style + n, new_bins, patches = plt.Axes.hist(ax, y, bins=bins, + bottom=new_bottom, **kwds) + LinePlot._update_prior(ax, n, stacked, subplots) + return patches + + return plotf + + +def _boxplot_plotf(return_type): + def plotf(ax, y, column_num=None, **kwds): + if y.ndim == 2: + y = [remove_na(v) for v in y] + # Boxplot fails with empty arrays, so need to add a NaN + # if any cols are empty + # GH 8181 + y = [v if v.size > 0 else np.array([np.nan]) for v in y] + else: + y = remove_na(y) + bp = ax.boxplot(y, **kwds) + + if return_type == 'dict': + return bp, bp + elif return_type == 'both': + return BoxPlot.BP(ax=ax, lines=bp), bp + else: + return ax, bp + + return plotf + + +def _kdeplot_plotf(f, bw_method, ind): + from scipy.stats import gaussian_kde + from scipy import __version__ as spv + + def plotf(ax, y, style=None, column_num=None, **kwds): + y = 
remove_na(y) + if LooseVersion(spv) >= '0.11.0': + gkde = gaussian_kde(y, bw_method=bw_method) + else: + gkde = gaussian_kde(y) + if bw_method is not None: + msg = ('bw_method was added in Scipy 0.11.0.' + + ' Scipy version in use is %s.' % spv) + warnings.warn(msg) + + if ind is None: + sample_range = max(y) - min(y) + ind_local = np.linspace(min(y) - 0.5 * sample_range, + max(y) + 0.5 * sample_range, 1000) + else: + ind_local = ind + + y = gkde.evaluate(ind_local) + lines = f(ax, ind_local, y, style=style, **kwds) + return lines + + return plotf + + class MPLPlot(object): """ @@ -1194,28 +1323,15 @@ def _is_datetype(self): index.inferred_type in ('datetime', 'date', 'datetime64', 'time')) + def _plot_errors(self): + return any(e is not None for e in self.errors.values()) + def _get_plot_function(self): ''' Returns the matplotlib plotting function (plot or errorbar) based on the presence of errorbar keywords. ''' - errorbar = any(e is not None for e in self.errors.values()) - def plotf(ax, x, y, style=None, **kwds): - mask = com.isnull(y) - if mask.any(): - y = np.ma.array(y) - y = np.ma.masked_where(mask, y) - - if errorbar: - return self.plt.Axes.errorbar(ax, x, y, **kwds) - else: - # prevent style kwarg from going to errorbar, where it is unsupported - if style is not None: - args = (ax, x, y, style) - else: - args = (ax, x, y) - return self.plt.Axes.plot(*args, **kwds) - return plotf + return _mplplot_plotf(self._plot_errors()) def _get_index_name(self): if isinstance(self.data.index, MultiIndex): @@ -1594,7 +1710,6 @@ def _is_ts_plot(self): return not self.x_compat and self.use_index and self._use_dynamic_x() def _make_plot(self): - self._initialize_prior(len(self.data)) if self._is_ts_plot(): data = self._maybe_convert_index(self.data) @@ -1626,12 +1741,13 @@ def _make_plot(self): left, right = _get_xlim(lines) ax.set_xlim(left, right) - def _get_stacked_values(self, y, label): - if self.stacked: + @classmethod + def _get_stacked_values(cls, ax, y, label, 
stacked): + if stacked: if (y >= 0).all(): - return self._pos_prior + y + return ax._pos_prior + y elif (y <= 0).all(): - return self._neg_prior + y + return ax._neg_prior + y else: raise ValueError('When stacked is True, each column must be either all positive or negative.' '{0} contains both positive and negative values'.format(label)) @@ -1640,15 +1756,8 @@ def _get_stacked_values(self, y, label): def _get_plot_function(self): f = MPLPlot._get_plot_function(self) - def plotf(ax, x, y, style=None, column_num=None, **kwds): - # column_num is used to get the target column from protf in line and area plots - if column_num == 0: - self._initialize_prior(len(self.data)) - y_values = self._get_stacked_values(y, kwds['label']) - lines = f(ax, x, y_values, style=style, **kwds) - self._update_prior(y) - return lines - return plotf + + return _lineplot_plotf(f, self.stacked, self.subplots) def _get_ts_plot_function(self): from pandas.tseries.plotting import tsplot @@ -1660,19 +1769,21 @@ def _plot(ax, x, data, style=None, **kwds): return lines return _plot - def _initialize_prior(self, n): - self._pos_prior = np.zeros(n) - self._neg_prior = np.zeros(n) + @classmethod + def _initialize_prior(cls, ax, n): + ax._pos_prior = np.zeros(n) + ax._neg_prior = np.zeros(n) - def _update_prior(self, y): - if self.stacked and not self.subplots: + @classmethod + def _update_prior(cls, ax, y, stacked, subplots): + if stacked and not subplots: # tsplot resample may changedata length - if len(self._pos_prior) != len(y): - self._initialize_prior(len(y)) + if len(ax._pos_prior) != len(y): + cls._initialize_prior(ax, len(y)) if (y >= 0).all(): - self._pos_prior += y + ax._pos_prior += y elif (y <= 0).all(): - self._neg_prior += y + ax._neg_prior += y def _maybe_convert_index(self, data): # tsplot converts automatically, but don't want to convert index @@ -1736,28 +1847,8 @@ def _get_plot_function(self): raise ValueError("Log-y scales are not supported in area plot") else: f = 
MPLPlot._get_plot_function(self) - def plotf(ax, x, y, style=None, column_num=None, **kwds): - if column_num == 0: - self._initialize_prior(len(self.data)) - y_values = self._get_stacked_values(y, kwds['label']) - lines = f(ax, x, y_values, style=style, **kwds) - - # get data from the line to get coordinates for fill_between - xdata, y_values = lines[0].get_data(orig=False) - - if (y >= 0).all(): - start = self._pos_prior - elif (y <= 0).all(): - start = self._neg_prior - else: - start = np.zeros(len(y)) - if not 'color' in kwds: - kwds['color'] = lines[0].get_color() - - self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds) - self._update_prior(y) - return lines + return _areaplot_plotf(f, self.stacked, self.subplots) return plotf @@ -1943,17 +2034,7 @@ def _args_adjust(self): self.bottom = np.array(self.bottom) def _get_plot_function(self): - def plotf(ax, y, style=None, column_num=None, **kwds): - if column_num == 0: - self._initialize_prior(len(self.bins) - 1) - y = y[~com.isnull(y)] - bottom = self._pos_prior + self.bottom - # ignore style - n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins, - bottom=bottom, **kwds) - self._update_prior(n) - return patches - return plotf + return _histplot_plotf(self.bins, self.bottom, self.stacked, self.subplots) def _make_plot(self): plotf = self._get_plot_function() @@ -2000,35 +2081,9 @@ def __init__(self, data, bw_method=None, ind=None, **kwargs): def _args_adjust(self): pass - def _get_ind(self, y): - if self.ind is None: - sample_range = max(y) - min(y) - ind = np.linspace(min(y) - 0.5 * sample_range, - max(y) + 0.5 * sample_range, 1000) - else: - ind = self.ind - return ind - def _get_plot_function(self): - from scipy.stats import gaussian_kde - from scipy import __version__ as spv f = MPLPlot._get_plot_function(self) - def plotf(ax, y, style=None, column_num=None, **kwds): - y = remove_na(y) - if LooseVersion(spv) >= '0.11.0': - gkde = gaussian_kde(y, bw_method=self.bw_method) - else: - gkde = 
gaussian_kde(y) - if self.bw_method is not None: - msg = ('bw_method was added in Scipy 0.11.0.' + - ' Scipy version in use is %s.' % spv) - warnings.warn(msg) - - ind = self._get_ind(y) - y = gkde.evaluate(ind) - lines = f(ax, ind, y, style=style, **kwds) - return lines - return plotf + return _kdeplot_plotf(f, self.bw_method, self.ind) def _post_plot_logic(self): for ax in self.axes: @@ -2123,24 +2178,7 @@ def _args_adjust(self): self.sharey = False def _get_plot_function(self): - def plotf(ax, y, column_num=None, **kwds): - if y.ndim == 2: - y = [remove_na(v) for v in y] - # Boxplot fails with empty arrays, so need to add a NaN - # if any cols are empty - # GH 8181 - y = [v if v.size > 0 else np.array([np.nan]) for v in y] - else: - y = remove_na(y) - bp = ax.boxplot(y, **kwds) - - if self.return_type == 'dict': - return bp, bp - elif self.return_type == 'both': - return self.BP(ax=ax, lines=bp), bp - else: - return ax, bp - return plotf + return _boxplot_plotf(self.return_type) def _validate_color_args(self): if 'color' in self.kwds:
This PR resolves #9003 (and explains matplotlib/matplotlib#3892). The root cause of the memory leak is a reference cycle between `MPLPlot` objects and the `AxesSubplot` objects they create. Specifically, a `plotf` function object is stored in `ax._plot_data` for the purposes of potentially redrawing if the data needs to be resampled. This would be fine if this were a top-level function; however, these are all nested functions that make use of `self`. This means that by `plotf` pulls in `self.ax` and `self.axes`, which point to the `AxesSubplot` that `plotf` is being attached to. We therefore have a reference cycle: ``` AxesSubplot -> AxesSubplot._plot_data -> plotf -> self -> self.ax -> AxesSubplot ``` In order to make the objects collectable, we need to either explicitly break a link or replace it with a weakref. Weakrefs don't work as `AxesSubplot` and `MPLPlot` have the same lifetime. Just not using `_plot_data` prevents the leak but breaks functionality. The final option as I see it is to change `plotf` to not depend on `self`. This works but involves a fair amount of modifications. I elected to make each of the `plotf`s top-level functions to make the lack of `self`-dependency explicit. This also required making several other functions `classmethods` and moving some data from `MPLPlot` to the `AxesSubplot` object. The key assumption being made by this change is that either `MPLPlot` objects are discarded immediately after use _or_ we don't want any modifications to the `MPLPlot` (e.g., adding errors post-plotting) to be reflected if redrawing. I believe both cases are true but this patch has the potential for behavioral changes if `MPLPlot` objects are regularly being retained and modified. I've also added a memory-leak test to prevent a regression; the test fails as expected if applied without the other commits in this patch.
https://api.github.com/repos/pandas-dev/pandas/pulls/9307
2015-01-20T05:41:04Z
2015-04-19T23:29:18Z
null
2023-05-11T01:12:48Z
BUG 9188: concat of all-nan with empty frame produces object dtype
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f4abe05097cff..587c5d32f9183 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -4098,7 +4098,7 @@ def get_empty_dtype_and_na(join_units): # Null blocks should not influence upcast class selection, unless there # are only null blocks, when same upcasting rules must be applied to # null upcast classes. - if unit.is_null: + if unit.is_null and unit.shape[1] == 0: null_upcast_classes.add(upcast_cls) else: upcast_classes.add(upcast_cls)
My first stab at this issue. Would wait for comments and inputs. This is the behavior without the fix. ``` In [4]: df_1 = pd.DataFrame({"Row":[0,1,1], "EmptyCol":np.nan, "NumberCol":[1,2,3]}) In [5]: df_2 = pd.DataFrame(columns = df_1.columns) In [6]: df_concat = pd.concat([df_1, df_2], axis=0) In [7]: df_1.dtypes Out[7]: EmptyCol float64 NumberCol int64 Row int64 dtype: object In [8]: df_2.dtypes Out[8]: EmptyCol object NumberCol object Row object dtype: object In [9]: df_concat.dtypes Out[9]: EmptyCol object NumberCol float64 Row float64 dtype: object In [10]: df_concat Out[10]: EmptyCol NumberCol Row 0 NaN 1 0 1 NaN 2 1 2 NaN 3 1 In [11]: df_1 Out[11]: EmptyCol NumberCol Row 0 NaN 1 0 1 NaN 2 1 2 NaN 3 1 In [12]: df_2 Out[12]: Empty DataFrame Columns: [EmptyCol, NumberCol, Row] Index: [] ``` Seeing this after the fix. ``` In [3]: df_1 = pd.DataFrame({"Row":[0,1,1], "EmptyCol":np.nan, "NumberCol":[1,2,3]}) In [4]: df_2 = pd.DataFrame(columns = df_1.columns) In [5]: df_concat = pd.concat([df_1, df_2], axis=0) In [6]: df_1.dtypes Out[6]: EmptyCol float64 NumberCol int64 Row int64 dtype: object In [7]: df_2.dtypes Out[7]: EmptyCol object NumberCol object Row object dtype: object In [8]: df_concat.dtypes Out[8]: EmptyCol float64 NumberCol float64 Row float64 dtype: object In [9]: df_1 Out[9]: EmptyCol NumberCol Row 0 NaN 1 0 1 NaN 2 1 2 NaN 3 1 In [10]: df_2 Out[10]: Empty DataFrame Columns: [EmptyCol, NumberCol, Row] Index: [] In [11]: df_concat Out[11]: EmptyCol NumberCol Row 0 NaN 1 0 1 NaN 2 1 2 NaN 3 1 ``` However this is causing `test_partial_setting_mixed_dtype` test to fail because after the fix dtypes change for df. ``` In [2]: df = DataFrame(columns=['A','B']) In [3]: df.loc[0] = Series(1,index=range(4)) In [4]: df1 = DataFrame(columns=['A','B'],index=[0]) In [5]: df.dtypes Out[5]: A float64 B float64 dtype: object In [6]: df1.dtypes Out[6]: A object B object dtype: object ``` whereas before the fix, the test was happy as all dtypes were same. 
``` In [2]: df = DataFrame(columns=['A','B']) In [3]: df.loc[0] = Series(1,index=range(4)) In [4]: df1 = DataFrame(columns=['A','B'],index=[0]) In [5]: df.dtypes Out[5]: A object B object dtype: object In [6]: df1.dtypes Out[6]: A object B object dtype: object ``` Is it expected to preserve the test behavior?
https://api.github.com/repos/pandas-dev/pandas/pulls/9303
2015-01-19T17:51:58Z
2015-01-22T21:28:30Z
null
2015-01-22T21:29:00Z
BUG: Fixes #9281: fixes to tseries.tests.test_tslib.TestTimestamp
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 945458de22d2c..8140d289f8c8a 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -338,9 +338,11 @@ def test_now(self): # Check that the delta between the times is less than 1s (arbitrarily small) delta = Timedelta(seconds=1) - self.assertTrue((ts_from_method - ts_from_string) < delta) - self.assertTrue((ts_from_method_tz - ts_from_string_tz) < delta) - self.assertTrue((ts_from_string_tz.tz_localize(None) - ts_from_string) < delta) + self.assertTrue(abs(ts_from_method - ts_from_string) < delta) + self.assertTrue(abs(ts_datetime - ts_from_method) < delta) + self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta) + self.assertTrue(abs(ts_from_string_tz.tz_localize(None) + - ts_from_method_tz.tz_localize(None)) < delta) def test_today(self): @@ -353,10 +355,11 @@ def test_today(self): # Check that the delta between the times is less than 1s (arbitrarily small) delta = Timedelta(seconds=1) - self.assertTrue((ts_from_method - ts_from_string) < delta) - self.assertTrue((ts_datetime - ts_from_method) < delta) - self.assertTrue((ts_datetime - ts_from_method) < delta) - self.assertTrue((ts_from_string_tz.tz_localize(None) - ts_from_string) < delta) + self.assertTrue(abs(ts_from_method - ts_from_string) < delta) + self.assertTrue(abs(ts_datetime - ts_from_method) < delta) + self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta) + self.assertTrue(abs(ts_from_string_tz.tz_localize(None) + - ts_from_method_tz.tz_localize(None)) < delta) class TestDatetimeParsingWrappers(tm.TestCase): def test_does_not_convert_mixed_integer(self):
closes #9281 The last test of both test_now and test_today was passing for anyone with a timezone of UTC-5 or greater (but failed, for example, in US/Pacific). The test was not testing what the original author meant it to (which is that the times are very close together) so I added abs(.) around the Timedeltas and also fixed the errant test.
https://api.github.com/repos/pandas-dev/pandas/pulls/9296
2015-01-19T09:26:05Z
2015-01-19T12:14:11Z
2015-01-19T12:14:11Z
2015-01-19T19:32:00Z
Update plotting.py (issue #2916)
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 9919415b06546..34db4877d77f0 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2906,6 +2906,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, fontsize : int or string rot : label rotation angle grid : Setting this to True will show the grid + ax : Matplotlib axis object, default None figsize : A tuple (width, height) in inches layout : tuple (optional) (rows, columns) for the layout of the plot
Added ax docstring. Address issue generated by `scripts/find_undoc_args.py`: [+2892 tools/plotting.py boxplot_frame_groupby()](https://github.com/pydata/pandas/blob/master/pandas/tools/plotting.py#L2892): Missing[1/9]=['ax']
https://api.github.com/repos/pandas-dev/pandas/pulls/9293
2015-01-19T04:45:17Z
2015-01-19T09:33:27Z
2015-01-19T09:33:27Z
2015-01-19T09:33:44Z
TST: tests for GH4862, GH7401, GH7403, GH7405
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b0c5b11079f31..6082a58687c2c 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -146,7 +146,7 @@ Bug Fixes - Fixed bug on bug endian platforms which produced incorrect results in ``StataReader`` (:issue:`8688`). - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`, :issue:`5873`) -- Bug in ``pivot`` and `unstack`` where ``nan`` values would break index alignment (:issue:`7466`) +- Bug in ``pivot`` and `unstack`` where ``nan`` values would break index alignment (:issue:`4862`, :issue:`7401`, :issue:`7403`, :issue:`7405`, :issue:`7466`) - Bug in left ``join`` on multi-index with ``sort=True`` or null values (:issue:`9210`). - Bug in ``MultiIndex`` where inserting new keys would fail (:issue:`9250`). diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 659d944a5e784..18dab471e3de2 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -17,7 +17,7 @@ import pandas.core.common as com import pandas.algos as algos -from pandas.core.index import MultiIndex, _get_na_value +from pandas.core.index import MultiIndex class _Unstacker(object): @@ -198,14 +198,8 @@ def get_new_values(self): def get_new_columns(self): if self.value_columns is None: - if self.lift == 0: - return self.removed_level - - lev = self.removed_level - vals = np.insert(lev.astype('object'), 0, - _get_na_value(lev.dtype.type)) - - return lev._shallow_copy(vals) + return _make_new_index(self.removed_level, None) \ + if self.lift != 0 else self.removed_level stride = len(self.removed_level) + self.lift width = len(self.value_columns) @@ -232,19 +226,31 @@ def get_new_index(self): # construct the new index if len(self.new_index_levels) == 1: lev, lab = self.new_index_levels[0], result_labels[0] - if not (lab == -1).any(): - return lev.take(lab) - - vals = np.insert(lev.astype('object'), len(lev), - 
_get_na_value(lev.dtype.type)).take(lab) - - return lev._shallow_copy(vals) + return _make_new_index(lev, lab) \ + if (lab == -1).any() else lev.take(lab) return MultiIndex(levels=self.new_index_levels, labels=result_labels, names=self.new_index_names, verify_integrity=False) + +def _make_new_index(lev, lab): + from pandas.core.index import Index, _get_na_value + + nan = _get_na_value(lev.dtype.type) + vals = lev.values.astype('object') + vals = np.insert(vals, 0, nan) if lab is None else \ + np.insert(vals, len(vals), nan).take(lab) + + try: + vals = vals.astype(lev.dtype, subok=False, copy=False) + except ValueError: + return Index(vals, **lev._get_attributes_dict()) + + return lev._shallow_copy(vals) + + def _unstack_multiple(data, clocs): if len(clocs) == 0: return data diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 7984c82cfbe9c..563e9d4dae57c 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -12328,6 +12328,25 @@ def test_unstack_dtypes(self): expected = Series({'float64' : 2, 'object' : 2}) assert_series_equal(result, expected) + # GH7405 + for c, d in (np.zeros(5), np.zeros(5)), \ + (np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')): + + df = DataFrame({'A': ['a']*5, 'C':c, 'D':d, + 'B':pd.date_range('2012-01-01', periods=5)}) + + right = df.iloc[:3].copy(deep=True) + + df = df.set_index(['A', 'B']) + df['D'] = df['D'].astype('int64') + + left = df.iloc[:3].unstack(0) + right = right.set_index(['A', 'B']).unstack(0) + right[('D', 'a')] = right[('D', 'a')].astype('int64') + + self.assertEqual(left.shape, (3, 2)) + tm.assert_frame_equal(left, right) + def test_unstack_non_unique_index_names(self): idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['c1', 'c1']) @@ -12385,6 +12404,93 @@ def verify(df): for col in ['4th', '5th']: verify(udf[col]) + # GH7403 + df = pd.DataFrame({'A': list('aaaabbbb'),'B':range(8), 'C':range(8)}) + df.iloc[3, 1] = np.NaN + left = df.set_index(['A', 
'B']).unstack(0) + + vals = [[3, 0, 1, 2, nan, nan, nan, nan], + [nan, nan, nan, nan, 4, 5, 6, 7]] + vals = list(map(list, zip(*vals))) + idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B') + cols = MultiIndex(levels=[['C'], ['a', 'b']], + labels=[[0, 0], [0, 1]], + names=[None, 'A']) + + right = DataFrame(vals, columns=cols, index=idx) + assert_frame_equal(left, right) + + df = DataFrame({'A': list('aaaabbbb'), 'B':list(range(4))*2, + 'C':range(8)}) + df.iloc[2,1] = np.NaN + left = df.set_index(['A', 'B']).unstack(0) + + vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]] + cols = MultiIndex(levels=[['C'], ['a', 'b']], + labels=[[0, 0], [0, 1]], + names=[None, 'A']) + idx = Index([nan, 0, 1, 2, 3], name='B') + right = DataFrame(vals, columns=cols, index=idx) + assert_frame_equal(left, right) + + df = pd.DataFrame({'A': list('aaaabbbb'),'B':list(range(4))*2, + 'C':range(8)}) + df.iloc[3,1] = np.NaN + left = df.set_index(['A', 'B']).unstack(0) + + vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]] + cols = MultiIndex(levels=[['C'], ['a', 'b']], + labels=[[0, 0], [0, 1]], + names=[None, 'A']) + idx = Index([nan, 0, 1, 2, 3], name='B') + right = DataFrame(vals, columns=cols, index=idx) + assert_frame_equal(left, right) + + # GH7401 + df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C':np.arange(10), + 'B':date_range('2012-01-01', periods=5).tolist()*2 }) + + df.iloc[3,1] = np.NaN + left = df.set_index(['A', 'B']).unstack() + + vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]]) + idx = Index(['a', 'b'], name='A') + cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)], + labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], + names=[None, 'B']) + + right = DataFrame(vals, columns=cols, index=idx) + assert_frame_equal(left, right) + + # GH4862 + vals = [['Hg', nan, nan, 680585148], + ['U', 0.0, nan, 680585148], + ['Pb', 7.07e-06, nan, 680585148], + ['Sn', 2.3614e-05, 0.0133, 680607017], + ['Ag', 0.0, 0.0133, 680607017], + ['Hg', -0.00015, 0.0133, 
680607017]] + df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'], + index=[17263, 17264, 17265, 17266, 17267, 17268]) + + left = df.copy().set_index(['s_id','dosage','agent']).unstack() + + vals = [[nan, nan, 7.07e-06, nan, 0.0], + [0.0, -0.00015, nan, 2.3614e-05, nan]] + + idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]], + labels=[[0, 1], [-1, 0]], + names=['s_id', 'dosage']) + + cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']], + labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]], + names=[None, 'agent']) + + right = DataFrame(vals, columns=cols, index=idx) + assert_frame_equal(left, right) + + left = df.ix[17264:].copy().set_index(['s_id','dosage','agent']) + assert_frame_equal(left.unstack(), right) + def test_stack_datetime_column_multiIndex(self): # GH 8039 t = datetime(2014, 1, 1) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index db8ff37e4e1b4..d762ac4ff774e 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5954,7 +5954,6 @@ def test_unstack(self): idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]]) ts = pd.Series([1,2], index=idx) left = ts.unstack() - left.columns = left.columns.astype('float64') right = DataFrame([[nan, 1], [2, nan]], index=[101, 102], columns=[nan, 3.5]) assert_frame_equal(left, right)
closes https://github.com/pydata/pandas/issues/4862 closes https://github.com/pydata/pandas/issues/7401 closes https://github.com/pydata/pandas/issues/7403 closes https://github.com/pydata/pandas/issues/7405 minor code change to https://github.com/pydata/pandas/pull/9061; otherwise only tests. the code change is to avoid https://github.com/pydata/pandas/issues/9170 issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/9292
2015-01-18T23:28:06Z
2015-01-26T01:29:07Z
2015-01-26T01:29:07Z
2015-02-13T01:59:02Z
BUG: Adding nano offset raises TypeError
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b3ac58a9fb84a..16882c572f48e 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -152,6 +152,9 @@ Bug Fixes +- Bug in adding ``offsets.Nano`` to other offets raises ``TypeError`` (:issue:`9284`) + + diff --git a/pandas/io/tests/data/legacy_pickle/0.15.2/0.15.2_x86_64_darwin_2.7.9.pickle b/pandas/io/tests/data/legacy_pickle/0.15.2/0.15.2_x86_64_darwin_2.7.9.pickle new file mode 100644 index 0000000000000..1a01539700cf1 Binary files /dev/null and b/pandas/io/tests/data/legacy_pickle/0.15.2/0.15.2_x86_64_darwin_2.7.9.pickle differ diff --git a/pandas/io/tests/test_pickle.py b/pandas/io/tests/test_pickle.py index aea7fb42b7d36..3073673575702 100644 --- a/pandas/io/tests/test_pickle.py +++ b/pandas/io/tests/test_pickle.py @@ -75,6 +75,7 @@ def read_pickles(self, version): if 'series' in data: if 'ts' in data['series']: self._validate_timeseries(data['series']['ts'], self.data['series']['ts']) + self._validate_frequency(data['series']['ts']) def test_read_pickles_0_10_1(self): self.read_pickles('0.10.1') @@ -148,6 +149,21 @@ def _validate_timeseries(self, pickled, current): self.assertEqual(pickled.index.freq.normalize, False) self.assert_numpy_array_equal(pickled > 0, current > 0) + def _validate_frequency(self, pickled): + # GH 9291 + from pandas.tseries.offsets import Day + freq = pickled.index.freq + result = freq + Day(1) + self.assertTrue(result, Day(2)) + + result = freq + pandas.Timedelta(hours=1) + self.assertTrue(isinstance(result, pandas.Timedelta)) + self.assertEqual(result, pandas.Timedelta(days=1, hours=1)) + + result = freq + pandas.Timedelta(nanoseconds=1) + self.assertTrue(isinstance(result, pandas.Timedelta)) + self.assertEqual(result, pandas.Timedelta(days=1, nanoseconds=1)) + if __name__ == '__main__': import nose diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 81daa2b451c6b..84449cd2fad98 100644 --- 
a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -9,7 +9,7 @@ from dateutil.relativedelta import relativedelta, weekday from dateutil.easter import easter import pandas.tslib as tslib -from pandas.tslib import Timestamp, OutOfBoundsDatetime +from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta import functools @@ -2010,7 +2010,7 @@ def f(self, other): class Tick(SingleConstructorOffset): - _inc = timedelta(microseconds=1000) + _inc = Timedelta(microseconds=1000) __gt__ = _tick_comp(operator.gt) __ge__ = _tick_comp(operator.ge) @@ -2107,36 +2107,37 @@ def _delta_to_tick(delta): class Day(Tick): - _inc = timedelta(1) + _inc = Timedelta(days=1) _prefix = 'D' class Hour(Tick): - _inc = timedelta(0, 3600) + _inc = Timedelta(hours=1) _prefix = 'H' class Minute(Tick): - _inc = timedelta(0, 60) + _inc = Timedelta(minutes=1) _prefix = 'T' class Second(Tick): - _inc = timedelta(0, 1) + _inc = Timedelta(seconds=1) _prefix = 'S' class Milli(Tick): + _inc = Timedelta(milliseconds=1) _prefix = 'L' class Micro(Tick): - _inc = timedelta(microseconds=1) + _inc = Timedelta(microseconds=1) _prefix = 'U' class Nano(Tick): - _inc = np.timedelta64(1, 'ns') + _inc = Timedelta(nanoseconds=1) _prefix = 'N' diff --git a/pandas/tseries/tests/test_offsets.py b/pandas/tseries/tests/test_offsets.py index ef4288b28e9e4..6f9e8b6819bd3 100644 --- a/pandas/tseries/tests/test_offsets.py +++ b/pandas/tseries/tests/test_offsets.py @@ -25,7 +25,7 @@ import pandas.tseries.offsets as offsets from pandas.io.pickle import read_pickle -from pandas.tslib import NaT, Timestamp +from pandas.tslib import NaT, Timestamp, Timedelta import pandas.tslib as tslib from pandas.util.testing import assertRaisesRegexp import pandas.util.testing as tm @@ -2817,6 +2817,7 @@ def test_Easter(): assertEq(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)) assertEq(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23)) + def test_Hour(): assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 
1, 1)) assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) @@ -2904,6 +2905,10 @@ def test_Nanosecond(): assert (Nano(3) + Nano(2)) == Nano(5) assert (Nano(3) - Nano(2)) == Nano() + # GH9284 + assert Nano(1) + Nano(10) == Nano(11) + assert Nano(5) + Micro(1) == Nano(1005) + assert Micro(5) + Nano(1) == Nano(5001) def test_tick_offset(): assert not Day().isAnchored() @@ -2928,6 +2933,23 @@ def test_compare_ticks(): assert(kls(3) != kls(4)) +class TestTicks(tm.TestCase): + + def test_ticks(self): + offsets = [(Hour, Timedelta(hours=5)), + (Minute, Timedelta(hours=2, minutes=3)), + (Second, Timedelta(hours=2, seconds=3)), + (Milli, Timedelta(hours=2, milliseconds=3)), + (Micro, Timedelta(hours=2, microseconds=3)), + (Nano, Timedelta(hours=2, nanoseconds=3))] + + for kls, expected in offsets: + offset = kls(3) + result = offset + Timedelta(hours=2) + self.assertTrue(isinstance(result, Timedelta)) + self.assertEqual(result, expected) + + class TestOffsetNames(tm.TestCase): def test_get_offset_name(self): assertRaisesRegexp(ValueError, 'Bad rule.*BusinessDays', get_offset_name, BDay(2)) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 8217c4b31b287..c7c35564c1e5a 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -997,6 +997,8 @@ cdef class _NaT(_Timestamp): def _delta_to_nanoseconds(delta): + if hasattr(delta, 'nanos'): + return delta.nanos if hasattr(delta, 'delta'): delta = delta.delta if is_timedelta64_object(delta): diff --git a/setup.py b/setup.py index f58b9b0bb8551..e64235affaae2 100755 --- a/setup.py +++ b/setup.py @@ -590,6 +590,9 @@ def pxd(name): 'tests/data/legacy_pickle/0.12.0/*.pickle', 'tests/data/legacy_pickle/0.13.0/*.pickle', 'tests/data/legacy_pickle/0.14.0/*.pickle', + 'tests/data/legacy_pickle/0.14.1/*.pickle', + 'tests/data/legacy_pickle/0.15.0/*.pickle', + 'tests/data/legacy_pickle/0.15.2/*.pickle', 'tests/data/*.csv', 'tests/data/*.dta', 'tests/data/*.txt',
Closes #9284.
https://api.github.com/repos/pandas-dev/pandas/pulls/9291
2015-01-18T13:34:15Z
2015-01-25T18:41:40Z
2015-01-25T18:41:40Z
2015-01-31T02:57:19Z
GH 9273: Timedelta constructor should accept nanoseconds keyword.
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 18c55e38ab7af..01e6170413187 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -101,6 +101,7 @@ Enhancements - Added ``Timestamp.to_datetime64()`` to complement ``Timedelta.to_timedelta64()`` (:issue:`9255`) - ``tseries.frequencies.to_offset()`` now accepts ``Timedelta`` as input (:issue:`9064`) +- ``Timedelta`` will now accept nanoseconds keyword in constructor (:issue:`9273`) Performance ~~~~~~~~~~~ diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index ced566157d48f..9442dc3b6b6e7 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -683,6 +683,30 @@ def test_to_timedelta_on_missing_values(self): actual = pd.to_timedelta(pd.NaT) self.assertEqual(actual.value, timedelta_NaT.astype('int64')) + def test_to_timedelta_on_nanoseconds(self): + # GH 9273 + result = Timedelta(nanoseconds=100) + expected = Timedelta('100ns') + self.assertEqual(result, expected) + + result = Timedelta(days=1,hours=1,minutes=1,weeks=1,seconds=1,milliseconds=1,microseconds=1,nanoseconds=1) + expected = Timedelta(694861001001001) + self.assertEqual(result, expected) + + result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1) + expected = Timedelta('1us1ns') + self.assertEqual(result, expected) + + result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1) + expected = Timedelta('999ns') + self.assertEqual(result, expected) + + result = Timedelta(microseconds=1) + 5*Timedelta(nanoseconds=-2) + expected = Timedelta('990ns') + self.assertEqual(result, expected) + + self.assertRaises(TypeError, lambda: Timedelta(nanoseconds='abc')) + def test_timedelta_ops_with_missing_values(self): # setup s1 = pd.to_timedelta(Series(['00:00:01'])) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 8e2cb199214cf..8217c4b31b287 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ 
-1722,10 +1722,11 @@ class Timedelta(_Timedelta): kwargs = dict([ (k, _to_py_int_float(v)) for k, v in iteritems(kwargs) ]) try: - value = timedelta(**kwargs) + nano = kwargs.pop('nanoseconds',0) + value = convert_to_timedelta64(timedelta(**kwargs),'ns',False) + nano except TypeError as e: raise ValueError("cannot construct a TimeDelta from the passed arguments, allowed keywords are " - "[days, seconds, microseconds, milliseconds, minutes, hours, weeks]") + "[weeks, days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds]") if isinstance(value, Timedelta): value = value.value
closes #9273 This patch adds nanoseconds keyword support in Timedelta constructor. <pre> In [1]: from pandas import Timedelta In [2]: td = Timedelta(nanoseconds=1) In [3]: td1 = Timedelta(microseconds=1) In [4]: td.components Out[4]: Components(days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0, nanoseconds=1) In [5]: (td + td1).components Out[5]: Components(days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=1, nanoseconds=1) </pre>
https://api.github.com/repos/pandas-dev/pandas/pulls/9289
2015-01-18T05:56:43Z
2015-01-19T03:17:20Z
2015-01-19T03:17:20Z
2015-01-19T06:47:47Z
ENH: Corrects to_html print spacing GH4987
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 2db455272363b..b35d1f87f2560 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -21,6 +21,11 @@ New features ~~~~~~~~~~~~ .. _whatsnew_0160.api: +API changes +~~~~~~~~~~~ + +-Changed ``.to_html`` to remove leading/trailing spaces in table body (:issue:`4987`) + Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/format.py b/pandas/core/format.py index a17c45b70c74b..d183320328b1f 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -819,7 +819,7 @@ def _write_cell(self, s, kind='td', indent=0, tags=None): ) else: esc = {} - rs = com.pprint_thing(s, escape_chars=esc) + rs = com.pprint_thing(s, escape_chars=esc).strip() self.write( '%s%s</%s>' % (start_tag, rs, kind), indent) diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 80f1733ab4be5..d36aaaccaed20 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -527,13 +527,13 @@ def test_to_html_escaped(self): <tbody> <tr> <th>str&lt;ing1 &amp;amp;</th> - <td> &lt;type 'str'&gt;</td> - <td> &lt;type 'str'&gt;</td> + <td>&lt;type 'str'&gt;</td> + <td>&lt;type 'str'&gt;</td> </tr> <tr> <th>stri&gt;ng2 &amp;amp;</th> - <td> &lt;type 'str'&gt;</td> - <td> &lt;type 'str'&gt;</td> + <td>&lt;type 'str'&gt;</td> + <td>&lt;type 'str'&gt;</td> </tr> </tbody> </table>""" @@ -559,13 +559,13 @@ def test_to_html_escape_disabled(self): <tbody> <tr> <th>str<ing1 &amp;</th> - <td> <b>bold</b></td> - <td> <b>bold</b></td> + <td><b>bold</b></td> + <td><b>bold</b></td> </tr> <tr> <th>stri>ng2 &amp;</th> - <td> <b>bold</b></td> - <td> <b>bold</b></td> + <td><b>bold</b></td> + <td><b>bold</b></td> </tr> </tbody> </table>""" @@ -597,16 +597,16 @@ def test_to_html_multiindex_index_false(self): </thead> <tbody> <tr> - <td> 0</td> - <td> 3</td> - <td> 5</td> - <td> 3</td> + <td>0</td> + <td>3</td> + <td>5</td> + <td>3</td> 
</tr> <tr> - <td> 1</td> - <td> 4</td> - <td> 6</td> - <td> 4</td> + <td>1</td> + <td>4</td> + <td>6</td> + <td>4</td> </tr> </tbody> </table>""" @@ -640,26 +640,26 @@ def test_to_html_multiindex_sparsify_false_multi_sparse(self): <tr> <th>0</th> <th>0</th> - <td> 0</td> - <td> 1</td> + <td>0</td> + <td>1</td> </tr> <tr> <th>0</th> <th>1</th> - <td> 2</td> - <td> 3</td> + <td>2</td> + <td>3</td> </tr> <tr> <th>1</th> <th>0</th> - <td> 4</td> - <td> 5</td> + <td>4</td> + <td>5</td> </tr> <tr> <th>1</th> <th>1</th> - <td> 6</td> - <td> 7</td> + <td>6</td> + <td>7</td> </tr> </tbody> </table>""" @@ -695,26 +695,26 @@ def test_to_html_multiindex_sparsify_false_multi_sparse(self): <tr> <th>0</th> <th>0</th> - <td> 0</td> - <td> 1</td> + <td>0</td> + <td>1</td> </tr> <tr> <th>0</th> <th>1</th> - <td> 2</td> - <td> 3</td> + <td>2</td> + <td>3</td> </tr> <tr> <th>1</th> <th>0</th> - <td> 4</td> - <td> 5</td> + <td>4</td> + <td>5</td> </tr> <tr> <th>1</th> <th>1</th> - <td> 6</td> - <td> 7</td> + <td>6</td> + <td>7</td> </tr> </tbody> </table>""" @@ -746,24 +746,24 @@ def test_to_html_multiindex_sparsify(self): <tr> <th rowspan="2" valign="top">0</th> <th>0</th> - <td> 0</td> - <td> 1</td> + <td>0</td> + <td>1</td> </tr> <tr> <th>1</th> - <td> 2</td> - <td> 3</td> + <td>2</td> + <td>3</td> </tr> <tr> <th rowspan="2" valign="top">1</th> <th>0</th> - <td> 4</td> - <td> 5</td> + <td>4</td> + <td>5</td> </tr> <tr> <th>1</th> - <td> 6</td> - <td> 7</td> + <td>6</td> + <td>7</td> </tr> </tbody> </table>""" @@ -799,24 +799,24 @@ def test_to_html_multiindex_sparsify(self): <tr> <th rowspan="2" valign="top">0</th> <th>0</th> - <td> 0</td> - <td> 1</td> + <td>0</td> + <td>1</td> </tr> <tr> <th>1</th> - <td> 2</td> - <td> 3</td> + <td>2</td> + <td>3</td> </tr> <tr> <th rowspan="2" valign="top">1</th> <th>0</th> - <td> 4</td> - <td> 5</td> + <td>4</td> + <td>5</td> </tr> <tr> <th>1</th> - <td> 6</td> - <td> 7</td> + <td>6</td> + <td>7</td> </tr> </tbody> </table>""" @@ -840,23 +840,23 
@@ def test_to_html_index_formatter(self): <tbody> <tr> <th>a</th> - <td> 0</td> - <td> 1</td> + <td>0</td> + <td>1</td> </tr> <tr> <th>b</th> - <td> 2</td> - <td> 3</td> + <td>2</td> + <td>3</td> </tr> <tr> <th>c</th> - <td> 4</td> - <td> 5</td> + <td>4</td> + <td>5</td> </tr> <tr> <th>d</th> - <td> 6</td> - <td> 7</td> + <td>6</td> + <td>7</td> </tr> </tbody> </table>""" @@ -896,35 +896,35 @@ def test_to_html_truncate(self): <tbody> <tr> <th>2001-01-01</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>2001-01-02</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>2001-01-03</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>2001-01-04</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>...</th> @@ -936,35 +936,35 @@ def test_to_html_truncate(self): </tr> <tr> <th>2001-01-17</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>2001-01-18</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>2001-01-19</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>2001-01-20</th> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> </tbody> </table> @@ -1010,34 +1010,34 @@ def test_to_html_truncate_multi_index(self): <tr> <th rowspan="2" valign="top">bar</th> 
<th>one</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>two</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>baz</th> <th>one</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>...</th> @@ -1053,34 +1053,34 @@ def test_to_html_truncate_multi_index(self): <tr> <th>foo</th> <th>two</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th rowspan="2" valign="top">qux</th> <th>one</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>two</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> </tbody> </table> @@ -1129,68 +1129,68 @@ def test_to_html_truncate_multi_index_sparse_off(self): <tr> <th>bar</th> <th>one</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>bar</th> <th>two</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> 
NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>baz</th> <th>one</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>foo</th> <th>two</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>qux</th> <th>one</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> <tr> <th>qux</th> <th>two</th> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> <td>...</td> - <td> NaN</td> - <td> NaN</td> - <td> NaN</td> + <td>NaN</td> + <td>NaN</td> + <td>NaN</td> </tr> </tbody> </table> @@ -1766,17 +1766,17 @@ def test_to_html_multiindex(self): ' <tbody>\n' ' <tr>\n' ' <th>0</th>\n' - ' <td> a</td>\n' - ' <td> b</td>\n' - ' <td> c</td>\n' - ' <td> d</td>\n' + ' <td>a</td>\n' + ' <td>b</td>\n' + ' <td>c</td>\n' + ' <td>d</td>\n' ' </tr>\n' ' <tr>\n' ' <th>1</th>\n' - ' <td> e</td>\n' - ' <td> f</td>\n' - ' <td> g</td>\n' - ' <td> h</td>\n' + ' <td>e</td>\n' + ' <td>f</td>\n' + ' <td>g</td>\n' + ' <td>h</td>\n' ' </tr>\n' ' </tbody>\n' '</table>') @@ -1808,17 +1808,17 @@ def test_to_html_multiindex(self): ' <tbody>\n' ' <tr>\n' ' <th>0</th>\n' - ' <td> a</td>\n' - ' <td> b</td>\n' - ' <td> c</td>\n' - ' <td> d</td>\n' + ' <td>a</td>\n' + ' <td>b</td>\n' + ' <td>c</td>\n' + ' <td>d</td>\n' ' </tr>\n' ' <tr>\n' ' <th>1</th>\n' - ' <td> e</td>\n' - ' <td> f</td>\n' - ' <td> g</td>\n' - ' <td> h</td>\n' + ' <td>e</td>\n' + ' <td>f</td>\n' + ' <td>g</td>\n' + ' <td>h</td>\n' ' </tr>\n' ' </tbody>\n' '</table>') @@ -1843,25 +1843,24 @@ def 
test_to_html_justify(self): ' <tbody>\n' ' <tr>\n' ' <th>0</th>\n' - ' <td> 6</td>\n' - ' <td> 1</td>\n' - ' <td> 223442</td>\n' + ' <td>6</td>\n' + ' <td>1</td>\n' + ' <td>223442</td>\n' ' </tr>\n' ' <tr>\n' ' <th>1</th>\n' - ' <td> 30000</td>\n' - ' <td> 2</td>\n' - ' <td> 0</td>\n' + ' <td>30000</td>\n' + ' <td>2</td>\n' + ' <td>0</td>\n' ' </tr>\n' ' <tr>\n' ' <th>2</th>\n' - ' <td> 2</td>\n' - ' <td> 70000</td>\n' - ' <td> 1</td>\n' + ' <td>2</td>\n' + ' <td>70000</td>\n' + ' <td>1</td>\n' ' </tr>\n' ' </tbody>\n' '</table>') - self.assertEqual(result, expected) result = df.to_html(justify='right') @@ -1877,21 +1876,21 @@ def test_to_html_justify(self): ' <tbody>\n' ' <tr>\n' ' <th>0</th>\n' - ' <td> 6</td>\n' - ' <td> 1</td>\n' - ' <td> 223442</td>\n' + ' <td>6</td>\n' + ' <td>1</td>\n' + ' <td>223442</td>\n' ' </tr>\n' ' <tr>\n' ' <th>1</th>\n' - ' <td> 30000</td>\n' - ' <td> 2</td>\n' - ' <td> 0</td>\n' + ' <td>30000</td>\n' + ' <td>2</td>\n' + ' <td>0</td>\n' ' </tr>\n' ' <tr>\n' ' <th>2</th>\n' - ' <td> 2</td>\n' - ' <td> 70000</td>\n' - ' <td> 1</td>\n' + ' <td>2</td>\n' + ' <td>70000</td>\n' + ' <td>1</td>\n' ' </tr>\n' ' </tbody>\n' '</table>')
Closes #4987 This is my first non-doc PR, so please bare with me. Is it worth my while to go through the tests and standarize all of the test HTML to all block-strings, or is the mix of individual lines/blocks ok?
https://api.github.com/repos/pandas-dev/pandas/pulls/9285
2015-01-17T16:32:01Z
2015-03-06T23:06:34Z
null
2015-03-23T13:55:59Z
BUG: where coerces numeric to str incorrectly
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 2db455272363b..d8fc10dd54e8c 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -187,6 +187,8 @@ Bug Fixes - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - isnull now detects ``NaT`` in PeriodIndex (:issue:`9129`) - Bug in groupby ``.nth()`` with a multiple column groupby (:issue:`8979`) +- Bug in ``DataFrame.where`` and ``Series.where`` coerce numerics to string incorrectly (:issue:`9280`) +- Bug in ``DataFrame.where`` and ``Series.where`` raise ``ValueError`` when string list-like is passed. (:issue:`9280`) - Fixed division by zero error for ``Series.kurt()`` when all values are equal (:issue:`9197`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 143f65ee64e60..f8f5928ca7d51 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -19,7 +19,7 @@ import pandas.lib as lib import pandas.tslib as tslib from pandas import compat -from pandas.compat import StringIO, BytesIO, range, long, u, zip, map +from pandas.compat import StringIO, BytesIO, range, long, u, zip, map, string_types from pandas.core.config import get_option @@ -1322,6 +1322,19 @@ def _possibly_downcast_to_dtype(result, dtype): return result +def _maybe_convert_string_to_object(values): + """ + Convert string-like and string-like array to convert object dtype. + This is to avoid numpy to handle the array as str dtype. 
+ """ + if isinstance(values, string_types): + values = np.array([values], dtype=object) + elif (isinstance(values, np.ndarray) and + issubclass(values.dtype.type, (np.string_, np.unicode_))): + values = values.astype(object) + return values + + def _lcd_dtypes(a_dtype, b_dtype): """ return the lcd dtype to hold these types """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7fa64e0b4ca91..b2adfae744db7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3292,7 +3292,11 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if self.ndim == 1: # try to set the same dtype as ourselves - new_other = np.array(other, dtype=self.dtype) + try: + new_other = np.array(other, dtype=self.dtype) + except ValueError: + new_other = np.array(other) + if not (new_other == np.array(other)).all(): other = np.array(other) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index ef33e27d861fd..f4abe05097cff 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -13,7 +13,8 @@ ABCSparseSeries, _infer_dtype_from_scalar, _is_null_datelike_scalar, _maybe_promote, is_timedelta64_dtype, is_datetime64_dtype, - _possibly_infer_to_datetimelike, array_equivalent) + _possibly_infer_to_datetimelike, array_equivalent, + _maybe_convert_string_to_object) from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import (_maybe_convert_indices, _length_of_indexer) from pandas.core.categorical import Categorical, _maybe_to_categorical, _is_categorical @@ -1052,6 +1053,7 @@ def where(self, other, cond, align=True, raise_on_error=True, values = values.T is_transposed = not is_transposed + other = _maybe_convert_string_to_object(other) # our where function def func(c, v, o): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 2f57fa593bc40..36d6c39586d97 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -947,6 +947,34 @@ def 
test_2d_datetime64(self): tm.assert_almost_equal(result, expected) +class TestMaybe(tm.TestCase): + + def test_maybe_convert_string_to_array(self): + result = com._maybe_convert_string_to_object('x') + tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object)) + self.assertTrue(result.dtype == object) + + result = com._maybe_convert_string_to_object(1) + self.assertEquals(result, 1) + + arr = np.array(['x', 'y'], dtype=str) + result = com._maybe_convert_string_to_object(arr) + tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object)) + self.assertTrue(result.dtype == object) + + # unicode + arr = np.array(['x', 'y']).astype('U') + result = com._maybe_convert_string_to_object(arr) + tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object)) + self.assertTrue(result.dtype == object) + + # object + arr = np.array(['x', 2], dtype=object) + result = com._maybe_convert_string_to_object(arr) + tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object)) + self.assertTrue(result.dtype == object) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index b67a8c5de1c2d..a5de26da1606a 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1886,6 +1886,28 @@ def test_ix_setitem(self): self.assertEqual(self.series[d1], 4) self.assertEqual(self.series[d2], 6) + def test_where_numeric_with_string(self): + # GH 9280 + s = pd.Series([1, 2, 3]) + w = s.where(s>1, 'X') + + self.assertTrue(isinstance(w[0], str)) + self.assertTrue(isinstance(w[1], int)) + self.assertTrue(isinstance(w[2], int)) + self.assertTrue(w.dtype == 'object') + + w = s.where(s>1, ['X', 'Y', 'Z']) + self.assertTrue(isinstance(w[0], str)) + self.assertTrue(isinstance(w[1], int)) + self.assertTrue(isinstance(w[2], int)) + self.assertTrue(w.dtype == 'object') + + w = s.where(s>1, np.array(['X', 'Y', 'Z'])) + 
self.assertTrue(isinstance(w[0], str)) + self.assertTrue(isinstance(w[1], int)) + self.assertTrue(isinstance(w[2], int)) + self.assertTrue(w.dtype == 'object') + def test_setitem_boolean(self): mask = self.series > self.series.median()
Closes #9280.
https://api.github.com/repos/pandas-dev/pandas/pulls/9283
2015-01-17T10:35:48Z
2015-01-18T20:36:40Z
2015-01-18T20:36:40Z
2015-01-19T14:31:31Z
ENH: StringMethods supports is_xxx methods
diff --git a/doc/source/api.rst b/doc/source/api.rst index a8097f2648c4b..4fc9df52a4300 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -549,6 +549,13 @@ strings and apply several methods to it. These can be acccessed like Series.str.strip Series.str.title Series.str.upper + Series.str.isalnum + Series.str.isalpha + Series.str.isdigit + Series.str.isspace + Series.str.islower + Series.str.isupper + Series.str.istitle Series.str.get_dummies .. _api.categorical: diff --git a/doc/source/text.rst b/doc/source/text.rst index eb11cfb1248a9..4cd96613f6d51 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -228,3 +228,10 @@ Method Summary :meth:`~Series.str.lstrip`,Equivalent to ``str.lstrip`` :meth:`~Series.str.lower`,Equivalent to ``str.lower`` :meth:`~Series.str.upper`,Equivalent to ``str.upper`` + :meth:`~Series.str.isalnum`,Equivalent to ``str.isalnum`` + :meth:`~Series.str.isalpha`,Equivalent to ``str.isalpha`` + :meth:`~Series.str.isdigit`,Equivalent to ``str.isdigit`` + :meth:`~Series.str.isspace`,Equivalent to ``str.isspace`` + :meth:`~Series.str.islower`,Equivalent to ``str.islower`` + :meth:`~Series.str.isupper`,Equivalent to ``str.isupper`` + :meth:`~Series.str.istitle`,Equivalent to ``str.istitle`` diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 6082a58687c2c..feee9e4fba4b0 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -104,11 +104,12 @@ Enhancements - Added ``Series.str.slice_replace()``, which previously raised NotImplementedError (:issue:`8888`) - Added ``Timestamp.to_datetime64()`` to complement ``Timedelta.to_timedelta64()`` (:issue:`9255`) - ``tseries.frequencies.to_offset()`` now accepts ``Timedelta`` as input (:issue:`9064`) - - ``Timedelta`` will now accept nanoseconds keyword in constructor (:issue:`9273`) - SQL code now safely escapes table and column names (:issue:`8986`) - Added auto-complete for ``Series.str.<tab>``, ``Series.dt.<tab>`` and 
``Series.cat.<tab>`` (:issue:`9322`) +- Added ``StringMethods.isalnum()``, ``isalpha()``, ``isdigit()``, ``isspace()``, ``islower()``, +``isupper()``, ``istitle()`` which behave as the same as standard ``str`` (:issue:`9282`) Performance ~~~~~~~~~~~ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 75d10654977cd..1556d3290cd01 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -9,6 +9,9 @@ import textwrap +_shared_docs = dict() + + def _get_array_list(arr, others): from pandas.core.series import Series @@ -124,17 +127,6 @@ def g(x): return lib.map_infer(arr, f) -def str_title(arr): - """ - Convert strings to titlecased version - - Returns - ------- - titled : array - """ - return _na_map(lambda x: x.title(), arr) - - def str_count(arr, pat, flags=0): """ Count occurrences of pattern in each string @@ -197,7 +189,8 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): else: upper_pat = pat.upper() f = lambda x: upper_pat in x - return _na_map(f, str_upper(arr), na, dtype=bool) + uppered = _na_map(lambda x: x.upper(), arr) + return _na_map(f, uppered, na, dtype=bool) return _na_map(f, arr, na, dtype=bool) @@ -239,28 +232,6 @@ def str_endswith(arr, pat, na=np.nan): return _na_map(f, arr, na, dtype=bool) -def str_lower(arr): - """ - Convert strings in array to lowercase - - Returns - ------- - lowercase : array - """ - return _na_map(lambda x: x.lower(), arr) - - -def str_upper(arr): - """ - Convert strings in array to uppercase - - Returns - ------- - uppercase : array - """ - return _na_map(lambda x: x.upper(), arr) - - def str_replace(arr, pat, repl, n=-1, case=True, flags=0): """ Replace @@ -553,17 +524,6 @@ def str_join(arr, sep): return _na_map(sep.join, arr) -def str_len(arr): - """ - Compute length of each string in array. 
- - Returns - ------- - lengths : array - """ - return _na_map(len, arr, dtype=int) - - def str_findall(arr, pat, flags=0): """ Find all occurrences of pattern or regular expression @@ -884,14 +844,16 @@ def str_encode(arr, encoding, errors="strict"): return _na_map(f, arr) -def _noarg_wrapper(f): +def _noarg_wrapper(f, docstring=None, **kargs): def wrapper(self): - result = f(self.series) + result = _na_map(f, self.series, **kargs) return self._wrap_result(result) wrapper.__name__ = f.__name__ - if f.__doc__: - wrapper.__doc__ = f.__doc__ + if docstring is not None: + wrapper.__doc__ = docstring + else: + raise ValueError('Provide docstring') return wrapper @@ -1076,7 +1038,47 @@ def get_dummies(self, sep='|'): findall = _pat_wrapper(str_findall, flags=True) extract = _pat_wrapper(str_extract, flags=True) - len = _noarg_wrapper(str_len) - lower = _noarg_wrapper(str_lower) - upper = _noarg_wrapper(str_upper) - title = _noarg_wrapper(str_title) + _shared_docs['len'] = (""" + Compute length of each string in array. 
+ + Returns + ------- + lengths : array + """) + len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int) + + _shared_docs['casemethods'] = (""" + Convert strings in array to %s + + Returns + ------- + uppercase : array + """) + lower = _noarg_wrapper(lambda x: x.lower(), + docstring=_shared_docs['casemethods'] % 'lowercase') + upper = _noarg_wrapper(lambda x: x.upper(), + docstring=_shared_docs['casemethods'] % 'uppercase') + title = _noarg_wrapper(lambda x: x.title(), + docstring=_shared_docs['casemethods'] % 'titlecase') + + _shared_docs['ismethods'] = (""" + Check whether all characters in each string in the array are %s + + Returns + ------- + Series of boolean values + """) + isalnum = _noarg_wrapper(lambda x: x.isalnum(), + docstring=_shared_docs['ismethods'] % 'alphanumeric') + isalpha = _noarg_wrapper(lambda x: x.isalpha(), + docstring=_shared_docs['ismethods'] % 'alphabetic') + isdigit = _noarg_wrapper(lambda x: x.isdigit(), + docstring=_shared_docs['ismethods'] % 'digits') + isspace = _noarg_wrapper(lambda x: x.isspace(), + docstring=_shared_docs['ismethods'] % 'whitespace') + islower = _noarg_wrapper(lambda x: x.islower(), + docstring=_shared_docs['ismethods'] % 'lowercase') + isupper = _noarg_wrapper(lambda x: x.isupper(), + docstring=_shared_docs['ismethods'] % 'uppercase') + istitle = _noarg_wrapper(lambda x: x.istitle(), + docstring=_shared_docs['ismethods'] % 'titlecase') diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index b8f1a6ac342af..2d7463249bd65 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -623,6 +623,41 @@ def test_empty_str_methods(self): tm.assert_series_equal(empty_str, empty.str.get(0)) tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii')) tm.assert_series_equal(empty_bytes, empty.str.encode('ascii')) + tm.assert_series_equal(empty_str, empty.str.isalnum()) + tm.assert_series_equal(empty_str, empty.str.isalpha()) + tm.assert_series_equal(empty_str, 
empty.str.isdigit()) + tm.assert_series_equal(empty_str, empty.str.isspace()) + tm.assert_series_equal(empty_str, empty.str.islower()) + tm.assert_series_equal(empty_str, empty.str.isupper()) + tm.assert_series_equal(empty_str, empty.str.istitle()) + + def test_ismethods(self): + values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' '] + str_s = Series(values) + alnum_e = [True, True, True, True, True, False, True, True, False, False] + alpha_e = [True, True, True, False, False, False, True, False, False, False] + digit_e = [False, False, False, True, False, False, False, True, False, False] + num_e = [False, False, False, True, False, False, False, True, False, False] + space_e = [False, False, False, False, False, False, False, False, False, True] + lower_e = [False, True, False, False, False, False, False, False, False, False] + upper_e = [True, False, False, False, True, False, True, False, False, False] + title_e = [True, False, True, False, True, False, False, False, False, False] + + tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e)) + tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e)) + tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e)) + tm.assert_series_equal(str_s.str.isspace(), Series(space_e)) + tm.assert_series_equal(str_s.str.islower(), Series(lower_e)) + tm.assert_series_equal(str_s.str.isupper(), Series(upper_e)) + tm.assert_series_equal(str_s.str.istitle(), Series(title_e)) + + self.assertEquals(str_s.str.isalnum().tolist(), [v.isalnum() for v in values]) + self.assertEquals(str_s.str.isalpha().tolist(), [v.isalpha() for v in values]) + self.assertEquals(str_s.str.isdigit().tolist(), [v.isdigit() for v in values]) + self.assertEquals(str_s.str.isspace().tolist(), [v.isspace() for v in values]) + self.assertEquals(str_s.str.islower().tolist(), [v.islower() for v in values]) + self.assertEquals(str_s.str.isupper().tolist(), [v.isupper() for v in values]) + self.assertEquals(str_s.str.istitle().tolist(), 
[v.istitle() for v in values]) def test_get_dummies(self): s = Series(['a|b', 'a|c', np.nan])
Derived from #9111. Add following methods to be compat with standard `str`. - StringMethods.isalnum - StringMethods.isalpha - StringMethods.isdigit - StringMethods.isspace - StringMethods.islower - StringMethods.isupper - StringMethods.istitle
https://api.github.com/repos/pandas-dev/pandas/pulls/9282
2015-01-17T10:07:19Z
2015-01-29T11:10:35Z
2015-01-29T11:10:35Z
2015-01-29T23:28:37Z
BUG: Subplotting boxplot shows unnecessary warnings
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 2db455272363b..53ed040860269 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -172,7 +172,7 @@ Bug Fixes - +- Bug in boxplot, scatter and hexbin plot may show an unnecessary warning (:issue:`8877`) diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 109d9e7c00c6b..4195baf4874f1 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -1900,15 +1900,14 @@ def test_boxplot(self): # different warning on py3 if not PY3: - with tm.assert_produces_warning(UserWarning): - axes = _check_plot_works(df.plot, kind='box', - subplots=True, logy=True) - - self._check_axes_shape(axes, axes_num=3, layout=(1, 3)) - self._check_ax_scales(axes, yaxis='log') - for ax, label in zip(axes, labels): - self._check_text_labels(ax.get_xticklabels(), [label]) - self.assertEqual(len(ax.lines), self.bp_n_objects) + axes = _check_plot_works(df.plot, kind='box', + subplots=True, logy=True) + + self._check_axes_shape(axes, axes_num=3, layout=(1, 3)) + self._check_ax_scales(axes, yaxis='log') + for ax, label in zip(axes, labels): + self._check_text_labels(ax.get_xticklabels(), [label]) + self.assertEqual(len(ax.lines), self.bp_n_objects) axes = series.plot(kind='box', rot=40) self._check_ticks_props(axes, xrot=40, yrot=0) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 2d7976d567108..9919415b06546 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1451,7 +1451,10 @@ def _make_plot(self): kws['label'] = c if c_is_column else '' self.fig.colorbar(img, **kws) - self._add_legend_handle(scatter, label) + if label is not None: + self._add_legend_handle(scatter, label) + else: + self.legend = False errors_x = self._get_errorbars(label=x, index=0, yerr=False) errors_y = self._get_errorbars(label=y, index=0, xerr=False) @@ -1512,6 +1515,9 @@ def _make_plot(self): img = ax.collections[0] 
self.fig.colorbar(img, ax=ax) + def _make_legend(self): + pass + def _post_plot_logic(self): ax = self.axes[0] x, y = self.x, self.y @@ -2228,6 +2234,9 @@ def _set_ticklabels(self, ax, labels): else: ax.set_yticklabels(labels) + def _make_legend(self): + pass + def _post_plot_logic(self): pass
Related to #8877 and 80a730c93717e7fc01ae2f880109bc752519cecf Because `legend=True` is default, `DataFrame.plot(kind='box', subplots=True)` shows unnecessary warnings. ``` # OK, no legend / no warnings df.plot(kind='box') ``` ``` # NG: no legend and warnings df.plot(kind='box', subplots=True) # UserWarning: No labelled objects found. Use label='...' kwarg on individual plots. # warnings.warn("No labelled objects found. " ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9278
2015-01-17T02:24:35Z
2015-01-18T13:24:08Z
2015-01-18T13:24:08Z
2015-01-19T14:37:26Z
allow for empty SparseSeries SparsePanel constructors
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b3ac58a9fb84a..9c9644509b964 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -202,3 +202,6 @@ Bug Fixes - Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' format to cells if no other format wass applied. This prevented other row or column formatting being applied. (:issue:`9167`) - Fixes issue with ``index_col=False`` when ``usecols`` is also specified in ``read_csv``. (:issue:`9082`) - Bug where ``wide_to_long`` would modify the input stubnames list (:issue:`9204`) + + +- ``SparseSeries`` and ``SparsePanel`` now accept zero argument constructors (same as their non-sparse counterparts) (:issue:`9272`). diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py index 62e0e3e985775..ee9edbe36ae28 100644 --- a/pandas/sparse/panel.py +++ b/pandas/sparse/panel.py @@ -65,9 +65,13 @@ class SparsePanel(Panel): _typ = 'panel' _subtyp = 'sparse_panel' - def __init__(self, frames, items=None, major_axis=None, minor_axis=None, + def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None, default_fill_value=np.nan, default_kind='block', copy=False): + + if frames is None: + frames = {} + if isinstance(frames, np.ndarray): new_frames = {} for item, vals in zip(items, frames): diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 39d286f3744e1..bcf9606c3748f 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -103,7 +103,7 @@ class SparseSeries(Series): """ _subtyp = 'sparse_series' - def __init__(self, data, index=None, sparse_index=None, kind='block', + def __init__(self, data=None, index=None, sparse_index=None, kind='block', fill_value=None, name=None, dtype=None, copy=False, fastpath=False): @@ -115,6 +115,9 @@ def __init__(self, data, index=None, sparse_index=None, kind='block', if copy: data = data.copy() else: + + if data is None: + data = [] is_sparse_array = 
isinstance(data, SparseArray) if fill_value is None: diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py index 9197a4fc22b9c..eebe822ae74c0 100644 --- a/pandas/sparse/tests/test_sparse.py +++ b/pandas/sparse/tests/test_sparse.py @@ -280,6 +280,11 @@ def test_constructor_nonnan(self): arr = [0, 0, 0, nan, nan] sp_series = SparseSeries(arr, fill_value=0) assert_equal(sp_series.values.values, arr) + + # GH 9272 + def test_constructor_empty(self): + sp = SparseSeries() + self.assertEqual(len(sp.index), 0) def test_copy_astype(self): cop = self.bseries.astype(np.float64) @@ -862,6 +867,7 @@ def test_constructor_ndarray(self): ValueError, "^Column length", SparseDataFrame, self.frame.values, columns=self.frame.columns[:-1]) + # GH 9272 def test_constructor_empty(self): sp = SparseDataFrame() self.assertEqual(len(sp.index), 0) @@ -1605,6 +1611,13 @@ def test_constructor(self): with tm.assertRaisesRegexp(TypeError, "input must be a dict, a 'list' was passed"): SparsePanel(['a', 'b', 'c']) + + # GH 9272 + def test_constructor_empty(self): + sp = SparsePanel() + self.assertEqual(len(sp.items), 0) + self.assertEqual(len(sp.major_axis), 0) + self.assertEqual(len(sp.minor_axis), 0) def test_from_dict(self): fd = SparsePanel.from_dict(self.data_dict)
Closes #9272. Note that my solution is to provide a default `data=[]` for SparseSeries and `frames={}` for SparsePanel. This differs from the dense implementations that have `None` defaults that are then changed to the above. Doesn't seem like a big deal to me, but I can do that here if needed.
https://api.github.com/repos/pandas-dev/pandas/pulls/9277
2015-01-16T21:34:05Z
2015-01-29T11:33:13Z
null
2015-01-29T11:33:13Z
TST: Remove tests using proprietary data
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index f0ebebc1f143f..f896b98fddf5b 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -20,14 +20,11 @@ PossiblePrecisionLoss, StataMissingValue) import pandas.util.testing as tm from pandas.tslib import NaT -from pandas.util.misc import is_little_endian from pandas import compat class TestStata(tm.TestCase): def setUp(self): - # Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from: - # http://stata-press.com/data/glmext.html self.dirpath = tm.get_data_path() self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta') self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta') @@ -48,16 +45,6 @@ def setUp(self): self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta') self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta') - self.dta7 = os.path.join(self.dirpath, 'cancer.dta') - self.csv7 = os.path.join(self.dirpath, 'cancer.csv') - - self.dta8 = os.path.join(self.dirpath, 'tbl19-3.dta') - - self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv') - - self.dta9 = os.path.join(self.dirpath, 'lbw.dta') - self.csv9 = os.path.join(self.dirpath, 'lbw.csv') - self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta') self.csv14 = os.path.join(self.dirpath, 'stata5.csv') @@ -253,24 +240,6 @@ def test_write_dta6(self): tm.assert_frame_equal(written_and_read_again.set_index('index'), original) - @nose.tools.nottest - def test_read_dta7(self): - expected = read_csv(self.csv7, parse_dates=True, sep='\t') - parsed = self.read_dta(self.dta7) - tm.assert_frame_equal(parsed, expected) - - @nose.tools.nottest - def test_read_dta8(self): - expected = read_csv(self.csv8, parse_dates=True, sep='\t') - parsed = self.read_dta(self.dta8) - tm.assert_frame_equal(parsed, expected) - - @nose.tools.nottest - def test_read_dta9(self): - expected = read_csv(self.csv9, parse_dates=True, sep='\t') - parsed = 
self.read_dta(self.dta9) - tm.assert_frame_equal(parsed, expected) - def test_read_write_dta10(self): original = DataFrame(data=[["string", "object", 1, 1.1, np.datetime64('2003-12-25')]],
Removes three tests that use proprietary data that is not available for testing.
https://api.github.com/repos/pandas-dev/pandas/pulls/9271
2015-01-16T15:10:36Z
2015-01-18T20:41:28Z
2015-01-18T20:41:27Z
2015-01-18T22:42:19Z
FIX: Add endianness missing flag when reading data
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index b221a7df373a4..f4b0024f5d5b8 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -99,7 +99,7 @@ Bug Fixes - +- Fixed bug on bug endian platforms which produced incorrect results in ``StataReader`` (:issue:`8688`). - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`, :issue:`5873`) - Bug in ``pivot`` and `unstack`` where ``nan`` values would break index alignment (:issue:`7466`) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index ccfe8468813c7..0d6e554b8b474 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -833,6 +833,7 @@ def __init__(self, path_or_buf, encoding='iso-8859-1'): self._missing_values = False self._data_read = False self._value_labels_read = False + self._native_byteorder = _set_endianness(sys.byteorder) if isinstance(path_or_buf, str): path_or_buf, encoding = get_filepath_or_buffer( path_or_buf, encoding=self._default_encoding @@ -1195,13 +1196,16 @@ def data(self, convert_dates=True, convert_categoricals=True, index=None, dtype = [] # Convert struct data types to numpy data type for i, typ in enumerate(self.typlist): if typ in self.NUMPY_TYPE_MAP: - dtype.append(('s' + str(i), self.NUMPY_TYPE_MAP[typ])) + dtype.append(('s' + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ])) else: dtype.append(('s' + str(i), 'S' + str(typ))) dtype = np.dtype(dtype) read_len = count * dtype.itemsize self.path_or_buf.seek(self.data_location) data = np.frombuffer(self.path_or_buf.read(read_len),dtype=dtype,count=count) + # if necessary, swap the byte order to native here + if self.byteorder != self._native_byteorder: + data = data.byteswap().newbyteorder() self._data_read = True if convert_categoricals:
Added endianess flat to data type to allow data to be read cross platforms closes #8688
https://api.github.com/repos/pandas-dev/pandas/pulls/9264
2015-01-15T20:14:19Z
2015-01-16T15:10:44Z
2015-01-16T15:10:44Z
2015-01-18T22:42:19Z
API/ENH: add method='nearest' to Index.get_indexer/reindex and method to get_loc
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 7ee82cd69a257..dc43c1177f8c3 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -948,15 +948,9 @@ chosen from the following table: pad / ffill, Fill values forward bfill / backfill, Fill values backward + nearest, Fill from the nearest index value -Other fill methods could be added, of course, but these are the two most -commonly used for time series data. In a way they only make sense for time -series or otherwise ordered data, but you may have an application on non-time -series data where this sort of "interpolation" logic is the correct thing to -do. More sophisticated interpolation of missing values would be an obvious -extension. - -We illustrate these fill methods on a simple TimeSeries: +We illustrate these fill methods on a simple Series: .. ipython:: python @@ -969,18 +963,22 @@ We illustrate these fill methods on a simple TimeSeries: ts2.reindex(ts.index) ts2.reindex(ts.index, method='ffill') ts2.reindex(ts.index, method='bfill') + ts2.reindex(ts.index, method='nearest') -Note these methods require that the indexes are **order increasing**. +These methods require that the indexes are **ordered** increasing or +decreasing. -Note the same result could have been achieved using :ref:`fillna -<missing_data.fillna>`: +Note that the same result could have been achieved using +:ref:`fillna <missing_data.fillna>` (except for ``method='nearest'``) or +:ref:`interpolate <missing_data.interpolation>`: .. ipython:: python ts2.reindex(ts.index).fillna(method='ffill') -Note that ``reindex`` will raise a ValueError if the index is not -monotonic. ``fillna`` will not make any checks on the order of the index. +``reindex`` will raise a ValueError if the index is not monotonic increasing or +descreasing. ``fillna`` and ``interpolate`` will not make any checks on the +order of the index. .. 
_basics.drop: diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 63606cb830cbe..bf050dfabea9b 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -20,6 +20,15 @@ users upgrade to this version. New features ~~~~~~~~~~~~ +- Reindex now supports ``method='nearest'`` for frames or series with a monotonic increasing or decreasing index (:issue:`9258`): + + .. ipython:: python + + df = pd.DataFrame({'x': range(5)}) + df.reindex([0.2, 1.8, 3.5], method='nearest') + + This method is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods. + .. _whatsnew_0160.api: .. _whatsnew_0160.api_breaking: @@ -189,6 +198,9 @@ Enhancements - Added ``StringMethods.find()`` and ``rfind()`` which behave as the same as standard ``str`` (:issue:`9386`) +- ``Index.get_indexer`` now supports ``method='pad'`` and ``method='backfill'`` even for any target array, not just monotonic targets. These methods also work for monotonic decreasing as well as monotonic increasing indexes (:issue:`9258`). +- ``Index.asof`` now works on all index types (:issue:`9258`). + - Added ``StringMethods.isnumeric`` and ``isdecimal`` which behave as the same as standard ``str`` (:issue:`9439`) - Added ``StringMethods.ljust()`` and ``rjust()`` which behave as the same as standard ``str`` (:issue:`9352`) - ``StringMethods.pad()`` and ``center()`` now accept ``fillchar`` option to specify filling character (:issue:`9352`) @@ -244,6 +256,22 @@ Bug Fixes - Fixed character encoding bug in ``read_stata`` and ``StataReader`` when loading data from a URL (:issue:`9231`). +- Looking up a partial string label with ``DatetimeIndex.asof`` now includes values that match the string, even if they are after the start of the partial string label (:issue:`9258`). Old behavior: + + .. 
ipython:: python + :verbatim: + + In [4]: pd.to_datetime(['2000-01-31', '2000-02-28']).asof('2000-02') + Out[4]: Timestamp('2000-01-31 00:00:00') + + Fixed behavior: + + .. ipython:: python + + pd.to_datetime(['2000-01-31', '2000-02-28']).asof('2000-02') + + To reproduce the old behavior, simply add more precision to the label (e.g., use ``2000-02-01`` instead of ``2000-02``). + - Bug in adding ``offsets.Nano`` to other offets raises ``TypeError`` (:issue:`9284`) diff --git a/pandas/core/common.py b/pandas/core/common.py index 7ab88edd77d4b..78c0c6c5dbd0f 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2682,7 +2682,7 @@ def _astype_nansafe(arr, dtype, copy=True): return arr.view(dtype) -def _clean_fill_method(method): +def _clean_fill_method(method, allow_nearest=False): if method is None: return None method = method.lower() @@ -2690,13 +2690,23 @@ def _clean_fill_method(method): method = 'pad' if method == 'bfill': method = 'backfill' - if method not in ['pad', 'backfill']: - msg = ('Invalid fill method. Expecting pad (ffill) or backfill ' - '(bfill). Got %s' % method) + + valid_methods = ['pad', 'backfill'] + expecting = 'pad (ffill) or backfill (bfill)' + if allow_nearest: + valid_methods.append('nearest') + expecting = 'pad (ffill), backfill (bfill) or nearest' + if method not in valid_methods: + msg = ('Invalid fill method. Expecting %s. Got %s' + % (expecting, method)) raise ValueError(msg) return method +def _clean_reindex_fill_method(method): + return _clean_fill_method(method, allow_nearest=True) + + def _all_none(*args): for arg in args: if arg is not None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f454af5df9c90..336b29dfb8572 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1672,10 +1672,12 @@ def sort_index(self, axis=0, ascending=True): keywords) New labels / index to conform to. 
Preferably an Index object to avoid duplicating data - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed DataFrame - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap + method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional + Method to use for filling holes in reindexed DataFrame: + * default: don't fill gaps + * pad / ffill: propagate last valid observation forward to next valid + * backfill / bfill: use next valid observation to fill gap + * nearest: use nearest valid observations to fill gap copy : boolean, default True Return a new object, even if the passed indexes are the same level : int or name @@ -1703,7 +1705,7 @@ def reindex(self, *args, **kwargs): # construct the args axes, kwargs = self._construct_axes_from_arguments(args, kwargs) - method = com._clean_fill_method(kwargs.get('method')) + method = com._clean_reindex_fill_method(kwargs.get('method')) level = kwargs.get('level') copy = kwargs.get('copy', True) limit = kwargs.get('limit') @@ -1744,9 +1746,8 @@ def _reindex_axes(self, axes, level, limit, method, fill_value, copy): axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( - {axis: [new_index, indexer]}, method=method, - fill_value=fill_value, limit=limit, copy=copy, - allow_dups=False) + {axis: [new_index, indexer]}, + fill_value=fill_value, copy=copy, allow_dups=False) return obj @@ -1770,10 +1771,12 @@ def _reindex_multi(self, axes, copy, fill_value): New labels / index to conform to. Preferably an Index object to avoid duplicating data axis : %(axes_single_arg)s - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed object. 
- pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap + method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional + Method to use for filling holes in reindexed DataFrame: + * default: don't fill gaps + * pad / ffill: propagate last valid observation forward to next valid + * backfill / bfill: use next valid observation to fill gap + * nearest: use nearest valid observations to fill gap copy : boolean, default True Return a new object, even if the passed indexes are the same level : int or name @@ -1802,15 +1805,14 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, axis_name = self._get_axis_name(axis) axis_values = self._get_axis(axis_name) - method = com._clean_fill_method(method) + method = com._clean_reindex_fill_method(method) new_index, indexer = axis_values.reindex(labels, method, level, limit=limit) return self._reindex_with_indexers( - {axis: [new_index, indexer]}, method=method, fill_value=fill_value, - limit=limit, copy=copy) + {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy) - def _reindex_with_indexers(self, reindexers, method=None, - fill_value=np.nan, limit=None, copy=False, + def _reindex_with_indexers(self, reindexers, + fill_value=np.nan, copy=False, allow_dups=False): """ allow_dups indicates an internal call here """ diff --git a/pandas/core/index.py b/pandas/core/index.py index 2444014ac9779..0cad537855857 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1089,22 +1089,20 @@ def identical(self, other): def asof(self, label): """ For a sorted index, return the most recent label up to and including - the passed label. 
Return NaN if not found - """ - if isinstance(label, (Index, ABCSeries, np.ndarray)): - raise TypeError('%s' % type(label)) - - if not isinstance(label, Timestamp): - label = Timestamp(label) - - if label not in self: - loc = self.searchsorted(label, side='left') - if loc > 0: - return self[loc - 1] - else: - return np.nan + the passed label. Return NaN if not found. - return label + See also + -------- + get_loc : asof is a thin wrapper around get_loc with method='pad' + """ + try: + loc = self.get_loc(label, method='pad') + except KeyError: + return _get_na_value(self.dtype) + else: + if isinstance(loc, slice): + loc = loc.indices(len(self))[-1] + return self[loc] def asof_locs(self, where, mask): """ @@ -1402,15 +1400,34 @@ def sym_diff(self, other, result_name=None): the_diff = sorted(set((self.difference(other)).union(other.difference(self)))) return Index(the_diff, name=result_name) - def get_loc(self, key): + def get_loc(self, key, method=None): """ Get integer location for requested label + Parameters + ---------- + key : label + method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'} + * default: exact matches only. + * pad / ffill: find the PREVIOUS index value if no exact match. + * backfill / bfill: use NEXT index value if no exact match + * nearest: use the NEAREST index value if no exact match. Tied + distances are broken by preferring the larger index value. 
+ Returns ------- loc : int if unique index, possibly slice or mask if not """ - return self._engine.get_loc(_values_from_object(key)) + if method is None: + return self._engine.get_loc(_values_from_object(key)) + + indexer = self.get_indexer([key], method=method) + if indexer.ndim > 1 or indexer.size > 1: + raise TypeError('get_loc requires scalar valued input') + loc = indexer.item() + if loc == -1: + raise KeyError(key) + return loc def get_value(self, series, key): """ @@ -1477,19 +1494,20 @@ def get_indexer(self, target, method=None, limit=None): """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the - current data to the new index. The mask determines whether labels are - found or not in the current index + current data to the new index. Parameters ---------- target : Index - method : {'pad', 'ffill', 'backfill', 'bfill'} - pad / ffill: propagate LAST valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - - Notes - ----- - This is a low-level method and probably should be used at your own risk + method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'} + * default: exact matches only. + * pad / ffill: find the PREVIOUS index value if no exact match. + * backfill / bfill: use NEXT index value if no exact match + * nearest: use the NEAREST index value if no exact match. Tied + distances are broken by preferring the larger index value. + limit : int + Maximum number of consecuctive labels in ``target`` to match for + inexact matches. Examples -------- @@ -1498,9 +1516,12 @@ def get_indexer(self, target, method=None, limit=None): Returns ------- - indexer : ndarray + indexer : ndarray of int + Integers from 0 to n - 1 indicating that the index at these + positions matches the corresponding target values. Missing values + in the target are marked by -1. 
""" - method = self._get_method(method) + method = com._clean_reindex_fill_method(method) target = _ensure_index(target) pself, ptarget = self._possibly_promote(target) @@ -1516,21 +1537,73 @@ def get_indexer(self, target, method=None, limit=None): raise InvalidIndexError('Reindexing only valid with uniquely' ' valued Index objects') - if method == 'pad': - if not self.is_monotonic or not target.is_monotonic: - raise ValueError('Must be monotonic for forward fill') - indexer = self._engine.get_pad_indexer(target.values, limit) - elif method == 'backfill': - if not self.is_monotonic or not target.is_monotonic: - raise ValueError('Must be monotonic for backward fill') - indexer = self._engine.get_backfill_indexer(target.values, limit) - elif method is None: - indexer = self._engine.get_indexer(target.values) + if method == 'pad' or method == 'backfill': + indexer = self._get_fill_indexer(target, method, limit) + elif method == 'nearest': + indexer = self._get_nearest_indexer(target, limit) else: - raise ValueError('unrecognized method: %s' % method) + indexer = self._engine.get_indexer(target.values) return com._ensure_platform_int(indexer) + def _get_fill_indexer(self, target, method, limit=None): + if self.is_monotonic_increasing and target.is_monotonic_increasing: + method = (self._engine.get_pad_indexer if method == 'pad' + else self._engine.get_backfill_indexer) + indexer = method(target.values, limit) + else: + indexer = self._get_fill_indexer_searchsorted(target, method, limit) + return indexer + + def _get_fill_indexer_searchsorted(self, target, method, limit=None): + """ + Fallback pad/backfill get_indexer that works for monotonic decreasing + indexes and non-monotonic targets + """ + if limit is not None: + raise ValueError('limit argument for %r method only well-defined ' + 'if index and target are monotonic' % method) + + side = 'left' if method == 'pad' else 'right' + target = np.asarray(target) + + # find exact matches first (this simplifies the 
algorithm) + indexer = self.get_indexer(target) + nonexact = (indexer == -1) + indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) + if side == 'left': + # searchsorted returns "indices into a sorted array such that, + # if the corresponding elements in v were inserted before the + # indices, the order of a would be preserved". + # Thus, we need to subtract 1 to find values to the left. + indexer[nonexact] -= 1 + # This also mapped not found values (values of 0 from + # np.searchsorted) to -1, which conveniently is also our + # sentinel for missing values + else: + # Mark indices to the right of the largest value as not found + indexer[indexer == len(self)] = -1 + return indexer + + def _get_nearest_indexer(self, target, limit): + """ + Get the indexer for the nearest index labels; requires an index with + values that can be subtracted from each other (e.g., not strings or + tuples). + """ + left_indexer = self.get_indexer(target, 'pad', limit=limit) + right_indexer = self.get_indexer(target, 'backfill', limit=limit) + + target = np.asarray(target) + left_distances = abs(self.values[left_indexer] - target) + right_distances = abs(self.values[right_indexer] - target) + + op = operator.lt if self.is_monotonic_increasing else operator.le + indexer = np.where(op(left_distances, right_distances) + | (right_indexer == -1), + left_indexer, right_indexer) + return indexer + def get_indexer_non_unique(self, target, **kwargs): """ return an indexer suitable for taking from a non unique index return the labels in the same order as the target, and @@ -1616,16 +1689,6 @@ def isin(self, values, level=None): self._validate_index_level(level) return lib.ismember(self._array_values(), value_set) - def _get_method(self, method): - if method: - method = method.lower() - - aliases = { - 'ffill': 'pad', - 'bfill': 'backfill' - } - return aliases.get(method, method) - def reindex(self, target, method=None, level=None, limit=None): """ Create index with target's 
values (move/add/delete values as necessary) @@ -2063,6 +2126,19 @@ def _maybe_cast_slice_bound(self, label, side): """ return label + def _searchsorted_monotonic(self, label, side='left'): + if self.is_monotonic_increasing: + return self.searchsorted(label, side=side) + elif self.is_monotonic_decreasing: + # np.searchsorted expects ascending sort order, have to reverse + # everything for it to work (element ordering, search side and + # resulting value). + pos = self[::-1].searchsorted( + label, side='right' if side == 'left' else 'right') + return len(self) - pos + + raise ValueError('index must be monotonic increasing or decreasing') + def get_slice_bound(self, label, side): """ Calculate slice bound that corresponds to given label. @@ -2088,19 +2164,12 @@ def get_slice_bound(self, label, side): try: slc = self.get_loc(label) - except KeyError: - if self.is_monotonic_increasing: - return self.searchsorted(label, side=side) - elif self.is_monotonic_decreasing: - # np.searchsorted expects ascending sort order, have to reverse - # everything for it to work (element ordering, search side and - # resulting value). 
- pos = self[::-1].searchsorted( - label, side='right' if side == 'left' else 'right') - return len(self) - pos - - # In all other cases, just re-raise the KeyError - raise + except KeyError as err: + try: + return self._searchsorted_monotonic(label, side) + except ValueError: + # raise the original KeyError + raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array or an array of indices, which @@ -2664,7 +2733,7 @@ def __contains__(self, other): except: return False - def get_loc(self, key): + def get_loc(self, key, method=None): try: if np.all(np.isnan(key)): nan_idxs = self._nan_idxs @@ -2676,7 +2745,7 @@ def get_loc(self, key): return nan_idxs except (TypeError, NotImplementedError): pass - return super(Float64Index, self).get_loc(key) + return super(Float64Index, self).get_loc(key, method=method) @property def is_all_dates(self): @@ -3932,7 +4001,7 @@ def get_indexer(self, target, method=None, limit=None): ------- (indexer, mask) : (ndarray, ndarray) """ - method = self._get_method(method) + method = com._clean_reindex_fill_method(method) target = _ensure_index(target) @@ -3949,20 +4018,13 @@ def get_indexer(self, target, method=None, limit=None): self_index = self._tuple_index - if method == 'pad': - if not self.is_unique or not self.is_monotonic: - raise AssertionError(('Must be unique and monotonic to ' - 'use forward fill getting the indexer')) - indexer = self_index._engine.get_pad_indexer(target_index.values, - limit=limit) - elif method == 'backfill': - if not self.is_unique or not self.is_monotonic: - raise AssertionError(('Must be unique and monotonic to ' - 'use backward fill getting the indexer')) - indexer = self_index._engine.get_backfill_indexer(target_index.values, - limit=limit) + if method == 'pad' or method == 'backfill': + indexer = self_index._get_fill_indexer(target, method, limit) + elif method == 'nearest': + raise NotImplementedError("method='nearest' not implemented yet " + 'for MultiIndex; see GitHub issue 
9365') else: - indexer = self_index._engine.get_indexer(target_index.values) + indexer = self_index._engine.get_indexer(target.values) return com._ensure_platform_int(indexer) @@ -4099,7 +4161,7 @@ def _partial_tup_index(self, tup, side='left'): else: return start + section.searchsorted(idx, side=side) - def get_loc(self, key): + def get_loc(self, key, method=None): """ Get integer location, slice or boolean mask for requested label or tuple If the key is past the lexsort depth, the return may be a boolean mask @@ -4108,11 +4170,16 @@ def get_loc(self, key): Parameters ---------- key : label or tuple + method : None Returns ------- loc : int, slice object or boolean mask """ + if method is not None: + raise NotImplementedError('only the default get_loc method is ' + 'currently supported for MultiIndex') + def _maybe_to_slice(loc): '''convert integer indexer to boolean mask or slice if possible''' if not isinstance(loc, np.ndarray) or loc.dtype != 'int64': diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 66e008aa16b3e..0f015843fcb0f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -1875,27 +1875,43 @@ def test_nested_exception(self): except Exception as e: self.assertNotEqual(type(e), UnboundLocalError) - def test_reverse_reindex_ffill_raises(self): + def test_reindex_methods(self): + df = pd.DataFrame({'x': range(5)}) + target = np.array([-0.1, 0.9, 1.1, 1.5]) + + for method, expected_values in [('nearest', [0, 1, 1, 2]), + ('pad', [np.nan, 0, 1, 1]), + ('backfill', [0, 1, 2, 2])]: + expected = pd.DataFrame({'x': expected_values}, index=target) + actual = df.reindex(target, method=method) + assert_frame_equal(expected, actual) + + e2 = expected[::-1] + actual = df.reindex(target[::-1], method=method) + assert_frame_equal(e2, actual) + + new_order = [3, 0, 2, 1] + e2 = expected.iloc[new_order] + actual = df.reindex(target[new_order], method=method) + assert_frame_equal(e2, actual) + + switched_method = ('pad' if 
method == 'backfill' + else 'backfill' if method == 'pad' + else method) + actual = df[::-1].reindex(target, method=switched_method) + assert_frame_equal(expected, actual) + + def test_non_monotonic_reindex_methods(self): dr = pd.date_range('2013-08-01', periods=6, freq='B') data = np.random.randn(6,1) df = pd.DataFrame(data, index=dr, columns=list('A')) - df['A'][3] = np.nan - df_rev = pd.DataFrame(data, index=dr[::-1], columns=list('A')) - # Reverse index is not 'monotonic' + df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], + columns=list('A')) + # index is not monotonic increasing or decreasing self.assertRaises(ValueError, df_rev.reindex, df.index, method='pad') self.assertRaises(ValueError, df_rev.reindex, df.index, method='ffill') self.assertRaises(ValueError, df_rev.reindex, df.index, method='bfill') - - def test_reversed_reindex_ffill_raises(self): - dr = pd.date_range('2013-08-01', periods=6, freq='B') - data = np.random.randn(6,1) - df = pd.DataFrame(data, index=dr, columns=list('A')) - df['A'][3] = np.nan - df = pd.DataFrame(data, index=dr, columns=list('A')) - # Reversed reindex is not 'monotonic' - self.assertRaises(ValueError, df.reindex, dr[::-1], method='pad') - self.assertRaises(ValueError, df.reindex, dr[::-1], method='ffill') - self.assertRaises(ValueError, df.reindex, dr[::-1], method='bfill') + self.assertRaises(ValueError, df_rev.reindex, df.index, method='nearest') def test_reindex_level(self): from itertools import permutations diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 65e42f128564e..75c28681ecde5 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # pylint: disable=E1101,E1103,W0232 -from datetime import datetime, timedelta +from datetime import datetime, timedelta, time from pandas.compat import range, lrange, lzip, u, zip import operator import re @@ -95,6 +95,15 @@ def f(): pass tm.assertRaisesRegexp(ValueError,'The truth value of 
a',f) + def test_reindex_base(self): + idx = self.create_index() + expected = np.arange(idx.size) + actual = idx.get_indexer(idx) + assert_array_equal(expected, actual) + + with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'): + idx.get_indexer(idx, method='invalid') + def test_ndarray_compat_properties(self): idx = self.create_index() @@ -109,6 +118,7 @@ def test_ndarray_compat_properties(self): idx.nbytes idx.values.nbytes + class TestIndex(Base, tm.TestCase): _holder = Index _multiprocess_can_split_ = True @@ -421,7 +431,7 @@ def test_is_(self): def test_asof(self): d = self.dateIndex[0] - self.assertIs(self.dateIndex.asof(d), d) + self.assertEqual(self.dateIndex.asof(d), d) self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1)))) d = self.dateIndex[-1] @@ -432,9 +442,10 @@ def test_asof(self): def test_asof_datetime_partial(self): idx = pd.date_range('2010-01-01', periods=2, freq='m') - expected = Timestamp('2010-01-31') + expected = Timestamp('2010-02-28') result = idx.asof('2010-02') self.assertEqual(result, expected) + self.assertFalse(isinstance(result, Index)) def test_nanosecond_index_access(self): s = Series([Timestamp('20130101')]).values.view('i8')[0] @@ -855,17 +866,81 @@ def test_get_indexer(self): assert_almost_equal(r1, [1, 3, -1]) r1 = idx2.get_indexer(idx1, method='pad') - assert_almost_equal(r1, [-1, 0, 0, 1, 1]) + e1 = [-1, 0, 0, 1, 1] + assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method='pad') + assert_almost_equal(r2, e1[::-1]) rffill1 = idx2.get_indexer(idx1, method='ffill') assert_almost_equal(r1, rffill1) r1 = idx2.get_indexer(idx1, method='backfill') - assert_almost_equal(r1, [0, 0, 1, 1, 2]) + e1 = [0, 0, 1, 1, 2] + assert_almost_equal(r1, e1) rbfill1 = idx2.get_indexer(idx1, method='bfill') assert_almost_equal(r1, rbfill1) + r2 = idx2.get_indexer(idx1[::-1], method='backfill') + assert_almost_equal(r2, e1[::-1]) + + def test_get_indexer_nearest(self): + idx = Index(np.arange(10)) + + all_methods = 
['pad', 'backfill', 'nearest'] + for method in all_methods: + actual = idx.get_indexer([0, 5, 9], method=method) + self.assert_array_equal(actual, [0, 5, 9]) + + for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9], [0, 2, 9]]): + actual = idx.get_indexer([0.2, 1.8, 8.5], method=method) + self.assert_array_equal(actual, expected) + + with tm.assertRaisesRegexp(ValueError, 'limit argument'): + idx.get_indexer([1, 0], method='nearest', limit=1) + + def test_get_indexer_nearest_decreasing(self): + idx = Index(np.arange(10))[::-1] + + all_methods = ['pad', 'backfill', 'nearest'] + for method in all_methods: + actual = idx.get_indexer([0, 5, 9], method=method) + self.assert_array_equal(actual, [9, 4, 0]) + + for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1], [9, 7, 0]]): + actual = idx.get_indexer([0.2, 1.8, 8.5], method=method) + self.assert_array_equal(actual, expected) + + def test_get_indexer_strings(self): + idx = pd.Index(['b', 'c']) + + actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad') + expected = [-1, 0, 1, 1] + self.assert_array_equal(actual, expected) + + actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill') + expected = [0, 0, 1, -1] + self.assert_array_equal(actual, expected) + + with tm.assertRaises(TypeError): + idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest') + + def test_get_loc(self): + idx = pd.Index([0, 1, 2]) + all_methods = [None, 'pad', 'backfill', 'nearest'] + for method in all_methods: + self.assertEqual(idx.get_loc(1, method=method), 1) + with tm.assertRaises(TypeError): + idx.get_loc([1, 2], method=method) + + for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: + self.assertEqual(idx.get_loc(1.1, method), loc) + + idx = pd.Index(['a', 'c']) + with tm.assertRaises(TypeError): + idx.get_loc('a', method='nearest') + def test_slice_locs(self): for dtype in [int, float]: idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) @@ -1247,6 +1322,7 @@ def test_ufunc_compat(self): 
expected = Float64Index(np.sin(np.arange(5,dtype='int64'))) tm.assert_index_equal(result, expected) + class TestFloat64Index(Numeric, tm.TestCase): _holder = Float64Index _multiprocess_can_split_ = True @@ -1360,6 +1436,26 @@ def test_equals(self): i2 = Float64Index([1.0,np.nan]) self.assertTrue(i.equals(i2)) + def test_get_indexer(self): + idx = Float64Index([0.0, 1.0, 2.0]) + self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2]) + + target = [-0.1, 0.5, 1.1] + self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1]) + self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2]) + self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1]) + + def test_get_loc(self): + idx = Float64Index([0.0, 1.0, 2.0]) + for method in [None, 'pad', 'backfill', 'nearest']: + self.assertEqual(idx.get_loc(1, method), 1) + + for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: + self.assertEqual(idx.get_loc(1.1, method), loc) + + self.assertRaises(KeyError, idx.get_loc, 'foo') + self.assertRaises(KeyError, idx.get_loc, 1.5) + def test_get_loc_na(self): idx = Float64Index([np.nan, 1, 2]) self.assertEqual(idx.get_loc(1), 1) @@ -1897,6 +1993,54 @@ def test_numeric_compat(self): lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]: self.assertRaises(TypeError, f) + def test_get_loc(self): + idx = pd.date_range('2000-01-01', periods=3) + + for method in [None, 'pad', 'backfill', 'nearest']: + self.assertEqual(idx.get_loc(idx[1], method), 1) + self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1) + self.assertEqual(idx.get_loc(str(idx[1]), method), 1) + + self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0) + self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1) + + self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3)) + self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3)) + + self.assertEqual(idx.get_loc('1999', method='nearest'), 
0) + self.assertEqual(idx.get_loc('2001', method='nearest'), 2) + + with tm.assertRaises(KeyError): + idx.get_loc('1999', method='pad') + with tm.assertRaises(KeyError): + idx.get_loc('2001', method='backfill') + + with tm.assertRaises(KeyError): + idx.get_loc('foobar') + with tm.assertRaises(TypeError): + idx.get_loc(slice(2)) + + idx = pd.to_datetime(['2000-01-01', '2000-01-04']) + self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0) + self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1) + self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2)) + + # time indexing + idx = pd.date_range('2000-01-01', periods=24, freq='H') + assert_array_equal(idx.get_loc(time(12)), [12]) + assert_array_equal(idx.get_loc(time(12, 30)), []) + with tm.assertRaises(NotImplementedError): + idx.get_loc(time(12, 30), method='pad') + + def test_get_indexer(self): + idx = pd.date_range('2000-01-01', periods=3) + self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2]) + + target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) + self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1]) + self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2]) + self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1]) + def test_roundtrip_pickle_with_tz(self): # GH 8367 @@ -1959,6 +2103,30 @@ def create_index(self): def test_pickle_compat_construction(self): pass + def test_get_loc(self): + idx = pd.period_range('2000-01-01', periods=3) + + for method in [None, 'pad', 'backfill', 'nearest']: + self.assertEqual(idx.get_loc(idx[1], method), 1) + self.assertEqual(idx.get_loc(idx[1].asfreq('H', how='start'), method), 1) + self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1) + self.assertEqual(idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1) + self.assertEqual(idx.get_loc(str(idx[1]), method), 1) + + def test_get_indexer(self): + idx = pd.period_range('2000-01-01', periods=3).asfreq('H', 
how='start') + self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2]) + + target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12', + '2000-01-02T01'], freq='H') + self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1]) + self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2]) + self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1]) + + with self.assertRaisesRegexp(ValueError, 'different freq'): + idx.asfreq('D').get_indexer(idx) + + class TestTimedeltaIndex(DatetimeLike, tm.TestCase): _holder = TimedeltaIndex _multiprocess_can_split_ = True @@ -1966,6 +2134,26 @@ class TestTimedeltaIndex(DatetimeLike, tm.TestCase): def create_index(self): return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1) + def test_get_loc(self): + idx = pd.to_timedelta(['0 days', '1 days', '2 days']) + + for method in [None, 'pad', 'backfill', 'nearest']: + self.assertEqual(idx.get_loc(idx[1], method), 1) + self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1) + self.assertEqual(idx.get_loc(str(idx[1]), method), 1) + + for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: + self.assertEqual(idx.get_loc('1 day 1 hour', method), loc) + + def test_get_indexer(self): + idx = pd.to_timedelta(['0 days', '1 days', '2 days']) + self.assert_array_equal(idx.get_indexer(idx), [0, 1, 2]) + + target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) + self.assert_array_equal(idx.get_indexer(target, 'pad'), [-1, 0, 1]) + self.assert_array_equal(idx.get_indexer(target, 'backfill'), [0, 1, 2]) + self.assert_array_equal(idx.get_indexer(target, 'nearest'), [0, 1, 1]) + def test_numeric_compat(self): idx = self._holder(np.arange(5,dtype='int64')) @@ -2733,6 +2921,9 @@ def test_get_loc(self): self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two')) self.assertRaises(KeyError, self.index.get_loc, 'quux') + self.assertRaises(NotImplementedError, self.index.get_loc, 'foo', + method='nearest') + # 3 levels index = 
MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), @@ -2935,13 +3126,21 @@ def test_get_indexer(self): assert_almost_equal(r1, [1, 3, -1]) r1 = idx2.get_indexer(idx1, method='pad') - assert_almost_equal(r1, [-1, 0, 0, 1, 1]) + e1 = [-1, 0, 0, 1, 1] + assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method='pad') + assert_almost_equal(r2, e1[::-1]) rffill1 = idx2.get_indexer(idx1, method='ffill') assert_almost_equal(r1, rffill1) r1 = idx2.get_indexer(idx1, method='backfill') - assert_almost_equal(r1, [0, 0, 1, 1, 2]) + e1 = [0, 0, 1, 1, 2] + assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method='backfill') + assert_almost_equal(r2, e1[::-1]) rbfill1 = idx2.get_indexer(idx1, method='bfill') assert_almost_equal(r1, rbfill1) @@ -2961,6 +3160,11 @@ def test_get_indexer(self): " uniquely valued Index objects", idx1.get_indexer, idx2) + def test_get_indexer_nearest(self): + midx = MultiIndex.from_tuples([('a', 1), ('b', 2)]) + with tm.assertRaises(NotImplementedError): + midx.get_indexer(['a'], method='nearest') + def test_format(self): self.index.format() self.index[:0].format() diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 1a2fc5a8fc13c..04dad68703577 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5862,8 +5862,9 @@ def test_reindex_pad(self): result = s.reindex(new_index).ffill(downcast='infer') assert_series_equal(result, expected) - # invalid because we can't forward fill on this type of index - self.assertRaises(ValueError, lambda : s.reindex(new_index, method='ffill')) + expected = Series([1, 5, 3, 5], index=new_index) + result = s.reindex(new_index, method='ffill') + assert_series_equal(result, expected) # inferrence of new dtype s = Series([True,False,False,True],index=list('abcd')) @@ -5878,6 +5879,16 @@ def test_reindex_pad(self): expected = Series(False,index=lrange(0,5)) assert_series_equal(result, expected) + def test_reindex_nearest(self): + s = 
Series(np.arange(10, dtype='int64')) + target = [0.1, 0.9, 1.5, 2.0] + actual = s.reindex(target, method='nearest') + expected = Series(np.around(target).astype('int64'), target) + assert_series_equal(expected, actual) + + actual = s.reindex_like(actual, method='nearest') + assert_series_equal(expected, actual) + def test_reindex_backfill(self): pass diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 2205c6c4f4a64..3940bbcc949ba 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1227,7 +1227,7 @@ def get_value_maybe_box(self, series, key): values = self._engine.get_value(_values_from_object(series), key) return _maybe_box(self, values, series, key) - def get_loc(self, key): + def get_loc(self, key, method=None): """ Get integer location for requested label @@ -1237,15 +1237,18 @@ def get_loc(self, key): """ if isinstance(key, datetime): # needed to localize naive datetimes - stamp = Timestamp(key, tz=self.tz) - return self._engine.get_loc(stamp) + key = Timestamp(key, tz=self.tz) + return Index.get_loc(self, key, method=method) if isinstance(key, time): + if method is not None: + raise NotImplementedError('cannot yet lookup inexact labels ' + 'when key is a time object') return self.indexer_at_time(key) try: - return Index.get_loc(self, key) - except (KeyError, ValueError): + return Index.get_loc(self, key, method=method) + except (KeyError, ValueError, TypeError): try: return self._get_string_slice(key) except (TypeError, KeyError, ValueError): @@ -1253,7 +1256,7 @@ def get_loc(self, key): try: stamp = Timestamp(key, tz=self.tz) - return self._engine.get_loc(stamp) + return Index.get_loc(self, stamp, method=method) except (KeyError, ValueError): raise KeyError(key) @@ -1637,9 +1640,6 @@ def indexer_at_time(self, time, asof=False): Parameters ---------- time : datetime.time or string - tz : string or pytz.timezone or dateutil.tz.tzfile - Time zone for time. 
Corresponding timestamps would be converted to - time zone of the TimeSeries Returns ------- diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 106e8535ce15a..074ed720991ce 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -516,7 +516,13 @@ def get_value(self, series, key): key = Period(key, self.freq).ordinal return _maybe_box(self, self._engine.get_value(s, key), series, key) - def get_loc(self, key): + def get_indexer(self, target, method=None, limit=None): + if hasattr(target, 'freq') and target.freq != self.freq: + raise ValueError('target and index have different freq: ' + '(%s, %s)' % (target.freq, self.freq)) + return Index.get_indexer(self, target, method, limit) + + def get_loc(self, key, method=None): """ Get integer location for requested label @@ -538,7 +544,7 @@ def get_loc(self, key): key = Period(key, self.freq) try: - return self._engine.get_loc(key.ordinal) + return Index.get_loc(self, key.ordinal, method=method) except KeyError: raise KeyError(key) diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index c365dced8d277..897a28e8f5ea9 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -649,7 +649,7 @@ def get_value_maybe_box(self, series, key): values = self._engine.get_value(_values_from_object(series), key) return _maybe_box(self, values, series, key) - def get_loc(self, key): + def get_loc(self, key, method=None): """ Get integer location for requested label @@ -659,11 +659,11 @@ def get_loc(self, key): """ if _is_convertible_to_td(key): key = Timedelta(key) - return self._engine.get_loc(key) + return Index.get_loc(self, key, method=method) try: - return Index.get_loc(self, key) - except (KeyError, ValueError): + return Index.get_loc(self, key, method=method) + except (KeyError, ValueError, TypeError): try: return self._get_string_slice(key) except (TypeError, KeyError, ValueError): @@ -671,7 +671,7 @@ def get_loc(self, key): try: stamp = Timedelta(key) - return 
self._engine.get_loc(stamp) + return Index.get_loc(self, stamp, method=method) except (KeyError, ValueError): raise KeyError(key) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index cf82733c6629d..5f48861097b6d 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -1544,7 +1544,7 @@ def test_period_set_index_reindex(self): df = df.set_index(idx1) self.assertTrue(df.index.equals(idx1)) - df = df.reindex(idx2) + df = df.set_index(idx2) self.assertTrue(df.index.equals(idx2)) def test_nested_dict_frame_constructor(self): diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 54045dfd7c835..b65ecd14d3fff 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -660,7 +660,8 @@ def test_sparse_frame_fillna_limit(self): def test_pad_require_monotonicity(self): rng = date_range('1/1/2000', '3/1/2000', freq='B') - rng2 = rng[::2][::-1] + # neither monotonic increasing or decreasing + rng2 = rng[[1, 0, 2]] self.assertRaises(ValueError, rng2.get_indexer, rng, method='pad') diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index c1b9a3e2359d9..f5626618ea9f5 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -6,7 +6,7 @@ import pandas._period as period import datetime -from pandas.core.api import Timestamp, Series, Timedelta +from pandas.core.api import Timestamp, Series, Timedelta, Period from pandas.tslib import get_timezone from pandas._period import period_asfreq, period_ordinal from pandas.tseries.index import date_range @@ -138,6 +138,12 @@ def test_constructor_with_stringoffset(self): self.assertEqual(repr(result), expected_repr) self.assertEqual(result, eval(repr(result))) + def test_constructor_invalid(self): + with tm.assertRaisesRegexp(TypeError, 'Cannot convert input'): + Timestamp(slice(2)) + with 
tm.assertRaisesRegexp(ValueError, 'Cannot convert Period'): + Timestamp(Period('1000-01-01')) + def test_conversion(self): # GH 9255 ts = Timestamp('2000-01-01') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 85cb50b8f18ae..f4cf711951f5e 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1154,8 +1154,10 @@ cdef convert_to_tsobject(object ts, object tz, object unit): # Keep the converter same as PyDateTime's ts = datetime.combine(ts, datetime_time()) return convert_to_tsobject(ts, tz, None) - else: + elif getattr(ts, '_typ', None) == 'period': raise ValueError("Cannot convert Period to Timestamp unambiguously. Use to_timestamp") + else: + raise TypeError('Cannot convert input to Timestamp') if obj.value != NPY_NAT: _check_dts_bounds(&obj.dts)
Fixes #8845 Currently only an index method; I'm open to ideas for how to expose it more directly -- maybe `df.loc_nearest`?. In particular, I think this is usually a more useful way to do indexing for floats than pandas's reindex based `.loc`, because floats are inherently imprecise. CC @jreback @jorisvandenbossche @immerrr @hugadams
https://api.github.com/repos/pandas-dev/pandas/pulls/9258
2015-01-15T05:53:32Z
2015-02-23T18:30:41Z
2015-02-23T18:30:41Z
2015-04-13T22:55:42Z
API: restore full datetime.timedelta compat with Timedelta w.r.t. seconds/microseconds accessors (GH9185, GH9139)
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 1ad5492efe61a..d6b99770ad4f9 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -251,8 +251,13 @@ yields another ``timedelta64[ns]`` dtypes Series. Attributes ---------- -You can access various components of the ``Timedelta`` or ``TimedeltaIndex`` directly using the attributes ``days,hours,minutes,seconds,milliseconds,microseconds,nanoseconds``. -These operations can be directly accessed via the ``.dt`` property of the ``Series`` as well. These return an integer representing that interval (which is signed according to whether the ``Timedelta`` is signed). +You can access various components of the ``Timedelta`` or ``TimedeltaIndex`` directly using the attributes ``days,seconds,microseconds,nanoseconds``. These are identical to the values returned by ``datetime.timedelta``, in that, for example, the ``.seconds`` attribute represents the number of seconds >= 0 and < 1 day. These are signed according to whether the ``Timedelta`` is signed. + +These operations can also be directly accessed via the ``.dt`` property of the ``Series`` as well. + +.. note:: + + Note that the attributes are NOT the displayed values of the ``Timedelta``. Use ``.components`` to retrieve the displayed values. For a ``Series`` @@ -271,29 +276,12 @@ You can access the component field for a scalar ``Timedelta`` directly. (-tds).seconds You can use the ``.components`` property to access a reduced form of the timedelta. This returns a ``DataFrame`` indexed -similarly to the ``Series`` +similarly to the ``Series``. These are the *displayed* values of the ``Timedelta``. .. ipython:: python td.dt.components - -.. _timedeltas.attribues_warn: - -.. warning:: - - ``Timedelta`` scalars (and ``TimedeltaIndex``) component fields are *not the same* as the component fields on a ``datetime.timedelta`` object. 
For example, ``.seconds`` on a ``datetime.timedelta`` object returns the total number of seconds combined between ``hours``, ``minutes`` and ``seconds``. In contrast, the pandas ``Timedelta`` breaks out hours, minutes, microseconds and nanoseconds separately. - - .. ipython:: python - - # Timedelta accessor - tds = Timedelta('31 days 5 min 3 sec') - tds.minutes - tds.seconds - - # datetime.timedelta accessor - # this is 5 minutes * 60 + 3 seconds - tds.to_pytimedelta().seconds - + td.dt.components.seconds .. _timedeltas.index: diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index 8397d2fcac2e9..8ec431d6c70ed 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -29,7 +29,7 @@ users upgrade to this version. - Split out string methods documentation into :ref:`Working with Text Data <text>` - Check the :ref:`API Changes <whatsnew_0150.api>` and :ref:`deprecations <whatsnew_0150.deprecations>` before updating - + - :ref:`Other Enhancements <whatsnew_0150.enhancements>` - :ref:`Performance Improvements <whatsnew_0150.performance>` @@ -403,7 +403,7 @@ Rolling/Expanding Moments improvements rolling_window(s, window=3, win_type='triang', center=True) -- Removed ``center`` argument from all :func:`expanding_ <expanding_apply>` functions (see :ref:`list <api.functions_expanding>`), +- Removed ``center`` argument from all :func:`expanding_ <expanding_apply>` functions (see :ref:`list <api.functions_expanding>`), as the results produced when ``center=True`` did not make much sense. (:issue:`7925`) - Added optional ``ddof`` argument to :func:`expanding_cov` and :func:`rolling_cov`. @@ -574,20 +574,20 @@ for more details): .. 
code-block:: python In [2]: pd.Categorical.from_codes([0,1,0,2,1], categories=['a', 'b', 'c']) - Out[2]: + Out[2]: [a, b, a, c, b] Categories (3, object): [a, b, c] API changes related to the introduction of the ``Timedelta`` scalar (see :ref:`above <whatsnew_0150.timedeltaindex>` for more details): - + - Prior to 0.15.0 :func:`to_timedelta` would return a ``Series`` for list-like/Series input, and a ``np.timedelta64`` for scalar input. It will now return a ``TimedeltaIndex`` for list-like input, ``Series`` for Series input, and ``Timedelta`` for scalar input. For API changes related to the rolling and expanding functions, see detailed overview :ref:`above <whatsnew_0150.roll>`. -Other notable API changes: +Other notable API changes: - Consistency when indexing with ``.loc`` and a list-like indexer when no values are found. @@ -872,7 +872,7 @@ Enhancements in the importing/exporting of Stata files: objects and columns containing missing values have ``object`` data type. (:issue:`8045`) Enhancements in the plotting functions: - + - Added ``layout`` keyword to ``DataFrame.plot``. You can pass a tuple of ``(rows, columns)``, one of which can be ``-1`` to automatically infer (:issue:`6667`, :issue:`8071`). - Allow to pass multiple axes to ``DataFrame.plot``, ``hist`` and ``boxplot`` (:issue:`5353`, :issue:`6970`, :issue:`7069`) - Added support for ``c``, ``colormap`` and ``colorbar`` arguments for ``DataFrame.plot`` with ``kind='scatter'`` (:issue:`7780`) diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 4a61140c3829c..a38adc4658492 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -27,6 +27,41 @@ Backwards incompatible API changes .. _whatsnew_0160.api_breaking: +- In v0.15.0 a new scalar type ``Timedelta`` was introduced, that is a sub-class of ``datetime.timedelta``. Mentioned :ref:`here <whatsnew_0150.timedeltaindex>` was a notice of an API change w.r.t. the ``.seconds`` accessor. 
The intent was to provide a user-friendly set of accessors that give the 'natural' value for that unit, e.g. if you had a ``Timedelta('1 day, 10:11:12')``, then ``.seconds`` would return 12. However, this is at odds with the definition of ``datetime.timedelta``, which defines ``.seconds`` as ``10 * 3600 + 11 * 60 + 12 == 36672``. + +So in v0.16.0, we are restoring the API to match that of ``datetime.timedelta``. However, the component values are still available through the ``.components`` accessor. This affects the ``.seconds`` and ``.microseconds`` accessors, and removes the ``.hours``, ``.minutes``, ``.milliseconds`` accessors. These changes affect ``TimedeltaIndex`` and the Series ``.dt`` accessor as well. (:issue:`9185`, :issue:`9139`) + +Previous Behavior + +.. code-block:: python + + In [2]: t = pd.Timedelta('1 day, 10:11:12.100123') + + In [3]: t.days + Out[3]: 1 + + In [4]: t.seconds + Out[4]: 12 + + In [5]: t.microseconds + Out[5]: 123 + +New Behavior + +.. ipython:: python + + t = pd.Timedelta('1 day, 10:11:12.100123') + t.days + t.seconds + t.microseconds + +Using ``.components`` allows the full component access + +.. ipython:: python + + t.components + t.components.seconds + - ``Index.duplicated`` now returns `np.array(dtype=bool)` rather than `Index(dtype=object)` containing `bool` values. (:issue:`8875`) - ``DataFrame.to_json`` now returns accurate type serialisation for each column for frames of mixed dtype (:issue:`9037`) diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 634402d891e53..5909f8af0e5dd 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1151,6 +1151,29 @@ def test_swapped_columns(self): tm.assert_series_equal(write_frame['A'], read_frame['A']) tm.assert_series_equal(write_frame['B'], read_frame['B']) + def test_datetimes(self): + + # Test writing and reading datetimes. For issue #9139. 
(xref #9185) + _skip_if_no_xlrd() + + datetimes = [datetime(2013, 1, 13, 1, 2, 3), + datetime(2013, 1, 13, 2, 45, 56), + datetime(2013, 1, 13, 4, 29, 49), + datetime(2013, 1, 13, 6, 13, 42), + datetime(2013, 1, 13, 7, 57, 35), + datetime(2013, 1, 13, 9, 41, 28), + datetime(2013, 1, 13, 11, 25, 21), + datetime(2013, 1, 13, 13, 9, 14), + datetime(2013, 1, 13, 14, 53, 7), + datetime(2013, 1, 13, 16, 37, 0), + datetime(2013, 1, 13, 18, 20, 52)] + + with ensure_clean(self.ext) as path: + write_frame = DataFrame.from_items([('A', datetimes)]) + write_frame.to_excel(path, 'Sheet1') + read_frame = read_excel(path, 'Sheet1', header=0) + + tm.assert_series_equal(write_frame['A'], read_frame['A']) def raise_wrapper(major_ver): def versioned_raise_wrapper(orig_method): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 6faf6229b6d3b..b67a8c5de1c2d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -84,7 +84,7 @@ def test_dt_namespace_accessor(self): ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz'] ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert'] - ok_for_td = ['days','hours','minutes','seconds','milliseconds','microseconds','nanoseconds'] + ok_for_td = ['days','seconds','microseconds','nanoseconds'] ok_for_td_methods = ['components','to_pytimedelta'] def get_expected(s, name): diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index 097ccef9e462b..2afdff2982d8a 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -118,8 +118,8 @@ def _join_i8_wrapper(joinf, **kwargs): _left_indexer_unique = _join_i8_wrapper( _algos.left_join_indexer_unique_int64, with_indexers=False) _arrmap = None - _datetimelike_ops = ['days','hours','minutes','seconds','milliseconds','microseconds', - 'nanoseconds','freq','components'] + _datetimelike_ops = 
['days','seconds','microseconds','nanoseconds', + 'freq','components'] __eq__ = _td_index_cmp('__eq__') __ne__ = _td_index_cmp('__ne__', nat_result=True) @@ -349,37 +349,22 @@ def _get_field(self, m): @property def days(self): - """ The number of integer days for each element """ + """ Number of days for each element. """ return self._get_field('days') - @property - def hours(self): - """ The number of integer hours for each element """ - return self._get_field('hours') - - @property - def minutes(self): - """ The number of integer minutes for each element """ - return self._get_field('minutes') - @property def seconds(self): - """ The number of integer seconds for each element """ + """ Number of seconds (>= 0 and less than 1 day) for each element. """ return self._get_field('seconds') - @property - def milliseconds(self): - """ The number of integer milliseconds for each element """ - return self._get_field('milliseconds') - @property def microseconds(self): - """ The number of integer microseconds for each element """ + """ Number of microseconds (>= 0 and less than 1 second) for each element. """ return self._get_field('microseconds') @property def nanoseconds(self): - """ The number of integer nanoseconds for each element """ + """ Number of nanoseconds (>= 0 and less than 1 microsecond) for each element. 
""" return self._get_field('nanoseconds') @property diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index b6c5327357590..ced566157d48f 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -301,15 +301,18 @@ class Other: self.assertTrue(td.__floordiv__(td) is NotImplemented) def test_fields(self): + + # compat to datetime.timedelta rng = to_timedelta('1 days, 10:11:12') self.assertEqual(rng.days,1) - self.assertEqual(rng.hours,10) - self.assertEqual(rng.minutes,11) - self.assertEqual(rng.seconds,12) - self.assertEqual(rng.milliseconds,0) + self.assertEqual(rng.seconds,10*3600+11*60+12) self.assertEqual(rng.microseconds,0) self.assertEqual(rng.nanoseconds,0) + self.assertRaises(AttributeError, lambda : rng.hours) + self.assertRaises(AttributeError, lambda : rng.minutes) + self.assertRaises(AttributeError, lambda : rng.milliseconds) + td = Timedelta('-1 days, 10:11:12') self.assertEqual(abs(td),Timedelta('13:48:48')) self.assertTrue(str(td) == "-1 days +10:11:12") @@ -317,14 +320,14 @@ def test_fields(self): self.assertEqual(-Timedelta('-1 days, 10:11:12').value,49728000000000) self.assertEqual(Timedelta('-1 days, 10:11:12').value,-49728000000000) - rng = to_timedelta('-1 days, 10:11:12') + rng = to_timedelta('-1 days, 10:11:12.100123456') self.assertEqual(rng.days,-1) - self.assertEqual(rng.hours,10) - self.assertEqual(rng.minutes,11) - self.assertEqual(rng.seconds,12) - self.assertEqual(rng.milliseconds,0) - self.assertEqual(rng.microseconds,0) - self.assertEqual(rng.nanoseconds,0) + self.assertEqual(rng.seconds,10*3600+11*60+12) + self.assertEqual(rng.microseconds,100*1000+123) + self.assertEqual(rng.nanoseconds,456) + self.assertRaises(AttributeError, lambda : rng.hours) + self.assertRaises(AttributeError, lambda : rng.minutes) + self.assertRaises(AttributeError, lambda : rng.milliseconds) # components tup = pd.to_timedelta(-1, 'us').components @@ -830,22 +833,22 @@ def 
test_astype(self): self.assert_numpy_array_equal(result, rng.asi8) def test_fields(self): - rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') + rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, freq='s') self.assert_numpy_array_equal(rng.days, np.array([1,1],dtype='int64')) - self.assert_numpy_array_equal(rng.hours, np.array([10,10],dtype='int64')) - self.assert_numpy_array_equal(rng.minutes, np.array([11,11],dtype='int64')) - self.assert_numpy_array_equal(rng.seconds, np.array([12,13],dtype='int64')) - self.assert_numpy_array_equal(rng.milliseconds, np.array([0,0],dtype='int64')) - self.assert_numpy_array_equal(rng.microseconds, np.array([0,0],dtype='int64')) - self.assert_numpy_array_equal(rng.nanoseconds, np.array([0,0],dtype='int64')) + self.assert_numpy_array_equal(rng.seconds, np.array([10*3600+11*60+12,10*3600+11*60+13],dtype='int64')) + self.assert_numpy_array_equal(rng.microseconds, np.array([100*1000+123,100*1000+123],dtype='int64')) + self.assert_numpy_array_equal(rng.nanoseconds, np.array([456,456],dtype='int64')) + + self.assertRaises(AttributeError, lambda : rng.hours) + self.assertRaises(AttributeError, lambda : rng.minutes) + self.assertRaises(AttributeError, lambda : rng.milliseconds) # with nat s = Series(rng) s[1] = np.nan tm.assert_series_equal(s.dt.days,Series([1,np.nan],index=[0,1])) - tm.assert_series_equal(s.dt.hours,Series([10,np.nan],index=[0,1])) - tm.assert_series_equal(s.dt.milliseconds,Series([0,np.nan],index=[0,1])) + tm.assert_series_equal(s.dt.seconds,Series([10*3600+11*60+12,np.nan],index=[0,1])) def test_components(self): rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 7cf7147a48d63..8e2cb199214cf 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1896,45 +1896,43 @@ class Timedelta(_Timedelta): @property def days(self): - """ The days for the Timedelta """ + """ + Number of Days + + .components will return the shown components + 
""" self._ensure_components() if self._sign < 0: return -1*self._d return self._d - @property - def hours(self): - """ The hours for the Timedelta """ - self._ensure_components() - return self._h - - @property - def minutes(self): - """ The minutes for the Timedelta """ - self._ensure_components() - return self._m - @property def seconds(self): - """ The seconds for the Timedelta """ - self._ensure_components() - return self._s + """ + Number of seconds (>= 0 and less than 1 day). - @property - def milliseconds(self): - """ The milliseconds for the Timedelta """ + .components will return the shown components + """ self._ensure_components() - return self._ms + return self._h*3600 + self._m*60 + self._s @property def microseconds(self): - """ The microseconds for the Timedelta """ + """ + Number of microseconds (>= 0 and less than 1 second). + + .components will return the shown components + """ self._ensure_components() - return self._us + return self._ms*1000 + self._us @property def nanoseconds(self): - """ The nanoseconds for the Timedelta """ + """ + Number of nanoseconds (>= 0 and less than 1 microsecond). + + .components will return the shown components + """ self._ensure_components() return self._ns
closes #9185 closes #9139
https://api.github.com/repos/pandas-dev/pandas/pulls/9257
2015-01-15T02:34:37Z
2015-01-16T16:54:46Z
2015-01-16T16:54:45Z
2015-01-16T17:01:13Z
BUG: bug in multi-index where insert fails
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 5ec0e90383f4c..cd3a3f6e38d98 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -103,6 +103,7 @@ Bug Fixes - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`, :issue:`5873`) - Bug in ``pivot`` and `unstack`` where ``nan`` values would break index alignment (:issue:`7466`) - Bug in left ``join`` on multi-index with ``sort=True`` or null values (:issue:`9210`). +- Bug in ``MultiIndex`` where inserting new keys would fail (:issue:`9250`). diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 7202ed64e1c9c..e305eb828f410 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -93,31 +93,27 @@ def _get_loc(self, key, axis=0): def _slice(self, obj, axis=0, typ=None): return self.obj._slice(obj, axis=axis, typ=typ) - def __setitem__(self, key, value): - + def _get_setitem_indexer(self, key): if self.axis is not None: - indexer = self._convert_tuple(key, is_setter=True) + return self._convert_tuple(key, is_setter=True) - else: + axis = self.obj._get_axis(0) + if isinstance(axis, MultiIndex): + try: + return axis.get_loc(key) + except Exception: + pass - # kludgetastic - ax = self.obj._get_axis(0) - if isinstance(ax, MultiIndex): - try: - indexer = ax.get_loc(key) - self._setitem_with_indexer(indexer, value) - return - except Exception: - pass + if isinstance(key, tuple) and not self.ndim < len(key): + return self._convert_tuple(key, is_setter=True) - if isinstance(key, tuple): - if len(key) > self.ndim: - raise IndexingError('only tuples of length <= %d supported' % - self.ndim) - indexer = self._convert_tuple(key, is_setter=True) - else: - indexer = self._convert_to_indexer(key, is_setter=True) + try: + return self._convert_to_indexer(key, is_setter=True) + except TypeError: + raise IndexingError(key) + def __setitem__(self, key, value): + indexer = 
self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) def _has_valid_type(self, k, axis): @@ -259,10 +255,6 @@ def _setitem_with_indexer(self, indexer, value): self.obj._maybe_update_cacher(clear=True) self.obj.is_copy=None - if isinstance(labels, MultiIndex): - self.obj.sortlevel(inplace=True) - labels = self.obj._get_axis(i) - nindexer.append(labels.get_loc(key)) else: @@ -1064,7 +1056,12 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): # if we are a label return me try: return labels.get_loc(obj) - except (KeyError, TypeError): + except KeyError: + if isinstance(obj, tuple) and isinstance(labels, MultiIndex): + if is_setter and len(obj) == labels.nlevels: + return {'key': obj} + raise + except TypeError: pass except (ValueError): if not is_int_positional: @@ -1136,10 +1133,6 @@ def _convert_to_indexer(self, obj, axis=0, is_setter=False): mask = check == -1 if mask.any(): - - # mi here - if isinstance(obj, tuple) and is_setter: - return {'key': obj} raise KeyError('%s not in index' % objarr[mask]) return _values_from_object(indexer) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index fcbfb21bd20e3..129bddea6eed5 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -1351,8 +1351,8 @@ def test_getitem_setitem_fancy_exceptions(self): ix = self.frame.ix with assertRaisesRegexp(IndexingError, 'Too many indexers'): ix[:, :, :] - with assertRaisesRegexp(IndexingError, 'only tuples of length <= 2 ' - 'supported'): + + with assertRaises(IndexingError): ix[:, :, :] = 1 def test_getitem_setitem_boolean_misaligned(self): diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index d474981771015..ef41748e2cda9 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -3354,6 +3354,56 @@ def test_insert(self): assertRaisesRegexp(ValueError, "Item must have length equal to number" " of levels", self.index.insert, 0, ('foo2',)) + left = pd.DataFrame([['a', 'b', 0], 
['b', 'd', 1]], + columns=['1st', '2nd', '3rd']) + left.set_index(['1st', '2nd'], inplace=True) + ts = left['3rd'].copy(deep=True) + + left.loc[('b', 'x'), '3rd'] = 2 + left.loc[('b', 'a'), '3rd'] = -1 + left.loc[('b', 'b'), '3rd'] = 3 + left.loc[('a', 'x'), '3rd'] = 4 + left.loc[('a', 'w'), '3rd'] = 5 + left.loc[('a', 'a'), '3rd'] = 6 + + ts.loc[('b', 'x')] = 2 + ts.loc['b', 'a'] = -1 + ts.loc[('b', 'b')] = 3 + ts.loc['a', 'x'] = 4 + ts.loc[('a', 'w')] = 5 + ts.loc['a', 'a'] = 6 + + right = pd.DataFrame([['a', 'b', 0], + ['b', 'd', 1], + ['b', 'x', 2], + ['b', 'a', -1], + ['b', 'b', 3], + ['a', 'x', 4], + ['a', 'w', 5], + ['a', 'a', 6]], + columns=['1st', '2nd', '3rd']) + right.set_index(['1st', '2nd'], inplace=True) + # FIXME data types changes to float because + # of intermediate nan insertion; + tm.assert_frame_equal(left, right, check_dtype=False) + tm.assert_series_equal(ts, right['3rd']) + + # GH9250 + idx = [('test1', i) for i in range(5)] + \ + [('test2', i) for i in range(6)] + \ + [('test', 17), ('test', 18)] + + left = pd.Series(np.linspace(0, 10, 11), + pd.MultiIndex.from_tuples(idx[:-2])) + + left.loc[('test', 17)] = 11 + left.ix[('test', 18)] = 12 + + right = pd.Series(np.linspace(0, 12, 13), + pd.MultiIndex.from_tuples(idx)) + + tm.assert_series_equal(left, right) + def test_take_preserve_name(self): taken = self.index.take([3, 0, 1]) self.assertEqual(taken.names, self.index.names)
closes https://github.com/pydata/pandas/issues/9250 on master: ``` >>> df 3rd 1st 2nd a b 0 b d 1 >>> df.loc[('b', 'x'), '3rd'] = 2 # this works! >>> df 3rd 1st 2nd a b 0 b d 1 x 2 >>> df.loc[('b', 'a'), '3rd'] = -1 # fails! sets everything to -1 >>> df 3rd 1st 2nd a b -1 b d -1 x -1 >>> df.loc[('b', 'b'), '3rd'] = 3 # erros! NotImplementedError: Index._join_level on non-unique index is not implemented ``` on branch: ``` >>> df 3rd 1st 2nd a b 0 b d 1 >>> df.loc[('b', 'x'), '3rd'] = 2 >>> df.loc[('b', 'a'), '3rd'] = -1 >>> df.loc[('b', 'b'), '3rd'] = 3 >>> df 3rd 1st 2nd a b 0 b d 1 x 2 a -1 b 3 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9256
2015-01-15T01:16:58Z
2015-01-16T12:51:42Z
2015-01-16T12:51:42Z
2015-01-17T11:33:53Z
ENH: add Timestamp.to_datetime64
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 5ec0e90383f4c..1528747891c64 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -63,6 +63,7 @@ Enhancements - Paths beginning with ~ will now be expanded to begin with the user's home directory (:issue:`9066`) - Added time interval selection in get_data_yahoo (:issue:`9071`) - Added ``Series.str.slice_replace()``, which previously raised NotImplementedError (:issue:`8888`) +- Added ``Timestamp.to_datetime64()`` to complement ``Timedelta.to_timedelta64()`` (:issue:`9255`) Performance diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py index 679fd2992855c..945458de22d2c 100644 --- a/pandas/tseries/tests/test_tslib.py +++ b/pandas/tseries/tests/test_tslib.py @@ -136,6 +136,21 @@ def test_constructor_with_stringoffset(self): self.assertEqual(repr(result), expected_repr) self.assertEqual(result, eval(repr(result))) + def test_conversion(self): + # GH 9255 + ts = Timestamp('2000-01-01') + + result = ts.to_pydatetime() + expected = datetime.datetime(2000, 1, 1) + self.assertEqual(result, expected) + self.assertEqual(type(result), type(expected)) + + result = ts.to_datetime64() + expected = np.datetime64(ts.value, 'ns') + self.assertEqual(result, expected) + self.assertEqual(type(result), type(expected)) + self.assertEqual(result.dtype, expected.dtype) + def test_repr(self): tm._skip_if_no_pytz() tm._skip_if_no_dateutil() diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 78a3a5d75cfd3..7cf7147a48d63 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -852,6 +852,10 @@ cdef class _Timestamp(datetime): dts.hour, dts.min, dts.sec, dts.us, ts.tzinfo) + cpdef to_datetime64(self): + """ Returns a numpy.datetime64 object with 'ns' precision """ + return np.datetime64(self.value, 'ns') + def __add__(self, other): cdef int64_t other_int
This PR adds a `Timestamp.to_datetime64()` method to complement the `Timedelta.to_timedelta64()` method I added in #8884. It is a continuation of the aborted #8916. Arguably, there should also be the alias `Timestamp.values` to complement the series property but I haven't added that yet.
https://api.github.com/repos/pandas-dev/pandas/pulls/9255
2015-01-15T00:47:10Z
2015-01-15T04:43:11Z
2015-01-15T04:43:11Z
2015-01-15T04:43:16Z
BUG: Bug in the returned Series.dt.components index was reset to the default index (GH9247)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 365b4ab7ac9d9..5ec0e90383f4c 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -86,7 +86,7 @@ Bug Fixes - Fixed issue using `read_csv` on s3 with Python 3. - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - Bug in Panel indexing with an object-like (:issue:`9140`) - +- Bug in the returned ``Series.dt.components`` index was reset to the default index (:issue:`9247`) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 9b072e2f62968..6faf6229b6d3b 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -142,7 +142,7 @@ def compare(s, name): tm.assert_series_equal(result, expected) # timedeltaindex - for s in [Series(timedelta_range('1 day',periods=5)), + for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')), Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')), Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]: for prop in ok_for_td: diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py index f12e0263bcf0c..c953c0961a596 100644 --- a/pandas/tseries/common.py +++ b/pandas/tseries/common.py @@ -139,7 +139,7 @@ def to_pytimedelta(self): @property def components(self): - return self.values.components + return self.values.components.set_index(self.index) TimedeltaProperties._add_delegate_accessors(delegate=TimedeltaIndex, accessors=TimedeltaIndex._datetimelike_ops,
closes #9247
https://api.github.com/repos/pandas-dev/pandas/pulls/9248
2015-01-14T13:55:39Z
2015-01-14T14:39:57Z
2015-01-14T14:39:57Z
2015-01-14T14:39:57Z
FIX: Fix encoding to allow StataReader to read urls
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 1528747891c64..b221a7df373a4 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -106,7 +106,7 @@ Bug Fixes - Bug in left ``join`` on multi-index with ``sort=True`` or null values (:issue:`9210`). - +- Fixed character encoding bug in ``read_stata`` and ``StataReader`` when loading data from a URL (:issue:`9231`). diff --git a/pandas/io/stata.py b/pandas/io/stata.py index d8ebb8027c4ce..ccfe8468813c7 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -44,7 +44,7 @@ def read_stata(filepath_or_buffer, convert_dates=True, Read value labels and convert columns to Categorical/Factor variables encoding : string, None or encoding Encoding used to parse the files. Note that Stata doesn't - support unicode. None defaults to cp1252. + support unicode. None defaults to iso-8859-1. index : identifier of index column identifier of column that should be used as index of the DataFrame convert_missing : boolean, defaults to False @@ -683,7 +683,7 @@ def get_base_missing_value(cls, dtype): class StataParser(object): - _default_encoding = 'cp1252' + _default_encoding = 'iso-8859-1' def __init__(self, encoding): self._encoding = encoding @@ -823,10 +823,10 @@ class StataReader(StataParser): Path to .dta file or object implementing a binary read() functions encoding : string, None or encoding Encoding used to parse the files. Note that Stata doesn't - support unicode. None defaults to cp1252. + support unicode. None defaults to iso-8859-1. 
""" - def __init__(self, path_or_buf, encoding='cp1252'): + def __init__(self, path_or_buf, encoding='iso-8859-1'): super(StataReader, self).__init__(encoding) self.col_sizes = () self._has_string_data = False @@ -841,7 +841,13 @@ def __init__(self, path_or_buf, encoding='cp1252'): if isinstance(path_or_buf, (str, compat.text_type, bytes)): self.path_or_buf = open(path_or_buf, 'rb') else: - self.path_or_buf = path_or_buf + # Copy to BytesIO, and ensure no encoding + contents = path_or_buf.read() + try: + contents = contents.encode(self._default_encoding) + except: + pass + self.path_or_buf = BytesIO(contents) self._read_header() diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 6a3c16655745e..f0ebebc1f143f 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -889,7 +889,6 @@ def test_categorical_ordering(self): tm.assert_equal(False, parsed_115_unordered[col].cat.ordered) tm.assert_equal(False, parsed_117_unordered[col].cat.ordered) - if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
Fix encoding so that StataReader can correctly read URLs closes #9231
https://api.github.com/repos/pandas-dev/pandas/pulls/9245
2015-01-14T03:43:56Z
2015-01-16T12:48:10Z
2015-01-16T12:48:10Z
2015-01-18T22:42:19Z
API: Add DataFrame.assign method
diff --git a/doc/source/_static/whatsnew_assign.png b/doc/source/_static/whatsnew_assign.png new file mode 100644 index 0000000000000..0e39e161dc606 Binary files /dev/null and b/doc/source/_static/whatsnew_assign.png differ diff --git a/doc/source/basics.rst b/doc/source/basics.rst index dc43c1177f8c3..8e78ac597479b 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -11,6 +11,7 @@ from pandas.compat import lrange options.display.max_rows=15 + ============================== Essential Basic Functionality ============================== @@ -793,6 +794,7 @@ This is equivalent to the following result result.loc[:,:,'ItemA'] + .. _basics.reindexing: diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 44321375d31a2..6eb13ce722fff 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -450,6 +450,82 @@ available to insert at a particular location in the columns: df.insert(1, 'bar', df['one']) df +.. _dsintro.chained_assignment: + +Assigning New Columns in Method Chains +-------------------------------------- + +.. versionadded:: 0.16.0 + +Inspired by `dplyr's +<http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html#mutate>`__ +``mutate`` verb, DataFrame has an :meth:`~pandas.DataFrame.assign` +method that allows you to easily create new columns that are potentially +derived from existing columns. + +.. ipython:: python + + iris = read_csv('data/iris.data') + iris.head() + + (iris.assign(sepal_ratio = iris['SepalWidth'] / iris['SepalLength']) + .head()) + +Above was an example of inserting a precomputed value. We can also pass in +a function of one argument to be evalutated on the DataFrame being assigned to. + +.. ipython:: python + + iris.assign(sepal_ratio = lambda x: (x['SepalWidth'] / + x['SepalLength'])).head() + +``assign`` **always** returns a copy of the data, leaving the original +DataFrame untouched. 
+ +Passing a callable, as opposed to an actual value to be inserted, is +useful when you don't have a reference to the DataFrame at hand. This is +common when using ``assign`` in chains of operations. For example, +we can limit the DataFrame to just those observations with a Sepal Length +greater than 5, calculate the ratio, and plot: + +.. ipython:: python + + @savefig basics_assign.png + (iris.query('SepalLength > 5') + .assign(SepalRatio = lambda x: x.SepalWidth / x.SepalLength, + PetalRatio = lambda x: x.PetalWidth / x.PetalLength) + .plot(kind='scatter', x='SepalRatio', y='PetalRatio')) + +Since a function is passed in, the function is computed on the DataFrame +being assigned to. Importantly, this is the DataFrame that's been filtered +to those rows with sepal length greater than 5. The filtering happens first, +and then the ratio calculations. This is an example where we didn't +have a reference to the *filtered* DataFrame available. + +The function signature for ``assign`` is simply ``**kwargs``. The keys +are the column names for the new fields, and the values are either a value +to be inserted (for example, a ``Series`` or NumPy array), or a function +of one argument to be called on the ``DataFrame``. A *copy* of the original +DataFrame is returned, with the new values inserted. + +.. warning:: + + Since the function signature of ``assign`` is ``**kwargs``, a dictionary, + the order of the new columns in the resulting DataFrame cannot be guaranteed. + + All expressions are computed first, and then assigned. So you can't refer + to another column being assigned in the same call to ``assign``. For example: + + .. 
ipython:: + :verbatim: + + In [1]: # Don't do this, bad reference to `C` + df.assign(C = lambda x: x['A'] + x['B'], + D = lambda x: x['A'] + x['C']) + In [2]: # Instead, break it into two assigns + (df.assign(C = lambda x: x['A'] + x['B']) + .assign(D = lambda x: x['A'] + x['C'])) + Indexing / Selection ~~~~~~~~~~~~~~~~~~~~ The basics of indexing are as follows: diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index ead3c79430bf9..b9c358f24f460 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -29,6 +29,47 @@ New features This method is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods. +- DataFrame assign method + +Inspired by `dplyr's +<http://cran.rstudio.com/web/packages/dplyr/vignettes/introduction.html#mutate>`__ ``mutate`` verb, DataFrame has a new +:meth:`~pandas.DataFrame.assign` method. +The function signature for ``assign`` is simply ``**kwargs``. The keys +are the column names for the new fields, and the values are either a value +to be inserted (for example, a ``Series`` or NumPy array), or a function +of one argument to be called on the ``DataFrame``. The new values are inserted, +and the entire DataFrame (with all original and new columns) is returned. + +.. ipython :: python + + iris = read_csv('data/iris.data') + iris.head() + + iris.assign(sepal_ratio=iris['SepalWidth'] / iris['SepalLength']).head() + +Above was an example of inserting a precomputed value. We can also pass in +a function to be evalutated. + +.. ipython :: python + + iris.assign(sepal_ratio = lambda x: (x['SepalWidth'] / + x['SepalLength'])).head() + +The power of ``assign`` comes when used in chains of operations. For example, +we can limit the DataFrame to just those with a Sepal Length greater than 5, +calculate the ratio, and plot + +.. 
ipython:: python + + (iris.query('SepalLength > 5') + .assign(SepalRatio = lambda x: x.SepalWidth / x.SepalLength, + PetalRatio = lambda x: x.PetalWidth / x.PetalLength) + .plot(kind='scatter', x='SepalRatio', y='PetalRatio')) + +.. image:: _static/whatsnew_assign.png + +See the :ref:`documentation <dsintro.chained_assignment>` for more. (:issue:`9229`) + .. _whatsnew_0160.api: .. _whatsnew_0160.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d64353db8cda6..97e3560e3fcb1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2220,6 +2220,88 @@ def insert(self, loc, column, value, allow_duplicates=False): self._data.insert( loc, column, value, allow_duplicates=allow_duplicates) + def assign(self, **kwargs): + """ + Assign new columns to a DataFrame, returning a new object + (a copy) with all the original columns in addition to the new ones. + + .. versionadded:: 0.16.0 + + Parameters + ---------- + kwargs : keyword, value pairs + keywords are the column names. If the values are + callable, they are computed on the DataFrame and + assigned to the new columns. If the values are + not callable, (e.g. a Series, scalar, or array), + they are simply assigned. + + Returns + ------- + df : DataFrame + A new DataFrame with the new columns in addition to + all the existing columns. + + Notes + ----- + Since ``kwargs`` is a dictionary, the order of your + arguments may not be preserved, and so the order of the + new columns is not well defined. Assigning multiple + columns within the same ``assign`` is possible, but you cannot + reference other columns created within the same ``assign`` call. 
+ + Examples + -------- + >>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) + + Where the value is a callable, evaluated on `df`: + + >>> df.assign(ln_A = lambda x: np.log(x.A)) + A B ln_A + 0 1 0.426905 0.000000 + 1 2 -0.780949 0.693147 + 2 3 -0.418711 1.098612 + 3 4 -0.269708 1.386294 + 4 5 -0.274002 1.609438 + 5 6 -0.500792 1.791759 + 6 7 1.649697 1.945910 + 7 8 -1.495604 2.079442 + 8 9 0.549296 2.197225 + 9 10 -0.758542 2.302585 + + Where the value already exists and is inserted: + + >>> newcol = np.log(df['A']) + >>> df.assign(ln_A=newcol) + A B ln_A + 0 1 0.426905 0.000000 + 1 2 -0.780949 0.693147 + 2 3 -0.418711 1.098612 + 3 4 -0.269708 1.386294 + 4 5 -0.274002 1.609438 + 5 6 -0.500792 1.791759 + 6 7 1.649697 1.945910 + 7 8 -1.495604 2.079442 + 8 9 0.549296 2.197225 + 9 10 -0.758542 2.302585 + """ + data = self.copy() + + # do all calculations first... + results = {} + for k, v in kwargs.items(): + + if callable(v): + results[k] = v(data) + else: + results[k] = v + + # ... 
and then assign + for k, v in results.items(): + data[k] = v + + return data + def _sanitize_column(self, key, value): # Need to make sure new columns (which go into the BlockManager as new # blocks) are always copied diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 9ec890a1d1856..f7c91501b683b 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -13965,6 +13965,60 @@ def test_select_dtypes_bad_arg_raises(self): with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'): df.select_dtypes(['blargy, blarg, blarg']) + def test_assign(self): + df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + original = df.copy() + result = df.assign(C=df.B / df.A) + expected = df.copy() + expected['C'] = [4, 2.5, 2] + assert_frame_equal(result, expected) + + # lambda syntax + result = df.assign(C=lambda x: x.B / x.A) + assert_frame_equal(result, expected) + + # original is unmodified + assert_frame_equal(df, original) + + # Non-Series array-like + result = df.assign(C=[4, 2.5, 2]) + assert_frame_equal(result, expected) + # original is unmodified + assert_frame_equal(df, original) + + result = df.assign(B=df.B / df.A) + expected = expected.drop('B', axis=1).rename(columns={'C': 'B'}) + assert_frame_equal(result, expected) + + # overwrite + result = df.assign(A=df.A + df.B) + expected = df.copy() + expected['A'] = [5, 7, 9] + assert_frame_equal(result, expected) + + # lambda + result = df.assign(A=lambda x: x.A + x.B) + assert_frame_equal(result, expected) + + def test_assign_multiple(self): + df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B) + expected = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9], + 'D': [1, 2, 3], 'E': [4, 5, 6]}) + # column order isn't preserved + assert_frame_equal(result.reindex_like(expected), expected) + + def test_assign_bad(self): + df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + # non-keyword argument + with 
tm.assertRaises(TypeError): + df.assign(lambda x: x.A) + with tm.assertRaises(AttributeError): + df.assign(C=df.A, D=df.A + df.C) + with tm.assertRaises(KeyError): + df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C']) + with tm.assertRaises(KeyError): + df.assign(C=df.A, D=lambda x: x['A'] + x['C']) def skip_if_no_ne(engine='numexpr'): if engine == 'numexpr':
Closes https://github.com/pydata/pandas/issues/9229 signature: `DataFrame.transform(**kwargs)` - the keyword is the name of the new column (existing columns are overwritten if there's a name conflict, as in dplyr) - the value is either - called on self if it's callable. The callable should be a function of 1 argument, the DataFrame being called on. - inserted otherwise ``` python In [7]: df.head() Out[7]: sepal_length sepal_width petal_length petal_width species 0 5.1 3.5 1.4 0.2 setosa 1 4.9 3.0 1.4 0.2 setosa 2 4.7 3.2 1.3 0.2 setosa 3 4.6 3.1 1.5 0.2 setosa 4 5.0 3.6 1.4 0.2 setosa In [8]: (df.query('species == "virginica"') .transform(sepal_ratio=lambda x: x.sepal_length / x.sepal_width) .head()) Out[8]: sepal_length sepal_width petal_length petal_width species \ 100 6.3 3.3 6.0 2.5 virginica 101 5.8 2.7 5.1 1.9 virginica 102 7.1 3.0 5.9 2.1 virginica 103 6.3 2.9 5.6 1.8 virginica 104 6.5 3.0 5.8 2.2 virginica sepal_ratio 100 1.909091 101 2.148148 102 2.366667 103 2.172414 104 2.166667 ``` My question now is - How strict should we be on the shape of the `transformed` DataFrame? Should we do any kind of checking on the index or columns?
https://api.github.com/repos/pandas-dev/pandas/pulls/9239
2015-01-13T13:32:24Z
2015-03-01T14:51:09Z
2015-03-01T14:51:09Z
2017-04-05T02:06:15Z
DOC: Small syntax change to indexing.rst to address GH8686.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index f56e0d22b25a7..5ab72f633f49b 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -71,8 +71,7 @@ Object selection has had a number of user-requested additions in order to support more explicit location based indexing. pandas now supports three types of multi-axis indexing. -- ``.loc`` is strictly label based, will raise ``KeyError`` when the items are - not found, allowed inputs are: +- ``.loc`` is primarily label based, but may also be used with a boolean array. ``.loc`` will raise ``KeyError`` when the items are not found. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the
Brings indexing.rst docs for .loc into line with .iloc. Closes https://github.com/pydata/pandas/issues/8686.
https://api.github.com/repos/pandas-dev/pandas/pulls/9238
2015-01-13T02:54:36Z
2015-01-13T08:09:37Z
2015-01-13T08:09:37Z
2015-01-13T08:09:48Z
BUG: fix DataFrame constructor w named Series
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index fcf18e6c1cf94..6a8add9e375be 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -367,6 +367,13 @@ The behavior of a small sub-set of edge cases for using ``.loc`` have changed (: In [4]: df.loc[2:3] TypeError: Cannot do slice indexing on <class 'pandas.tseries.index.DatetimeIndex'> with <type 'int'> keys +DataFrame Construction Changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0160.api_breaking.construction: + + + .. _whatsnew_0160.deprecations: @@ -535,3 +542,7 @@ Bug Fixes - Fixed bug with reading CSV files from Amazon S3 on python 3 raising a TypeError (:issue:`9452`) - Bug in the Google BigQuery reader where the 'jobComplete' key may be present but False in the query results (:issue:`8728`) + + +- Fixed bug with DataFrame constructor when passed a Series with a +name and the `columns` keyword argument. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fad271dbdb224..5ed03582940a3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -231,7 +231,7 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if columns is None: columns = data_columns mgr = self._init_dict(data, index, columns, dtype=dtype) - elif getattr(data, 'name', None): + elif getattr(data, 'name', None) is not None: mgr = self._init_dict({data.name: data}, index, columns, dtype=dtype) else: @@ -295,16 +295,15 @@ def _init_dict(self, data, index, columns, dtype=None): if columns is not None: columns = _ensure_index(columns) - # prefilter if columns passed - - data = dict((k, v) for k, v in compat.iteritems(data) - if k in columns) - if index is None: index = extract_index(list(data.values())) else: index = _ensure_index(index) + # prefilter if columns passed + data = dict((k, v) for k, v in compat.iteritems(data) + if k in columns) + arrays = [] data_names = [] for k in columns: diff --git a/pandas/tests/test_frame.py 
b/pandas/tests/test_frame.py index e1687fd6a67cf..f4580f37d2ee1 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -3362,6 +3362,18 @@ def test_constructor_Series_named(self): expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1]) assert_frame_equal(df,expected) + def test_constructor_Series_named_different(self): + # 9232 + x = Series([1, 2], name=0) + expected = DataFrame([np.nan, np.nan], columns=[1]) + result = DataFrame(x, columns=[1]) + assert_frame_equal(result, expected) + + x.name = 1 + expected = DataFrame([np.nan, np.nan], columns=[0]) + result = DataFrame(x, columns=[0]) + assert_frame_equal(result, expected) + def test_constructor_Series_differently_indexed(self): # name s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
closes #7893 Closes https://github.com/pydata/pandas/issues/9232 Problem was passing Series w/ a name to DataFrame w/ the `columns` kwarg. Before: ``` python In [55]: x = pd.Series(range(5), name=1) In [56]: y = pd.Series(range(5), name=0) In [57]: pd.DataFrame(x, columns=[0]) Out[57]: Empty DataFrame Columns: [0] Index: [] In [58]: pd.DataFrame(x, columns=[1]) Out[58]: 1 0 0 1 1 2 2 3 3 4 4 In [59]: pd.DataFrame(y, columns=[0]) Out[59]: 0 0 0 1 1 2 2 3 3 4 4 In [60]: pd.DataFrame(y, columns=[1]) Out[60]: 1 0 0 1 1 2 2 3 3 4 4 ``` after ``` python In [1]: x = pd.Series(range(5), name=1) In [2]: y = pd.Series(range(5), name=0) In [4]: pd.DataFrame(x, columns=[0]) Out[4]: 0 0 0 1 1 2 2 3 3 4 4 In [5]: pd.DataFrame(y, columns=[1]) Out[5]: 1 0 0 1 1 2 2 3 3 4 4 ``` There were two intertwined problems 1. we checked `if getattr(data, 'name', None):`, which returned False when data.name was `False`ish (like 0). I now compare it directly against None. 2. If `data` has a name and the columns kwarg is specified, the constructor returned an Empty DataFrame w/ the column specified in columns. Now, we do what's [documented](http://pandas.pydata.org/pandas-docs/version/0.15.2/dsintro.html#from-a-series): > The result will be a DataFrame with the same index as the input Series, and with one column whose name is the original name of the Series (only if no other column name provided).
https://api.github.com/repos/pandas-dev/pandas/pulls/9237
2015-01-13T02:03:58Z
2015-10-19T13:10:55Z
null
2023-05-11T01:12:47Z
DOC: update docstring of DataFrame.append
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ea7896b0352a5..46f284f22c82e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2726,8 +2726,8 @@ def duplicated(self, subset=None, take_last=False): Only consider certain columns for identifying duplicates, by default use all of the columns take_last : boolean, default False - For a set of distinct duplicate rows, flag all but the last row as - duplicated. Default is for all but the first row to be flagged + For a set of distinct duplicate rows, flag all but the last row as + duplicated. Default is for all but the first row to be flagged cols : kwargs only argument of subset [deprecated] Returns @@ -3770,35 +3770,65 @@ def infer(x): def append(self, other, ignore_index=False, verify_integrity=False): """ - Append columns of other to end of this frame's columns and index, - returning a new object. Columns not in this frame are added as new - columns. + Append rows of `other` to the end of this frame, returning a new + object. Columns not in this frame are added as new columns. Parameters ---------- - other : DataFrame or list of Series/dict-like objects + other : DataFrame or Series/dict-like object, or list of these + The data to append. ignore_index : boolean, default False - If True do not use the index labels. Useful for gluing together - record arrays + If True, do not use the index labels. verify_integrity : boolean, default False - If True, raise ValueError on creating index with duplicates + If True, raise ValueError on creating index with duplicates. + + Returns + ------- + appended : DataFrame Notes ----- - If a list of dict is passed and the keys are all contained in the + If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame - will be unchanged + will be unchanged. 
+ + See also + -------- + pandas.concat : General function to concatenate DataFrame, Series + or Panel objects + + Examples + -------- + + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) + >>> df + A B + 0 1 2 + 1 3 4 + >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) + >>> df.append(df2) + A B + 0 1 2 + 1 3 4 + 0 5 6 + 1 7 8 + + With `ignore_index` set to True: + + >>> df.append(df2, ignore_index=True) + A B + 0 1 2 + 1 3 4 + 2 5 6 + 3 7 8 - Returns - ------- - appended : DataFrame """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: - raise TypeError('Can only append a Series if ' - 'ignore_index=True') + raise TypeError('Can only append a Series if ignore_index=True' + ' or if the Series has a name') index = None if other.name is None else [other.name] combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist()
I always thought that `append` was for appending rows, not columns. So, unless I am completely missing something, the docstring was saying the wrong thing. So I reworded it a bit, see below. Apart from that, there were some other things that are not fully clear to me in the docstring: - The docstring says that `other` should be a "list of Series/dict-like objects" (or a Dataframe): - Does it need to be a list? It also works with just a Series. Or what are the differences between both? - With a simple example, I don't see a difference, although in implementation, there is. - The docstring says for the `ignore_index` kwarg: "Useful for gluing together record arrays" - How does this look in practice? If I try to append a record array, I get "TypeError: cannot concatenate a non-NDFrame object" -> so I will remove this
https://api.github.com/repos/pandas-dev/pandas/pulls/9234
2015-01-12T21:11:42Z
2015-01-19T13:58:48Z
2015-01-19T13:58:48Z
2015-01-19T13:58:49Z
COMPAT: Need to read Bytes on Python
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 6e93535451fbc..365b4ab7ac9d9 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -83,6 +83,7 @@ Bug Fixes .. _whatsnew_0160.bug_fixes: +- Fixed issue using `read_csv` on s3 with Python 3. - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - Bug in Panel indexing with an object-like (:issue:`9140`) diff --git a/pandas/io/common.py b/pandas/io/common.py index aafd551d82b05..737a55a6752c1 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -5,7 +5,7 @@ import zipfile from contextlib import contextmanager, closing -from pandas.compat import StringIO, string_types +from pandas.compat import StringIO, string_types, BytesIO from pandas import compat @@ -154,7 +154,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None): b = conn.get_bucket(parsed_url.netloc) k = boto.s3.key.Key(b) k.key = parsed_url.path - filepath_or_buffer = StringIO(k.get_contents_as_string()) + filepath_or_buffer = BytesIO(k.get_contents_as_string()) return filepath_or_buffer, None
Reading from s3 on Python 3 returns a Bytes object.
https://api.github.com/repos/pandas-dev/pandas/pulls/9230
2015-01-12T17:38:48Z
2015-01-12T22:30:32Z
2015-01-12T22:30:32Z
2015-01-12T22:30:38Z
TST: Clean test_format.py
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index 80f1733ab4be5..fef4778d4a5d5 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -20,8 +20,6 @@ import pandas.util.testing as tm import pandas.core.common as com from pandas.util.terminal import get_terminal_size -import pandas -import pandas.tslib as tslib import pandas as pd from pandas.core.config import (set_option, get_option, option_context, reset_option) @@ -110,7 +108,7 @@ def test_eng_float_formatter(self): self.frame.ix[5] = 0 fmt.set_eng_float_format() - result = repr(self.frame) + repr(self.frame) fmt.set_eng_float_format(use_eng_prefix=True) repr(self.frame) @@ -126,7 +124,7 @@ def test_show_null_counts(self): def check(null_counts, result): buf = StringIO() - r = df.info(buf=buf,null_counts=null_counts) + df.info(buf=buf, null_counts=null_counts) self.assertTrue(('non-null' in buf.getvalue()) is result) with option_context('display.max_info_rows',20,'display.max_info_columns',20): @@ -193,10 +191,8 @@ def test_repr_obeys_max_seq_limit(self): self.assertTrue(len(com.pprint_thing(lrange(1000)))< 100) def test_repr_is_valid_construction_code(self): - import pandas as pd - # for the case of Index, where the repr is traditional rather then stylized - idx = pd.Index(['a','b']) + idx = Index(['a','b']) res = eval("pd."+repr(idx)) tm.assert_series_equal(Series(res),Series(idx)) @@ -381,7 +377,7 @@ def test_to_string_utf8_columns(self): n = u("\u05d0").encode('utf-8') with option_context('display.max_rows', 1): - df = pd.DataFrame([1, 2], columns=[n]) + df = DataFrame([1, 2], columns=[n]) repr(df) def test_to_string_unicode_two(self): @@ -471,7 +467,7 @@ def test_to_string_truncate_indices(self): def test_to_string_truncate_multilevel(self): arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] - df = pd.DataFrame(index=arrays,columns=arrays) + df = DataFrame(index=arrays,columns=arrays) 
with option_context("display.max_rows", 7,"display.max_columns", 7): self.assertTrue(has_doubly_truncated_repr(df)) @@ -515,7 +511,7 @@ def test_to_html_escaped(self): b: "<type 'str'>"}, 'co>l2':{a: "<type 'str'>", b: "<type 'str'>"}} - rs = pd.DataFrame(test_dict).to_html() + rs = DataFrame(test_dict).to_html() xp = """<table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> @@ -547,7 +543,7 @@ def test_to_html_escape_disabled(self): b: "<b>bold</b>"}, 'co>l2': {a: "<b>bold</b>", b: "<b>bold</b>"}} - rs = pd.DataFrame(test_dict).to_html(escape=False) + rs = DataFrame(test_dict).to_html(escape=False) xp = """<table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> @@ -573,13 +569,13 @@ def test_to_html_escape_disabled(self): def test_to_html_multiindex_index_false(self): # issue 8452 - df = pd.DataFrame({ + df = DataFrame({ 'a': range(2), 'b': range(3, 5), 'c': range(5, 7), 'd': range(3, 5)} ) - df.columns = pd.MultiIndex.from_product([['a', 'b'], ['c', 'd']]) + df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']]) result = df.to_html(index=False) expected = """\ <table border="1" class="dataframe"> @@ -614,7 +610,7 @@ def test_to_html_multiindex_index_false(self): def test_to_html_multiindex_sparsify_false_multi_sparse(self): with option_context('display.multi_sparse', False): - index = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], + index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', None]) df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index) @@ -721,7 +717,7 @@ def test_to_html_multiindex_sparsify_false_multi_sparse(self): self.assertEqual(result, expected) def test_to_html_multiindex_sparsify(self): - index = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], + index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', None]) df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index) @@ -876,7 +872,7 @@ def test_to_html_regression_GH6098(self): 
def test_to_html_truncate(self): index = pd.DatetimeIndex(start='20010101',freq='D',periods=20) - df = pd.DataFrame(index=index,columns=range(20)) + df = DataFrame(index=index,columns=range(20)) fmt.set_option('display.max_rows',8) fmt.set_option('display.max_columns',4) result = df._repr_html_() @@ -977,7 +973,7 @@ def test_to_html_truncate(self): def test_to_html_truncate_multi_index(self): arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] - df = pd.DataFrame(index=arrays,columns=arrays) + df = DataFrame(index=arrays,columns=arrays) fmt.set_option('display.max_rows',7) fmt.set_option('display.max_columns',7) result = df._repr_html_() @@ -1093,7 +1089,7 @@ def test_to_html_truncate_multi_index(self): def test_to_html_truncate_multi_index_sparse_off(self): arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] - df = pd.DataFrame(index=arrays,columns=arrays) + df = DataFrame(index=arrays,columns=arrays) fmt.set_option('display.max_rows',7) fmt.set_option('display.max_columns',7) fmt.set_option('display.multi_sparse',False) @@ -1214,14 +1210,14 @@ def test_unicode_problem_decoding_as_ascii(self): def test_string_repr_encoding(self): filepath = tm.get_data_path('unicode_series.csv') - df = pandas.read_csv(filepath, header=None, encoding='latin1') + df = pd.read_csv(filepath, header=None, encoding='latin1') repr(df) repr(df[1]) def test_repr_corner(self): # representing infs poses no problems df = DataFrame({'foo': np.inf * np.empty(10)}) - foo = repr(df) + repr(df) def test_frame_info_encoding(self): index = ['\'Til There Was You (1997)', @@ -1309,8 +1305,7 @@ def test_wide_repr_named(self): def test_wide_repr_multiindex(self): with option_context('mode.sim_interactive', True): - midx = pandas.MultiIndex.from_arrays( - tm.rands_array(5, size=(2, 10))) + midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10))) 
max_cols = get_option('display.max_columns') df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx) @@ -1333,9 +1328,9 @@ def test_wide_repr_multiindex(self): def test_wide_repr_multiindex_cols(self): with option_context('mode.sim_interactive', True): max_cols = get_option('display.max_columns') - midx = pandas.MultiIndex.from_arrays( + midx = MultiIndex.from_arrays( tm.rands_array(5, size=(2, 10))) - mcols = pandas.MultiIndex.from_arrays( + mcols = MultiIndex.from_arrays( tm.rands_array(3, size=(2, max_cols - 1))) df = DataFrame(tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols) @@ -1491,7 +1486,7 @@ def test_to_string_no_header(self): df_s = df.to_string(header=False) expected = "0 1 4\n1 2 5\n2 3 6" - assert(df_s == expected) + self.assertEqual(df_s, expected) def test_to_string_no_index(self): df = DataFrame({'x': [1, 2, 3], @@ -1500,7 +1495,7 @@ def test_to_string_no_index(self): df_s = df.to_string(index=False) expected = " x y\n 1 4\n 2 5\n 3 6" - assert(df_s == expected) + self.assertEqual(df_s, expected) def test_to_string_float_formatting(self): self.reset_display_options() @@ -1524,7 +1519,7 @@ def test_to_string_float_formatting(self): '2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n' '5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n' '8 -1.00000e+06') - assert(df_s == expected) + self.assertEqual(df_s, expected) df = DataFrame({'x': [3234, 0.253]}) df_s = df.to_string() @@ -1532,7 +1527,7 @@ def test_to_string_float_formatting(self): expected = (' x\n' '0 3234.000\n' '1 0.253') - assert(df_s == expected) + self.assertEqual(df_s, expected) self.reset_display_options() self.assertEqual(get_option("display.precision"), 7) @@ -1549,7 +1544,7 @@ def test_to_string_float_formatting(self): expected = (' x\n' '0 1.000000e+09\n' '1 2.512000e-01') - assert(df_s == expected) + self.assertEqual(df_s, expected) def test_to_string_small_float_values(self): df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]}) @@ -1632,7 +1627,7 @@ def 
test_to_string_left_justify_cols(self): expected = (' x \n' '0 3234.000\n' '1 0.253') - assert(df_s == expected) + self.assertEqual(df_s, expected) def test_to_string_format_na(self): self.reset_display_options() @@ -1661,12 +1656,12 @@ def test_to_string_format_na(self): self.assertEqual(result, expected) def test_to_string_line_width(self): - df = pd.DataFrame(123, lrange(10, 15), lrange(30)) + df = DataFrame(123, lrange(10, 15), lrange(30)) s = df.to_string(line_width=80) self.assertEqual(max(len(l) for l in s.split('\n')), 80) def test_show_dimensions(self): - df = pd.DataFrame(123, lrange(10, 15), lrange(30)) + df = DataFrame(123, lrange(10, 15), lrange(30)) with option_context('display.max_rows', 10, 'display.max_columns', 40, 'display.width', 500, 'display.expand_frame_repr', 'info', 'display.show_dimensions', True): @@ -1736,17 +1731,17 @@ def test_to_html_filename(self): def test_to_html_with_no_bold(self): x = DataFrame({'x': randn(5)}) ashtml = x.to_html(bold_rows=False) - assert('<strong>' not in ashtml[ashtml.find('</thead>')]) + self.assertFalse('<strong' in ashtml[ashtml.find("</thead>")]) def test_to_html_columns_arg(self): result = self.frame.to_html(columns=['A']) self.assertNotIn('<th>B</th>', result) def test_to_html_multiindex(self): - columns = pandas.MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2), - np.mod(lrange(4), 2))), - names=['CL0', 'CL1']) - df = pandas.DataFrame([list('abcd'), list('efgh')], columns=columns) + columns = MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2), + np.mod(lrange(4), 2))), + names=['CL0', 'CL1']) + df = DataFrame([list('abcd'), list('efgh')], columns=columns) result = df.to_html(justify='left') expected = ('<table border="1" class="dataframe">\n' ' <thead>\n' @@ -1783,9 +1778,9 @@ def test_to_html_multiindex(self): self.assertEqual(result, expected) - columns = pandas.MultiIndex.from_tuples(list(zip(range(4), + columns = MultiIndex.from_tuples(list(zip(range(4), np.mod(lrange(4), 2)))) - df = 
pandas.DataFrame([list('abcd'), list('efgh')], columns=columns) + df = DataFrame([list('abcd'), list('efgh')], columns=columns) result = df.to_html(justify='right') expected = ('<table border="1" class="dataframe">\n' @@ -1826,10 +1821,10 @@ def test_to_html_multiindex(self): self.assertEqual(result, expected) def test_to_html_justify(self): - df = pandas.DataFrame({'A': [6, 30000, 2], - 'B': [1, 2, 70000], - 'C': [223442, 0, 1]}, - columns=['A', 'B', 'C']) + df = DataFrame({'A': [6, 30000, 2], + 'B': [1, 2, 70000], + 'C': [223442, 0, 1]}, + columns=['A', 'B', 'C']) result = df.to_html(justify='left') expected = ('<table border="1" class="dataframe">\n' ' <thead>\n' @@ -1899,17 +1894,17 @@ def test_to_html_justify(self): def test_to_html_index(self): index = ['foo', 'bar', 'baz'] - df = pandas.DataFrame({'A': [1, 2, 3], - 'B': [1.2, 3.4, 5.6], - 'C': ['one', 'two', np.NaN]}, - columns=['A', 'B', 'C'], - index=index) + df = DataFrame({'A': [1, 2, 3], + 'B': [1.2, 3.4, 5.6], + 'C': ['one', 'two', np.NaN]}, + columns=['A', 'B', 'C'], + index=index) result = df.to_html(index=False) for i in index: self.assertNotIn(i, result) tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')] - df.index = pandas.MultiIndex.from_tuples(tuples) + df.index = MultiIndex.from_tuples(tuples) result = df.to_html(index=False) for i in ['foo', 'bar', 'car', 'bike']: self.assertNotIn(i, result) @@ -1946,17 +1941,17 @@ def test_repr_html_wide(self): def test_repr_html_wide_multiindex_cols(self): max_cols = get_option('display.max_columns') - mcols = pandas.MultiIndex.from_product([np.arange(max_cols//2), - ['foo', 'bar']], - names=['first', 'second']) + mcols = MultiIndex.from_product([np.arange(max_cols//2), + ['foo', 'bar']], + names=['first', 'second']) df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols) reg_repr = df._repr_html_() assert '...' 
not in reg_repr - mcols = pandas.MultiIndex.from_product((np.arange(1+(max_cols//2)), - ['foo', 'bar']), - names=['first', 'second']) + mcols = MultiIndex.from_product((np.arange(1+(max_cols//2)), + ['foo', 'bar']), + names=['first', 'second']) df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols) wide_repr = df._repr_html_() @@ -1965,13 +1960,13 @@ def test_repr_html_wide_multiindex_cols(self): def test_repr_html_long(self): max_rows = get_option('display.max_rows') h = max_rows - 1 - df = pandas.DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)}) + df = DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)}) reg_repr = df._repr_html_() assert '..' not in reg_repr assert str(41 + max_rows // 2) in reg_repr h = max_rows + 1 - df = pandas.DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)}) + df = DataFrame({'A':np.arange(1,1+h), 'B':np.arange(41, 41+h)}) long_repr = df._repr_html_() assert '..' in long_repr assert str(41 + max_rows // 2) not in long_repr @@ -1981,13 +1976,13 @@ def test_repr_html_long(self): def test_repr_html_float(self): max_rows = get_option('display.max_rows') h = max_rows - 1 - df = pandas.DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx') + df = DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx') reg_repr = df._repr_html_() assert '..' not in reg_repr assert str(40 + h) in reg_repr h = max_rows + 1 - df = pandas.DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx') + df = DataFrame({'idx':np.linspace(-10,10,h), 'A':np.arange(1,1+h), 'B': np.arange(41, 41+h) }).set_index('idx') long_repr = df._repr_html_() assert '..' 
in long_repr assert '31' not in long_repr @@ -1999,14 +1994,14 @@ def test_repr_html_long_multiindex(self): max_L1 = max_rows//2 tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar'])) - idx = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second']) + idx = MultiIndex.from_tuples(tuples, names=['first', 'second']) df = DataFrame(np.random.randn(max_L1*2, 2), index=idx, columns=['A', 'B']) reg_repr = df._repr_html_() assert '...' not in reg_repr tuples = list(itertools.product(np.arange(max_L1+1), ['foo', 'bar'])) - idx = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second']) + idx = MultiIndex.from_tuples(tuples, names=['first', 'second']) df = DataFrame(np.random.randn((max_L1+1)*2, 2), index=idx, columns=['A', 'B']) long_repr = df._repr_html_() @@ -2017,11 +2012,11 @@ def test_repr_html_long_and_wide(self): max_rows = get_option('display.max_rows') h, w = max_rows-1, max_cols-1 - df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) + df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) assert '...' not in df._repr_html_() h, w = max_rows+1, max_cols+1 - df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) + df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) assert '...' 
in df._repr_html_() def test_info_repr(self): @@ -2029,14 +2024,14 @@ def test_info_repr(self): max_cols = get_option('display.max_columns') # Long h, w = max_rows+1, max_cols-1 - df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) + df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) assert has_vertically_truncated_repr(df) with option_context('display.large_repr', 'info'): assert has_info_repr(df) # Wide h, w = max_rows-1, max_cols+1 - df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) + df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) assert has_horizontally_truncated_repr(df) with option_context('display.large_repr', 'info'): assert has_info_repr(df) @@ -2062,14 +2057,14 @@ def test_info_repr_html(self): max_cols = get_option('display.max_columns') # Long h, w = max_rows+1, max_cols-1 - df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) + df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) assert r'&lt;class' not in df._repr_html_() with option_context('display.large_repr', 'info'): assert r'&lt;class' in df._repr_html_() # Wide h, w = max_rows-1, max_cols+1 - df = pandas.DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) + df = DataFrame(dict((k,np.arange(1,1+h)) for k in np.arange(w))) assert '<class' not in df._repr_html_() with option_context('display.large_repr', 'info'): assert '&lt;class' in df._repr_html_() @@ -2090,7 +2085,7 @@ def get_ipython(): self.reset_display_options() def test_to_html_with_classes(self): - df = pandas.DataFrame() + df = DataFrame() result = df.to_html(classes="sortable draggable") expected = dedent(""" @@ -2118,7 +2113,7 @@ def test_pprint_pathological_object(self): class A: def __getitem__(self, key): return 3 # obviously simplified - df = pandas.DataFrame([A()]) + df = DataFrame([A()]) repr(df) # just don't dine def test_float_trim_zeros(self): @@ -2212,8 +2207,8 @@ def test_to_latex_escape(self): u('co$e^x$'): {a: 
"a", b: "b"}} - unescaped_result = pd.DataFrame(test_dict).to_latex(escape=False) - escaped_result = pd.DataFrame(test_dict).to_latex() # default: escape=True + unescaped_result = DataFrame(test_dict).to_latex(escape=False) + escaped_result = DataFrame(test_dict).to_latex() # default: escape=True unescaped_expected = r'''\begin{tabular}{lll} \toprule @@ -2971,19 +2966,19 @@ def test_zero(self): class TestDatetime64Formatter(tm.TestCase): def test_mixed(self): - x = pd.Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT]) + x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT]) result = fmt.Datetime64Formatter(x).get_result() self.assertEqual(result[0].strip(), "2013-01-01 00:00:00") self.assertEqual(result[1].strip(), "2013-01-01 12:00:00") def test_dates(self): - x = pd.Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT]) + x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT]) result = fmt.Datetime64Formatter(x).get_result() self.assertEqual(result[0].strip(), "2013-01-01") self.assertEqual(result[1].strip(), "2013-01-02") def test_date_nanos(self): - x = pd.Series([Timestamp(200)]) + x = Series([Timestamp(200)]) result = fmt.Datetime64Formatter(x).get_result() self.assertEqual(result[0].strip(), "1970-01-01 00:00:00.000000200")
Mostly just stylistic changes. The only substantive one is the test now on line 56 (`has_horizontally_truncated_repr`). I can't figure what the test was supposed to be doing. Suggestions? Original Test: ``` python def has_horizontally_truncated_repr(df): try: # Check header row fst_line = np.array(repr(df).splitlines()[0].split()) cand_col = np.where(fst_line=='...')[0][0] except: return False # Make sure each row has this ... in the same place r = repr(df) for ix,l in enumerate(r.splitlines()): if not r.split()[cand_col] == '...': return False return True ``` New Test: ``` python def has_horizontally_truncated_repr(df): try: # Check header row fst_line = np.array(repr(df).splitlines()[0].split()) cand_col = np.where(fst_line=='...')[0][0] except: return False # Make sure each row has this ... in the same place return repr(df).split()[cand_col] == "..." ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9227
2015-01-12T03:39:43Z
2015-03-05T23:35:47Z
2015-03-05T23:35:47Z
2015-03-05T23:35:52Z
ENH: add to_offset method to Timedelta #9064
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 2db455272363b..f12e5f1f9970e 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -99,6 +99,7 @@ Enhancements - Added time interval selection in get_data_yahoo (:issue:`9071`) - Added ``Series.str.slice_replace()``, which previously raised NotImplementedError (:issue:`8888`) - Added ``Timestamp.to_datetime64()`` to complement ``Timedelta.to_timedelta64()`` (:issue:`9255`) +- ``tseries.frequencies.to_offset()`` now accepts ``Timedelta`` as input (:issue:`9064`) Performance diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 54b29b1641309..0ec225d77f5e2 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime,timedelta from pandas.compat import range, long, zip from pandas import compat import re @@ -12,6 +12,7 @@ import pandas.core.common as com import pandas.lib as lib import pandas.tslib as tslib +from pandas.tslib import Timedelta class FreqGroup(object): FR_ANN = 1000 @@ -276,9 +277,18 @@ def get_period_alias(offset_str): _legacy_reverse_map = dict((v, k) for k, v in reversed(sorted(compat.iteritems(_rule_aliases)))) +_name_to_offset_map = {'days': Day(1), + 'hours': Hour(1), + 'minutes': Minute(1), + 'seconds': Second(1), + 'milliseconds': Milli(1), + 'microseconds': Micro(1), + 'nanoseconds': Nano(1)} + def to_offset(freqstr): """ - Return DateOffset object from string representation + Return DateOffset object from string representation or + Timedelta object Examples -------- @@ -298,6 +308,23 @@ def to_offset(freqstr): name, stride = stride, name name, _ = _base_and_stride(name) delta = get_offset(name) * stride + + elif isinstance(freqstr, timedelta): + delta = None + freqstr = Timedelta(freqstr) + try: + for name in freqstr.components._fields: + offset = _name_to_offset_map[name] + stride = 
getattr(freqstr.components, name) + if stride != 0: + offset = stride * offset + if delta is None: + delta = offset + else: + delta = delta + offset + except Exception: + raise ValueError("Could not evaluate %s" % freqstr) + else: delta = None stride_sign = None diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py index b84cdefe7009f..965c198eb7c95 100644 --- a/pandas/tseries/tests/test_frequencies.py +++ b/pandas/tseries/tests/test_frequencies.py @@ -17,6 +17,7 @@ import pandas.compat as compat import pandas.util.testing as tm +from pandas import Timedelta def test_to_offset_multiple(): freqstr = '2h30min' @@ -81,6 +82,47 @@ def test_to_offset_leading_zero(): assert(result.n == -194) +def test_to_offset_pd_timedelta(): + # Tests for #9064 + td = Timedelta(days=1, seconds=1) + result = frequencies.to_offset(td) + expected = offsets.Second(86401) + assert(expected==result) + + td = Timedelta(days=-1, seconds=1) + result = frequencies.to_offset(td) + expected = offsets.Second(-86399) + assert(expected==result) + + td = Timedelta(hours=1, minutes=10) + result = frequencies.to_offset(td) + expected = offsets.Minute(70) + assert(expected==result) + + td = Timedelta(hours=1, minutes=-10) + result = frequencies.to_offset(td) + expected = offsets.Minute(50) + assert(expected==result) + + td = Timedelta(weeks=1) + result = frequencies.to_offset(td) + expected = offsets.Day(7) + assert(expected==result) + + td1 = Timedelta(hours=1) + result1 = frequencies.to_offset(td1) + result2 = frequencies.to_offset('60min') + assert(result1 == result2) + + td = Timedelta(microseconds=1) + result = frequencies.to_offset(td) + expected = offsets.Micro(1) + assert(expected == result) + + td = Timedelta(microseconds=0) + tm.assertRaises(ValueError, lambda: frequencies.to_offset(td)) + + def test_anchored_shortcuts(): result = frequencies.to_offset('W') expected = frequencies.to_offset('W-SUN')
Closes #9064
https://api.github.com/repos/pandas-dev/pandas/pulls/9226
2015-01-12T03:39:28Z
2015-01-18T20:53:40Z
2015-01-18T20:53:40Z
2015-01-19T02:18:33Z
Fix randomly failing test in test_frame.py
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index fcbfb21bd20e3..777fc6d68f3cd 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -1938,7 +1938,11 @@ def verify(df, level, idx, indexer): 'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 + ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 + ['3rd'] * 3 + ['2nd'] * 2, - 'jolie':np.random.randint(0, 1000, 20), + # this needs to be jointly unique with jim and joe or + # reindexing will fail ~1.5% of the time, this works + # out to needing unique groups of same size as joe + 'jolie': np.concatenate([np.random.choice(1000, x, replace=False) + for x in [2, 3, 3, 2, 3, 2, 3, 2]]), 'joline': np.random.randn(20).round(3) * 10}) for idx in permutations(df['jim'].unique()):
@jreback I noticed a randomly failing test while doing a full suite run a week or two ago. The test is using random integers as part of a `MultiIndex`, which randomly leads to a non-unique index. Attempting to re-index the `DataFrame` therefore randomly fails. The solution is to just sample without replacement. The following code gives a failure rate of ~1.5%, which is in line with birthday problem estimates (`d=1000` and four iterations each of `n=3` and `n=2`): ``` import subprocess total = 0 n = 10 for i in range(n): result = subprocess.call('nosetests pandas/tests/test_frame.py:TestDataFrame.test_reindex_level', shell=True) if result: print i total += 1 print 'Total error rate of {0:.6f}'.format(total/float(n)) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9225
2015-01-11T09:44:43Z
2015-01-15T03:25:17Z
2015-01-15T03:25:17Z
2015-01-15T03:25:21Z
BUG: Bug in using grouper functions that need passed thru arguments (GH9221)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index e58bb6f703b3d..e202bea960e12 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -142,7 +142,7 @@ Bug Fixes - +- Bug in using grouper functions that need passed thru arguments (e.g. axis), when using wrapped function (e.g. ``fillna``), (:issue:`9221`) - DataFrame now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index dd48a470e7dd4..7fa64e0b4ca91 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2252,7 +2252,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False, #---------------------------------------------------------------------- # Filling NA's - def fillna(self, value=None, method=None, axis=0, inplace=False, + def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None): """ Fill NA/NaN values using the specified method @@ -2295,6 +2295,10 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, 'you passed a "{0}"'.format(type(value).__name__)) self._consolidate_inplace() + # set the default here, so functions examining the signaure + # can detect if something was set (e.g. 
in groupby) (GH9221) + if axis is None: + axis = 0 axis = self._get_axis_number(axis) method = com._clean_fill_method(method) @@ -2383,12 +2387,12 @@ def fillna(self, value=None, method=None, axis=0, inplace=False, else: return self._constructor(new_data).__finalize__(self) - def ffill(self, axis=0, inplace=False, limit=None, downcast=None): + def ffill(self, axis=None, inplace=False, limit=None, downcast=None): "Synonym for NDFrame.fillna(method='ffill')" return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) - def bfill(self, axis=0, inplace=False, limit=None, downcast=None): + def bfill(self, axis=None, inplace=False, limit=None, downcast=None): "Synonym for NDFrame.fillna(method='bfill')" return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit, downcast=downcast) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 039bb2cd599e5..4077f468d8b1f 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -4322,6 +4322,21 @@ def test_filter_non_bool_raises(self): with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'): df.groupby('a').filter(lambda g: g.c.mean()) + def test_fill_constistency(self): + + # GH9221 + # pass thru keyword arguments to the generated wrapper + # are set if the passed kw is None (only) + df = DataFrame(index=pd.MultiIndex.from_product([['value1','value2'], + date_range('2014-01-01','2014-01-06')]), + columns=Index(['1','2'], name='id')) + df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan, np.nan, 22, np.nan] + df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan, np.nan, 44, np.nan] + + expected = df.groupby(level=0, axis=0).fillna(method='ffill') + result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T + assert_frame_equal(result, expected) + def test_index_label_overlaps_location(self): # checking we don't have any label/location confusion in the # the wake of GH5375
closes #9221
https://api.github.com/repos/pandas-dev/pandas/pulls/9222
2015-01-10T15:48:22Z
2015-01-10T17:23:12Z
2015-01-10T17:23:12Z
2015-01-10T17:23:12Z
BUG FIX: wide_to_long modifies stubnames
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 6e93535451fbc..df01b30f941c6 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -154,4 +154,6 @@ Bug Fixes - Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' format to cells if no other format wass applied. This prevented other row or column formatting being applied. (:issue:`9167`) -- Fixes issue with ``index_col=False`` when ``usecols`` is also specified in ``read_csv``. (:issue:`9082`) + +- Bug in ``wide_to_long``, modifies stubnames list (:issue:`9204`) +- Fixes issue with ``index_col=False`` when ``usecols`` is also specified in ``read_csv``. (:issue:`9082`) \ No newline at end of file diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 19208506fdc72..659d944a5e784 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -930,10 +930,9 @@ def melt_stub(df, stub, i, j): if i not in id_vars: id_vars += [i] - stub = stubnames.pop(0) - newdf = melt_stub(df, stub, id_vars, j) + newdf = melt_stub(df, stubnames[0], id_vars, j) - for stub in stubnames: + for stub in stubnames[1:]: new = melt_stub(df, stub, id_vars, j) newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False) return newdf.set_index([i, j]) diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index f89d04f6fb2df..f249831465e20 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -443,6 +443,14 @@ def test_simple(self): long_frame = wide_to_long(df, ["A", "B"], i="id", j="year") tm.assert_frame_equal(long_frame, exp_frame) + # Issue GH9204 + def test_stubs(self): + df = pd.DataFrame([[0,1,2,3,8],[4,5,6,7,9]]) + df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2'] + stubs = ['inc', 'edu'] + df_long = pd.wide_to_long(df, stubs, i='id', j='age') + + assert stubs == ['inc', 'edu'] if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
Simple fix that prevents modification of the list `stubnames`, which is an argument to `wide_to_long`. Closes #9204
https://api.github.com/repos/pandas-dev/pandas/pulls/9215
2015-01-09T00:25:22Z
2015-01-18T20:59:05Z
null
2015-01-18T20:59:05Z
BUG: bug in left join on multi-index with sort=True or nulls
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 839a055bf2a63..e878851233be1 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -99,6 +99,7 @@ Bug Fixes - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`, :issue:`5873`) - Bug in ``pivot`` and `unstack`` where ``nan`` values would break index alignment (:issue:`7466`) +- Bug in left ``join`` on multi-index with ``sort=True`` or null values (:issue:`9210`). diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 56eb8c68ad275..27e4845e3faee 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -9,7 +9,6 @@ from pandas.core.categorical import Categorical from pandas.core.frame import DataFrame, _merge_doc from pandas.core.generic import NDFrame -from pandas.core.groupby import get_group_index from pandas.core.series import Series from pandas.core.index import (Index, MultiIndex, _get_combined_index, _ensure_index, _get_consensus_names, @@ -525,27 +524,39 @@ def get_result(self): return result -def _get_multiindex_indexer(join_keys, index, sort=False): - shape = [] - labels = [] - for level, key in zip(index.levels, join_keys): - llab, rlab, count = _factorize_keys(level, key, sort=False) - labels.append(rlab) - shape.append(count) +def _get_multiindex_indexer(join_keys, index, sort): + from functools import partial - left_group_key = get_group_index(labels, shape) - right_group_key = get_group_index(index.labels, shape) + # bind `sort` argument + fkeys = partial(_factorize_keys, sort=sort) - left_group_key, right_group_key, max_groups = \ - _factorize_keys(left_group_key, right_group_key, - sort=False) + # left & right join labels and num. 
of levels at each location + rlab, llab, shape = map(list, zip( * map(fkeys, index.levels, join_keys))) + if sort: + rlab = list(map(np.take, rlab, index.labels)) + else: + i8copy = lambda a: a.astype('i8', subok=False, copy=True) + rlab = list(map(i8copy, index.labels)) - left_indexer, right_indexer = \ - algos.left_outer_join(com._ensure_int64(left_group_key), - com._ensure_int64(right_group_key), - max_groups, sort=False) + # fix right labels if there were any nulls + for i in range(len(join_keys)): + mask = index.labels[i] == -1 + if mask.any(): + # check if there already was any nulls at this location + # if there was, it is factorized to `shape[i] - 1` + a = join_keys[i][llab[i] == shape[i] - 1] + if a.size == 0 or not a[0] != a[0]: + shape[i] += 1 - return left_indexer, right_indexer + rlab[i][mask] = shape[i] - 1 + + # get flat i8 join keys + lkey, rkey = _get_join_keys(llab, rlab, shape, sort) + + # factorize keys to a dense i8 space + lkey, rkey, count = fkeys(lkey, rkey) + + return algos.left_outer_join(lkey, rkey, count, sort=sort) def _get_single_indexer(join_key, index, sort=False): diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index 58d14154f0190..cf5cc4661ec52 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -901,14 +901,78 @@ def test_merge_on_multikey(self): # TODO: columns aren't in the same order yet assert_frame_equal(joined, expected.ix[:, joined.columns]) + left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True) + right = expected.ix[:, joined.columns].sort(['key1', 'key2'], + kind='mergesort') + assert_frame_equal(left, right) + + def test_left_join_multi_index(self): + icols = ['1st', '2nd', '3rd'] + + def bind_cols(df): + iord = lambda a: 0 if a != a else ord(a) + f = lambda ts: ts.map(iord) - ord('a') + return f(df['1st']) + f(df['3rd'])* 1e2 + df['2nd'].fillna(0) * 1e4 + + def run_asserts(left, right): + for sort in [False, True]: + res = 
left.join(right, on=icols, how='left', sort=sort) + + self.assertTrue(len(left) < len(res) + 1) + self.assertFalse(res['4th'].isnull().any()) + self.assertFalse(res['5th'].isnull().any()) + + tm.assert_series_equal(res['4th'], - res['5th']) + tm.assert_series_equal(res['4th'], bind_cols(res.iloc[:, :-2])) + + if sort: + tm.assert_frame_equal(res, + res.sort(icols, kind='mergesort')) + + out = merge(left, right.reset_index(), on=icols, + sort=sort, how='left') + + res.index = np.arange(len(res)) + tm.assert_frame_equal(out, res) + + lc = list(map(chr, np.arange(ord('a'), ord('z') + 1))) + left = DataFrame(np.random.choice(lc, (5000, 2)), + columns=['1st', '3rd']) + left.insert(1, '2nd', np.random.randint(0, 1000, len(left))) + + i = np.random.permutation(len(left)) + right = left.iloc[i].copy() + + left['4th'] = bind_cols(left) + right['5th'] = - bind_cols(right) + right.set_index(icols, inplace=True) + + run_asserts(left, right) + + # inject some nulls + left.loc[1::23, '1st'] = np.nan + left.loc[2::37, '2nd'] = np.nan + left.loc[3::43, '3rd'] = np.nan + left['4th'] = bind_cols(left) + + i = np.random.permutation(len(left)) + right = left.iloc[i, :-1] + right['5th'] = - bind_cols(right) + right.set_index(icols, inplace=True) + + run_asserts(left, right) + def test_merge_right_vs_left(self): # compare left vs right merge with multikey - merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'], - right_index=True, how='left') - merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'], - left_index=True, how='right') - merged2 = merged2.ix[:, merged1.columns] - assert_frame_equal(merged1, merged2) + for sort in [False, True]: + merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'], + right_index=True, how='left', sort=sort) + + merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'], + left_index=True, how='right', sort=sort) + + merged2 = merged2.ix[:, merged1.columns] + assert_frame_equal(merged1, merged2) def 
test_compress_group_combinations(self): @@ -943,6 +1007,8 @@ def test_left_join_index_preserve_order(self): expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7 tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result.sort(['k1', 'k2'], kind='mergesort'), + left.join(right, on=['k1', 'k2'], sort=True)) # test join with multi dtypes blocks left = DataFrame({'k1': [0, 1, 2] * 8, @@ -961,6 +1027,8 @@ def test_left_join_index_preserve_order(self): expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'),'v2'] = 7 tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result.sort(['k1', 'k2'], kind='mergesort'), + left.join(right, on=['k1', 'k2'], sort=True)) # do a right join for an extra test joined = merge(right, left, left_index=True, @@ -1022,6 +1090,12 @@ def test_left_join_index_multi_match_multiindex(self): tm.assert_frame_equal(result, expected) + result = left.join(right, on=['cola', 'colb', 'colc'], + how='left', sort=True) + + tm.assert_frame_equal(result, + expected.sort(['cola', 'colb', 'colc'], kind='mergesort')) + # GH7331 - maintain left frame order in left merge right.reset_index(inplace=True) right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist() @@ -1066,6 +1140,9 @@ def test_left_join_index_multi_match(self): tm.assert_frame_equal(result, expected) + result = left.join(right, on='tag', how='left', sort=True) + tm.assert_frame_equal(result, expected.sort('tag', kind='mergesort')) + # GH7331 - maintain left frame order in left merge result = merge(left, right.reset_index(), how='left', on='tag') expected.index = np.arange(len(expected)) @@ -1094,6 +1171,10 @@ def _test(dtype1,dtype2): tm.assert_frame_equal(result, expected) + result = left.join(right, on=['k1', 'k2'], sort=True) + expected.sort(['k1', 'k2'], kind='mergesort', inplace=True) + tm.assert_frame_equal(result, expected) + for d1 in [np.int64,np.int32,np.int16,np.int8,np.uint8]: for d2 in [np.int64,np.float64,np.float32,np.float16]: 
_test(np.dtype(d1),np.dtype(d2))
on master: ``` In [8]: left Out[8]: 1st 2nd 3rd 0 c c 13 1 b b 79 2 a a 27 3 b b 27 4 c a 86 In [9]: right Out[9]: 4th 1st 2nd c a -86 b b -79 c c -13 b b -27 a a -27 ``` `sort=True` is ignored, and the result is not sorted by the join key: ``` In [10]: left.join(right, on=['1st', '2nd'], how='left', sort=True) Out[10]: 1st 2nd 3rd 4th 0 c c 13 -13 1 b b 79 -79 1 b b 79 -27 2 a a 27 -27 3 b b 27 -79 3 b b 27 -27 4 c a 86 -86 ``` in addition: ``` In [44]: left Out[44]: 1st 2nd 3rd 0 NaN a 14 1 a NaN 10 2 a b 19 3 NaN NaN 62 4 a c 90 In [45]: right Out[45]: 4th 1st 2nd NaN a -14 a c -90 NaN -10 b -19 NaN NaN -62 ``` this works: ``` In [46]: merge(left, right.reset_index(), on=['1st', '2nd'], how='left') Out[46]: 1st 2nd 3rd 4th 0 NaN a 14 -14 1 a NaN 10 -10 2 a b 19 -19 3 NaN NaN 62 -62 4 a c 90 -90 ``` but this does not: ``` In [47]: left.join(right, on=['1st', '2nd'], how='left') Out[47]: 1st 2nd 3rd 4th 0 NaN a 14 NaN 1 a NaN 10 NaN 2 a b 19 -19 3 NaN NaN 62 NaN 4 a c 90 -90 ``` also, [`get_group_index`](https://github.com/pydata/pandas/blob/b62754d4de0b60fdbfd67e0d0216ad7cd51d3c5f/pandas/core/groupby.py#L3493) called in [these lines](https://github.com/pydata/pandas/blob/b62754d4de0b60fdbfd67e0d0216ad7cd51d3c5f/pandas/tools/merge.py#L536) is subject to overflow, and should be avoided. 
`r 'join|merge'` benchmarks: ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- join_dataframe_index_multi | 35.4387 | 36.5883 | 0.9686 | join_dataframe_index_single_key_bigger_sort | 24.7660 | 24.8604 | 0.9962 | strings_join_split | 57.6473 | 57.6183 | 1.0005 | join_dataframe_index_single_key_small | 16.6840 | 16.6461 | 1.0023 | merge_2intkey_sort | 61.2427 | 60.5460 | 1.0115 | join_non_unique_equal | 0.9513 | 0.9391 | 1.0130 | left_outer_join_index | 2887.7623 | 2839.2557 | 1.0171 | i8merge | 1534.6023 | 1506.8540 | 1.0184 | join_dataframe_index_single_key_bigger | 25.3410 | 24.8287 | 1.0206 | merge_2intkey_nosort | 21.6643 | 21.2137 | 1.0212 | join_dataframe_integer_key | 3.0307 | 2.9414 | 1.0304 | join_dataframe_integer_2key | 7.7363 | 7.4220 | 1.0423 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [f02ef89] : bug in left join on multi-index with sort=True or nulls Base [b62754d] : Merge pull request #9206 from robertdavidwest/9203_resubmitted_in_single_commit 9203 SQUASHED - DOCS: doc string edited pandas/core/frame.duplicated() ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9210
2015-01-08T01:36:53Z
2015-01-10T18:07:10Z
2015-01-10T18:07:10Z
2015-01-10T20:17:03Z
added mising numeric_only option for DataFrame.std/var/sem
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index de15fa29de8dd..48a1924d84041 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -486,3 +486,5 @@ Bug Fixes - Fixed bug with reading CSV files from Amazon S3 on python 3 raising a TypeError (:issue:`9452`) - Bug in the Google BigQuery reader where the 'jobComplete' key may be present but False in the query results (:issue:`8728`) - Bug in ``Series.values_counts`` with excluding ``NaN`` for categorical type ``Series`` with ``dropna=True`` (:issue:`9443`) + +- Fixed mising numeric_only option for ``DataFrame.std/var/sem`` (:issue:`9201`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 67a9ab67c0a98..feebb3efaa9d0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4090,7 +4090,7 @@ def _make_stat_function_ddof(name, desc, f): @Substitution(outname=name, desc=desc) @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, ddof=1, - **kwargs): + numeric_only=None, **kwargs): if skipna is None: skipna = True if axis is None: @@ -4099,6 +4099,7 @@ def stat_func(self, axis=None, skipna=None, level=None, ddof=1, return self._agg_by_level(name, axis=axis, level=level, skipna=skipna, ddof=ddof) return self._reduce(f, name, axis=axis, + numeric_only=numeric_only, skipna=skipna, ddof=ddof) stat_func.__name__ = name return stat_func diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 602850d859d27..f68f4f9037d97 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -332,7 +332,7 @@ def _get_counts_nanvar(mask, axis, ddof): def _nanvar(values, axis=None, skipna=True, ddof=1): # private nanvar calculator mask = isnull(values) - if not is_floating_dtype(values): + if is_any_int_dtype(values): values = values.astype('f8') count, d = _get_counts_nanvar(mask, axis, ddof) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index e1687fd6a67cf..1acad4cf978a8 100644 --- 
a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11503,6 +11503,32 @@ def test_var_std(self): self.assertFalse((result < 0).any()) nanops._USE_BOTTLENECK = True + def test_numeric_only_flag(self): + # GH #9201 + methods = ['sem', 'var', 'std'] + df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) + # set one entry to a number in str format + df1.ix[0, 'foo'] = '100' + + df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) + # set one entry to a non-number str + df2.ix[0, 'foo'] = 'a' + + for meth in methods: + result = getattr(df1, meth)(axis=1, numeric_only=True) + expected = getattr(df1[['bar', 'baz']], meth)(axis=1) + assert_series_equal(expected, result) + + result = getattr(df2, meth)(axis=1, numeric_only=True) + expected = getattr(df2[['bar', 'baz']], meth)(axis=1) + assert_series_equal(expected, result) + + assertRaisesRegexp(TypeError, 'float', + getattr(df1, meth), axis=1, numeric_only=False) + + assertRaisesRegexp(TypeError, 'float', + getattr(df2, meth), axis=1, numeric_only=False) + def test_sem(self): alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x)) self._check_stat_op('sem', alt)
closes https://github.com/pydata/pandas/issues/9201, the `numeric_only` option is missing for `DataFrame.std()` (and also `DataFrame.var()` and `DataFrame.sem()`), this is a fix for it
https://api.github.com/repos/pandas-dev/pandas/pulls/9209
2015-01-07T23:36:46Z
2015-03-12T12:08:53Z
2015-03-12T12:08:53Z
2015-04-29T15:31:04Z
9203 SQUASHED - DOCS: doc string edited pandas/core/frame.duplicated()
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8ee65949e6bc1..ea7896b0352a5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2726,7 +2726,8 @@ def duplicated(self, subset=None, take_last=False): Only consider certain columns for identifying duplicates, by default use all of the columns take_last : boolean, default False - Take the last observed row in a row. Defaults to the first row + For a set of distinct duplicate rows, flag all but the last row as + duplicated. Default is for all but the first row to be flagged cols : kwargs only argument of subset [deprecated] Returns
Redefined `take_last` variable in doc string. Original definition only made sense for drop_duplicates()
https://api.github.com/repos/pandas-dev/pandas/pulls/9206
2015-01-07T16:16:56Z
2015-01-07T16:25:16Z
2015-01-07T16:25:16Z
2015-01-07T16:33:34Z
DOC: Edited doc string of pandas/core/frame.duplicated(). Redefined take...
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8ee65949e6bc1..988183835b7f0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2726,7 +2726,8 @@ def duplicated(self, subset=None, take_last=False): Only consider certain columns for identifying duplicates, by default use all of the columns take_last : boolean, default False - Take the last observed row in a row. Defaults to the first row + For a set of distinct duplicate rows, flag all but the last row as duplicated. + Default is for all but the first row to be flagged cols : kwargs only argument of subset [deprecated] Returns
..._last variable in doc string. Original definition only made sense for drop_duplicates().
https://api.github.com/repos/pandas-dev/pandas/pulls/9203
2015-01-06T19:52:04Z
2015-01-07T16:25:03Z
null
2015-01-07T16:25:03Z
BUG: fix for GH9010
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 21b1ddea0e9da..03f90c74ea4b7 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -141,3 +141,4 @@ Bug Fixes - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - isnull now detects NaT in PeriodIndex (:issue:`9129`) - Bug in groupby ``.nth()`` with a multiple column groupby (:issue:`8979`) +- Bug in ``Options`` where parsing the underlying price returns a ValueError when the price has a thousands separator in the HTML text. (:issue:`9010`) diff --git a/pandas/io/data.py b/pandas/io/data.py index 3d92d383badf8..5f9c57534cd20 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -698,8 +698,18 @@ def _option_frames_from_url(self, url): def _get_underlying_price(self, url): root = self._parse_url(url) - underlying_price = float(root.xpath('.//*[@class="time_rtq_ticker Fz-30 Fw-b"]')[0]\ - .getchildren()[0].text) + underlying_price = root.xpath('.//*[@class="time_rtq_ticker Fz-30 Fw-b"]')[0]\ + .getchildren()[0].text + + try: + underlying_price = float(underlying_price) + except ValueError: + # see if there is a comma thousands separator that needs to be filtered out + underlying_price = ''.join(c for c in underlying_price if c != ',') + try: + underlying_price = float(underlying_price) + except ValueError: + underlying_price = np.nan #Gets the time of the quote, note this is actually the time of the underlying price. 
try: diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index a65722dc76556..45cb7b648e9dc 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -320,6 +320,17 @@ def test_get_expiry_dates(self): raise nose.SkipTest(e) self.assertTrue(len(dates) > 1) + @network + def test_get_underlying_price(self): + try: + options_object = web.Options('^spxpm', 'yahoo') + expiry_dates, urls = options_object._get_expiry_dates_and_links() + url = options_object._FINANCE_BASE_URL + urls.values()[0] + quote_price, quote_time = options_object._get_underlying_price( url ) + except RemoteDataError as e: + raise nose.SkipTest(e) + self.assertIsInstance( quote_price, float ) + @network def test_get_all_data(self): try:
Superseded by #9358 --- This is an update of PR #9024. closes #9010
https://api.github.com/repos/pandas-dev/pandas/pulls/9198
2015-01-05T03:58:04Z
2015-03-04T10:40:47Z
null
2015-03-04T10:41:26Z
fixes division by zero error for kurt()
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 21b1ddea0e9da..528c32d84e898 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -141,3 +141,4 @@ Bug Fixes - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - isnull now detects NaT in PeriodIndex (:issue:`9129`) - Bug in groupby ``.nth()`` with a multiple column groupby (:issue:`8979`) +- Fixed division by zero error for ``Series.kurt()`` when all values are equal (:issue:`9197`) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 9703dba40a18a..b3d8f2b67ffab 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -514,17 +514,21 @@ def nankurt(values, axis=None, skipna=True): C = _zero_out_fperr(C) D = _zero_out_fperr(D) + if not isinstance(B, np.ndarray): + # if B is a scalar, check these corner cases first before doing division + if count < 4: + return np.nan + if B == 0: + return 0 + result = (((count * count - 1.) * D / (B * B) - 3 * ((count - 1.) ** 2)) / ((count - 2.) 
* (count - 3.))) + if isinstance(result, np.ndarray): result = np.where(B == 0, 0, result) result[count < 4] = np.nan - return result - else: - result = 0 if B == 0 else result - if count < 4: - return np.nan - return result + + return result @disallow('M8','m8') diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 93cebd034b4df..9b072e2f62968 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2212,6 +2212,18 @@ def test_skew(self): alt = lambda x: skew(x, bias=False) self._check_stat_op('skew', alt) + # test corner cases, skew() returns NaN unless there's at least 3 values + min_N = 3 + for i in range(1, min_N + 1): + s = Series(np.ones(i)) + df = DataFrame(np.ones((i, i))) + if i < min_N: + self.assertTrue(np.isnan(s.skew())) + self.assertTrue(np.isnan(df.skew()).all()) + else: + self.assertEqual(0, s.skew()) + self.assertTrue((df.skew() == 0).all()) + def test_kurt(self): tm._skip_if_no_scipy() @@ -2226,6 +2238,18 @@ def test_kurt(self): s = Series(np.random.randn(6), index=index) self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar']) + # test corner cases, kurt() returns NaN unless there's at least 4 values + min_N = 4 + for i in range(1, min_N + 1): + s = Series(np.ones(i)) + df = DataFrame(np.ones((i, i))) + if i < min_N: + self.assertTrue(np.isnan(s.kurt())) + self.assertTrue(np.isnan(df.kurt()).all()) + else: + self.assertEqual(0, s.kurt()) + self.assertTrue((df.kurt() == 0).all()) + def test_argsort(self): self._check_accum_op('argsort') argsorted = self.ts.argsort()
Currently if `kurt()` is called on a `Series` with equal values, it throws a `ZeroDivisionError` ``` In [1]: import pandas as pd In [2]: import numpy as np In [3]: s = pd.Series(np.ones(5)) In [4]: s.kurt() ZeroDivisionError: float division by zero ``` This is not consistent with the case when `kurt()` is called on a `DataFrame` of equal values ``` In [5]: df = pd.DataFrame(np.ones((5, 5))) In [6]: df.kurt() Out[6]: 0 0 1 0 2 0 3 0 4 0 dtype: float64 ``` with this patch `s.kurt()` will return `0` instead of throwing.
https://api.github.com/repos/pandas-dev/pandas/pulls/9197
2015-01-04T06:33:03Z
2015-01-05T06:37:52Z
2015-01-05T06:37:52Z
2015-04-29T15:32:55Z
Documentations: Ecosystem/API - pydatastream (issue #5630)
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index e5afe1db9417f..4a0743b8be3e4 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -130,6 +130,13 @@ API Quandl API for Python wraps the Quandl REST API to return Pandas DataFrames with timeseries indexes. +`pydatastream <https://github.com/vfilimonov/pydatastream>`_ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +PyDatastream is a Python interface to the +`Thomson Dataworks Enterprise (DWE/Datastream) <http://dataworks.thomson.com/Dataworks/Enterprise/1.0/>`__ +SOAP API to return indexed Pandas DataFrames or Panels with financial data. +This package requires valid credentials for this API (non free). + .. _ecosystem.domain:
Following an old issue #5630 (Documentation / Remote Data Access - 3rd pary libraries). Quandl was already described in Ecosystem / API section, so I've added link to pydatastream that targets Thomson Reuters Datastream. closes #5630.
https://api.github.com/repos/pandas-dev/pandas/pulls/9195
2015-01-03T22:12:54Z
2015-01-06T13:14:19Z
2015-01-06T13:14:19Z
2015-01-06T13:27:59Z
BUG: read_html with a single column table #9178
diff --git a/pandas/io/html.py b/pandas/io/html.py index 13318203bec67..627375204e92e 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -341,14 +341,14 @@ def _parse_raw_thead(self, table): res = [] if thead: res = lmap(self._text_getter, self._parse_th(thead[0])) - return np.array(res).squeeze() if res and len(res) == 1 else res + return np.atleast_1d(np.array(res).squeeze()) if res and len(res) == 1 else res def _parse_raw_tfoot(self, table): tfoot = self._parse_tfoot(table) res = [] if tfoot: res = lmap(self._text_getter, self._parse_td(tfoot[0])) - return np.array(res).squeeze() if res and len(res) == 1 else res + return np.atleast_1d(np.array(res).squeeze()) if res and len(res) == 1 else res def _parse_raw_tbody(self, table): tbody = self._parse_tbody(table)
This commit fixes a crash on read_html(html, flavor="bs4") if table only has one column. closes #9178
https://api.github.com/repos/pandas-dev/pandas/pulls/9194
2015-01-03T19:25:58Z
2015-05-09T20:10:25Z
null
2023-05-11T01:12:47Z
Validate that 'name' attribute is set only if hashable
diff --git a/pandas/core/series.py b/pandas/core/series.py index 081e5c50946bc..7ce783278b86f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -17,6 +17,7 @@ _default_index, _maybe_upcast, _asarray_tuplesafe, _infer_dtype_from_scalar, is_list_like, _values_from_object, + is_hashable, _possibly_cast_to_datetime, _possibly_castable, _possibly_convert_platform, _try_sort, ABCSparseArray, _maybe_match_name, _coerce_to_dtype, @@ -105,6 +106,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): dict. dtype : numpy.dtype or None If None, dtype will be inferred + name : used to attach metadata to a Series, e.g., as str or namedtuple. + Must be hashable, defaults to None. copy : boolean, default False Copy input data """ @@ -272,6 +275,20 @@ def _update_inplace(self, result, **kwargs): # we want to call the generic version and not the IndexOpsMixin return generic.NDFrame._update_inplace(self, result, **kwargs) + # Validate that name is hashable + @property + def name(self): + return self._name + + @name.setter + def name(self, value): + if is_hashable(value): + object.__setattr__(self, '_name', value) + else: + raise TypeError('Series.name must be hashable, got %s.' 
+ % value.__class__.__name__) + + # ndarray compatibility @property def dtype(self): diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index c0daeb793fc40..6c33dcc23a0a7 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -644,6 +644,13 @@ def test_constructor_map(self): result = Series(m, index=lrange(10, 20)) exp.index = lrange(10, 20) assert_series_equal(result, exp) + + def test_constructor_unhashable_name(self): + def set_to_unhashable(s_): + s_.name = {} + s = Series([1,3], name = 'test') + self.assertRaises(TypeError, set_to_unhashable, s) + self.assertEqual(s.name, 'test') def test_constructor_categorical(self): cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True) @@ -1980,7 +1987,7 @@ def test_repr(self): rep_str = repr(ser) self.assertIn("Name: 0", rep_str) - ser = Series(["a\n\r\tb"], name=["a\n\r\td"], index=["a\n\r\tf"]) + ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"]) self.assertFalse("\t" in repr(ser)) self.assertFalse("\r" in repr(ser)) self.assertFalse("a\n" in repr(ser))
addresses part of issue #8263.
https://api.github.com/repos/pandas-dev/pandas/pulls/9193
2015-01-03T15:50:34Z
2015-05-09T15:59:50Z
null
2015-05-14T15:48:28Z
Add lag parameter to autocorrelation
Add lag parameter to autocorrelation, default to lag-1 autocorrelation so existing code will work unchanged.
https://api.github.com/repos/pandas-dev/pandas/pulls/9192
2015-01-02T22:42:12Z
2015-01-14T17:33:41Z
null
2015-01-14T19:16:18Z
Add DataFrame.view() to display contents interactively.
diff --git a/doc/source/api.rst b/doc/source/api.rst index b6fd14f425bd0..8b3fa2402e8dd 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -226,6 +226,14 @@ Exponentially-weighted moving window functions ewmcorr ewmcov +Visualization / Plotting +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autosummary:: + :toctree: generated/ + + interact + .. _api.series: Series @@ -596,14 +604,15 @@ the Categorical back to a numpy array, so levels and order information is not pr Categorical.__array__ -Plotting -~~~~~~~~ +Visualization / Plotting +~~~~~~~~~~~~~~~~~~~~~~~~ .. currentmodule:: pandas .. autosummary:: :toctree: generated/ Series.hist + Series.interact Series.plot Serialization / IO / Conversion @@ -863,13 +872,14 @@ Time series-related DataFrame.tz_convert DataFrame.tz_localize -Plotting -~~~~~~~~ +Visualization / Plotting +~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: :toctree: generated/ DataFrame.boxplot DataFrame.hist + DataFrame.interact DataFrame.plot Serialization / IO / Conversion @@ -1107,6 +1117,13 @@ Serialization / IO / Conversion Panel.to_frame Panel.to_clipboard +Visualization / Plotting +~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + Panel.interact + .. _api.panel4d: Panel4D diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 44321375d31a2..069cc8f51f792 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -647,6 +647,14 @@ option: You can also disable this feature via the ``expand_frame_repr`` option. This will print the table in one block. +You can visualize the contents of the DataFrame on an interactive grid using +:meth:`~pandas.DataFrame.interact`. + +.. 
ipython:: python + + df = DataFrame(randn(3, 12)) + df.interact() + DataFrame column attribute access and IPython completion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/__init__.py b/pandas/__init__.py index 69e8a4bad377e..c7d241985dba4 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -58,6 +58,7 @@ from pandas.tools.plotting import scatter_matrix, plot_params from pandas.tools.tile import cut, qcut from pandas.tools.util import value_range +from pandas.tools.interact import interact from pandas.core.reshape import melt from pandas.util.print_versions import show_versions import pandas.util.testing diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 8ee65949e6bc1..10b703c465bab 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1543,6 +1543,21 @@ def _sizeof_fmt(num, size_qualifier): _sizeof_fmt(mem_usage, size_qualifier)) _put_lines(buf, lines) + def interact(self, **kwargs): + """Visualize the contents of the DataFrame interatively + + Parameters + ---------- + See :func:`pandas.interact` for a list of keyword arguments to control + the display. + + See Also + -------- + pandas.interact + """ + from pandas.tools.interact import interact + interact(self, **kwargs) + def memory_usage(self, index=False): """Memory usage of DataFrame columns. diff --git a/pandas/core/panel.py b/pandas/core/panel.py index df3e6c0195be3..7b14e9bd2c933 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -603,6 +603,21 @@ def head(self, n=5): def tail(self, n=5): raise NotImplementedError + def interact(self, **kwargs): + """Visualize the contents of the Panel interatively + + Parameters + ---------- + See :func:`pandas.interact` for a list of keyword arguments to control + the display. 
+ + See Also + -------- + pandas.interact + """ + from pandas.tools.interact import interact + interact(self, **kwargs) + def _needs_reindex_multi(self, axes, method, level): """ don't allow a multi reindex on Panel or above ndim """ return False diff --git a/pandas/core/series.py b/pandas/core/series.py index 60b601a462520..67f799382971c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -377,6 +377,21 @@ def view(self, dtype=None): return self._constructor(self.values.view(dtype), index=self.index).__finalize__(self) + def interact(self, **kwargs): + """Visualize the contents of the Panel interatively + + Parameters + ---------- + See :func:`pandas.interact` for a list of keyword arguments to control + the display. + + See Also + -------- + pandas.interact + """ + from pandas.tools.interact import interact + interact(self, **kwargs) + def __array__(self, result=None): """ the array interface, return my values diff --git a/pandas/tools/interact.py b/pandas/tools/interact.py new file mode 100644 index 0000000000000..06e466648379a --- /dev/null +++ b/pandas/tools/interact.py @@ -0,0 +1,83 @@ +from pandas.core.api import Int64Index, DataFrame, Series, Panel + +try: # tabview optional + from tabview import tabview +except ImportError: + pass + + +def interact(data, index=None, header=None, start=None, + fixed_index=None, fixed_header=None, **kwargs): + """ + Visualize the contents of ``data`` interactively. + + Parameters + ---------- + data : DataFrame, Panel, Series or list + Object containing the data + index : bool + Show the index. When None, detect if an index exists. + header : bool + Show the column names or Series name. When None, detect if column names + or a Series name has been set. + start : Y or (Y,X) tuple + Start the viewer at the indicated location. 
+ fixed_index : bool + Instruct the viewer to keep the index fixed + fixed_header : bool + Instruct the viewer to keep the header fixed + **kwargs : dict + Any parameter supported by the underlying viewer. + """ + if isinstance(data, (Panel, DataFrame)): + if isinstance(data, Panel): + data = data.to_frame() + + # detect if data is using the built-in index for the labels/columns + if index is None: + index = type(data.index) is not Int64Index + if header is None: + header = type(data.index) is not Int64Index + + if index: + data = data.reset_index() + buf = [] + if header: + buf += [data.columns.tolist()] + buf += data.values.tolist() + data = buf + + elif isinstance(data, Series): + if index is None: + index = type(data.index) is not Int64Index + if header is None: + header = data.name is not None and len(data.name) + buf = [] + if index: + if header: + buf += [[data.index.name, data.name]] + buf += data.reset_index().values.tolist() + else: + if header: + buf += [[data.name]] + buf += [[x] for x in data.values] + data = buf + + else: + # try to convert to a simple list + data = list(data) + + # defaults + if fixed_index is None: + fixed_index = index + if fixed_header is None: + fixed_header = header + if type(start) is not tuple: + start = (start, 0) + + interact_list(data, **kwargs) + + +def interact_list(data, start=None, fixed_header=None, + fixed_index=None, **kwargs): + tabview.view(data)
This is a first attempt at addressing #9179 We add a DataFrame.view method to visualize the contents of a dataframe interactively. The viewing itself is performed using the "tabview" module from https://github.com/firecat53/tabview A few initial notes: - tabview requires python3 currently, so please test using python3. There are no significant changes to make tabview work on python 2.7 as well but I'd like some feedback first. - tabview tries to sys.exit() on quit even when loaded as a module. Also needs a fix. ipython complains, but prevents the module to exit. - I have no idea where it would be more appropriate to put the dependency/suggestion to install tabview.
https://api.github.com/repos/pandas-dev/pandas/pulls/9191
2015-01-02T22:37:18Z
2015-03-29T17:15:11Z
null
2015-03-29T17:15:11Z
Docstring: pd.to_datetime (issue #9107)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index e680fa06a9c8e..db62fceaceef8 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -200,6 +200,8 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, If True, require an exact format match. If False, allow the format to match anywhere in the target string. coerce : force errors to NaT (False by default) + Timestamps outside the interval between Timestamp.min and Timestamp.max + (approximately 1677-09-22 to 2262-04-11) will be also forced to NaT. unit : unit of the arg (D,s,ms,us,ns) denote the unit in epoch (e.g. a unix timestamp), which is an integer/float number infer_datetime_format : boolean, default False @@ -212,6 +214,9 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp + In case when it is not possible to return designated types (e.g. when + any element of input is before Timestamp.min or after Timestamp.max) + return will have datetime.datetime type (or correspoding array/Series). Examples -------- @@ -221,11 +226,30 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, >>> i = pd.date_range('20000101',periods=100) >>> df = pd.DataFrame(dict(year = i.year, month = i.month, day = i.day)) >>> pd.to_datetime(df.year*10000 + df.month*100 + df.day, format='%Y%m%d') + 0 2000-01-01 + 1 2000-01-02 + ... + 98 2000-04-08 + 99 2000-04-09 + Length: 100, dtype: datetime64[ns] Or from strings >>> df = df.astype(str) >>> pd.to_datetime(df.day + df.month + df.year, format="%d%m%Y") + 0 2000-01-01 + 1 2000-01-02 + ... 
+ 98 2000-04-08 + 99 2000-04-09 + Length: 100, dtype: datetime64[ns] + + Date that does not meet timestamp limitations: + + >>> pd.to_datetime('13000101', format='%Y%m%d') + datetime.datetime(1300, 1, 1, 0, 0) + >>> pd.to_datetime('13000101', format='%Y%m%d', coerce=True) + NaT """ from pandas import Timestamp from pandas.core.series import Series
Following the issue "to_datetime returns NaT for old dates with coerce=True". Do I understand correctly that online documentation is build automatically from docstrings? I.e. are there any other files to change? Also it would be good to make a link to [this section of "Caveats and Gotchas"](http://pandas.pydata.org/pandas-docs/version/0.15.0/gotchas.html#timestamp-limitations), but I'm not sure if the docstring is a good place to do this.
https://api.github.com/repos/pandas-dev/pandas/pulls/9189
2015-01-02T21:38:28Z
2015-01-04T22:36:13Z
2015-01-04T22:36:13Z
2015-01-06T11:17:47Z
ENH: Handle categorical dtype to/from R
diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py index 55adad3610816..6318b50052bb5 100644 --- a/pandas/rpy/common.py +++ b/pandas/rpy/common.py @@ -23,7 +23,7 @@ 'convert_to_r_matrix'] -def load_data(name, package=None, convert=True): +def load_data(name, package=None, convert=True, factors_as_strings=True): if package: importr(package) @@ -32,7 +32,7 @@ def load_data(name, package=None, convert=True): robj = r[name] if convert: - return convert_robj(robj) + return convert_robj(robj, factors_as_strings=factors_as_strings) else: return robj @@ -48,7 +48,7 @@ def _is_null(obj): return _rclass(obj) == 'NULL' -def _convert_list(obj): +def _convert_list(obj, **kwargs): """ Convert named Vector to dict, factors to list """ @@ -64,7 +64,7 @@ def _convert_list(obj): return result -def _convert_array(obj): +def _convert_array(obj, **kwargs): """ Convert Array to DataFrame """ @@ -90,8 +90,11 @@ def _list(item): return df -def _convert_vector(obj): - if isinstance(obj, robj.IntVector): +def _convert_vector(obj, **kwargs): + # FactorVector is sub-class, so check first + if isinstance(obj, robj.FactorVector): + return _convert_factor(obj, **kwargs) + elif isinstance(obj, robj.IntVector): return _convert_int_vector(obj) elif isinstance(obj, robj.StrVector): return _convert_str_vector(obj) @@ -117,7 +120,7 @@ def _convert_vector(obj): NA_INTEGER = -2147483648 -def _convert_int_vector(obj): +def _convert_int_vector(obj, **kwargs): arr = np.asarray(obj) mask = arr == NA_INTEGER if mask.any(): @@ -126,7 +129,7 @@ def _convert_int_vector(obj): return arr -def _convert_str_vector(obj): +def _convert_str_vector(obj, **kwargs): arr = np.asarray(obj, dtype=object) mask = arr == robj.NA_Character if mask.any(): @@ -134,35 +137,47 @@ def _convert_str_vector(obj): return arr -def _convert_DataFrame(rdf): +def _convert_factor(obj, **kwargs): + if kwargs.get("factors_as_strings", True): + levels = np.asarray(obj.levels) + values = np.asarray(obj) + if com.is_float_dtype(values): + 
mask = np.isnan(values) + notmask = -mask + result = np.empty(len(values), dtype=object) + result[mask] = np.nan + + locs = (values[notmask] - 1).astype(np.int_) + result[notmask] = levels.take(locs) + values = result + else: + values = np.asarray(obj.levels).take(values - 1) + + else: # give a categorical object back + ordered = r["is.ordered"](obj)[0] + categories = list(obj.levels) + codes = np.asarray(obj) - 1 # zero-based indexing + values = pd.Categorical.from_codes(codes, categories=categories, + ordered=ordered) + + return values + + +def _convert_DataFrame(rdf, **kwargs): columns = list(rdf.colnames) rows = np.array(rdf.rownames) data = {} for i, col in enumerate(columns): vec = rdf.rx2(i + 1) - values = _convert_vector(vec) - - if isinstance(vec, robj.FactorVector): - levels = np.asarray(vec.levels) - if com.is_float_dtype(values): - mask = np.isnan(values) - notmask = -mask - result = np.empty(len(values), dtype=object) - result[mask] = np.nan - - locs = (values[notmask] - 1).astype(np.int_) - result[notmask] = levels.take(locs) - values = result - else: - values = np.asarray(vec.levels).take(values - 1) + values = _convert_vector(vec, **kwargs) data[col] = values return pd.DataFrame(data, index=_check_int(rows), columns=columns) -def _convert_Matrix(mat): +def _convert_Matrix(mat, **kwargs): columns = mat.colnames rows = mat.rownames @@ -181,12 +196,14 @@ def _check_int(vec): return vec + _pandas_converters = [ (robj.DataFrame, _convert_DataFrame), (robj.Matrix, _convert_Matrix), (robj.StrVector, _convert_vector), (robj.FloatVector, _convert_vector), (robj.Array, _convert_array), + (robj.FactorVector, _convert_factor), (robj.Vector, _convert_list), ] @@ -197,11 +214,12 @@ def _check_int(vec): (robj.StrVector, _convert_vector), (robj.FloatVector, _convert_vector), (robj.Array, _convert_array), + (robj.FactorVector, _convert_factor), (robj.Vector, _convert_list), ] -def convert_robj(obj, use_pandas=True): +def convert_robj(obj, use_pandas=True, 
factors_as_strings=True): """ Convert rpy2 object to a pandas-friendly form @@ -220,7 +238,7 @@ def convert_robj(obj, use_pandas=True): for rpy_type, converter in converters: if isinstance(obj, rpy_type): - return converter(obj) + return converter(obj, factors_as_strings=factors_as_strings) raise TypeError('Do not know what to do with %s object' % type(obj)) @@ -263,6 +281,8 @@ def convert_to_r_posixct(obj): np.float32: robj.FloatVector, np.float: robj.FloatVector, np.int: robj.IntVector, + np.int8: robj.IntVector, + np.int16: robj.IntVector, np.int32: robj.IntVector, np.int64: robj.IntVector, np.object_: robj.StrVector, @@ -274,6 +294,8 @@ def convert_to_r_posixct(obj): np.float32: robj.NA_Real, np.float: robj.NA_Real, np.int: robj.NA_Integer, + np.int8: robj.NA_Integer, + np.int16: robj.NA_Integer, np.int32: robj.NA_Integer, np.int64: robj.NA_Integer, np.object_: robj.NA_Character, @@ -318,6 +340,11 @@ def convert_to_r_dataframe(df, strings_as_factors=False): if value_type == np.datetime64: value = convert_to_r_posixct(value) + elif value_type == com.CategoricalDtypeType: + levels = robj.StrVector(value.cat.categories) + value = robj.FactorVector(value, + levels=levels, + ordered=value.cat.ordered) else: value = [item if pd.notnull(item) else NA_TYPES[value_type] for item in value] diff --git a/pandas/rpy/tests/test_common.py b/pandas/rpy/tests/test_common.py index a2e6d08d07b58..9e1fbc943323b 100644 --- a/pandas/rpy/tests/test_common.py +++ b/pandas/rpy/tests/test_common.py @@ -205,7 +205,50 @@ def test_factor(self): level = list(r['levels'](vector)) factors = [level[index - 1] for index in factors] result = com.load_data(name) - assert np.equal(result, factors) + np.testing.assert_equal(result, factors) + + # test it as a data.frame + result = com.convert_robj(r("as.data.frame({0})".format(name))) + np.testing.assert_equal(result[name].values, factors) + + def test_factor_as_factor(self): + for name in ('state.division', 'state.region'): + vector = r[name] + 
factors = np.asarray(r['factor'](vector)) - 1 + level = list(r['levels'](vector)) + ordered = r["is.ordered"](vector)[0] + + result = com.load_data(name, factors_as_strings=False) + factor = pd.Categorical.from_codes(factors, categories=level, + ordered=ordered) + np.testing.assert_(result.equals(factor)) + + # test it as a data.frame + result = com.convert_robj(r("as.data.frame({0})".format(name)), + factors_as_strings=False) + np.testing.assert_(isinstance(result, pd.DataFrame)) + np.testing.assert_(result[name].dtype.type == + pd.core.common.CategoricalDtypeType) + + # no easy way to go from categorical Series to Cateogical? + np.testing.assert_equal(result[name].cat.codes, factor.codes) + cat_equals = result[name].cat.categories.equals(factor.categories) + np.testing.assert_(cat_equals) + + def test_to_r_dataframe_with_categorical(self): + r("dta <- warpbreaks") + r("dta[\"tension\"] <- factor(warpbreaks$tension, ordered=TRUE)") + dta = com.load_data("dta", factors_as_strings=False) + # check this while we're here + np.testing.assert_(not dta.wool.cat.ordered) + np.testing.assert_(dta.tension.cat.ordered) + df = com.convert_to_r_dataframe(dta) + np.testing.assert_(isinstance(df[1], robj.FactorVector)) + np.testing.assert_(isinstance(df[2], robj.FactorVector)) + + np.testing.assert_(not r["is.ordered"](df[1])[0]) + np.testing.assert_(r["is.ordered"](df[2])[0]) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
null
https://api.github.com/repos/pandas-dev/pandas/pulls/9187
2015-01-02T19:55:15Z
2015-03-08T14:39:29Z
null
2023-05-11T01:12:46Z
FIX: Fix problems with Series text representation.
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 0eeee8ccfddf6..e61b7d18672f7 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -567,3 +567,39 @@ Bug Fixes - Bug in ``Series.values_counts`` with excluding ``NaN`` for categorical type ``Series`` with ``dropna=True`` (:issue:`9443`) - Fixed mising numeric_only option for ``DataFrame.std/var/sem`` (:issue:`9201`) - Support constructing ``Panel`` or ``Panel4D`` with scalar data (:issue:`8285`) +- ``Series`` text representation disconnected from `max_rows`/`max_columns` (:issue:`7508`). +- ``Series`` number formatting inconsistent when truncated (:issue:`8532`). + + Previous Behavior + + .. code-block:: python + + In [2]: pd.options.display.max_rows = 10 + In [3]: s = pd.Series([1,1,1,1,1,1,1,1,1,1,0.9999,1,1]*10) + In [4]: s + Out[4]: + 0 1 + 1 1 + 2 1 + ... + 127 0.9999 + 128 1.0000 + 129 1.0000 + Length: 130, dtype: float64 + + New Behavior + + .. code-block:: python + + 0 1.0000 + 1 1.0000 + 2 1.0000 + 3 1.0000 + 4 1.0000 + ... 
+ 125 1.0000 + 126 1.0000 + 127 0.9999 + 128 1.0000 + 129 1.0000 + dtype: float64 diff --git a/pandas/core/format.py b/pandas/core/format.py index 3efcfec254591..b21ca9050ffd0 100644 --- a/pandas/core/format.py +++ b/pandas/core/format.py @@ -129,45 +129,63 @@ def to_string(self): class SeriesFormatter(object): - def __init__(self, series, buf=None, header=True, length=True, - na_rep='NaN', name=False, float_format=None, dtype=True): + def __init__(self, series, buf=None, length=True, header=True, + na_rep='NaN', name=False, float_format=None, dtype=True, + max_rows=None): self.series = series self.buf = buf if buf is not None else StringIO() self.name = name self.na_rep = na_rep - self.length = length self.header = header + self.length = length + self.max_rows = max_rows if float_format is None: float_format = get_option("display.float_format") self.float_format = float_format self.dtype = dtype + self._chk_truncate() + + def _chk_truncate(self): + from pandas.tools.merge import concat + max_rows = self.max_rows + truncate_v = max_rows and (len(self.series) > max_rows) + series = self.series + if truncate_v: + if max_rows == 1: + row_num = max_rows + series = series.iloc[:max_rows] + else: + row_num = max_rows // 2 + series = concat((series.iloc[:row_num], series.iloc[-row_num:])) + self.tr_row_num = row_num + self.tr_series = series + self.truncate_v = truncate_v + def _get_footer(self): + name = self.series.name footer = u('') - if self.name: - if getattr(self.series.index, 'freq', None): - footer += 'Freq: %s' % self.series.index.freqstr + if getattr(self.series.index, 'freq', None) is not None: + footer += 'Freq: %s' % self.series.index.freqstr - if footer and self.series.name is not None: - # categories have already a comma + linebreak - if not com.is_categorical_dtype(self.series.dtype): - footer += ', ' + if self.name is not False and name is not None: + if footer: + footer += ', ' - series_name = com.pprint_thing(self.series.name, + series_name = 
com.pprint_thing(name, escape_chars=('\t', '\r', '\n')) footer += ("Name: %s" % - series_name) if self.series.name is not None else "" + series_name) if name is not None else "" if self.length: if footer: footer += ', ' footer += 'Length: %d' % len(self.series) - # TODO: in tidy_repr, with freq index, no dtype is shown -> also include a guard here? - if self.dtype: - name = getattr(self.series.dtype, 'name', None) + if self.dtype is not False and self.dtype is not None: + name = getattr(self.tr_series.dtype, 'name', None) if name: if footer: footer += ', ' @@ -175,8 +193,8 @@ def _get_footer(self): # level infos are added to the end and in a new line, like it is done for Categoricals # Only added when we request a name - if self.name and com.is_categorical_dtype(self.series.dtype): - level_info = self.series.values._repr_categories_info() + if name and com.is_categorical_dtype(self.tr_series.dtype): + level_info = self.tr_series.values._repr_categories_info() if footer: footer += "\n" footer += level_info @@ -184,7 +202,7 @@ def _get_footer(self): return compat.text_type(footer) def _get_formatted_index(self): - index = self.series.index + index = self.tr_series.index is_multi = isinstance(index, MultiIndex) if is_multi: @@ -196,35 +214,44 @@ def _get_formatted_index(self): return fmt_index, have_header def _get_formatted_values(self): - return format_array(self.series.get_values(), None, + return format_array(self.tr_series.get_values(), None, float_format=self.float_format, na_rep=self.na_rep) def to_string(self): - series = self.series + series = self.tr_series + footer = self._get_footer() if len(series) == 0: - return u('') + return 'Series([], ' + footer + ')' fmt_index, have_header = self._get_formatted_index() fmt_values = self._get_formatted_values() - maxlen = max(len(x) for x in fmt_index) + maxlen = max(len(x) for x in fmt_index) # max index len pad_space = min(maxlen, 60) - result = ['%s %s'] * len(fmt_values) - for i, (k, v) in 
enumerate(zip(fmt_index[1:], fmt_values)): - idx = k.ljust(pad_space) - result[i] = result[i] % (idx, v) + if self.truncate_v: + n_header_rows = 0 + row_num = self.tr_row_num + width = len(fmt_values[row_num-1]) + if width > 3: + dot_str = '...' + else: + dot_str = '..' + dot_str = dot_str.center(width) + fmt_values.insert(row_num + n_header_rows, dot_str) + fmt_index.insert(row_num + 1, '') + + result = adjoin(3, *[fmt_index[1:], fmt_values]) if self.header and have_header: - result.insert(0, fmt_index[0]) + result = fmt_index[0] + '\n' + result - footer = self._get_footer() if footer: - result.append(footer) + result += '\n' + footer - return compat.text_type(u('\n').join(result)) + return compat.text_type(u('').join(result)) def _strlen_func(): diff --git a/pandas/core/series.py b/pandas/core/series.py index d34657f0dc256..7e3b21be13525 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -36,7 +36,7 @@ from pandas.tseries.period import PeriodIndex, Period from pandas import compat from pandas.util.terminal import get_terminal_size -from pandas.compat import zip, u, OrderedDict +from pandas.compat import zip, u, OrderedDict, StringIO import pandas.core.ops as ops from pandas.core.algorithms import select_n @@ -883,43 +883,16 @@ def __unicode__(self): Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3. 
""" + buf = StringIO(u("")) width, height = get_terminal_size() max_rows = (height if get_option("display.max_rows") == 0 else get_option("display.max_rows")) - if max_rows and len(self.index) > max_rows: - result = self._tidy_repr(min(30, max_rows - 4)) - elif len(self.index) > 0: - result = self._get_repr(print_header=True, - length=len(self) > 50, - name=True, - dtype=True) - elif self.name is None: - result = u('Series([], dtype: %s)') % (self.dtype) - else: - result = u('Series([], name: %s, dtype: %s)') % (self.name, - self.dtype) - return result - def _tidy_repr(self, max_vals=20): - """ + self.to_string(buf=buf, name=self.name, dtype=self.dtype, + max_rows=max_rows) + result = buf.getvalue() - Internal function, should always return unicode string - """ - if max_vals > 1: - num = max_vals // 2 - else: - num = 1 - max_vals = 2 - head = self.iloc[:num]._get_repr(print_header=True, length=False, - dtype=False, name=False) - tail = self.iloc[-(max_vals - num):]._get_repr(print_header=False, - length=False, - name=False, - dtype=False) - result = head + '\n...\n' + tail - result = '%s\n%s' % (result, self._repr_footer()) - - return compat.text_type(result) + return result def _repr_footer(self): @@ -948,8 +921,8 @@ def _repr_footer(self): len(self), str(self.dtype.name)) - def to_string(self, buf=None, na_rep='NaN', float_format=None, - length=False, dtype=False, name=False): + def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, + length=False, dtype=False, name=False, max_rows=None): """ Render a string representation of the Series @@ -962,12 +935,17 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, float_format : one-parameter function, optional formatter function to apply to columns' elements if they are floats default None + header: boolean, default True + Add the Series header (index name) length : boolean, default False Add the Series length dtype : boolean, default False Add the Series dtype name : boolean, default 
False - Add the Series name (which may be None) + Add the Series name if not None + max_rows : int, optional + Maximum number of rows to show before truncating. If None, show + all. Returns ------- @@ -975,7 +953,8 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, """ the_repr = self._get_repr(float_format=float_format, na_rep=na_rep, - length=length, dtype=dtype, name=name) + header=header, length=length, dtype=dtype, + name=name, max_rows=max_rows) # catch contract violations if not isinstance(the_repr, compat.text_type): @@ -993,17 +972,18 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, f.write(the_repr) def _get_repr( - self, name=False, print_header=False, length=True, dtype=True, - na_rep='NaN', float_format=None): + self, name=False, header=True, length=True, dtype=True, na_rep='NaN', + float_format=None, max_rows=None): """ Internal function, should always return unicode string """ - - formatter = fmt.SeriesFormatter(self, name=name, header=print_header, - length=length, dtype=dtype, + formatter = fmt.SeriesFormatter(self, name=name, + length=length, header=header, + dtype=dtype, na_rep=na_rep, - float_format=float_format) + float_format=float_format, + max_rows=max_rows) result = formatter.to_string() # TODO: following check prob. not neces. 
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index cd78fd22e64ca..7f4b3fcb94dfa 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -13,6 +13,7 @@ from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp +from pandas.core.config import option_context import pandas.core.common as com import pandas.compat as compat import pandas.util.testing as tm @@ -1559,12 +1560,12 @@ def test_repr(self): self.assertEqual(exp, a.__unicode__()) - a = pd.Series(pd.Categorical(["a","b"] *25, name="a", ordered=True)) - exp = u("".join(["%s a\n%s b\n"%(i,i+1) for i in range(0,10,2)]) + "...\n" + - "".join(["%s a\n%s b\n"%(i,i+1) for i in range(40,50,2)]) + - "Name: a, Length: 50, dtype: category\n" + - "Categories (2, object): [a < b]") - self.assertEqual(exp,a._tidy_repr()) + a = pd.Series(pd.Categorical(["a","b"] *25, name="a")) + exp = u("0 a\n1 b\n" + " ..\n" + + "48 a\n49 b\n" + + "Name: a, dtype: category\nCategories (2, object): [a, b]") + with option_context("display.max_rows", 5): + self.assertEqual(exp, repr(a)) levs = list("abcdefghijklmnopqrstuvwxyz") a = pd.Series(pd.Categorical(["a","b"], name="a", categories=levs, ordered=True)) @@ -1573,7 +1574,6 @@ def test_repr(self): "Categories (26, object): [a < b < c < d ... 
w < x < y < z]") self.assertEqual(exp,a.__unicode__()) - def test_info(self): # make sure it works diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py index b52e4f7e3947b..94a7dd4dd9e87 100644 --- a/pandas/tests/test_format.py +++ b/pandas/tests/test_format.py @@ -2438,16 +2438,16 @@ def test_to_string(self): # pass float_format format = '%.4f'.__mod__ result = self.ts.to_string(float_format=format) - result = [x.split()[1] for x in result.split('\n')] + result = [x.split()[1] for x in result.split('\n')[:-1]] expected = [format(x) for x in self.ts] self.assertEqual(result, expected) # empty string result = self.ts[:0].to_string() - self.assertEqual(result, '') + self.assertEqual(result, 'Series([], Freq: B)') result = self.ts[:0].to_string(length=0) - self.assertEqual(result, '') + self.assertEqual(result, 'Series([], Freq: B)') # name and length cp = self.ts.copy() @@ -2623,7 +2623,7 @@ def test_max_multi_index_display(self): with option_context("display.max_rows", 2): self.assertEqual(len(str(s).split('\n')),5) with option_context("display.max_rows", 1): - self.assertEqual(len(str(s).split('\n')),5) + self.assertEqual(len(str(s).split('\n')),4) with option_context("display.max_rows", 0): self.assertEqual(len(str(s).split('\n')),10) @@ -2637,10 +2637,137 @@ def test_max_multi_index_display(self): with option_context("display.max_rows", 2): self.assertEqual(len(str(s).split('\n')),4) with option_context("display.max_rows", 1): - self.assertEqual(len(str(s).split('\n')),4) + self.assertEqual(len(str(s).split('\n')),3) with option_context("display.max_rows", 0): self.assertEqual(len(str(s).split('\n')),9) + # Make sure #8532 is fixed + def test_consistent_format(self): + s = pd.Series([1,1,1,1,1,1,1,1,1,1,0.9999,1,1]*10) + with option_context("display.max_rows", 10): + res = repr(s) + exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 ' + '1.0000\n4 1.0000\n ... 
\n125 ' + '1.0000\n126 1.0000\n127 0.9999\n128 ' + '1.0000\n129 1.0000\ndtype: float64') + self.assertEqual(res, exp) + + @staticmethod + def gen_test_series(): + s1 = pd.Series(['a']*100) + s2 = pd.Series(['ab']*100) + s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef']) + s4 = s3[::-1] + test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4} + return test_sers + + def chck_ncols(self, s): + with option_context("display.max_rows", 10): + res = repr(s) + lines = res.split('\n') + lines = [line for line in repr(s).split('\n') \ + if not re.match('[^\.]*\.+', line)][:-1] + ncolsizes = len(set(len(line.strip()) for line in lines)) + self.assertEqual(ncolsizes, 1) + + def test_format_explicit(self): + test_sers = self.gen_test_series() + with option_context("display.max_rows", 4): + res = repr(test_sers['onel']) + exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object' + self.assertEqual(exp, res) + res = repr(test_sers['twol']) + exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:' + ' object') + self.assertEqual(exp, res) + res = repr(test_sers['asc']) + exp = ('0 a\n1 ab\n ... \n4 abcde\n5' + ' abcdef\ndtype: object') + self.assertEqual(exp, res) + res = repr(test_sers['desc']) + exp = ('5 abcdef\n4 abcde\n ... 
\n1 ab\n0' + ' a\ndtype: object') + self.assertEqual(exp, res) + + def test_ncols(self): + test_sers = self.gen_test_series() + for s in test_sers.values(): + self.chck_ncols(s) + + def test_max_rows_eq_one(self): + s = Series(range(10)) + with option_context("display.max_rows", 1): + strrepr = repr(s).split('\n') + exp1 = ['0', '0'] + res1 = strrepr[0].split() + self.assertEqual(exp1, res1) + exp2 = ['..'] + res2 = strrepr[1].split() + self.assertEqual(exp2, res2) + + def test_truncate_ndots(self): + def getndots(s): + return len(re.match('[^\.]*(\.*)', s).groups()[0]) + + s = Series([0, 2, 3, 6]) + with option_context("display.max_rows", 2): + strrepr = repr(s).replace('\n', '') + self.assertEqual(getndots(strrepr), 2) + + s = Series([0, 100, 200, 400]) + with option_context("display.max_rows", 2): + strrepr = repr(s).replace('\n', '') + self.assertEqual(getndots(strrepr), 3) + + def test_to_string_name(self): + s = Series(range(100)) + s.name = 'myser' + res = s.to_string(max_rows=2, name=True) + exp = '0 0\n ..\n99 99\nName: myser' + self.assertEqual(res, exp) + res = s.to_string(max_rows=2, name=False) + exp = '0 0\n ..\n99 99' + self.assertEqual(res, exp) + + def test_to_string_dtype(self): + s = Series(range(100)) + res = s.to_string(max_rows=2, dtype=True) + exp = '0 0\n ..\n99 99\ndtype: int64' + self.assertEqual(res, exp) + res = s.to_string(max_rows=2, dtype=False) + exp = '0 0\n ..\n99 99' + self.assertEqual(res, exp) + + def test_to_string_length(self): + s = Series(range(100)) + res = s.to_string(max_rows=2, length=True) + exp = '0 0\n ..\n99 99\nLength: 100' + self.assertEqual(res, exp) + + def test_to_string_na_rep(self): + s = pd.Series(index=range(100)) + res = s.to_string(na_rep='foo', max_rows=2) + exp = '0 foo\n ..\n99 foo' + self.assertEqual(res, exp) + + def test_to_string_float_format(self): + s = pd.Series(range(10), dtype=float) + res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x), + max_rows=2) + exp = '0 0.0\n ..\n9 9.0' + 
self.assertEqual(res, exp) + + def test_to_string_header(self): + s = pd.Series(range(10)) + s.index.name = 'foo' + res = s.to_string(header=True, max_rows=2) + exp = 'foo\n0 0\n ..\n9 9' + self.assertEqual(res, exp) + res = s.to_string(header=False, max_rows=2) + exp = '0 0\n ..\n9 9' + self.assertEqual(res, exp) + + class TestEngFormatter(tm.TestCase): _multiprocess_can_split_ = True diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 7e0dbaa735456..ae2ed4eaca2f4 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -2046,7 +2046,7 @@ def test_repr(self): # with empty series (#4651) s = Series([], dtype=np.int64, name='foo') - self.assertEqual(repr(s), 'Series([], name: foo, dtype: int64)') + self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)') s = Series([], dtype=np.int64, name=None) self.assertEqual(repr(s), 'Series([], dtype: int64)')
This PR harmonizes the way DataFrame and Series are printed. closes #8532 closes #7508 Before ``` In [1]: import pandas as pd In [2]: pd.options.display.max_rows = 10 In [3]: s = pd.Series([1,1,1,1,1,1,1,1,1,1,0.9999,1,1]*10) In [4]: s Out[4]: 0 1 1 1 2 1 ... 127 0.9999 128 1.0000 129 1.0000 Length: 130, dtype: float64 ``` Now ``` 0 1.0000 1 1.0000 2 1.0000 3 1.0000 4 1.0000 ... 125 1.0000 126 1.0000 127 0.9999 128 1.0000 129 1.0000 dtype: float64 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9182
2015-01-02T03:08:18Z
2015-03-17T00:18:39Z
2015-03-17T00:18:39Z
2015-03-17T00:19:21Z
is_list_like() for to_excel, fixes #8188.
diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 2ece91b5dea11..4355d65718765 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -401,7 +401,8 @@ def _conv_value(val): val = bool(val) elif isinstance(val, Period): val = "%s" % val - + elif com.is_list_like(val): + val = str(val) return val diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 4f97cef3d46d3..4c1d58de8aa8b 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -930,6 +930,17 @@ def test_to_excel_float_format(self): index=['A', 'B'], columns=['X', 'Y', 'Z']) tm.assert_frame_equal(rs, xp) + def test_to_excel_list_format(self): + _skip_if_no_xlrd() + + df = DataFrame([0.0, ['0.0',0.0]], + columns=['not_list','list']) + + with ensure_clean(self.ext) as filename: + df.to_excel(filename, sheetname='TestList') + result = read_excel(filename,'TestList') + tm.assert_equal(result,df) + def test_to_excel_output_encoding(self): _skip_if_no_xlrd() ext = self.ext
closes #8188
https://api.github.com/repos/pandas-dev/pandas/pulls/9181
2015-01-02T01:23:19Z
2015-05-09T16:17:11Z
null
2015-05-09T16:17:11Z
BUG: Fix not to reindex on non-Categorical groups (GH9049)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 0234a0dab8e28..3e9dcde6113b8 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -267,3 +267,4 @@ Bug Fixes - ``SparseSeries`` and ``SparsePanel`` now accept zero argument constructors (same as their non-sparse counterparts) (:issue:`9272`). - Bug in ``read_csv`` with buffer overflows with certain malformed input files (:issue:`9205`) +- Bug in groupby MultiIndex with missing pair (:issue:`9049`, :issue:`9344`) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 0a12484f9ab3a..29bdbe93866ed 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1862,7 +1862,6 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = grouper.values # pre-computed - self._was_factor = False self._should_compress = True # we have a single grouper which may be a myriad of things, some of which are @@ -1887,8 +1886,6 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, level_values = index.levels[level].take(inds) self.grouper = level_values.map(self.grouper) else: - self._was_factor = True - # all levels may not be observed labels, uniques = algos.factorize(inds, sort=True) @@ -1913,17 +1910,10 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, # a passed Categorical elif isinstance(self.grouper, Categorical): - - factor = self.grouper - self._was_factor = True - - # Is there any way to avoid this? 
- self.grouper = np.asarray(factor) - - self._labels = factor.codes - self._group_index = factor.categories + self._labels = self.grouper.codes + self._group_index = self.grouper.categories if self.name is None: - self.name = factor.name + self.name = self.grouper.name # a passed Grouper like elif isinstance(self.grouper, Grouper): @@ -1936,8 +1926,8 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.name = grouper.name # no level passed - if not isinstance(self.grouper, (Series, Index, np.ndarray)): - if getattr(self.grouper,'ndim', 1) != 1: + if not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)): + if getattr(self.grouper, 'ndim', 1) != 1: t = self.name or str(type(self.grouper)) raise ValueError("Grouper for '%s' not 1-dimensional" % t) self.grouper = self.index.map(self.grouper) @@ -1988,21 +1978,15 @@ def group_index(self): return self._group_index def _make_labels(self): - if self._was_factor: # pragma: no cover - raise Exception('Should not call this method grouping by level') - else: + if self._labels is None or self._group_index is None: labels, uniques = algos.factorize(self.grouper, sort=self.sort) uniques = Index(uniques, name=self.name) self._labels = labels self._group_index = uniques - _groups = None - - @property + @cache_readonly def groups(self): - if self._groups is None: - self._groups = self.index.groupby(self.grouper) - return self._groups + return self.index.groupby(self.grouper) def _get_grouper(obj, key=None, axis=0, level=None, sort=True): """ @@ -3238,10 +3222,11 @@ def _reindex_output(self, result): return result elif len(groupings) == 1: return result - elif not any([ping._was_factor for ping in groupings]): + elif not any([isinstance(ping.grouper, Categorical) + for ping in groupings]): return result - levels_list = [ ping._group_index for ping in groupings ] + levels_list = [ ping.group_index for ping in groupings ] index = MultiIndex.from_product(levels_list, names=self.grouper.names) 
d = { self.obj._get_axis_name(self.axis) : index, 'copy' : False } return result.reindex(**d).sortlevel(axis=self.axis) diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index d1ab33e607f4d..1d309e2a6389f 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -3297,6 +3297,34 @@ def test_groupby_categorical(self): expected.index.names = ['myfactor', None] assert_frame_equal(desc_result, expected) + def test_groupby_datetime_categorical(self): + # GH9049: ensure backward compatibility + levels = pd.date_range('2014-01-01', periods=4) + codes = np.random.randint(0, 4, size=100) + + cats = Categorical.from_codes(codes, levels, name='myfactor') + + data = DataFrame(np.random.randn(100, 4)) + + result = data.groupby(cats).mean() + + expected = data.groupby(np.asarray(cats)).mean() + expected = expected.reindex(levels) + expected.index.name = 'myfactor' + + assert_frame_equal(result, expected) + self.assertEqual(result.index.name, cats.name) + + grouped = data.groupby(cats) + desc_result = grouped.describe() + + idx = cats.codes.argsort() + ord_labels = np.asarray(cats).take(idx) + ord_data = data.take(idx) + expected = ord_data.groupby(ord_labels, sort=False).describe() + expected.index.names = ['myfactor', None] + assert_frame_equal(desc_result, expected) + def test_groupby_groups_datetimeindex(self): # #1430 from pandas.tseries.api import DatetimeIndex @@ -3484,6 +3512,31 @@ def test_groupby_categorical_unequal_len(self): # len(bins) != len(series) here self.assertRaises(ValueError,lambda : series.groupby(bins).mean()) + def test_groupby_multiindex_missing_pair(self): + # GH9049 + df = DataFrame({'group1': ['a','a','a','b'], + 'group2': ['c','c','d','c'], + 'value': [1,1,1,5]}) + df = df.set_index(['group1', 'group2']) + df_grouped = df.groupby(level=['group1','group2'], sort=True) + + res = df_grouped.agg('sum') + idx = MultiIndex.from_tuples([('a','c'), ('a','d'), ('b','c')], names=['group1', 'group2']) + exp = 
DataFrame([[2], [1], [5]], index=idx, columns=['value']) + + tm.assert_frame_equal(res, exp) + + def test_groupby_levels_and_columns(self): + # GH9344, GH9049 + idx_names = ['x', 'y'] + idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names) + df = pd.DataFrame(np.arange(12).reshape(-1, 3), index=idx) + + by_levels = df.groupby(level=idx_names).mean() + by_columns = df.reset_index().groupby(idx_names).mean() + + tm.assert_frame_equal(by_levels, by_columns) + def test_gb_apply_list_of_unequal_len_arrays(self): # GH1738 diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py index eb690df4870e8..73f5f19d6a626 100644 --- a/vb_suite/groupby.py +++ b/vb_suite/groupby.py @@ -390,6 +390,18 @@ def f(g): groupby_sum_booleans = Benchmark("df.groupby('ii').sum()", setup) + +#---------------------------------------------------------------------- +# multi-indexed group sum #9049 + +setup = common_setup + """ +N = 50 +df = DataFrame({'A': range(N) * 2, 'B': range(N*2), 'C': 1}).set_index(["A", "B"]) +""" + +groupby_sum_multiindex = Benchmark("df.groupby(level=[0, 1]).sum()", setup) + + #---------------------------------------------------------------------- # Transform testing
closes #9049. closes #9344 _self._was_factor_ is not appropriate to judge whether grouper is Categorical or not, because it can be "True" when we groupby indices (not columns). So, I added another flag _self._is_categorical_ to judge Categorical state. Also, I added a GroupBy test for MultiIndexed data, which was failed before this fix.
https://api.github.com/repos/pandas-dev/pandas/pulls/9177
2014-12-31T16:16:37Z
2015-02-10T14:52:04Z
2015-02-10T14:52:04Z
2015-02-11T01:49:04Z
BUG: "index_col=False" not working when "usecols" is specified in read_csv (GH9082)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 4231551c50a6b..1378804d99e71 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -139,4 +139,6 @@ Bug Fixes - DataFrame now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) +- BUG: "index_col=False" not working when "usecols" is specified in read_csv. (:issue:`9082`) - isnull now detects NaT in PeriodIndex (:issue:`9129`) + diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index d805727394f33..750e107a3074c 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -509,6 +509,17 @@ def test_index_col_named(self): tm.assert_frame_equal(xp, rs) self.assertEqual(xp.index.name, rs.index.name) + def test_usecols_index_col_False(self): + # Issue 9082 + s = "a,b,c,d\n1,2,3,4\n5,6,7,8" + s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8," + cols = ['a','c','d'] + expected = DataFrame({'a':[1,5], 'c':[3,7], 'd':[4,8]}) + df = self.read_csv(StringIO(s), usecols=cols, index_col=False) + tm.assert_frame_equal(expected, df) + df = self.read_csv(StringIO(s_malformed), usecols=cols, index_col=False) + tm.assert_frame_equal(expected, df) + def test_converter_index_col_bug(self): # 1835 data = "A;B\n1;2\n3;4" diff --git a/pandas/parser.pyx b/pandas/parser.pyx index 0409ee56f22bb..330cb63a00c3f 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -728,7 +728,7 @@ cdef class TextReader: # 'data has %d fields' # % (passed_count, field_count)) - if self.has_usecols: + if self.has_usecols and self.allow_leading_cols: nuse = len(self.usecols) if nuse == passed_count: self.leading_cols = 0
Fixes #9082 My access to the internet is spotty for the next few days. This fixes 9082 and does not break any of the existing tests so hopefully it won't break anyone's existing code. Happy New Year!
https://api.github.com/repos/pandas-dev/pandas/pulls/9176
2014-12-31T05:32:02Z
2015-01-10T17:33:28Z
null
2015-01-10T17:33:28Z
BUG: Fix for Timestamp handling in xlwt and xlsxwriter engines.
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 21b1ddea0e9da..2dbb1f284560d 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -96,6 +96,8 @@ Bug Fixes - Bug in ``pivot`` and `unstack`` where ``nan`` values would break index alignment (:issue:`7466`) +- Fixed bug where minutes and seconds components were zeroed when writing + Timestamp objects to Excel using xlwt and xlsxwriter (:issue:`9138`). diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 2ece91b5dea11..792b9d554be68 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -16,6 +16,7 @@ from pandas.compat import map, zip, reduce, range, lrange, u, add_metaclass from pandas.core import config from pandas.core.common import pprint_thing +from pandas import Timestamp import pandas.compat as compat import pandas.compat.openpyxl_compat as openpyxl_compat import pandas.core.common as com @@ -1118,6 +1119,10 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): val = _conv_value(cell.val) num_format_str = None + + if isinstance(val, Timestamp): + val = val.to_pydatetime() + if isinstance(cell.val, datetime.datetime): num_format_str = self.datetime_format elif isinstance(cell.val, datetime.date): @@ -1239,6 +1244,10 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0): for cell in cells: num_format_str = None + + if isinstance(cell.val, Timestamp): + cell.val = cell.val.to_pydatetime() + if isinstance(cell.val, datetime.datetime): num_format_str = self.datetime_format elif isinstance(cell.val, datetime.date): diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 4f97cef3d46d3..95b4c0ace642f 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1151,6 +1151,28 @@ def test_swapped_columns(self): tm.assert_series_equal(write_frame['A'], read_frame['A']) tm.assert_series_equal(write_frame['B'], read_frame['B']) + def test_datetimes(self): + # 
Test writing and reading datetimes. For issue #9139. + _skip_if_no_xlrd() + + datetimes = [datetime(2013, 1, 13, 1, 2, 3), + datetime(2013, 1, 13, 2, 45, 56), + datetime(2013, 1, 13, 4, 29, 49), + datetime(2013, 1, 13, 6, 13, 42), + datetime(2013, 1, 13, 7, 57, 35), + datetime(2013, 1, 13, 9, 41, 28), + datetime(2013, 1, 13, 11, 25, 21), + datetime(2013, 1, 13, 13, 9, 14), + datetime(2013, 1, 13, 14, 53, 7), + datetime(2013, 1, 13, 16, 37, 0), + datetime(2013, 1, 13, 18, 20, 52)] + + with ensure_clean(self.ext) as path: + write_frame = DataFrame.from_items([('A', datetimes)]) + write_frame.to_excel(path, 'Sheet1') + read_frame = read_excel(path, 'Sheet1', header=0) + + tm.assert_series_equal(write_frame['A'], read_frame['A']) def raise_wrapper(major_ver): def versioned_raise_wrapper(orig_method):
Fix for writing Timestamp objects using the xlwt and xlsxwriter engines. Both modules write Excel dates and times using datetime.timedelta which differs from pandas.Timedelta. This fix coerces Timestamp objects to datetime objects. fixes #9139.
https://api.github.com/repos/pandas-dev/pandas/pulls/9175
2014-12-31T03:04:58Z
2015-01-15T02:35:28Z
null
2015-01-15T02:35:28Z
ERR: better concat error messages #9157
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index 8320f3cbc8e76..8d4ad7f155423 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -494,8 +494,7 @@ Other API Changes ^^^^^^^^^^^^^^^^^ - Line and kde plot with ``subplots=True`` now uses default colors, not all black. Specify ``color='k'`` to draw all lines in black (:issue:`9894`) -- Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a -Series with a ``CategoricalIndex`` (:issue:`10704`) +- Calling the ``.value_counts`` method on a Series with ``categorical`` dtype now returns a Series with a ``CategoricalIndex`` (:issue:`10704`) - Enable writing Excel files in :ref:`memory <_io.excel_writing_buffer>` using StringIO/BytesIO (:issue:`7074`) - Enable serialization of lists and dicts to strings in ExcelWriter (:issue:`8188`) - Allow passing `kwargs` to the interpolation methods (:issue:`10378`). @@ -526,6 +525,7 @@ Series with a ``CategoricalIndex`` (:issue:`10704`) ``return np.datetime64('NaT')`` ``to_datetime64`` (unchanged) ``raise ValueError`` All other public methods (names not beginning with underscores) =============================== =============================================================== +- Improved error message when concatenating an empty iterable of dataframes (:issue:`9157`) .. 
_whatsnew_0170.deprecations: diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 430828a3db31b..5ee774635e59e 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -1,7 +1,6 @@ """ SQL-style merge routines """ -import types import numpy as np from pandas.compat import range, long, lrange, lzip, zip, map, filter @@ -17,11 +16,9 @@ concatenate_block_managers) from pandas.util.decorators import Appender, Substitution from pandas.core.common import ABCSeries -from pandas.io.parsers import TextFileReader import pandas.core.common as com -import pandas.lib as lib import pandas.algos as algos import pandas.hashtable as _hash @@ -775,9 +772,14 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, if keys is None: keys = sorted(objs) objs = [objs[k] for k in keys] + else: + objs = list(objs) + + if len(objs) == 0: + raise ValueError('No objects to concatenate') if keys is None: - objs = [obj for obj in objs if obj is not None ] + objs = [obj for obj in objs if obj is not None] else: # #1649 clean_keys = [] diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index 8b1457e7fd490..236157d028db3 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -2552,6 +2552,23 @@ def _constructor(self): tm.assertIsInstance(result, NotADataFrame) + def test_empty_sequence_concat(self): + # GH 9157 + empty_pat = "[Nn]o objects" + none_pat = "objects.*None" + test_cases = [ + ((), empty_pat), + ([], empty_pat), + ({}, empty_pat), + ([None], none_pat), + ([None, None], none_pat) + ] + for df_seq, pattern in test_cases: + assertRaisesRegexp(ValueError, pattern, pd.concat, df_seq) + + pd.concat([pd.DataFrame()]) + pd.concat([None, pd.DataFrame()]) + pd.concat([pd.DataFrame(), None]) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
closes #9157 This is a first attempt to improve the error message when trying to concat an empty list/set of objects: `pd.concat([])` currently raises following error: ``` /Users/ch/miniconda/envs/sci33/lib/python3.3/site-packages/pandas/tools/merge.py in __init__(self, objs, axis, join, join_axes, keys, levels, names, ignore_index, verify_integrity, copy) 765 766 if len(objs) == 0: --> 767 raise ValueError('All objects passed were None') 768 769 # consolidate data & figure out what our result ndim is going to be ValueError: All objects passed were None ``` After the tiny fix it is: ``` /Users/ch/repo/pandas/pandas/tools/merge.py in __init__(self, objs, axis, join, join_axes, keys, levels, names, ignore_index, verify_integrity, copy) 767 768 if len(objs) == 0: --> 769 raise ValueError('No objects passed') 770 771 if isinstance(objs, dict): ValueError: No objects passed ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9172
2014-12-30T13:35:46Z
2015-08-15T17:05:08Z
null
2015-08-16T08:17:52Z
BUG: Fix for extraneous default cell format in xlsxwriter files.
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 21b1ddea0e9da..656d1d31d3188 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -141,3 +141,7 @@ Bug Fixes - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - isnull now detects NaT in PeriodIndex (:issue:`9129`) - Bug in groupby ``.nth()`` with a multiple column groupby (:issue:`8979`) + +- Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' + format to cells if no other format wass applied. This prevented other row or + column formatting being applied. (:issue:`9167`) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 2ece91b5dea11..acec411a2e546 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -1274,6 +1274,10 @@ def _convert_to_style(self, style_dict, num_format_str=None): num_format_str: optional number format string """ + # If there is no formatting we don't create a format object. + if num_format_str is None and style_dict is None: + return None + # Create a XlsxWriter format object. xl_format = self.book.add_format() diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 4f97cef3d46d3..634402d891e53 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -1353,6 +1353,47 @@ class XlsxWriterTests(ExcelWriterBase, tm.TestCase): engine_name = 'xlsxwriter' check_skip = staticmethod(_skip_if_no_xlsxwriter) + def test_column_format(self): + # Test that column formats are applied to cells. Test for issue #9167. + # Applicable to xlsxwriter only. + _skip_if_no_xlsxwriter() + + import warnings + with warnings.catch_warnings(): + # Ignore the openpyxl lxml warning. 
+ warnings.simplefilter("ignore") + _skip_if_no_openpyxl() + import openpyxl + + with ensure_clean(self.ext) as path: + frame = DataFrame({'A': [123456, 123456], + 'B': [123456, 123456]}) + + writer = ExcelWriter(path) + frame.to_excel(writer) + + # Add a number format to col B and ensure it is applied to cells. + num_format = '#,##0' + write_workbook = writer.book + write_worksheet = write_workbook.worksheets()[0] + col_format = write_workbook.add_format({'num_format': num_format}) + write_worksheet.set_column('B:B', None, col_format) + writer.save() + + read_workbook = openpyxl.load_workbook(path) + read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1') + + # Get the number format from the cell. This method is backward + # compatible with older versions of openpyxl. + cell = read_worksheet.cell('B2') + + try: + read_num_format = cell.style.number_format._format_code + except: + read_num_format = cell.style.number_format + + self.assertEqual(read_num_format, num_format) + class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx'
Fix for issue in the xlsxwriter engine where is adds a default 'General' format to cells if no other format is applied. This isn't a bug, per se, but it prevents other row or column formatting. closes #9167
https://api.github.com/repos/pandas-dev/pandas/pulls/9171
2014-12-30T04:40:18Z
2015-01-06T00:13:06Z
null
2015-01-06T00:13:06Z
TST: tests for GH5873
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 8fe6698917efc..d474981771015 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -33,6 +33,7 @@ import pandas as pd from pandas.lib import Timestamp +from itertools import product class Base(object): @@ -3525,6 +3526,25 @@ def check(nlevels, with_nulls): right = pd.lib.duplicated(mi.values, take_last=take_last) tm.assert_array_equal(left, right) + # GH5873 + for a in [101, 102]: + mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) + self.assertFalse(mi.has_duplicates) + self.assertEqual(mi.get_duplicates(), []) + self.assert_array_equal(mi.duplicated(), np.zeros(2, dtype='bool')) + + for n in range(1, 6): # 1st level shape + for m in range(1, 5): # 2nd level shape + # all possible unique combinations, including nan + lab = product(range(-1, n), range(-1, m)) + mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]], + labels=np.random.permutation(list(lab)).T) + self.assertEqual(len(mi), (n + 1) * (m + 1)) + self.assertFalse(mi.has_duplicates) + self.assertEqual(mi.get_duplicates(), []) + self.assert_array_equal(mi.duplicated(), + np.zeros(len(mi), dtype='bool')) + def test_tolist(self): result = self.index.tolist() exp = list(self.index.values) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index c0daeb793fc40..93cebd034b4df 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5884,6 +5884,24 @@ def test_unstack(self): unstacked = s.unstack(0) assert_frame_equal(unstacked, expected) + # GH5873 + idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]]) + ts = pd.Series([1,2], index=idx) + left = ts.unstack() + left.columns = left.columns.astype('float64') + right = DataFrame([[nan, 1], [2, nan]], index=[101, 102], + columns=[nan, 3.5]) + assert_frame_equal(left, right) + + idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'], + ['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]]) + ts = 
pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx) + right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]], + columns=['cat', 'dog']) + tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)] + right.index = pd.MultiIndex.from_tuples(tpls) + assert_frame_equal(ts.unstack(level=0), right) + def test_sortlevel(self): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi)
closes https://github.com/pydata/pandas/issues/5873 code changes were done in https://github.com/pydata/pandas/pull/9101 and https://github.com/pydata/pandas/pull/9061. just adding tests to confirm bugs reported there are handled.
https://api.github.com/repos/pandas-dev/pandas/pulls/9169
2014-12-30T03:43:01Z
2015-01-02T15:41:25Z
null
2015-01-10T20:19:44Z
BUG: hidden ticklabels with sharex and secondary
Closes https://github.com/pydata/pandas/issues/9158 ``` python import pandas as pd d = {'A' : [1., 2., 3., 4.], 'B' : [4., 3., 2., 1.], 'C': [5, 1, 3, 4]} df = pd.DataFrame(d, index=pd.date_range('2014 10 11', '2014 10 14')) axes = df[['A','B']].plot(subplots=True) df['C'].plot(ax=axes[0], secondary_y=True) # x tick labels dissappear when this line is executed ``` ![gh](https://cloud.githubusercontent.com/assets/1312546/7216419/ffe3bc2e-e5c4-11e4-89de-d18e5e59d3b0.png) Introduced in the subplot refactoring in https://github.com/pydata/pandas/pull/7457 cc @sinhrks The fix is pretty simple, but I'm worried I may have broken other things. No tests failed though, so hopefully we're good.
https://api.github.com/repos/pandas-dev/pandas/pulls/9164
2014-12-28T14:28:29Z
2015-04-20T12:12:47Z
null
2015-10-31T02:39:15Z
PERF: Utilize mixed dtypes in df.count() with MultiIndexes
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 9a3a6bf319810..9ffcfb70c29db 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -68,6 +68,7 @@ Performance - Performance improvements in ``MultiIndex.duplicated`` by working with labels instead of values (:issue:`9125`) - Improved the speed of `nunique` by calling `unique` instead of `value_counts` (:issue:`9129`, :issue:`7771`) - Performance improvement of up to 10x in ``DataFrame.count`` and ``DataFrame.dropna`` by taking advantage of homogeneous/heterogeneous dtypes appropriately (:issue:`9136`) +- Performance improvement of up to 20x in ``DataFrame.count`` when using a ``MultiIndex`` and the ``level`` keyword argument (:issue:`9163`) Bug Fixes ~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c30a3035de4cb..8ee65949e6bc1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4108,27 +4108,38 @@ def _count_level(self, level, axis=0, numeric_only=False): else: frame = self - if axis == 1: - frame = frame.T + count_axis = frame._get_axis(axis) + agg_axis = frame._get_agg_axis(axis) - if not isinstance(frame.index, MultiIndex): + if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical %s." 
% self._get_axis_name(axis)) - # python 2.5 - mask = notnull(frame.values).view(np.uint8) + if frame._is_mixed_type: + # Since we have mixed types, calling notnull(frame.values) might + # upcast everything to object + mask = notnull(frame).values + else: + # But use the speedup when we have homogeneous dtypes + mask = notnull(frame.values) + + if axis == 1: + # We're transposing the mask rather than frame to avoid potential + # upcasts to object, which induces a ~20x slowdown + mask = mask.T if isinstance(level, compat.string_types): - level = self.index._get_level_number(level) + level = count_axis._get_level_number(level) - level_index = frame.index.levels[level] - labels = com._ensure_int64(frame.index.labels[level]) + level_index = count_axis.levels[level] + labels = com._ensure_int64(count_axis.labels[level]) counts = lib.count_level_2d(mask, labels, len(level_index)) result = DataFrame(counts, index=level_index, - columns=frame.columns) + columns=agg_axis) if axis == 1: + # Undo our earlier transpose return result.T else: return result diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 2fe2b6d76ec5c..334534ed466f2 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -328,6 +328,38 @@ def f(K=100): frame_dropna_axis1_all_mixed_dtypes = Benchmark('df.dropna(how="all",axis=1)', dropna_mixed_setup, start_date=datetime(2012,1,1)) +## dropna multi +dropna_setup = common_setup + """ +data = np.random.randn(10000, 1000) +df = DataFrame(data) +df.ix[50:1000,20:50] = np.nan +df.ix[2000:3000] = np.nan +df.ix[:,60:70] = np.nan +df.index = MultiIndex.from_tuples(df.index.map(lambda x: (x, x))) +df.columns = MultiIndex.from_tuples(df.columns.map(lambda x: (x, x))) +""" +frame_count_level_axis0_multi = Benchmark('df.count(axis=0, level=1)', dropna_setup, + start_date=datetime(2012,1,1)) + +frame_count_level_axis1_multi = Benchmark('df.count(axis=1, level=1)', dropna_setup, + start_date=datetime(2012,1,1)) + +# dropna on mixed 
dtypes +dropna_mixed_setup = common_setup + """ +data = np.random.randn(10000, 1000) +df = DataFrame(data) +df.ix[50:1000,20:50] = np.nan +df.ix[2000:3000] = np.nan +df.ix[:,60:70] = np.nan +df['foo'] = 'bar' +df.index = MultiIndex.from_tuples(df.index.map(lambda x: (x, x))) +df.columns = MultiIndex.from_tuples(df.columns.map(lambda x: (x, x))) +""" +frame_count_level_axis0_mixed_dtypes_multi = Benchmark('df.count(axis=0, level=1)', dropna_mixed_setup, + start_date=datetime(2012,1,1)) + +frame_count_level_axis1_mixed_dtypes_multi = Benchmark('df.count(axis=1, level=1)', dropna_mixed_setup, + start_date=datetime(2012,1,1)) #---------------------------------------------------------------------- # apply
@jreback Same underlying cause as #9136 but the solution is a bit more involved here. Basically, we're transposing a potentially mixed-type frame before calling `notnull(frame.values)`; the same result can be obtained by deferring the transpose until after `notnull` gives us a non-mixed frame. Here are the vbench results: ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_count_level_axis1_mixed_dtypes_multi | 82.2484 | 1489.9830 | 0.0552 | frame_count_level_axis0_mixed_dtypes_multi | 101.2537 | 1737.6347 | 0.0583 | frame_count_level_axis0_multi | 51.0643 | 51.3713 | 0.9940 | frame_count_level_axis1_multi | 98.5887 | 82.6767 | 1.1925 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [04ec4c6] : PERF: Utilize mixed dtypes in df.count() with MultiIndexes Base [def58c9] : Merge pull request #9128 from hsperr/expanduser ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9163
2014-12-28T09:07:36Z
2014-12-29T00:08:52Z
null
2014-12-29T03:05:54Z
PERF: improves merge performance when key space exceeds i8 bounds
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 9a3a6bf319810..2a4d11413c6df 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -68,6 +68,7 @@ Performance - Performance improvements in ``MultiIndex.duplicated`` by working with labels instead of values (:issue:`9125`) - Improved the speed of `nunique` by calling `unique` instead of `value_counts` (:issue:`9129`, :issue:`7771`) - Performance improvement of up to 10x in ``DataFrame.count`` and ``DataFrame.dropna`` by taking advantage of homogeneous/heterogeneous dtypes appropriately (:issue:`9136`) +- Performance and memory usage improvements in ``merge`` when key space exceeds ``int64`` bounds (:issue:`9151`) Bug Fixes ~~~~~~~~~ diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index e19c0de884c31..56eb8c68ad275 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -4,7 +4,7 @@ import types import numpy as np -from pandas.compat import range, long, lrange, lzip, zip +from pandas.compat import range, long, lrange, lzip, zip, map, filter import pandas.compat as compat from pandas.core.categorical import Categorical from pandas.core.frame import DataFrame, _merge_doc @@ -450,39 +450,29 @@ def _get_join_indexers(left_keys, right_keys, sort=False, how='inner'): ------- """ - if len(left_keys) != len(right_keys): - raise AssertionError('left_key and right_keys must be the same length') + from functools import partial - left_labels = [] - right_labels = [] - group_sizes = [] + assert len(left_keys) == len(right_keys), \ + 'left_key and right_keys must be the same length' - for lk, rk in zip(left_keys, right_keys): - llab, rlab, count = _factorize_keys(lk, rk, sort=sort) + # bind `sort` arg. of _factorize_keys + fkeys = partial(_factorize_keys, sort=sort) - left_labels.append(llab) - right_labels.append(rlab) - group_sizes.append(count) + # get left & right join labels and num. 
of levels at each location + llab, rlab, shape = map(list, zip( * map(fkeys, left_keys, right_keys))) - max_groups = long(1) - for x in group_sizes: - max_groups *= long(x) + # get flat i8 keys from label lists + lkey, rkey = _get_join_keys(llab, rlab, shape, sort) - if max_groups > 2 ** 63: # pragma: no cover - left_group_key, right_group_key, max_groups = \ - _factorize_keys(lib.fast_zip(left_labels), - lib.fast_zip(right_labels)) - else: - left_group_key = get_group_index(left_labels, group_sizes) - right_group_key = get_group_index(right_labels, group_sizes) - - left_group_key, right_group_key, max_groups = \ - _factorize_keys(left_group_key, right_group_key, sort=sort) + # factorize keys to a dense i8 space + # `count` is the num. of unique keys + # set(lkey) | set(rkey) == range(count) + lkey, rkey, count = fkeys(lkey, rkey) # preserve left frame order if how == 'left' and sort == False kwargs = {'sort':sort} if how == 'left' else {} join_func = _join_functions[how] - return join_func(left_group_key, right_group_key, max_groups, **kwargs) + return join_func(lkey, rkey, count, **kwargs) class _OrderedMerge(_MergeOperation): @@ -590,9 +580,9 @@ def _left_join_on_index(left_ax, right_ax, join_keys, sort=False): # if asked to sort or there are 1-to-many matches join_index = left_ax.take(left_indexer) return join_index, left_indexer, right_indexer - else: - # left frame preserves order & length of its index - return left_ax, None, right_indexer + + # left frame preserves order & length of its index + return left_ax, None, right_indexer def _right_outer_join(x, y, max_groups): @@ -663,6 +653,35 @@ def _sort_labels(uniques, left, right): return new_left, new_right +def _get_join_keys(llab, rlab, shape, sort): + from pandas.core.groupby import _int64_overflow_possible + + # how many levels can be done without overflow + pred = lambda i: not _int64_overflow_possible(shape[:i]) + nlev = next(filter(pred, range(len(shape), 0, -1))) + + # get keys for the first `nlev` 
levels + stride = np.prod(shape[1:nlev], dtype='i8') + lkey = stride * llab[0].astype('i8', subok=False, copy=False) + rkey = stride * rlab[0].astype('i8', subok=False, copy=False) + + for i in range(1, nlev): + stride //= shape[i] + lkey += llab[i] * stride + rkey += rlab[i] * stride + + if nlev == len(shape): # all done! + return lkey, rkey + + # densify current keys to avoid overflow + lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) + + llab = [lkey] + llab[nlev:] + rlab = [rkey] + rlab[nlev:] + shape = [count] + shape[nlev:] + + return _get_join_keys(llab, rlab, shape, sort) + #---------------------------------------------------------------------- # Concatenate DataFrame objects diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py index 96e4b32d2ad25..27176596c87c2 100644 --- a/pandas/tools/tests/test_merge.py +++ b/pandas/tools/tests/test_merge.py @@ -1141,6 +1141,10 @@ def test_merge_na_keys(self): tm.assert_frame_equal(result, expected) def test_int64_overflow_issues(self): + from itertools import product + from collections import defaultdict + from pandas.core.groupby import _int64_overflow_possible + # #2690, combinatorial explosion df1 = DataFrame(np.random.randn(1000, 7), columns=list('ABCDEF') + ['G1']) @@ -1151,6 +1155,119 @@ def test_int64_overflow_issues(self): result = merge(df1, df2, how='outer') self.assertTrue(len(result) == 2000) + low, high, n = -1 << 10, 1 << 10, 1 << 20 + left = DataFrame(np.random.randint(low, high, (n, 7)), + columns=list('ABCDEFG')) + left['left'] = left.sum(axis=1) + + # one-2-one match + i = np.random.permutation(len(left)) + right = left.iloc[i].copy() + right.columns = right.columns[:-1].tolist() + ['right'] + right.index = np.arange(len(right)) + right['right'] *= -1 + + out = merge(left, right, how='outer') + self.assertEqual(len(out), len(left)) + assert_series_equal(out['left'], - out['right']) + assert_series_equal(out['left'], out.iloc[:, :-2].sum(axis=1)) + + 
out.sort(out.columns.tolist(), inplace=True) + out.index = np.arange(len(out)) + for how in ['left', 'right', 'outer', 'inner']: + assert_frame_equal(out, merge(left, right, how=how, sort=True)) + + # check that left merge w/ sort=False maintains left frame order + out = merge(left, right, how='left', sort=False) + assert_frame_equal(left, out[left.columns.tolist()]) + + out = merge(right, left, how='left', sort=False) + assert_frame_equal(right, out[right.columns.tolist()]) + + # one-2-many/none match + n = 1 << 11 + left = DataFrame(np.random.randint(low, high, (n, 7)), + columns=list('ABCDEFG')) + + # confirm that this is checking what it is supposed to check + shape = left.apply(pd.Series.nunique).values + self.assertTrue(_int64_overflow_possible(shape)) + + # add duplicates to left frame + left = pd.concat([left, left], ignore_index=True) + + right = DataFrame(np.random.randint(low, high, (n // 2, 7)), + columns=list('ABCDEFG')) + + # add duplicates & overlap with left to the right frame + i = np.random.choice(len(left), n) + right = pd.concat([right, right, left.iloc[i]], ignore_index=True) + + left['left'] = np.random.randn(len(left)) + right['right'] = np.random.randn(len(right)) + + # shuffle left & right frames + i = np.random.permutation(len(left)) + left = left.iloc[i].copy() + left.index = np.arange(len(left)) + + i = np.random.permutation(len(right)) + right = right.iloc[i].copy() + right.index = np.arange(len(right)) + + # manually compute outer merge + ldict, rdict = defaultdict(list), defaultdict(list) + + for idx, row in left.set_index(list('ABCDEFG')).iterrows(): + ldict[idx].append(row['left']) + + for idx, row in right.set_index(list('ABCDEFG')).iterrows(): + rdict[idx].append(row['right']) + + vals = [] + for k, lval in ldict.items(): + rval = rdict.get(k, [np.nan]) + for lv, rv in product(lval, rval): + vals.append(k + tuple([lv, rv])) + + for k, rval in rdict.items(): + if k not in ldict: + for rv in rval: + vals.append(k + tuple([np.nan, 
rv])) + + def align(df): + df = df.sort(df.columns.tolist()) + df.index = np.arange(len(df)) + return df + + def verify_order(df): + kcols = list('ABCDEFG') + assert_frame_equal(df[kcols].copy(), + df[kcols].sort(kcols, kind='mergesort')) + + out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right']) + out = align(out) + + jmask = {'left': out['left'].notnull(), + 'right': out['right'].notnull(), + 'inner': out['left'].notnull() & out['right'].notnull(), + 'outer': np.ones(len(out), dtype='bool')} + + for how in 'left', 'right', 'outer', 'inner': + mask = jmask[how] + frame = align(out[mask].copy()) + self.assertTrue(mask.all() ^ mask.any() or how == 'outer') + + for sort in [False, True]: + res = merge(left, right, how=how, sort=sort) + if sort: + verify_order(res) + + # as in GH9092 dtypes break with outer/right join + assert_frame_equal(frame, align(res), + check_dtype=how not in ('right', 'outer')) + + def test_join_multi_levels(self): # GH 3662 diff --git a/vb_suite/join_merge.py b/vb_suite/join_merge.py index facec39559ed3..02132acb71a33 100644 --- a/vb_suite/join_merge.py +++ b/vb_suite/join_merge.py @@ -249,4 +249,22 @@ def sample(values, k): columns=['jolie', 'jolia']).set_index('jolie') ''' -left_outer_join_index = Benchmark("left.join(right, on='jim')", setup) +left_outer_join_index = Benchmark("left.join(right, on='jim')", setup, + name='left_outer_join_index') + + +setup = common_setup + """ +low, high, n = -1 << 10, 1 << 10, 1 << 20 +left = DataFrame(np.random.randint(low, high, (n, 7)), + columns=list('ABCDEFG')) +left['left'] = left.sum(axis=1) + +i = np.random.permutation(len(left)) +right = left.iloc[i].copy() +right.columns = right.columns[:-1].tolist() + ['right'] +right.index = np.arange(len(right)) +right['right'] *= -1 +""" + +i8merge = Benchmark("merge(left, right, how='outer')", setup, + name='i8merge')
In join operations, current master switches to [a less efficient path](https://github.com/pydata/pandas/blob/def58c9bcabd5bda63696a30f25d4012ab1160ac/pandas/tools/merge.py#L471) if the key space exceeds `int64` bounds. This commit improves performance and memory usage: on master: ``` In [1]: np.random.seed(2718281) In [2]: left = DataFrame(np.random.randint(-1 << 10, 1 << 10, (1 << 20, 8)), ...: columns=list('ABCDEFG') + ['left']) In [3]: i = np.random.permutation(len(left)) In [4]: right = left.iloc[i].copy() In [5]: right.columns = right.columns[:-1].tolist() + ['right'] In [6]: %timeit pd.merge(left, right, how='outer') 1 loops, best of 3: 13.8 s per loop In [7]: %memit pd.merge(left, right, how='outer') peak memory: 1064.16 MiB, increment: 820.65 MiB ``` on branch: ``` In [6]: %timeit pd.merge(left, right, how='outer') 1 loops, best of 3: 1.42 s per loop In [7]: %memit pd.merge(left, right, how='outer') peak memory: 440.72 MiB, increment: 199.89 MiB ``` `join|merge` benchmarks: ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- i8merge | 1510.8590 | 13001.0207 | 0.1162 | merge_2intkey_nosort | 20.9847 | 27.0010 | 0.7772 | join_dataframe_index_single_key_small | 16.4944 | 19.0903 | 0.8640 | join_dataframe_integer_2key | 7.3093 | 8.0273 | 0.9106 | merge_2intkey_sort | 60.6734 | 61.8456 | 0.9810 | left_outer_join_index | 3165.2357 | 3214.8040 | 0.9846 | join_dataframe_index_multi | 36.0967 | 36.5300 | 0.9881 | join_dataframe_index_single_key_bigger_sort | 24.7893 | 25.0640 | 0.9890 | join_non_unique_equal | 0.9350 | 0.9403 | 0.9943 | join_dataframe_index_single_key_bigger | 24.8820 | 24.9500 | 0.9973 | strings_join_split | 57.1507 | 56.9630 | 1.0033 | join_dataframe_integer_key | 2.9247 | 2.8093 | 1.0411 | ------------------------------------------------------------------------------- Test name | head[ms] | 
base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [f2d9e17] : improves merge performance when key space exceeds i8 bounds Base [def58c9] : Merge pull request #9128 from hsperr/expanduser ENH: Expanduser in to_file methods GH9066 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9151
2014-12-24T23:50:09Z
2014-12-28T23:43:38Z
2014-12-28T23:43:38Z
2014-12-29T15:43:56Z
BUG: Bug in Panel indexing with an object-like (GH9140)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index d7de5a7ac5979..14427dd8c453f 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -56,7 +56,7 @@ Bug Fixes .. _whatsnew_0160.bug_fixes: - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - +- Bug in Panel indexing with an object-like (:issue:`9140`) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index c35eb3f88bc4a..df3e6c0195be3 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -253,7 +253,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): def __getitem__(self, key): if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) - if lib.isscalar(key): + if not (_is_list_like(key) or isinstance(key, slice)): return super(Panel, self).__getitem__(key) return self.ix[key] diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 8e2e6e612a1a3..c2d5910e7859f 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -2448,6 +2448,22 @@ def test_panel_getitem(self): result = panel.ix[['ItemA','ItemB']] tm.assert_panel_equal(result,expected) + # with an object-like + # GH 9140 + class TestObject: + def __str__(self): + return "TestObject" + + obj = TestObject() + + p = Panel(np.random.randn(1,5,4), items=[obj], + major_axis = date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) + + expected = p.iloc[0] + result = p[obj] + tm.assert_frame_equal(result, expected) + def test_panel_setitem(self): # GH 7763
closes #9140
https://api.github.com/repos/pandas-dev/pandas/pulls/9143
2014-12-24T02:04:20Z
2014-12-24T15:50:18Z
2014-12-24T15:50:18Z
2014-12-24T15:50:19Z
Check whether GBQ Job is finished
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 572a8be5c65e8..91ec4831b0472 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -185,7 +185,8 @@ def run_query(self, query): job_reference = query_reply['jobReference'] - while(not 'jobComplete' in query_reply): + # Verify the job has finished running + while(not query_reply.get('jobComplete', False)): print('Job not yet complete...') query_reply = job_collection.getQueryResults( projectId=job_reference['projectId'],
xref #8728 jobComplete can be False in query_reply. Simply checking for the existence of the field isn't enough, as a False value will cause a KeyError when checking for query_reply['totalRows']
https://api.github.com/repos/pandas-dev/pandas/pulls/9141
2014-12-23T19:46:31Z
2015-01-18T21:09:11Z
null
2015-01-18T21:09:18Z
FIX: to_sql dtype argument accepting SQLAlchemy type instance (GH9083)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 7433adaa4b738..55b35368215c4 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -60,7 +60,8 @@ Bug Fixes - Fixed bug in ``to_sql`` when mapping a Timestamp object column (datetime column with timezone info) to the according sqlalchemy type (:issue:`9085`). - +- Fixed bug in ``to_sql`` ``dtype`` argument not accepting an instantiated + SQLAlchemy type (:issue:`9083`). diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 50c620c044403..b4318bdc2a3bf 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1159,9 +1159,9 @@ def to_sql(self, frame, name, if_exists='fail', index=True, """ if dtype is not None: - import sqlalchemy.sql.type_api as type_api + from sqlalchemy.types import to_instance, TypeEngine for col, my_type in dtype.items(): - if not issubclass(my_type, type_api.TypeEngine): + if not isinstance(to_instance(my_type), TypeEngine): raise ValueError('The type of %s is not a SQLAlchemy ' 'type ' % col) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 2ca30f3aea0ea..b185d530e056c 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -1229,6 +1229,14 @@ def test_dtype(self): self.assertRaises(ValueError, df.to_sql, 'error', self.conn, dtype={'B': str}) + # GH9083 + df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)}) + meta.reflect() + sqltype = meta.tables['dtype_test3'].columns['B'].type + print(sqltype) + self.assertTrue(isinstance(sqltype, sqlalchemy.String)) + self.assertEqual(sqltype.length, 10) + def test_notnull_dtype(self): cols = {'Bool': Series([True,None]), 'Date': Series([datetime(2012, 5, 1), None]),
Closes #9083
https://api.github.com/repos/pandas-dev/pandas/pulls/9138
2014-12-23T10:27:51Z
2014-12-24T10:36:06Z
2014-12-24T10:36:06Z
2014-12-24T10:36:06Z
TST: Fix failing test due to revision in WB data #9115
diff --git a/pandas/io/tests/test_wb.py b/pandas/io/tests/test_wb.py index e2a086e9dcdf4..51d6ac02f0f20 100644 --- a/pandas/io/tests/test_wb.py +++ b/pandas/io/tests/test_wb.py @@ -40,10 +40,14 @@ def test_wdi_download(self): expected = {'NY.GDP.PCAP.CD': {('Canada', '2003'): 28026.006013044702, ('Mexico', '2003'): 6601.0420648056606, ('Canada', '2004'): 31829.522562759001, ('Kosovo', '2003'): 1969.56271307405, ('Mexico', '2004'): 7042.0247834044303, ('United States', '2004'): 41928.886136479705, ('United States', '2003'): 39682.472247320402, ('Kosovo', '2004'): 2135.3328465238301}} expected = pandas.DataFrame(expected) + #Round, to ignore revisions to data. + expected = pandas.np.round(expected,decimals=-3) expected.sort(inplace=True) result = download(country=cntry_codes, indicator=inds, start=2003, end=2004, errors='ignore') result.sort(inplace=True) + #Round, to ignore revisions to data. + result = pandas.np.round(result,decimals=-3) expected.index = result.index assert_frame_equal(result, pandas.DataFrame(expected))
Closes #9115 I just round the expected and result data for the failing test. I have no idea why the WB would be revising 2003 and 2004 data, but they did. I would have redesigned the test, but spending that time has minimal benefit.
https://api.github.com/repos/pandas-dev/pandas/pulls/9137
2014-12-23T01:41:05Z
2014-12-23T03:13:20Z
2014-12-23T03:13:20Z
2015-02-09T00:37:14Z
Remove codepath asymmetry in dataframe count()
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index d7de5a7ac5979..544ab27157a17 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -48,6 +48,7 @@ Performance - Fixed a performance regression for ``.loc`` indexing with an array or list-like (:issue:`9126`:). - Performance improvements in ``MultiIndex.duplicated`` by working with labels instead of values (:issue:`9125`) - Improved the speed of `nunique` by calling `unique` instead of `value_counts` (:issue:`9129`, :issue:`7771`) +- Performance improvement of up to 10x in ``DataFrame.count`` and ``DataFrame.dropna`` by taking advantage of homogeneous/heterogeneous dtypes appropriately (:issue:`9136`) Bug Fixes diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7c7872cf7b6a5..c30a3035de4cb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4094,11 +4094,11 @@ def count(self, axis=0, level=None, numeric_only=False): if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: - if axis == 1: - counts = notnull(frame.values).sum(1) - result = Series(counts, index=frame._get_agg_axis(axis)) - else: + if frame._is_mixed_type: result = notnull(frame).sum(axis=axis) + else: + counts = notnull(frame.values).sum(axis=axis) + result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py index 12ba042487ebe..2fe2b6d76ec5c 100644 --- a/vb_suite/frame_methods.py +++ b/vb_suite/frame_methods.py @@ -290,30 +290,43 @@ def f(K=100): start_date=datetime(2012,1,1)) ## dropna -setup = common_setup + """ +dropna_setup = common_setup + """ data = np.random.randn(10000, 1000) df = DataFrame(data) df.ix[50:1000,20:50] = np.nan df.ix[2000:3000] = np.nan df.ix[:,60:70] = np.nan """ -frame_dropna_axis0_any = Benchmark('df.dropna(how="any",axis=0)', setup, +frame_dropna_axis0_any = Benchmark('df.dropna(how="any",axis=0)', 
dropna_setup, start_date=datetime(2012,1,1)) -frame_dropna_axis0_all = Benchmark('df.dropna(how="all",axis=0)', setup, +frame_dropna_axis0_all = Benchmark('df.dropna(how="all",axis=0)', dropna_setup, start_date=datetime(2012,1,1)) -setup = common_setup + """ +frame_dropna_axis1_any = Benchmark('df.dropna(how="any",axis=1)', dropna_setup, + start_date=datetime(2012,1,1)) + +frame_dropna_axis1_all = Benchmark('df.dropna(how="all",axis=1)', dropna_setup, + start_date=datetime(2012,1,1)) + +# dropna on mixed dtypes +dropna_mixed_setup = common_setup + """ data = np.random.randn(10000, 1000) df = DataFrame(data) df.ix[50:1000,20:50] = np.nan df.ix[2000:3000] = np.nan df.ix[:,60:70] = np.nan +df['foo'] = 'bar' """ -frame_dropna_axis1_any = Benchmark('df.dropna(how="any",axis=1)', setup, - start_date=datetime(2012,1,1)) +frame_dropna_axis0_any_mixed_dtypes = Benchmark('df.dropna(how="any",axis=0)', dropna_mixed_setup, + start_date=datetime(2012,1,1)) +frame_dropna_axis0_all_mixed_dtypes = Benchmark('df.dropna(how="all",axis=0)', dropna_mixed_setup, + start_date=datetime(2012,1,1)) -frame_dropna_axis1_all = Benchmark('df.dropna(how="all",axis=1)', setup, - start_date=datetime(2012,1,1)) +frame_dropna_axis1_any_mixed_dtypes = Benchmark('df.dropna(how="any",axis=1)', dropna_mixed_setup, + start_date=datetime(2012,1,1)) + +frame_dropna_axis1_all_mixed_dtypes = Benchmark('df.dropna(how="all",axis=1)', dropna_mixed_setup, + start_date=datetime(2012,1,1)) #----------------------------------------------------------------------
@jreback I noticed a codepath asymmetry in core.frame.count that leads to a substantial difference in dropna() performance depending on the axis. Using the path `df.dropna(axis=0)` takes yields a 2.5-5x improvement. ``` $ python vb_suite/test_perf.py -b upstream/master -t HEAD -r "dropna" -S -n 30 Invoked with : --ncalls: 3 --repeats: 30 ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- frame_dropna_axis1_any | 18.5493 | 101.6117 | 0.1826 | frame_dropna_axis1_all | 48.0193 | 128.8197 | 0.3728 | frame_dropna_axis0_any | 17.0240 | 17.3127 | 0.9833 | frame_dropna_axis0_all | 43.1127 | 43.3304 | 0.9950 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [84ad341] : Remove codepath asymmetry in dataframe count() Base [099a02c] : Merge pull request #9061 from behzadnouri/nan-pivot pivot & unstack with nan in the index count mean std min 25% 50% 75% max frame_dropna_axis1_any 3 40.114509 54.044075 0.182551 9.365917 18.549283 60.080489 101.611694 frame_dropna_axis1_all 3 59.070599 64.932673 0.372764 24.196047 48.019330 88.419517 128.819704 frame_dropna_axis0_any 3 11.773351 9.345549 0.983328 9.003684 17.024040 17.168363 17.312686 frame_dropna_axis0_all 3 29.146001 24.379745 0.994976 22.053826 43.112675 43.221513 43.330352 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9136
2014-12-22T23:05:39Z
2014-12-24T15:55:19Z
2014-12-24T15:55:19Z
2014-12-24T15:55:26Z
fixup unicode compat with 3.2 in test_index.py
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index fd2e83e9609c5..5c581b548e583 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -3452,24 +3452,24 @@ def test_has_duplicates(self): self.assertTrue(index.has_duplicates) # GH 9075 - t = [(u'x', u'out', u'z', 5, u'y', u'in', u'z', 169), - (u'x', u'out', u'z', 7, u'y', u'in', u'z', 119), - (u'x', u'out', u'z', 9, u'y', u'in', u'z', 135), - (u'x', u'out', u'z', 13, u'y', u'in', u'z', 145), - (u'x', u'out', u'z', 14, u'y', u'in', u'z', 158), - (u'x', u'out', u'z', 16, u'y', u'in', u'z', 122), - (u'x', u'out', u'z', 17, u'y', u'in', u'z', 160), - (u'x', u'out', u'z', 18, u'y', u'in', u'z', 180), - (u'x', u'out', u'z', 20, u'y', u'in', u'z', 143), - (u'x', u'out', u'z', 21, u'y', u'in', u'z', 128), - (u'x', u'out', u'z', 22, u'y', u'in', u'z', 129), - (u'x', u'out', u'z', 25, u'y', u'in', u'z', 111), - (u'x', u'out', u'z', 28, u'y', u'in', u'z', 114), - (u'x', u'out', u'z', 29, u'y', u'in', u'z', 121), - (u'x', u'out', u'z', 31, u'y', u'in', u'z', 126), - (u'x', u'out', u'z', 32, u'y', u'in', u'z', 155), - (u'x', u'out', u'z', 33, u'y', u'in', u'z', 123), - (u'x', u'out', u'z', 12, u'y', u'in', u'z', 144)] + t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), + (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), + (u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135), + (u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145), + (u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158), + (u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122), + (u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160), + (u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180), + (u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143), + (u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128), + (u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129), + (u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111), + (u('x'), 
u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114), + (u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121), + (u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126), + (u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155), + (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), + (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] index = pd.MultiIndex.from_tuples(t) self.assertFalse(index.has_duplicates)
fixes w.r.t. comment in #9101
https://api.github.com/repos/pandas-dev/pandas/pulls/9135
2014-12-22T22:02:36Z
2014-12-22T23:49:43Z
2014-12-22T23:49:43Z
2014-12-22T23:49:43Z
PERF: use unique and isnull in nunique instead of value_counts.
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index d572835a76218..1b52ee2d08370 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -45,8 +45,12 @@ Performance .. _whatsnew_0160.performance: + - Fixed a severe performance regression for ``.loc`` indexing with an array or list (:issue:9126:). +- Improved the speed of `nunique` by calling `unique` instead of `value_counts` (:issue:`9129`, :issue:`7771`) + + Bug Fixes ~~~~~~~~~ @@ -114,3 +118,4 @@ Bug Fixes - DataFrame now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) +- isnull now detects NaT in PeriodIndex (:issue:`9129`) diff --git a/pandas/core/base.py b/pandas/core/base.py index 04b431ae8cf67..c3b3024a16d0c 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -441,7 +441,12 @@ def nunique(self, dropna=True): ------- nunique : int """ - return len(self.value_counts(dropna=dropna)) + uniqs = self.unique() + n = len(uniqs) + if dropna and com.isnull(uniqs).any(): + n -= 1 + return n + def factorize(self, sort=False, na_sentinel=-1): """ diff --git a/pandas/core/common.py b/pandas/core/common.py index e5ff353104fe9..143f65ee64e60 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -302,7 +302,7 @@ def _isnull_ndarraylike(obj): vec = lib.isnullobj(values.ravel()) result[...] 
= vec.reshape(shape) - elif dtype in _DATELIKE_DTYPES: + elif is_datetimelike(obj): # this is the NaT pattern result = values.view('i8') == tslib.iNaT else: @@ -2366,6 +2366,9 @@ def is_datetime_arraylike(arr): return arr.dtype == object and lib.infer_dtype(arr) == 'datetime' return getattr(arr, 'inferred_type', None) == 'datetime' +def is_datetimelike(arr): + return arr.dtype in _DATELIKE_DTYPES or isinstance(arr, ABCPeriodIndex) + def _coerce_to_dtype(dtype): """ coerce a string / np.dtype to a dtype """ if is_categorical_dtype(dtype): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 6c64cbc08ca63..2f57fa593bc40 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -157,6 +157,15 @@ def test_isnull_datetime(): assert(mask[0]) assert(not mask[1:].any()) + # GH 9129 + pidx = idx.to_period(freq='M') + mask = isnull(pidx) + assert(mask[0]) + assert(not mask[1:].any()) + + mask = isnull(pidx[1:]) + assert(not mask.any()) + class TestIsNull(tm.TestCase): def test_0d_array(self):
closes #9129 Currently, `Series.nunique` [(source)](https://github.com/pydata/pandas/blob/master/pandas/core/base.py#L429) calls `Series.value_counts` [(source)](https://github.com/pydata/pandas/blob/master/pandas/core/base.py#L371), which by default, sorts the values. Counting unique values certainly doesn't require sorting, so we could fix this by passing `sort=False` to `value_counts`. But `nunique` can also be calculated by calling `Series.unique` instead of `value_counts`, and using `com.isnull` to handle the `dropna` parameter. This PR attempts to implement this. Here is a vbench perf test which seems to show an improvement for tests using `nunique`. ``` /usr/bin/time -v ./test_perf.sh -b master -t nunique-unique -r groupby ``` Here are the best and worst ratios: ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- groupby_ngroups_10000_nunique | 900.9844 | 3768.5087 | 0.2391 | groupby_ngroups_100_nunique | 9.6850 | 38.9043 | 0.2489 | groupby_ngroups_100_sem | 0.7834 | 1.4904 | 0.5256 | groupby_ngroups_100_size | 0.4920 | 0.8560 | 0.5748 | groupby_ngroups_10000_max | 2.5337 | 4.1683 | 0.6078 | groupby_frame_nth_none | 2.3570 | 3.2880 | 0.7168 | groupby_ngroups_10000_var | 2.4277 | 3.3390 | 0.7271 | groupby_ngroups_10000_sem | 3.5107 | 4.6523 | 0.7546 | ... groupby_transform_multi_key3 | 630.2720 | 599.7047 | 1.0510 | groupby_nth_datetimes_none | 424.5253 | 403.6326 | 1.0518 | groupby_transform_series2 | 109.7130 | 104.1580 | 1.0533 | groupby_transform_multi_key1 | 58.9686 | 55.7120 | 1.0585 | groupby_nth_datetimes_any | 1206.7020 | 1132.4236 | 1.0656 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ``` While working on this PR I ran into issue GH 9129. 
This PR includes Jeff's suggested fix.
https://api.github.com/repos/pandas-dev/pandas/pulls/9134
2014-12-22T21:00:13Z
2014-12-23T03:22:57Z
2014-12-23T03:22:57Z
2014-12-23T03:23:23Z
PERF: json support for blocks GH9037
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 0e6fb94cf7273..4112ef81ba6d1 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -28,6 +28,22 @@ Backwards incompatible API changes .. _whatsnew_0160.api_breaking: - ``Index.duplicated`` now returns `np.array(dtype=bool)` rather than `Index(dtype=object)` containing `bool` values. (:issue:`8875`) +- ``DataFrame.to_json`` now returns accurate type serialisation for each column for frames of mixed dtype (:issue:`9037`) + + Previously data was coerced to a common dtype before serialisation, which for + example resulted in integers being serialised to floats: + + .. code-block:: python + + In [2]: pd.DataFrame({'i': [1,2], 'f': [3.0, 4.2]}).to_json() + Out[2]: '{"f":{"0":3.0,"1":4.2},"i":{"0":1.0,"1":2.0}}' + + Now each column is serialised using its correct dtype: + + .. code-block:: python + + In [2]: pd.DataFrame({'i': [1,2], 'f': [3.0, 4.2]}).to_json() + Out[2]: '{"f":{"0":3.0,"1":4.2},"i":{"0":1,"1":2}}' Deprecations ~~~~~~~~~~~~ @@ -46,10 +62,10 @@ Performance .. _whatsnew_0160.performance: - Fixed a performance regression for ``.loc`` indexing with an array or list-like (:issue:`9126`:). +- ``DataFrame.to_json`` 30x performance improvement for mixed dtype frames. (:issue:`9037`) - Performance improvements in ``MultiIndex.duplicated`` by working with labels instead of values (:issue:`9125`) - Improved the speed of `nunique` by calling `unique` instead of `value_counts` (:issue:`9129`, :issue:`7771`) - Bug Fixes ~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 392fda6eeebe3..dd48a470e7dd4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2160,20 +2160,12 @@ def as_blocks(self): Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. - are presented in sorted order unless a specific list of columns is - provided. 
- NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) - Parameters - ---------- - columns : array-like - Specific column order - Returns ------- - values : a list of Object + values : a dict of dtype -> Constructor Types """ self._consolidate_inplace() diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index 897760f988d25..7fe9cd9ce5cdf 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -1,5 +1,5 @@ # pylint: disable-msg=W0612,E1101 -from pandas.compat import range, lrange, StringIO +from pandas.compat import range, lrange, StringIO, OrderedDict from pandas import compat import os @@ -337,7 +337,6 @@ def test_v12_compat(self): v12_json = os.path.join(self.dirpath, 'tsframe_v012.json') df_unser = pd.read_json(v12_json) - df_unser = pd.read_json(v12_json) assert_frame_equal(df, df_unser) df_iso = df.drop(['modified'], axis=1) @@ -345,6 +344,37 @@ def test_v12_compat(self): df_unser_iso = pd.read_json(v12_iso_json) assert_frame_equal(df_iso, df_unser_iso) + def test_blocks_compat_GH9037(self): + index = pd.date_range('20000101', periods=10, freq='H') + df_mixed = DataFrame(OrderedDict( + float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564, + -0.60316077, 0.24653374, 0.28668979, -2.51969012, + 0.95748401, -1.02970536], + int_1=[19680418, 75337055, 99973684, 65103179, 79373900, + 40314334, 21290235, 4991321, 41903419, 16008365], + str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474', + 'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'], + float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685, + -0.48217572, 0.86229683, 1.08935819, 0.93898739, + -0.03030452, 1.43366348], + str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9', + '08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'], + int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027, + 34193846, 10561746, 24867120, 76131025] + ), 
index=index) + + # JSON deserialisation always creates unicode strings + df_mixed.columns = df_mixed.columns.astype('unicode') + + df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'), + orient='split') + assert_frame_equal(df_mixed, df_roundtrip, + check_index_type=True, + check_column_type=True, + check_frame_type=True, + by_blocks=True, + check_exact=True) + def test_series_non_unique_index(self): s = Series(['a', 'b'], index=[1, 1]) diff --git a/pandas/src/datetime_helper.h b/pandas/src/datetime_helper.h index c8c54dd5fc947..d78e91e747854 100644 --- a/pandas/src/datetime_helper.h +++ b/pandas/src/datetime_helper.h @@ -13,8 +13,11 @@ void mangle_nat(PyObject *val) { } npy_int64 get_long_attr(PyObject *o, const char *attr) { + npy_int64 long_val; PyObject *value = PyObject_GetAttrString(o, attr); - return PyLong_Check(value) ? PyLong_AsLongLong(value) : PyInt_AS_LONG(value); + long_val = (PyLong_Check(value) ? PyLong_AsLongLong(value) : PyInt_AS_LONG(value)); + Py_DECREF(value); + return long_val; } npy_float64 total_seconds(PyObject *td) { diff --git a/pandas/src/ujson/python/objToJSON.c b/pandas/src/ujson/python/objToJSON.c index 25fbb71482f9e..75967bce87f76 100644 --- a/pandas/src/ujson/python/objToJSON.c +++ b/pandas/src/ujson/python/objToJSON.c @@ -63,6 +63,7 @@ typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti, void *outValue typedef ssize_t Py_ssize_t; #endif + typedef struct __NpyArrContext { PyObject *array; @@ -81,6 +82,16 @@ typedef struct __NpyArrContext char** columnLabels; } NpyArrContext; +typedef struct __PdBlockContext +{ + int colIdx; + int ncols; + int transpose; + + int* cindices; // frame column -> block column map + NpyArrContext** npyCtxts; // NpyArrContext for each column +} PdBlockContext; + typedef struct __TypeContext { JSPFN_ITERBEGIN iterBegin; @@ -98,10 +109,12 @@ typedef struct __TypeContext PyObject *attrList; PyObject *iterator; + double doubleValue; JSINT64 longValue; char *cStr; NpyArrContext *npyarr; + 
PdBlockContext *pdblock; int transpose; char** rowLabels; char** columnLabels; @@ -116,9 +129,13 @@ typedef struct __PyObjectEncoder // pass through the NpyArrContext when encoding multi-dimensional arrays NpyArrContext* npyCtxtPassthru; - // pass through a request for a specific encoding context - int requestType; - TypeContext* requestTypeContext; + // pass through the PdBlockContext when encoding blocks + PdBlockContext* blkCtxtPassthru; + + // pass-through to encode numpy data directly + int npyType; + void* npyValue; + TypeContext basicTypeContext; int datetimeIso; PANDAS_DATETIMEUNIT datetimeUnit; @@ -132,13 +149,6 @@ typedef struct __PyObjectEncoder #define GET_TC(__ptrtc) ((TypeContext *)((__ptrtc)->prv)) -struct PyDictIterState -{ - PyObject *keys; - size_t i; - size_t sz; -}; - enum PANDAS_FORMAT { SPLIT, @@ -189,7 +199,7 @@ void initObjToJSON(void) return NUMPY_IMPORT_ARRAY_RETVAL; } -TypeContext* createTypeContext() +static TypeContext* createTypeContext(void) { TypeContext *pc; @@ -207,8 +217,10 @@ TypeContext* createTypeContext() pc->index = 0; pc->size = 0; pc->longValue = 0; + pc->doubleValue = 0.0; pc->cStr = NULL; pc->npyarr = NULL; + pc->pdblock = NULL; pc->rowLabels = NULL; pc->columnLabels = NULL; pc->transpose = 0; @@ -218,19 +230,99 @@ TypeContext* createTypeContext() return pc; } -static void *PyIntToINT32(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) +static PyObject* get_sub_attr(PyObject *obj, char *attr, char *subAttr) { - PyObject *obj = (PyObject *) _obj; - *((JSINT32 *) outValue) = PyInt_AS_LONG (obj); + PyObject *tmp = PyObject_GetAttrString(obj, attr); + PyObject *ret; + + if (tmp == 0) + { + return 0; + } + ret = PyObject_GetAttrString(tmp, subAttr); + Py_DECREF(tmp); + + return ret; +} + +static int is_simple_frame(PyObject *obj) +{ + PyObject *check = get_sub_attr(obj, "_data", "is_mixed_type"); + int ret = (check == Py_False); + + if (!check) + { + return 0; + } + + Py_DECREF(check); + return ret; +} + 
+static Py_ssize_t get_attr_length(PyObject *obj, char *attr) +{ + PyObject *tmp = PyObject_GetAttrString(obj, attr); + Py_ssize_t ret; + + if (tmp == 0) + { + return 0; + } + ret = PyObject_Length(tmp); + Py_DECREF(tmp); + + if (ret == -1) + { + return 0; + } + + return ret; +} + +static PyObject* get_item(PyObject *obj, Py_ssize_t i) +{ + PyObject *tmp = PyInt_FromSsize_t(i); + PyObject *ret; + + if (tmp == 0) + { + return 0; + } + ret = PyObject_GetItem(obj, tmp); + Py_DECREF(tmp); + + return ret; +} + +static void *CDouble(JSOBJ obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) +{ + PRINTMARK(); + *((double *) outValue) = GET_TC(tc)->doubleValue; + return NULL; +} + +static void *CLong(JSOBJ obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) +{ + PRINTMARK(); + *((JSINT64 *) outValue) = GET_TC(tc)->longValue; return NULL; } +#ifdef _LP64 static void *PyIntToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { PyObject *obj = (PyObject *) _obj; *((JSINT64 *) outValue) = PyInt_AS_LONG (obj); return NULL; } +#else +static void *PyIntToINT32(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) +{ + PyObject *obj = (PyObject *) _obj; + *((JSINT32 *) outValue) = PyInt_AS_LONG (obj); + return NULL; +} +#endif static void *PyLongToINT64(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { @@ -309,7 +401,7 @@ static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, JSONTypeCont } } -static void *NpyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) +static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { pandas_datetimestruct dts; PyDatetimeScalarObject *obj = (PyDatetimeScalarObject *) _obj; @@ -345,11 +437,11 @@ static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, s static void *NpyDatetime64ToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { pandas_datetimestruct 
dts; - PyObject *obj = (PyObject *) _obj; - PRINTMARK(); - pandas_datetime_to_datetimestruct(PyLong_AsLongLong(obj), PANDAS_FR_ns, &dts); + pandas_datetime_to_datetimestruct( + (npy_datetime) GET_TC(tc)->longValue, + PANDAS_FR_ns, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } @@ -377,29 +469,102 @@ static void *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_ return outValue; } -void requestDateEncoding(PyObject* obj, PyObjectEncoder* pyenc) +static int NpyTypeToJSONType(PyObject* obj, JSONTypeContext* tc, int npyType, void* value) { - if (obj == Py_None) { - pyenc->requestType = JT_NULL; - return; + PyArray_VectorUnaryFunc* castfunc; + npy_double doubleVal; + npy_int64 longVal; + + if (PyTypeNum_ISFLOAT(npyType)) + { + PRINTMARK(); + castfunc = PyArray_GetCastFunc(PyArray_DescrFromType(npyType), NPY_DOUBLE); + if (!castfunc) + { + PyErr_Format ( + PyExc_ValueError, + "Cannot cast numpy dtype %d to double", + npyType); + } + castfunc(value, &doubleVal, 1, NULL, NULL); + if (npy_isnan(doubleVal) || npy_isinf(doubleVal)) + { + PRINTMARK(); + return JT_NULL; + } + GET_TC(tc)->doubleValue = (double) doubleVal; + GET_TC(tc)->PyTypeToJSON = CDouble; + return JT_DOUBLE; } - if (pyenc->datetimeIso) + if (PyTypeNum_ISDATETIME(npyType)) { - pyenc->requestType = JT_UTF8; + PRINTMARK(); + castfunc = PyArray_GetCastFunc(PyArray_DescrFromType(npyType), NPY_INT64); + if (!castfunc) + { + PyErr_Format ( + PyExc_ValueError, + "Cannot cast numpy dtype %d to long", + npyType); + } + castfunc(value, &longVal, 1, NULL, NULL); + if (longVal == get_nat()) + { + PRINTMARK(); + return JT_NULL; + } + GET_TC(tc)->longValue = (JSINT64) longVal; + GET_TC(tc)->PyTypeToJSON = NpyDatetime64ToJSON; + return ((PyObjectEncoder *) tc->encoder)->datetimeIso ? 
JT_UTF8 : JT_LONG; } - else + + if (PyTypeNum_ISINTEGER(npyType)) { - pyenc->requestType = JT_LONG; + PRINTMARK(); + castfunc = PyArray_GetCastFunc(PyArray_DescrFromType(npyType), NPY_INT64); + if (!castfunc) + { + PyErr_Format ( + PyExc_ValueError, + "Cannot cast numpy dtype %d to long", + npyType); + } + castfunc(value, &longVal, 1, NULL, NULL); + GET_TC(tc)->longValue = (JSINT64) longVal; + GET_TC(tc)->PyTypeToJSON = CLong; + return JT_LONG; } - pyenc->requestTypeContext = createTypeContext(); - pyenc->requestTypeContext->PyTypeToJSON = NpyDatetime64ToJSON; + + if (PyTypeNum_ISBOOL(npyType)) + { + PRINTMARK(); + return *((npy_bool *) value) == NPY_TRUE ? JT_TRUE : JT_FALSE; + } + + PRINTMARK(); + PyErr_Format ( + PyExc_RuntimeError, + "Unhandled numpy dtype %d", + npyType); + return JT_INVALID; } //============================================================================= // Numpy array iteration functions //============================================================================= + +static void NpyArr_freeItemValue(JSOBJ _obj, JSONTypeContext *tc) +{ + if (GET_TC(tc)->npyarr && GET_TC(tc)->itemValue != GET_TC(tc)->npyarr->array) + { + PRINTMARK(); + Py_XDECREF(GET_TC(tc)->itemValue); + GET_TC(tc)->itemValue = NULL; + } +} + int NpyArr_iterNextNone(JSOBJ _obj, JSONTypeContext *tc) { return 0; @@ -461,26 +626,21 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) } else { + PRINTMARK(); GET_TC(tc)->iterNext = NpyArr_iterNextNone; } - PRINTMARK(); } void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; + PRINTMARK(); if (npyarr) { - if (GET_TC(tc)->itemValue != npyarr->array) - { - Py_XDECREF(GET_TC(tc)->itemValue); - } - GET_TC(tc)->itemValue = NULL; - + NpyArr_freeItemValue(obj, tc); PyObject_Free(npyarr); } - PRINTMARK(); } void NpyArrPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc) @@ -490,10 +650,9 @@ void NpyArrPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc) void NpyArrPassThru_iterEnd(JSOBJ obj, 
JSONTypeContext *tc) { - NpyArrContext* npyarr; + NpyArrContext* npyarr = GET_TC(tc)->npyarr; PRINTMARK(); // finished this dimension, reset the data pointer - npyarr = GET_TC(tc)->npyarr; npyarr->curdim--; npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim]; npyarr->stridedim -= npyarr->inc; @@ -501,53 +660,50 @@ void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) npyarr->stride = PyArray_STRIDE(npyarr->array, npyarr->stridedim); npyarr->dataptr += npyarr->stride; - if (GET_TC(tc)->itemValue != npyarr->array) - { - Py_XDECREF(GET_TC(tc)->itemValue); - GET_TC(tc)->itemValue = NULL; - } + NpyArr_freeItemValue(obj, tc); } -int NpyArr_iterNextItem(JSOBJ _obj, JSONTypeContext *tc) +int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) { - NpyArrContext* npyarr; - + NpyArrContext* npyarr = GET_TC(tc)->npyarr; PRINTMARK(); - npyarr = GET_TC(tc)->npyarr; - if (PyErr_Occurred()) { return 0; } - if (GET_TC(tc)->itemValue != npyarr->array) - { - Py_XDECREF(GET_TC(tc)->itemValue); - GET_TC(tc)->itemValue = NULL; - } - if (npyarr->index[npyarr->stridedim] >= npyarr->dim) { + PRINTMARK(); return 0; } + NpyArr_freeItemValue(obj, tc); + #if NPY_API_VERSION < 0x00000007 - if(PyTypeNum_ISDATETIME(npyarr->type_num)) + if(PyArray_ISDATETIME(npyarr->array)) { + PRINTMARK(); GET_TC(tc)->itemValue = PyArray_ToScalar(npyarr->dataptr, npyarr->array); } else + if (PyArray_ISNUMBER(npyarr->array)) +#else + if (PyArray_ISNUMBER(npyarr->array) || PyArray_ISDATETIME(npyarr->array)) +#endif { - GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array); + PRINTMARK(); + GET_TC(tc)->itemValue = obj; + Py_INCREF(obj); + ((PyObjectEncoder*) tc->encoder)->npyType = PyArray_TYPE(npyarr->array); + ((PyObjectEncoder*) tc->encoder)->npyValue = npyarr->dataptr; } -#else - GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array); - if(PyTypeNum_ISDATETIME(npyarr->type_num)) + else { - requestDateEncoding(GET_TC(tc)->itemValue, (PyObjectEncoder*) 
tc->encoder); + PRINTMARK(); + GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array); } -#endif npyarr->dataptr += npyarr->stride; npyarr->index[npyarr->stridedim]++; @@ -556,9 +712,8 @@ int NpyArr_iterNextItem(JSOBJ _obj, JSONTypeContext *tc) int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) { - NpyArrContext* npyarr; + NpyArrContext* npyarr = GET_TC(tc)->npyarr; PRINTMARK(); - npyarr = GET_TC(tc)->npyarr; if (PyErr_Occurred()) { @@ -568,6 +723,7 @@ int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim) { + PRINTMARK(); // innermost dimension, start retrieving item values GET_TC(tc)->iterNext = NpyArr_iterNextItem; return NpyArr_iterNextItem(_obj, tc); @@ -593,33 +749,378 @@ JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc) return GET_TC(tc)->itemValue; } -char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) +static void NpyArr_getLabel(JSOBJ obj, JSONTypeContext *tc, size_t *outLen, npy_intp idx, char** labels) { JSONObjectEncoder* enc = (JSONObjectEncoder*) tc->encoder; - NpyArrContext* npyarr; + PRINTMARK(); + *outLen = strlen(labels[idx]); + memcpy(enc->offset, labels[idx], sizeof(char)*(*outLen)); + enc->offset += *outLen; + *outLen = 0; +} + +char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) +{ + NpyArrContext* npyarr = GET_TC(tc)->npyarr; npy_intp idx; PRINTMARK(); - npyarr = GET_TC(tc)->npyarr; + if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) { idx = npyarr->index[npyarr->stridedim] - 1; - *outLen = strlen(npyarr->columnLabels[idx]); - memcpy(enc->offset, npyarr->columnLabels[idx], sizeof(char)*(*outLen)); - enc->offset += *outLen; - *outLen = 0; - return NULL; + NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); } else { idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1; - *outLen = strlen(npyarr->rowLabels[idx]); - memcpy(enc->offset, npyarr->rowLabels[idx], 
sizeof(char)*(*outLen)); - enc->offset += *outLen; - *outLen = 0; - return NULL; + NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); } + return NULL; } + +//============================================================================= +// Pandas block iteration functions +// +// Serialises a DataFrame column by column to avoid unnecessary data copies and +// more representative serialisation when dealing with mixed dtypes. +// +// Uses a dedicated NpyArrContext for each column. +//============================================================================= + + +void PdBlockPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) +{ + PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; + PRINTMARK(); + + if (blkCtxt->transpose) + { + blkCtxt->colIdx++; + } + else + { + blkCtxt->colIdx = 0; + } + + NpyArr_freeItemValue(obj, tc); +} + +int PdBlock_iterNextItem(JSOBJ obj, JSONTypeContext *tc) +{ + PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; + PRINTMARK(); + + if (blkCtxt->colIdx >= blkCtxt->ncols) + { + return 0; + } + + GET_TC(tc)->npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; + blkCtxt->colIdx++; + return NpyArr_iterNextItem(obj, tc); +} + +char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) +{ + PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; + NpyArrContext *npyarr = blkCtxt->npyCtxts[0]; + npy_intp idx; + PRINTMARK(); + + if (GET_TC(tc)->iterNext == PdBlock_iterNextItem) + { + idx = blkCtxt->colIdx - 1; + NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + } + else + { + idx = npyarr->index[npyarr->stridedim - npyarr->inc] - 1; + NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + } + return NULL; +} + +char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) +{ + PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; + NpyArrContext* npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; + npy_intp idx; + PRINTMARK(); + + if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) + { + idx = npyarr->index[npyarr->stridedim] - 
1; + NpyArr_getLabel(obj, tc, outLen, idx, npyarr->columnLabels); + } + else + { + idx = blkCtxt->colIdx; + NpyArr_getLabel(obj, tc, outLen, idx, npyarr->rowLabels); + } + return NULL; +} + +int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) +{ + PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; + NpyArrContext* npyarr; + PRINTMARK(); + + if (PyErr_Occurred()) + { + return 0; + } + + if (blkCtxt->transpose) + { + if (blkCtxt->colIdx >= blkCtxt->ncols) + { + return 0; + } + } + else + { + npyarr = blkCtxt->npyCtxts[0]; + if (npyarr->index[npyarr->stridedim] >= npyarr->dim) + { + return 0; + } + } + + ((PyObjectEncoder*) tc->encoder)->blkCtxtPassthru = blkCtxt; + GET_TC(tc)->itemValue = obj; + + return 1; +} + +void PdBlockPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc) +{ + PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; + PRINTMARK(); + + if (blkCtxt->transpose) + { + // if transposed we exhaust each column before moving to the next + GET_TC(tc)->iterNext = NpyArr_iterNextItem; + GET_TC(tc)->iterGetName = PdBlock_iterGetName_Transpose; + GET_TC(tc)->npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; + } +} + +void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) +{ + PyObject *obj, *blocks, *block, *values, *tmp; + PyArrayObject *locs; + PdBlockContext *blkCtxt; + NpyArrContext *npyarr; + Py_ssize_t i; + PyArray_Descr *dtype; + NpyIter *iter; + NpyIter_IterNextFunc *iternext; + npy_int64 **dataptr; + npy_int64 colIdx; + npy_intp idx; + + PRINTMARK(); + + i = 0; + blocks = NULL; + dtype = PyArray_DescrFromType(NPY_INT64); + obj = (PyObject *)_obj; + + GET_TC(tc)->iterGetName = GET_TC(tc)->transpose ? 
PdBlock_iterGetName_Transpose : PdBlock_iterGetName; + + blkCtxt = PyObject_Malloc(sizeof(PdBlockContext)); + if (!blkCtxt) + { + PyErr_NoMemory(); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + GET_TC(tc)->pdblock = blkCtxt; + + blkCtxt->colIdx = 0; + blkCtxt->transpose = GET_TC(tc)->transpose; + blkCtxt->ncols = get_attr_length(obj, "columns"); + + if (blkCtxt->ncols == 0) + { + blkCtxt->npyCtxts = NULL; + blkCtxt->cindices = NULL; + + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + + blkCtxt->npyCtxts = PyObject_Malloc(sizeof(NpyArrContext*) * blkCtxt->ncols); + if (!blkCtxt->npyCtxts) + { + PyErr_NoMemory(); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + for (i = 0; i < blkCtxt->ncols; i++) + { + blkCtxt->npyCtxts[i] = NULL; + } + + blkCtxt->cindices = PyObject_Malloc(sizeof(int) * blkCtxt->ncols); + if (!blkCtxt->cindices) + { + PyErr_NoMemory(); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + + blocks = get_sub_attr(obj, "_data", "blocks"); + if (!blocks) + { + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + + // force transpose so each NpyArrContext strides down its column + GET_TC(tc)->transpose = 1; + + for (i = 0; i < PyObject_Length(blocks); i++) + { + block = get_item(blocks, i); + if (!block) + { + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + + tmp = PyObject_GetAttrString(block, "values"); + if (!tmp) + { + Py_DECREF(block); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + values = PyArray_Transpose((PyArrayObject*) tmp, NULL); + Py_DECREF(tmp); + if (!values) + { + Py_DECREF(block); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + + + locs = (PyArrayObject*) get_sub_attr(block, "mgr_locs", "as_array"); + if (!locs) + { + Py_DECREF(block); + Py_DECREF(values); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + + iter = NpyIter_New(locs, NPY_ITER_READONLY, NPY_KEEPORDER, NPY_NO_CASTING, 
dtype); + if (!iter) + { + Py_DECREF(block); + Py_DECREF(values); + Py_DECREF(locs); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + iternext = NpyIter_GetIterNext(iter, NULL); + if (!iternext) + { + NpyIter_Deallocate(iter); + Py_DECREF(block); + Py_DECREF(values); + Py_DECREF(locs); + GET_TC(tc)->iterNext = NpyArr_iterNextNone; + goto BLKRET; + } + dataptr = (npy_int64 **) NpyIter_GetDataPtrArray(iter); + do + { + colIdx = **dataptr; + idx = NpyIter_GetIterIndex(iter); + + blkCtxt->cindices[colIdx] = idx; + + // Reference freed in Pdblock_iterend + Py_INCREF(values); + GET_TC(tc)->newObj = values; + + // init a dedicated context for this column + NpyArr_iterBegin(obj, tc); + npyarr = GET_TC(tc)->npyarr; + + // set the dataptr to our desired column and initialise + npyarr->dataptr += npyarr->stride * idx; + NpyArr_iterNext(obj, tc); + GET_TC(tc)->itemValue = NULL; + ((PyObjectEncoder*) tc->encoder)->npyCtxtPassthru = NULL; + + blkCtxt->npyCtxts[colIdx] = npyarr; + GET_TC(tc)->newObj = NULL; + + } while (iternext(iter)); + + NpyIter_Deallocate(iter); + Py_DECREF(block); + Py_DECREF(values); + Py_DECREF(locs); + } + GET_TC(tc)->npyarr = blkCtxt->npyCtxts[0]; + +BLKRET: + Py_XDECREF(dtype); + Py_XDECREF(blocks); +} + +void PdBlock_iterEnd(JSOBJ obj, JSONTypeContext *tc) +{ + PdBlockContext *blkCtxt; + NpyArrContext *npyarr; + int i; + PRINTMARK(); + + GET_TC(tc)->itemValue = NULL; + npyarr = GET_TC(tc)->npyarr; + + blkCtxt = GET_TC(tc)->pdblock; + + if (blkCtxt) + { + for (i = 0; i < blkCtxt->ncols; i++) + { + npyarr = blkCtxt->npyCtxts[i]; + if (npyarr) + { + if (npyarr->array) + { + Py_DECREF(npyarr->array); + npyarr->array = NULL; + } + + GET_TC(tc)->npyarr = npyarr; + NpyArr_iterEnd(obj, tc); + + blkCtxt->npyCtxts[i] = NULL; + } + } + + if (blkCtxt->npyCtxts) + { + PyObject_Free(blkCtxt->npyCtxts); + } + if (blkCtxt->cindices) + { + PyObject_Free(blkCtxt->cindices); + } + PyObject_Free(blkCtxt); + } +} + + 
//============================================================================= // Tuple iteration functions // itemValue is borrowed reference, no ref counting @@ -909,16 +1410,16 @@ int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "name"); } else - if (index == 1) - { - memcpy(GET_TC(tc)->cStr, "data", sizeof(char)*5); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); - } - else - { - PRINTMARK(); - return 0; - } + if (index == 1) + { + memcpy(GET_TC(tc)->cStr, "data", sizeof(char)*5); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); + } + else + { + PRINTMARK(); + return 0; + } GET_TC(tc)->index++; PRINTMARK(); @@ -1045,22 +1546,30 @@ int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "columns"); } else - if (index == 1) + if (index == 1) + { + memcpy(GET_TC(tc)->cStr, "index", sizeof(char)*6); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index"); + } + else + if (index == 2) + { + memcpy(GET_TC(tc)->cStr, "data", sizeof(char)*5); + if (is_simple_frame(obj)) { - memcpy(GET_TC(tc)->cStr, "index", sizeof(char)*6); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "index"); + GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); } else - if (index == 2) - { - memcpy(GET_TC(tc)->cStr, "data", sizeof(char)*5); - GET_TC(tc)->itemValue = PyObject_GetAttrString(obj, "values"); - } - else - { - PRINTMARK(); - return 0; - } + { + Py_INCREF(obj); + GET_TC(tc)->itemValue = obj; + } + } + else + { + PRINTMARK(); + return 0; + } GET_TC(tc)->index++; PRINTMARK(); @@ -1176,7 +1685,7 @@ void NpyArr_freeLabels(char** labels, npy_intp len) char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_intp num) { // NOTE this function steals a reference to labels. 
- PyArrayObject* labelsTmp = NULL; + PyObjectEncoder* pyenc = (PyObjectEncoder *) enc; PyObject* item = NULL; npy_intp i, stride, len, need_quotes; char** ret; @@ -1186,6 +1695,11 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in int type_num; PRINTMARK(); + if (!labels) + { + return 0; + } + if (PyArray_SIZE(labels) < num) { PyErr_SetString(PyExc_ValueError, "Label array sizes do not match corresponding data shape"); @@ -1213,7 +1727,7 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in stride = PyArray_STRIDE(labels, 0); dataptr = PyArray_DATA(labels); getitem = (PyArray_GetItemFunc*) PyArray_DESCR(labels)->f->getitem; - type_num = PyArray_DESCR(labels)->type_num; + type_num = PyArray_TYPE(labels); for (i = 0; i < num; i++) { @@ -1222,26 +1736,32 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in { item = PyArray_ToScalar(dataptr, labels); } - else - { - item = getitem(dataptr, labels); - } + else if(PyTypeNum_ISNUMBER(type_num)) #else - item = getitem(dataptr, labels); - if(PyTypeNum_ISDATETIME(type_num)) + if(PyTypeNum_ISDATETIME(type_num) || PyTypeNum_ISNUMBER(type_num)) +#endif { - requestDateEncoding(item, (PyObjectEncoder*) enc); + item = (PyObject *) labels; + pyenc->npyType = type_num; + pyenc->npyValue = dataptr; } -#endif - if (!item) + else { - NpyArr_freeLabels(ret, num); - ret = 0; - break; + item = getitem(dataptr, labels); + if (!item) + { + NpyArr_freeLabels(ret, num); + ret = 0; + break; + } } cLabel = JSON_EncodeObject(item, enc, labelBuffer, NPY_JSON_BUFSIZE); - Py_DECREF(item); + + if (item != (PyObject *) labels) + { + Py_DECREF(item); + } if (PyErr_Occurred() || enc->errorMsg) { @@ -1286,11 +1806,16 @@ char** NpyArr_encodeLabels(PyArrayObject* labels, JSONObjectEncoder* enc, npy_in void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) { - PyObject *obj, *exc, *toDictFunc, *defaultObj; + PyObject *obj, *exc, *toDictFunc, *tmpObj; 
TypeContext *pc; PyObjectEncoder *enc; double val; + npy_int64 value; + int base; PRINTMARK(); + + tc->prv = NULL; + if (!_obj) { tc->type = JT_INVALID; return; @@ -1299,14 +1824,26 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) obj = (PyObject*) _obj; enc = (PyObjectEncoder*) tc->encoder; - if (enc->requestType) + if (enc->npyType >= 0) { PRINTMARK(); - tc->type = enc->requestType; - tc->prv = enc->requestTypeContext; + tc->prv = &(enc->basicTypeContext); + tc->type = NpyTypeToJSONType(obj, tc, enc->npyType, enc->npyValue); + enc->npyType = -1; + return; + } - enc->requestType = 0; - enc->requestTypeContext = NULL; + if (PyBool_Check(obj)) + { + PRINTMARK(); + tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE; + return; + } + else + if (obj == Py_None) + { + PRINTMARK(); + tc->type = JT_NULL; return; } @@ -1324,13 +1861,6 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) goto ISITERABLE; } - if (PyBool_Check(obj)) - { - PRINTMARK(); - tc->type = (obj == Py_True) ? JT_TRUE : JT_FALSE; - return; - } - else if (PyLong_Check(obj)) { PRINTMARK(); @@ -1390,13 +1920,6 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) return; } else - if (obj == Py_None) - { - PRINTMARK(); - tc->type = JT_NULL; - return; - } - else if (PyObject_IsInstance(obj, type_decimal)) { PRINTMARK(); @@ -1445,19 +1968,39 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) } PRINTMARK(); - pc->PyTypeToJSON = NpyDateTimeToJSON; + pc->PyTypeToJSON = NpyDateTimeScalarToJSON; tc->type = enc->datetimeIso ? 
JT_UTF8 : JT_LONG; return; } else if (PyDelta_Check(obj)) { - npy_int64 value; + if (PyObject_HasAttrString(obj, "value")) + { + PRINTMARK(); + value = get_long_attr(obj, "value"); + } + else + { + PRINTMARK(); + value = total_seconds(obj) * 1000000000LL; // nanoseconds per second + } - if (PyObject_HasAttrString(obj, "value")) { - value = get_long_attr(obj, "value"); - } else - value = total_seconds(obj) * 1000000000LL; // nanoseconds per second + base = ((PyObjectEncoder*) tc->encoder)->datetimeUnit; + switch (base) + { + case PANDAS_FR_ns: + break; + case PANDAS_FR_us: + value /= 1000LL; + break; + case PANDAS_FR_ms: + value /= 1000000LL; + break; + case PANDAS_FR_s: + value /= 1000000000LL; + break; + } exc = PyErr_Occurred(); @@ -1467,7 +2010,8 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) goto INVALID; } - if (value == get_nat()) { + if (value == get_nat()) + { PRINTMARK(); tc->type = JT_NULL; return; @@ -1588,11 +2132,13 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) PRINTMARK(); pc->npyarr = enc->npyCtxtPassthru; tc->type = (pc->npyarr->columnLabels ? JT_OBJECT : JT_ARRAY); + pc->iterBegin = NpyArrPassThru_iterBegin; - pc->iterEnd = NpyArrPassThru_iterEnd; pc->iterNext = NpyArr_iterNext; + pc->iterEnd = NpyArrPassThru_iterEnd; pc->iterGetValue = NpyArr_iterGetValue; pc->iterGetName = NpyArr_iterGetName; + enc->npyCtxtPassthru = NULL; return; } @@ -1609,6 +2155,22 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) else if (PyObject_TypeCheck(obj, cls_dataframe)) { + if (enc->blkCtxtPassthru) + { + PRINTMARK(); + pc->pdblock = enc->blkCtxtPassthru; + tc->type = (pc->pdblock->npyCtxts[0]->columnLabels ? 
JT_OBJECT : JT_ARRAY); + + pc->iterBegin = PdBlockPassThru_iterBegin; + pc->iterEnd = PdBlockPassThru_iterEnd; + pc->iterNext = PdBlock_iterNextItem; + pc->iterGetName = PdBlock_iterGetName; + pc->iterGetValue = NpyArr_iterGetValue; + + enc->blkCtxtPassthru = NULL; + return; + } + if (enc->outputFormat == SPLIT) { PRINTMARK(); @@ -1622,12 +2184,24 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) } PRINTMARK(); - pc->newObj = PyObject_GetAttrString(obj, "values"); - pc->iterBegin = NpyArr_iterBegin; - pc->iterEnd = NpyArr_iterEnd; - pc->iterNext = NpyArr_iterNext; + if (is_simple_frame(obj)) + { + pc->iterBegin = NpyArr_iterBegin; + pc->iterEnd = NpyArr_iterEnd; + pc->iterNext = NpyArr_iterNext; + pc->iterGetName = NpyArr_iterGetName; + + pc->newObj = PyObject_GetAttrString(obj, "values"); + } + else + { + pc->iterBegin = PdBlock_iterBegin; + pc->iterEnd = PdBlock_iterEnd; + pc->iterNext = PdBlock_iterNext; + pc->iterGetName = PdBlock_iterGetName; + } pc->iterGetValue = NpyArr_iterGetValue; - pc->iterGetName = NpyArr_iterGetName; + if (enc->outputFormat == VALUES) { PRINTMARK(); @@ -1638,52 +2212,62 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) { PRINTMARK(); tc->type = JT_ARRAY; - pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(PyObject_GetAttrString(obj, "columns"), "values"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + tmpObj = PyObject_GetAttrString(obj, "columns"); + if (!tmpObj) + { + goto INVALID; + } + pc->columnLabelsLen = PyObject_Size(tmpObj); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(tmpObj, "values"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + Py_DECREF(tmpObj); if (!pc->columnLabels) { goto INVALID; } } else - if (enc->outputFormat == INDEX) + if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) { PRINTMARK(); tc->type = JT_OBJECT; - pc->rowLabelsLen = 
PyArray_DIM(pc->newObj, 0); - pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(PyObject_GetAttrString(obj, "index"), "values"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); - if (!pc->rowLabels) + tmpObj = (enc->outputFormat == INDEX ? PyObject_GetAttrString(obj, "index") : PyObject_GetAttrString(obj, "columns")); + if (!tmpObj) { goto INVALID; } - pc->columnLabelsLen = PyArray_DIM(pc->newObj, 1); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(PyObject_GetAttrString(obj, "columns"), "values"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); - if (!pc->columnLabels) + pc->rowLabelsLen = PyObject_Size(tmpObj); + pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(tmpObj, "values"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); + Py_DECREF(tmpObj); + if (!pc->rowLabels) { - NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); - pc->rowLabels = NULL; goto INVALID; } - } - else - { - PRINTMARK(); - tc->type = JT_OBJECT; - pc->rowLabelsLen = PyArray_DIM(pc->newObj, 1); - pc->rowLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(PyObject_GetAttrString(obj, "columns"), "values"), (JSONObjectEncoder*) enc, pc->rowLabelsLen); - if (!pc->rowLabels) + tmpObj = (enc->outputFormat == INDEX ? 
PyObject_GetAttrString(obj, "columns") : PyObject_GetAttrString(obj, "index")); + if (!tmpObj) { + NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); + pc->rowLabels = NULL; goto INVALID; } - pc->columnLabelsLen = PyArray_DIM(pc->newObj, 0); - pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(PyObject_GetAttrString(obj, "index"), "values"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + pc->columnLabelsLen = PyObject_Size(tmpObj); + pc->columnLabels = NpyArr_encodeLabels((PyArrayObject*) PyObject_GetAttrString(tmpObj, "values"), (JSONObjectEncoder*) enc, pc->columnLabelsLen); + Py_DECREF(tmpObj); if (!pc->columnLabels) { NpyArr_freeLabels(pc->rowLabels, pc->rowLabelsLen); pc->rowLabels = NULL; goto INVALID; } - pc->transpose = 1; + + if (enc->outputFormat == COLUMNS) + { + PRINTMARK(); + pc->transpose = 1; + } + } + else + { + goto INVALID; } return; } @@ -1778,8 +2362,8 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) if (enc->defaultHandler) { PRINTMARK(); - defaultObj = PyObject_CallFunctionObjArgs(enc->defaultHandler, obj, NULL); - if (defaultObj == NULL || PyErr_Occurred()) + tmpObj = PyObject_CallFunctionObjArgs(enc->defaultHandler, obj, NULL); + if (tmpObj == NULL || PyErr_Occurred()) { if (!PyErr_Occurred()) { @@ -1787,8 +2371,8 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) } goto INVALID; } - encode (defaultObj, enc, NULL, 0); - Py_DECREF(defaultObj); + encode (tmpObj, enc, NULL, 0); + Py_DECREF(tmpObj); goto INVALID; } @@ -1811,13 +2395,23 @@ void Object_beginTypeContext (JSOBJ _obj, JSONTypeContext *tc) void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc) { PRINTMARK(); - Py_XDECREF(GET_TC(tc)->newObj); - NpyArr_freeLabels(GET_TC(tc)->rowLabels, GET_TC(tc)->rowLabelsLen); - NpyArr_freeLabels(GET_TC(tc)->columnLabels, GET_TC(tc)->columnLabelsLen); - - PyObject_Free(GET_TC(tc)->cStr); - PyObject_Free(tc->prv); - tc->prv = NULL; + if(tc->prv) + { + Py_XDECREF(GET_TC(tc)->newObj); + 
GET_TC(tc)->newObj = NULL; + NpyArr_freeLabels(GET_TC(tc)->rowLabels, GET_TC(tc)->rowLabelsLen); + GET_TC(tc)->rowLabels = NULL; + NpyArr_freeLabels(GET_TC(tc)->columnLabels, GET_TC(tc)->columnLabelsLen); + GET_TC(tc)->columnLabels = NULL; + + PyObject_Free(GET_TC(tc)->cStr); + GET_TC(tc)->cStr = NULL; + if (tc->prv != &(((PyObjectEncoder*) tc->encoder)->basicTypeContext)) + { + PyObject_Free(tc->prv); + } + tc->prv = NULL; + } } const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc, size_t *_outLen) @@ -1919,12 +2513,23 @@ PyObject* objToJSON(PyObject* self, PyObject *args, PyObject *kwargs) JSONObjectEncoder* encoder = (JSONObjectEncoder*) &pyEncoder; pyEncoder.npyCtxtPassthru = NULL; - pyEncoder.requestType = 0; - pyEncoder.requestTypeContext = NULL; + pyEncoder.blkCtxtPassthru = NULL; + pyEncoder.npyType = -1; + pyEncoder.npyValue = NULL; pyEncoder.datetimeIso = 0; pyEncoder.datetimeUnit = PANDAS_FR_ms; pyEncoder.outputFormat = COLUMNS; pyEncoder.defaultHandler = 0; + pyEncoder.basicTypeContext.newObj = NULL; + pyEncoder.basicTypeContext.dictObj = NULL; + pyEncoder.basicTypeContext.itemValue = NULL; + pyEncoder.basicTypeContext.itemName = NULL; + pyEncoder.basicTypeContext.attrList = NULL; + pyEncoder.basicTypeContext.iterator = NULL; + pyEncoder.basicTypeContext.cStr = NULL; + pyEncoder.basicTypeContext.npyarr = NULL; + pyEncoder.basicTypeContext.rowLabels = NULL; + pyEncoder.basicTypeContext.columnLabels = NULL; PRINTMARK(); diff --git a/pandas/src/ujson/python/py_defines.h b/pandas/src/ujson/python/py_defines.h index 312914217d8e3..7a5083e131512 100644 --- a/pandas/src/ujson/python/py_defines.h +++ b/pandas/src/ujson/python/py_defines.h @@ -42,6 +42,7 @@ Numeric decoder derived from from TCL library #define PyInt_Check PyLong_Check #define PyInt_AS_LONG PyLong_AsLong #define PyInt_FromLong PyLong_FromLong +#define PyInt_FromSsize_t PyLong_FromSsize_t #define PyString_Check PyBytes_Check #define PyString_GET_SIZE PyBytes_GET_SIZE diff --git 
a/vb_suite/packers.py b/vb_suite/packers.py index 8d3d833ed9704..6c7005cb03c4f 100644 --- a/vb_suite/packers.py +++ b/vb_suite/packers.py @@ -140,8 +140,50 @@ def remove(f): setup = common_setup + """ """ packers_write_json_date_index = Benchmark("df.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) + setup = setup + setup_int_index packers_write_json = Benchmark("df.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) +packers_write_json_T = Benchmark("df.to_json(f,orient='columns')", setup, cleanup="remove(f)", start_date=start_date) + +setup = common_setup + """ +from numpy.random import randint +from collections import OrderedDict + +cols = [ + lambda i: ("{0}_timedelta".format(i), [pd.Timedelta('%d seconds' % randrange(1e6)) for _ in range(N)]), + lambda i: ("{0}_int".format(i), randint(1e8, size=N)), + lambda i: ("{0}_timestamp".format(i), [pd.Timestamp( 1418842918083256000 + randrange(1e9, 1e18, 200)) for _ in range(N)]) + ] +df_mixed = DataFrame(OrderedDict([cols[i % len(cols)](i) for i in range(C)]), + index=index) +""" +packers_write_json_mixed_delta_int_tstamp = Benchmark("df_mixed.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) + +setup = common_setup + """ +from numpy.random import randint +from collections import OrderedDict +cols = [ + lambda i: ("{0}_float".format(i), randn(N)), + lambda i: ("{0}_int".format(i), randint(1e8, size=N)) + ] +df_mixed = DataFrame(OrderedDict([cols[i % len(cols)](i) for i in range(C)]), + index=index) +""" +packers_write_json_mixed_float_int = Benchmark("df_mixed.to_json(f,orient='index')", setup, cleanup="remove(f)", start_date=start_date) +packers_write_json_mixed_float_int_T = Benchmark("df_mixed.to_json(f,orient='columns')", setup, cleanup="remove(f)", start_date=start_date) + +setup = common_setup + """ +from numpy.random import randint +from collections import OrderedDict +cols = [ + lambda i: ("{0}_float".format(i), randn(N)), + 
lambda i: ("{0}_int".format(i), randint(1e8, size=N)), + lambda i: ("{0}_str".format(i), ['%08x'%randrange(16**8) for _ in range(N)]) + ] +df_mixed = DataFrame(OrderedDict([cols[i % len(cols)](i) for i in range(C)]), + index=index) +""" +packers_write_json_mixed_float_int_str = Benchmark("df_mixed.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) #---------------------------------------------------------------------- # stata
This adds block support to the JSON serialiser, as per #9037. I also added code to directly cast and serialise numpy data, which replaces the previous use of intermediate Python objects. Large performance improvement (~25x) for mixed frames containing datetimes / timedeltas. ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- packers_write_json_mixed_delta_int_tstamp | 97.1374 | 2633.7843 | 0.0369 | packers_write_json_mixed_float_int_T | 68.7390 | 86.4150 | 0.7955 | packers_write_json_date_index | 68.9886 | 83.2930 | 0.8283 | packers_write_json_T | 61.1283 | 72.9477 | 0.8380 | packers_write_json | 60.7293 | 71.3053 | 0.8517 | packers_read_json_date_index | 157.4903 | 161.6703 | 0.9741 | packers_read_json | 157.4220 | 157.8477 | 0.9973 | packers_write_json_mixed_float_int | 98.1897 | 98.1696 | 1.0002 | packers_write_json_mixed_float_int_str | 84.0390 | 83.6937 | 1.0041 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ``` Some questions, any comments appreciated: 1. there's a little overhead to dealing with blocks so I'm avoiding using them and using `values` instead if the frame is 'simple'. I'm using the BlockManager methods `_is_single_block` and `is_mixed_dtype` for this to check for a simple frame. 2. I'm using the DataFrame `_data` attr and `mgr_locs` to get access to the block data and the block-to-column mapping. Are there any caveats to this? I know the DataFrame does some caching, but I'm not familiar enough with the details. Tested locally on Python 2.7 for 32 & 64 bit linux and 3.3. on 64 bit linux. JSON tests run through valgrind. Would appreciate if someone could give it a bash on Windows before merging. 
@cpcloud I also fixed a ref leak and added support for `date_unit` in the #9028 code.
https://api.github.com/repos/pandas-dev/pandas/pulls/9130
2014-12-22T16:13:48Z
2014-12-24T15:53:17Z
2014-12-24T15:53:17Z
2014-12-24T18:27:30Z
ENH: Expanduser in to_file methods GH9066
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 7433adaa4b738..32b79b4c8031f 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -40,6 +40,8 @@ Enhancements .. _whatsnew_0160.enhancements: +- Paths beginning with ~ will now be expanded to begin with the user's home directory (:issue:`9066`) + Performance ~~~~~~~~~~~ diff --git a/pandas/io/common.py b/pandas/io/common.py index daf441f2cdb8c..aafd551d82b05 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -1,10 +1,11 @@ """Common IO api utilities""" import sys +import os import zipfile from contextlib import contextmanager, closing -from pandas.compat import StringIO +from pandas.compat import StringIO, string_types from pandas import compat @@ -99,6 +100,24 @@ def maybe_read_encoded_stream(reader, encoding=None): return reader, encoding +def _expand_user(filepath_or_buffer): + """Return the argument with an initial component of ~ or ~user + replaced by that user's home directory. 
+ + Parameters + ---------- + filepath_or_buffer : object to be converted if possible + + Returns + ------- + expanded_filepath_or_buffer : an expanded filepath or the + input if not expandable + """ + if isinstance(filepath_or_buffer, string_types): + return os.path.expanduser(filepath_or_buffer) + return filepath_or_buffer + + def get_filepath_or_buffer(filepath_or_buffer, encoding=None): """ If the filepath_or_buffer is a url, translate and return the buffer @@ -138,7 +157,8 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None): filepath_or_buffer = StringIO(k.get_contents_as_string()) return filepath_or_buffer, None - return filepath_or_buffer, None + + return _expand_user(filepath_or_buffer), None def file_path_to_url(path): diff --git a/pandas/io/tests/test_common.py b/pandas/io/tests/test_common.py new file mode 100644 index 0000000000000..fe163cc13c5da --- /dev/null +++ b/pandas/io/tests/test_common.py @@ -0,0 +1,40 @@ +""" + Tests for the pandas.io.common functionalities +""" +from pandas.compat import StringIO +import os + +import pandas.util.testing as tm + +from pandas.io import common + + +class TestCommonIOCapabilities(tm.TestCase): + + def test_expand_user(self): + filename = '~/sometest' + expanded_name = common._expand_user(filename) + + self.assertNotEqual(expanded_name, filename) + self.assertNotIn('~', expanded_name) + self.assertEqual(os.path.expanduser(filename), expanded_name) + + def test_expand_user_normal_path(self): + filename = '/somefolder/sometest' + expanded_name = common._expand_user(filename) + + self.assertEqual(expanded_name, filename) + self.assertNotIn('~', expanded_name) + self.assertEqual(os.path.expanduser(filename), expanded_name) + + def test_get_filepath_or_buffer_with_path(self): + filename = '~/sometest' + filepath_or_buffer, _ = common.get_filepath_or_buffer(filename) + self.assertNotEqual(filepath_or_buffer, filename) + self.assertNotIn('~', filepath_or_buffer) + 
self.assertEqual(os.path.expanduser(filename), filepath_or_buffer) + + def test_get_filepath_or_buffer_with_buffer(self): + input_buffer = StringIO() + filepath_or_buffer, _ = common.get_filepath_or_buffer(input_buffer) + self.assertEqual(filepath_or_buffer, input_buffer)
closes #9066 First time contributor, tried to follow all rules as good as possible. If something is missing please let me know. As specified in task 9066 tilde in a pathname get resolved into a absolute path. In case the object is a buffer the buffer will be returned. I did not add additional tests, since I could not find any module explicitly testing pandas/io/common.py if that is necessary please let me know how to do that. The current tests are passing on my system and seem to cover the method below. Best Regards,
https://api.github.com/repos/pandas-dev/pandas/pulls/9128
2014-12-22T11:57:36Z
2014-12-24T19:11:56Z
2014-12-24T19:11:56Z
2014-12-25T07:33:06Z
PERF: fix slow s.loc[[0]]
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 7433adaa4b738..c34218b434b1b 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -45,6 +45,7 @@ Performance .. _whatsnew_0160.performance: +- Fixed a severe performance regression for ``.loc`` indexing with an array or list (:issue:9126:). Bug Fixes ~~~~~~~~~ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c9322a9371309..7202ed64e1c9c 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1275,9 +1275,8 @@ def _has_valid_type(self, key, axis): if isinstance(key, tuple) and isinstance(ax, MultiIndex): return True - # require at least 1 element in the index - idx = _ensure_index(key) - if len(idx) and not idx.isin(ax).any(): + # TODO: don't check the entire key unless necessary + if len(key) and np.all(ax.get_indexer_for(key) < 0): raise KeyError("None of [%s] are in the [%s]" % (key, self.obj._get_axis_name(axis))) diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py index 320f261050e07..f05ebc47d2e25 100644 --- a/vb_suite/indexing.py +++ b/vb_suite/indexing.py @@ -209,3 +209,30 @@ frame_iloc_big = Benchmark('df.iloc[:100,0]', setup, start_date=datetime(2013, 1, 1)) + +#---------------------------------------------------------------------- +# basic tests for [], .loc[], .iloc[] and .ix[] + +setup = common_setup + """ +s = Series(np.random.rand(1000000)) +""" + +series_getitem_scalar = Benchmark("s[800000]", setup) +series_getitem_slice = Benchmark("s[:800000]", setup) +series_getitem_list_like = Benchmark("s[[800000]]", setup) +series_getitem_array = Benchmark("s[np.arange(10000)]", setup) + +series_loc_scalar = Benchmark("s.loc[800000]", setup) +series_loc_slice = Benchmark("s.loc[:800000]", setup) +series_loc_list_like = Benchmark("s.loc[[800000]]", setup) +series_loc_array = Benchmark("s.loc[np.arange(10000)]", setup) + +series_iloc_scalar = Benchmark("s.loc[800000]", setup) +series_iloc_slice = 
Benchmark("s.loc[:800000]", setup) +series_iloc_list_like = Benchmark("s.loc[[800000]]", setup) +series_iloc_array = Benchmark("s.loc[np.arange(10000)]", setup) + +series_ix_scalar = Benchmark("s.ix[800000]", setup) +series_ix_slice = Benchmark("s.ix[:800000]", setup) +series_ix_list_like = Benchmark("s.ix[[800000]]", setup) +series_ix_array = Benchmark("s.ix[np.arange(10000)]", setup)
Fixes #9126 ``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- series_loc_list_like | 0.2840 | 125.0146 | 0.0023 | series_loc_array | 0.9947 | 125.5813 | 0.0079 | series_loc_scalar | 0.0430 | 0.0424 | 1.0150 | series_loc_slice | 0.0647 | 0.0606 | 1.0668 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- ``` Whee! I also wrote this fix for `IntervalIndex` (#8707); this change pulls it out separately. I believe it changes the complexity of the lookup check from O(n*m) for length n index and length m key to O(n+m).
https://api.github.com/repos/pandas-dev/pandas/pulls/9127
2014-12-22T07:49:35Z
2014-12-23T01:24:17Z
2014-12-23T01:24:17Z
2014-12-24T01:57:20Z
PERF: use labels to find duplicates in multi-index
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index d572835a76218..531955c765deb 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -46,6 +46,7 @@ Performance .. _whatsnew_0160.performance: - Fixed a severe performance regression for ``.loc`` indexing with an array or list (:issue:9126:). +- Performance improvements in ``MultiIndex.duplicated`` by working with labels instead of values. Bug Fixes ~~~~~~~~~ diff --git a/pandas/core/index.py b/pandas/core/index.py index 97890299657cf..1b4a691851a8a 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -3225,14 +3225,17 @@ def _has_complex_internals(self): @cache_readonly def is_unique(self): - from pandas.hashtable import Int64HashTable + return not self.duplicated().any() + + @Appender(_shared_docs['duplicated'] % _index_doc_kwargs) + def duplicated(self, take_last=False): from pandas.core.groupby import get_flat_ids + from pandas.hashtable import duplicated_int64 shape = map(len, self.levels) ids = get_flat_ids(self.labels, shape, False) - table = Int64HashTable(min(1 << 20, len(ids))) - return len(table.unique(ids)) == len(self) + return duplicated_int64(ids, take_last) def get_value(self, series, key): # somewhat broken encapsulation diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx index cf9428d5862ec..26fba1a4b9615 100644 --- a/pandas/hashtable.pyx +++ b/pandas/hashtable.pyx @@ -1062,3 +1062,27 @@ def mode_int64(ndarray[int64_t] values): kh_destroy_int64(table) return modes[:j+1] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def duplicated_int64(ndarray[int64_t, ndim=1] values, int take_last): + cdef: + int ret = 0 + Py_ssize_t i, n = len(values) + kh_int64_t * table = kh_init_int64() + ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool') + + kh_resize_int64(table, min(1 << 20, n)) + + if take_last: + for i from n > i >=0: + kh_put_int64(table, values[i], &ret) + out[i] = ret == 0 + else: + for i 
from 0 <= i < n: + kh_put_int64(table, values[i], &ret) + out[i] = ret == 0 + + kh_destroy_int64(table) + return out diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 5c581b548e583..c8c46309eb016 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -3514,6 +3514,16 @@ def check(nlevels, with_nulls): check(8, False) check(8, True) + n, k = 200, 5000 + levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] + labels = [np.random.choice(n, k * n) for lev in levels] + mi = MultiIndex(levels=levels, labels=labels) + + for take_last in [False, True]: + left = mi.duplicated(take_last=take_last) + right = pd.lib.duplicated(mi.values, take_last=take_last) + tm.assert_array_equal(left, right) + def test_tolist(self): result = self.index.tolist() exp = list(self.index.values) diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py index a8cc0e9ec5f89..08ad96d1d0427 100644 --- a/vb_suite/index_object.py +++ b/vb_suite/index_object.py @@ -138,6 +138,17 @@ name='multiindex_with_datetime_level_sliced', start_date=datetime(2014, 10, 11)) +# multi-index duplicated +setup = common_setup + """ +n, k = 200, 5000 +levels = [np.arange(n), tm.makeStringIndex(n).values, 1000 + np.arange(n)] +labels = [np.random.choice(n, k * n) for lev in levels] +mi = MultiIndex(levels=levels, labels=labels) +""" + +multiindex_duplicated = Benchmark('mi.duplicated()', setup, + name='multiindex_duplicated') + #---------------------------------------------------------------------- # repr
``` ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- multiindex_duplicated | 139.2883 | 469.6666 | 0.2966 | ------------------------------------------------------------------------------- Test name | head[ms] | base[ms] | ratio | ------------------------------------------------------------------------------- Ratio < 1.0 means the target commit is faster then the baseline. Seed used: 1234 Target [2b99b80] : use labels to identify duplicates in multi-index Base [0fe43a6] : Merge pull request #9120 from minrk/nbviewer-link fix an nbviewer link in tutorials ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9125
2014-12-21T23:47:56Z
2014-12-23T10:57:56Z
null
2014-12-23T12:59:59Z
Use tz.gettz() instead of zoneinfo.gettz()
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index cc044bc35a707..785f35083a24e 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -26,6 +26,26 @@ New features Other enhancements ^^^^^^^^^^^^^^^^^^ +- Add helper functions to check for OS running on + + from pandas.compat import is_platform_windows + if is_platform_window(): + pass + +- Use dateutil.tz.gettz() after upstream recommandations + + python-dateutil provides two implementations of gettz(). + "dateutil.tz.gettz()" tries to load zone information from system provided data and fals back to + an included tarball, where as "dateutil.zoneinfo.gettz() loads directly from the tarball. Using the later on systems + which aren't providing included zone informations (e.g. Fedora or Gentoo) breaks (#9059, #8639, #9663 and #10121) + As stated by upstream in https://github.com/dateutil/dateutil/issues/11#issuecomment-70769019 only the former should be + used by library consumers. + + For compatibility in pandas following code should be used + + from pandas.tslib import _dateutil_gettz as gettz + tz = gettz('Europe/Brussels') + .. _whatsnew_0170.api: Backwards incompatible API changes @@ -66,5 +86,4 @@ Bug Fixes - Bug in ``Timestamp``'s' ``microsecond``, ``quarter``, ``dayofyear``, ``week`` and ``daysinmonth`` properties return ``np.int`` type, not built-in ``int``. (:issue:`10050`) - Bug in ``NaT`` raises ``AttributeError`` when accessing to ``daysinmonth``, ``dayofweek`` properties. (:issue:`10096`) - - +- Bug in dateutil.tz.gettz() vs. 
dateutil.zoneinfo.gettz() usage which creates problems on systems solely rely on systems timezone data (:issue:`9123`, :issue:`9059`, :issue:`8639`, :issue:`9663`, :issue:`10121`) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 6be0facf2bffc..2a273629544cb 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -26,6 +26,7 @@ Other items: * OrderedDefaultDict +* platform checker """ # pylint disable=W0611 import functools @@ -754,3 +755,16 @@ def __missing__(self, key): def __reduce__(self): # optional, for pickle support args = self.default_factory if self.default_factory else tuple() return type(self), args, None, None, list(self.items()) + + +# https://github.com/pydata/pandas/pull/9123 +def is_platform_windows(): + return sys.platform == 'win32' or sys.platform == 'cygwin' + + +def is_platform_linux(): + return sys.platform == 'linux2' + + +def is_platform_mac(): + return sys.platform == 'darwin' diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 22f8aee1e0a4e..925cfa875196c 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -5398,7 +5398,8 @@ def test_getitem_setitem_datetime_tz_pytz(self): def test_getitem_setitem_datetime_tz_dateutil(self): tm._skip_if_no_dateutil(); from dateutil.tz import tzutc - from dateutil.zoneinfo import gettz + from pandas.tslib import _dateutil_gettz as gettz + tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil from pandas import date_range diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py index 841d81c15b4e9..69b1d84670d45 100644 --- a/pandas/tseries/tests/test_daterange.py +++ b/pandas/tseries/tests/test_daterange.py @@ -441,7 +441,7 @@ def test_month_range_union_tz_pytz(self): def test_month_range_union_tz_dateutil(self): _skip_if_windows_python_3() tm._skip_if_no_dateutil() - from dateutil.zoneinfo import gettz as timezone + from pandas.tslib import 
_dateutil_gettz as timezone tz = timezone('US/Eastern') early_start = datetime(2011, 1, 1) diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 70c706fc66398..0218af63ca7d6 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -101,15 +101,15 @@ def test_timestamp_tz_arg(self): pytz.timezone('Europe/Brussels').normalize(p).tzinfo) def test_timestamp_tz_arg_dateutil(self): - import dateutil + from pandas.tslib import _dateutil_gettz as gettz from pandas.tslib import maybe_get_tz p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels')) - self.assertEqual(p.tz, dateutil.zoneinfo.gettz('Europe/Brussels')) + self.assertEqual(p.tz, gettz('Europe/Brussels')) def test_timestamp_tz_arg_dateutil_from_string(self): - import dateutil + from pandas.tslib import _dateutil_gettz as gettz p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels') - self.assertEqual(p.tz, dateutil.zoneinfo.gettz('Europe/Brussels')) + self.assertEqual(p.tz, gettz('Europe/Brussels')) def test_timestamp_nat_tz(self): t = Period('NaT', freq='M').to_timestamp() diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 0c4961d80a5f4..6c20b02324688 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -417,9 +417,9 @@ def test_timestamp_to_datetime_explicit_pytz(self): def test_timestamp_to_datetime_explicit_dateutil(self): _skip_if_windows_python_3() tm._skip_if_no_dateutil() - import dateutil + from pandas.tslib import _dateutil_gettz as gettz rng = date_range('20090415', '20090519', - tz=dateutil.zoneinfo.gettz('US/Eastern')) + tz=gettz('US/Eastern')) stamp = rng[0] dtval = stamp.to_pydatetime() @@ -1807,7 +1807,7 @@ def test_append_concat_tz_explicit_pytz(self): def test_append_concat_tz_dateutil(self): # GH 2938 tm._skip_if_no_dateutil() - from dateutil.zoneinfo import gettz as 
timezone + from pandas.tslib import _dateutil_gettz as timezone rng = date_range('5/8/2012 1:45', periods=10, freq='5T', tz='dateutil/US/Eastern') diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 66f14bfb0346a..2b45718d1f9ea 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -41,7 +41,11 @@ from datetime import time as datetime_time # dateutil compat from dateutil.tz import (tzoffset, tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile, tzutc as _dateutil_tzutc) -from dateutil.zoneinfo import gettz as _dateutil_gettz +from pandas.compat import is_platform_windows +if is_platform_windows(): + from dateutil.zoneinfo import gettz as _dateutil_gettz +else: + from dateutil.tz import gettz as _dateutil_gettz from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo from pandas.compat import parse_date, string_types, PY3, iteritems
zoneinfo.gettz() seems to have problems (1 & 2) on system which do not install the zoninfo tarball (e.g. Debian, Gentoo and Fedora) but rely on the system zoneinfo files. This results in test failures (3 & 4) tz.gettz() doesn't suffer from this problem. 1 https://github.com/dateutil/dateutil/issues/8 2 https://github.com/dateutil/dateutil/issues/11 closes https://github.com/pydata/pandas/issues/9059 closes https://github.com/pydata/pandas/issues/8639 closes #10121 Signed-off-by: Justin Lecher jlec@gentoo.org
https://api.github.com/repos/pandas-dev/pandas/pulls/9123
2014-12-21T18:45:24Z
2015-05-15T20:26:37Z
2015-05-15T20:26:36Z
2015-06-02T19:26:59Z
Remove bar line
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 6e93535451fbc..b17f8195d339e 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -49,6 +49,10 @@ Backwards incompatible API changes - ``TimedeltaIndex.freqstr`` now output the same string format as ``DatetimeIndex``. (:issue:`9116`) +- Bar and horizontal bar plots no longer add a dashed line along the info axis. +The prior style can be achieved with matplotlib's `axhline` or `axvline` +methods (:issue:`9088`). + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 2d7976d567108..1b815c519ea59 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1895,8 +1895,6 @@ def _post_plot_logic(self): ax.set_xlim((s_edge, e_edge)) ax.set_xticks(self.tick_pos) ax.set_xticklabels(str_index) - if not self.log: # GH3254+ - ax.axhline(0, color='k', linestyle='--') if name is not None and self.use_index: ax.set_xlabel(name) elif self.kind == 'barh': @@ -1904,7 +1902,6 @@ def _post_plot_logic(self): ax.set_ylim((s_edge, e_edge)) ax.set_yticks(self.tick_pos) ax.set_yticklabels(str_index) - ax.axvline(0, color='k', linestyle='--') if name is not None and self.use_index: ax.set_ylabel(name) else:
Closes https://github.com/pydata/pandas/issues/9088 Closes https://github.com/pydata/pandas/issues/7128 Votes on whether we should 1. Remove the line entirely 2. Add a parameter to `.plot` control whether the line should be added. I vote for 1, but I can go either way.
https://api.github.com/repos/pandas-dev/pandas/pulls/9122
2014-12-21T15:44:10Z
2015-01-19T13:21:48Z
2015-01-19T13:21:47Z
2017-04-05T02:06:12Z
fix an nbviewer link in tutorials
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index 2c913f8911066..7c1d1e345d273 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -26,7 +26,7 @@ repository <http://github.com/jvns/pandas-cookbook>`_. To run the examples in th clone the GitHub repository and get IPython Notebook running. See `How to use this cookbook <https://github.com/jvns/pandas-cookbook#how-to-use-this-cookbook>`_. -- `A quick tour of the IPython Notebook: <http://nbviewer.ipython.org/github/jvns/pandas-c|%2055ookbook/blob/v0.1/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_ +- `A quick tour of the IPython Notebook: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_ Shows off IPython's awesome tab completion and magic functions. - `Chapter 1: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_ Reading your data into pandas is pretty much the easiest thing. Even
some escape characters snuck in there that cause the URL to be invalid
https://api.github.com/repos/pandas-dev/pandas/pulls/9120
2014-12-21T01:03:19Z
2014-12-21T07:26:53Z
2014-12-21T07:26:53Z
2014-12-21T07:26:53Z
ENH: get_dummies can accept value and na_value kw
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index e8b398aec4b74..adb2f8673e92a 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -152,6 +152,14 @@ Enhancements - ``Timedelta`` will now accept ``nanoseconds`` keyword in constructor (:issue:`9273`) - SQL code now safely escapes table and column names (:issue:`8986`) +- ``pd.get_dummies`` can accept ``pos_value`` and ``neg_value`` keywords for filling with positive and negative valriables (:issue:`8028`) + + .. ipython:: python + + s = pd.Series(['X', 'Y', 'X', 'Y']) + pd.get_dummies(s, pos_value='YES', neg_value='NO') + + - Added auto-complete for ``Series.str.<tab>``, ``Series.dt.<tab>`` and ``Series.cat.<tab>`` (:issue:`9322`) - Added ``StringMethods.isalnum()``, ``isalpha()``, ``isdigit()``, ``isspace()``, ``islower()``, ``isupper()``, ``istitle()`` which behave as the same as standard ``str`` (:issue:`9282`) diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index ba227f4e3d3d1..92e97db4f76b2 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -16,6 +16,7 @@ import pandas.core.common as com import pandas.algos as algos +import pandas.lib as lib from pandas.core.index import MultiIndex @@ -975,7 +976,7 @@ def convert_dummies(data, cat_variables, prefix_sep='_'): def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, - columns=None): + columns=None, pos_value=None, neg_value=None): """ Convert categorical variable into dummy/indicator variables @@ -996,6 +997,10 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object` or `category` dtype will be converted. 
+ pos_value : scalar value or dict + replacement value corresponding to positive variables (dummy variables) + neg_value : list-like, default None + replacement value corresponding to negative variables Returns ------- @@ -1088,6 +1093,19 @@ def check_len(item, name): result = concat(with_dummies, axis=1) else: result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na) + + if pos_value is not None: + if isinstance(pos_value, dict) or lib.isscalar(pos_value): + result = result.replace(to_replace=1, value=pos_value) + else: + raise ValueError('invalid pos_value, use scalar or dict: %s' % type(pos_value)) + + if neg_value is not None: + if isinstance(neg_value, dict) or lib.isscalar(neg_value): + result = result.replace(to_replace=0, value=neg_value) + else: + raise ValueError('invalid neg_value, use scalar or dict: %s' % type(neg_value)) + return result diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index 933cfe54bac27..b1f9ff898c190 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -322,6 +322,138 @@ def test_dataframe_dummies_with_categorical(self): 'cat_x', 'cat_y']] assert_frame_equal(result, expected) + def test_series_fill_scalar(self): + s_list = ['a', 'b', 'c', 'a'] + s_series = Series(s_list) + s_series_index = Series(s_list, index=['A', 'B', 'C', 'D']) + + expected = DataFrame({'a': ['Yes', 0.0, 0.0, 'Yes'], + 'b': [0.0, 'Yes', 0.0, 0.0], + 'c': [0.0, 0.0, 'Yes', 0.0]}) + result = get_dummies(s_series, pos_value='Yes') + + assert_frame_equal(result, expected) + + expected.index = list('ABCD') + result = get_dummies(s_series_index, pos_value='Yes') + assert_frame_equal(result, expected) + + expected = DataFrame({'a': ['Yes', 'No', 'No', 'Yes'], + 'b': ['No', 'Yes', 'No', 'No'], + 'c': ['No', 'No', 'Yes', 'No']}) + result = get_dummies(s_series, pos_value='Yes', neg_value='No') + assert_frame_equal(result, expected) + + expected.index = list('ABCD') + result = get_dummies(s_series_index, 
pos_value='Yes', neg_value='No') + assert_frame_equal(result, expected) + + def test_dataframe_fill_scalar(self): + df = DataFrame({'A': ['X', 'Y', 'X'], 'B':['x', 'x', 'y']}) + + expected = DataFrame({'A_X': ['Yes', 0, 'Yes'], + 'A_Y': [0, 'Yes', 0], + 'B_x': ['Yes', 'Yes', 0], + 'B_y': [0, 0, 'Yes']}) + result = get_dummies(df, pos_value='Yes') + assert_frame_equal(result, expected) + + expected = DataFrame({'A_X': ['Yes', 'No', 'Yes'], + 'A_Y': ['No', 'Yes', 'No'], + 'B_x': ['Yes', 'Yes', 'No'], + 'B_y': ['No', 'No', 'Yes']}) + result = get_dummies(df, pos_value='Yes', neg_value='No') + assert_frame_equal(result, expected) + + def test_series_fill_dict(self): + s_list = ['a', 'b', 'c', 'a'] + s_series = Series(['a', 'b', 'c', 'a']) + s_series_index = Series(s_list, index=['A', 'B', 'C', 'D']) + + d_true = {'a': 'Y_a', 'b': 'Y_b', 'c': 'Y_c'} + d_na = {'a': 'N_a', 'b': 'N_b', 'c': 'N_c'} + expected = DataFrame({'a': [1.0, 'N_a', 'N_a', 1.0], + 'b': ['N_b', 1.0, 'N_b', 'N_b'], + 'c': ['N_c', 'N_c', 1.0, 'N_c']}) + result = get_dummies(s_series, neg_value=d_na) + assert_frame_equal(result, expected) + + expected.index = list('ABCD') + result = get_dummies(s_series_index, neg_value=d_na) + assert_frame_equal(result, expected) + + expected = DataFrame({'a': ['Y_a', 'N_a', 'N_a', 'Y_a'], + 'b': ['N_b', 'Y_b', 'N_b', 'N_b'], + 'c': ['N_c', 'N_c', 'Y_c', 'N_c']}) + result = get_dummies(s_series, pos_value=d_true, neg_value=d_na) + assert_frame_equal(result, expected) + + expected.index = list('ABCD') + result = get_dummies(s_series_index, pos_value=d_true, neg_value=d_na) + assert_frame_equal(result, expected) + + def test_dataframe_fill_dict(self): + df = DataFrame({'A': ['X', 'Y', 'X'], 'B':['x', 'x', 'y']}) + + expected = DataFrame({'A_X': ['Y_AX', 0, 'Y_AX'], + 'A_Y': [0, 'Y_AY', 0], + 'B_x': ['Y_Bx', 'Y_Bx', 0], + 'B_y': [0, 0, 'Y_By']}) + result = get_dummies(df, pos_value={'A_X': 'Y_AX', 'A_Y': 'Y_AY', + 'B_x': 'Y_Bx', 'B_y': 'Y_By'}) + assert_frame_equal(result, 
expected) + + expected = DataFrame({'A_X': [1, 'N_AX', 1], + 'A_Y': ['N_AY', 1, 'N_AY'], + 'B_x': [1, 1, 'N_Bx'], + 'B_y': ['N_By', 'N_By', 1]}) + result = get_dummies(df, neg_value={'A_X': 'N_AX', 'A_Y': 'N_AY', + 'B_x': 'N_Bx', 'B_y': 'N_By'}) + assert_frame_equal(result, expected) + + expected = DataFrame({'A_X': ['Y_AX', 'N_AX', 'Y_AX'], + 'A_Y': ['N_AY', 'Y_AY', 'N_AY'], + 'B_x': ['Y_Bx', 'Y_Bx', 'N_Bx'], + 'B_y': ['N_By', 'N_By', 'Y_By']}) + result = get_dummies(df, pos_value={'A_X': 'Y_AX', 'A_Y': 'Y_AY', + 'B_x': 'Y_Bx', 'B_y': 'Y_By'}, + neg_value={'A_X': 'N_AX', 'A_Y': 'N_AY', + 'B_x': 'N_Bx', 'B_y': 'N_By'}) + assert_frame_equal(result, expected) + + def test_series_dummies_fill_list(self): + # must be all errors + s = Series(['a', 'b', 'c', 'a']) + + s_list = [1, 1, 0, 1] + s_series = Series([0, 1, 1, 0]) + v_msg = "invalid pos_value, use scalar or dict" + na_msg = "invalid neg_value, use scalar or dict" + with tm.assertRaisesRegexp(ValueError, v_msg): + result = get_dummies(s, pos_value=s_list) + with tm.assertRaisesRegexp(ValueError, v_msg): + result = get_dummies(s, pos_value=s_series) + with tm.assertRaisesRegexp(ValueError, na_msg): + result = get_dummies(s, neg_value=s_list) + with tm.assertRaisesRegexp(ValueError, na_msg): + result = get_dummies(s, neg_value=s_series) + + def test_dataframe_dummies_fill_list(self): + df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}) + + s_list = ['a', 'b', 'c', 'a'] + s_series = Series(['a', 'b', 'c', 'a']) + v_msg = "invalid pos_value, use scalar or dict" + na_msg = "invalid neg_value, use scalar or dict" + with tm.assertRaisesRegexp(ValueError, v_msg): + result = get_dummies(df, pos_value=s_list) + with tm.assertRaisesRegexp(ValueError, v_msg): + result = get_dummies(df, pos_value=s_series) + with tm.assertRaisesRegexp(ValueError, na_msg): + result = get_dummies(df, neg_value=s_list) + with tm.assertRaisesRegexp(ValueError, na_msg): + result = get_dummies(df, neg_value=s_series) + class 
TestConvertDummies(tm.TestCase): def test_convert_dummies(self):
Closes #8028. It is still under work, but want to clarify a point. Currently it works as: ``` s = pd.Series(['A', 'B', 'A', 'C']) s #0 A #1 B #2 A #3 C # dtype: object pd.get_dummies(s) # A B C #0 1 0 0 #1 0 1 0 #2 1 0 0 #3 0 0 1 pd.get_dummies(s, value='YES', na_value='NO') # A B C #0 YES NO NO #1 NO YES NO #2 YES NO NO #3 NO NO YES pd.get_dummies(s, value={'A': 'X', 'B': 'Y', 'C': 'Z'}) # A B C #0 X 0 0 #1 0 Y 0 #2 X 0 0 #3 0 0 Z ``` And what I'm concerning is cases for list-likes. ``` # This is natural and compat with replace, because the number of the hole will be the same number as list (one hole in each column). pd.get_dummies(s, value=['A', 'B', 'A', 'C']) # A B C #0 A 0 0 #1 0 B 0 #2 A 0 0 #3 0 0 C # When we specify na_value and make it compat with replace, behavior should be: pd.get_dummies(s, na_value=['A', 'B', 'A', 'C']) # A B C 0 1 A B 1 A 1 C 2 1 A B 3 A C 1 # Or it needs different broadcasting rule? pd.get_dummies(s, na_value=['A', 'B', 'A', 'C']) # A B C #0 1 A A #1 B 1 B #2 1 A A #3 C C 1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9117
2014-12-20T21:55:54Z
2015-05-11T14:55:18Z
null
2023-05-11T01:12:46Z
API: Datetime-like indexes `summary` should output the same format
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 9b8382bd3f4c4..ee5ed49dfb5d1 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -29,6 +29,10 @@ Backwards incompatible API changes - ``Index.duplicated`` now returns `np.array(dtype=bool)` rather than `Index(dtype=object)` containing `bool` values. (:issue:`8875`) +- ``DatetimeIndex``, ``PeriodIndex`` and ``TimedeltaIndex.summary`` now output the same format. (:issue:`9116`) +- ``TimedeltaIndex.freqstr`` now output the same string format as ``DatetimeIndex``. (:issue:`9116`) + + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index b223d2bfd9ebc..266e69cf3484b 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -69,6 +69,13 @@ def __contains__(self, key): except (KeyError, TypeError): return False + @property + def freqstr(self): + """ return the frequency object as a string if its set, otherwise None """ + if self.freq is None: + return None + return self.freq.freqstr + @cache_readonly def inferred_freq(self): try: @@ -459,3 +466,23 @@ def repeat(self, repeats, axis=None): """ return self._simple_new(self.values.repeat(repeats), name=self.name) + + def summary(self, name=None): + """ + return a summarized representation + """ + formatter = self._formatter_func + if len(self) > 0: + index_summary = ', %s to %s' % (formatter(self[0]), + formatter(self[-1])) + else: + index_summary = '' + + if name is None: + name = type(self).__name__ + result = '%s: %s entries%s' % (com.pprint_thing(name), + len(self), index_summary) + if self.freq: + result += '\nFreq: %s' % self.freqstr + + return result diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 5e621761d63b6..65414fe39d18c 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -682,22 +682,6 @@ def _format_native_types(self, na_rep=u('NaT'), def to_datetime(self, dayfirst=False): return self.copy() - def 
summary(self, name=None): - if len(self) > 0: - index_summary = ', %s to %s' % (com.pprint_thing(self[0]), - com.pprint_thing(self[-1])) - else: - index_summary = '' - - if name is None: - name = type(self).__name__ - result = '%s: %s entries%s' % (com.pprint_thing(name), - len(self), index_summary) - if self.freq: - result += '\nFreq: %s' % self.freqstr - - return result - def _format_footer(self): tagline = 'Length: %d, Freq: %s, Timezone: %s' return tagline % (len(self), self.freqstr, self.tz) @@ -1392,13 +1376,6 @@ def _set_freq(self, value): self.offset = value freq = property(fget=_get_freq, fset=_set_freq, doc="get/set the frequncy of the Index") - @property - def freqstr(self): - """ return the frequency object as a string if its set, otherwise None """ - if self.freq is None: - return None - return self.offset.freqstr - year = _field_accessor('year', 'Y', "The year of the datetime") month = _field_accessor('month', 'M', "The month as January=1, December=12") day = _field_accessor('day', 'D', "The days of the datetime") diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index 5a946acac2baa..097ccef9e462b 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -410,23 +410,6 @@ def f(x): result = result.astype('int64') return result - def summary(self, name=None): - formatter = self._formatter_func - if len(self) > 0: - index_summary = ', %s to %s' % (formatter(self[0]), - formatter(self[-1])) - else: - index_summary = '' - - if name is None: - name = type(self).__name__ - result = '%s: %s entries%s' % (com.pprint_thing(name), - len(self), index_summary) - if self.freq: - result += '\nFreq: %s' % self.freqstr - - return result - def to_pytimedelta(self): """ Return TimedeltaIndex as object ndarray of datetime.timedelta objects @@ -796,13 +779,6 @@ def __getitem__(self, key): return self._simple_new(result, self.name) - @property - def freqstr(self): - """ return the frequency object as a string if its set, otherwise None """ - if self.freq 
is None: - return None - return self.freq - def searchsorted(self, key, side='left'): if isinstance(key, (np.ndarray, Index)): key = np.array(key, dtype=_TD_DTYPE, copy=False) diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py index d3393feb2ca33..1ecd723ed8d56 100644 --- a/pandas/tseries/tests/test_base.py +++ b/pandas/tseries/tests/test_base.py @@ -143,6 +143,34 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_summary(self): + # GH9116 + idx1 = DatetimeIndex([], freq='D') + idx2 = DatetimeIndex(['2011-01-01'], freq='D') + idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], + freq='H', tz='Asia/Tokyo') + idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], + tz='US/Eastern') + + exp1 = """DatetimeIndex: 0 entries +Freq: D""" + exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01 +Freq: D""" + exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02 +Freq: D""" + exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03 +Freq: D""" + exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00 +Freq: H""" + exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], + [exp1, exp2, exp3, exp4, exp5, exp6]): + result = idx.summary() + self.assertEqual(result, expected) + def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', @@ -336,16 +364,16 @@ def test_representation(self): exp1 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> -Length: 0, Freq: <Day>""" +Length: 0, Freq: D""" exp2 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> ['1 days'] -Length: 1, Freq: <Day>""" +Length: 1, Freq: D""" exp3 
= """<class 'pandas.tseries.tdi.TimedeltaIndex'> ['1 days', '2 days'] -Length: 2, Freq: <Day>""" +Length: 2, Freq: D""" exp4 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> ['1 days', ..., '3 days'] -Length: 3, Freq: <Day>""" +Length: 3, Freq: D""" exp5 = """<class 'pandas.tseries.tdi.TimedeltaIndex'> ['1 days 00:00:01', ..., '3 days 00:00:00'] Length: 3, Freq: None""" @@ -356,6 +384,29 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_summary(self): + # GH9116 + idx1 = TimedeltaIndex([], freq='D') + idx2 = TimedeltaIndex(['1 days'], freq='D') + idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') + idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') + idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) + + exp1 = """TimedeltaIndex: 0 entries +Freq: D""" + exp2 = """TimedeltaIndex: 1 entries, '1 days' to '1 days' +Freq: D""" + exp3 = """TimedeltaIndex: 2 entries, '1 days' to '2 days' +Freq: D""" + exp4 = """TimedeltaIndex: 3 entries, '1 days' to '3 days' +Freq: D""" + exp5 = """TimedeltaIndex: 3 entries, '1 days 00:00:01' to '3 days 00:00:00'""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], + [exp1, exp2, exp3, exp4, exp5]): + result = idx.summary() + self.assertEqual(result, expected) + def test_add_iadd(self): # only test adding/sub offsets as + is now numeric @@ -755,6 +806,43 @@ def test_representation(self): result = getattr(idx, func)() self.assertEqual(result, expected) + def test_summary(self): + # GH9116 + idx1 = PeriodIndex([], freq='D') + idx2 = PeriodIndex(['2011-01-01'], freq='D') + idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') + idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') + + idx7 = pd.period_range('2013Q1', periods=1, freq="Q") + idx8 = pd.period_range('2013Q1', periods=2, 
freq="Q") + idx9 = pd.period_range('2013Q1', periods=3, freq="Q") + + exp1 = """PeriodIndex: 0 entries +Freq: D""" + exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01 +Freq: D""" + exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02 +Freq: D""" + exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03 +Freq: D""" + exp5 = """PeriodIndex: 3 entries, 2011 to 2013 +Freq: A-DEC""" + exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT +Freq: H""" + exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1 +Freq: Q-DEC""" + exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2 +Freq: Q-DEC""" + exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3 +Freq: Q-DEC""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], + [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): + result = idx.summary() + self.assertEqual(result, expected) + def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', diff --git a/pandas/tseries/tests/test_timedeltas.py b/pandas/tseries/tests/test_timedeltas.py index de23ddcc397d9..b6c5327357590 100644 --- a/pandas/tseries/tests/test_timedeltas.py +++ b/pandas/tseries/tests/test_timedeltas.py @@ -540,7 +540,7 @@ def conv(v): expected = np.array([0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]') result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False) tm.assert_numpy_array_equal(expected, result) - + def testit(unit, transform): # array
Related to #6469. Made all datetime-like indexes `summary` should output the same format. #### Before ``` didx = pd.date_range('2014-12-20', freq='M', periods=3) pidx = pd.period_range('2014-12-20', freq='M', periods=3) tdidx = pd.timedelta_range(start='1 days', freq='D', periods=3) didx.summary() # DatetimeIndex: 3 entries, 2014-12-31 00:00:00 to 2015-02-28 00:00:00 # Freq: M pidx.summary() # PeriodIndex: 3 entries, 2014-12 to 2015-02 tdidx.summary() # TimedeltaIndex: 3 entries, '1 days' to '3 days' # Freq: <Day> ``` #### After - `DatetimeIndex` and `PeriodIndex` use `_format_func`. This made `DatetimeIndex` not to output hour/minutes.. when its frequency is more than a day. - `PeriodIndex` outputs its `freq` - `TimedeltaIndex` has no changes. ``` # DatetimeIndex: 3 entries, 2014-12-31 to 2015-02-28 # Freq: M # PeriodIndex: 3 entries, 2014-12 to 2015-02 # Freq: M # TimedeltaIndex: 3 entries, '1 days' to '3 days' # Freq: <Day> ```
https://api.github.com/repos/pandas-dev/pandas/pulls/9116
2014-12-20T21:26:08Z
2015-01-05T23:48:24Z
2015-01-05T23:48:24Z
2015-01-17T03:34:52Z
API: Index.duplicated should return np.array
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 8122b60b736c9..244f15fdb1fbb 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -27,6 +27,8 @@ Backwards incompatible API changes .. _whatsnew_0160.api_breaking: +- ``Index.duplicated`` now returns `np.array(dtype=bool)` rathar than `Index(dtype=object)` containing `bool` values. (:issue:`8875`) + Deprecations ~~~~~~~~~~~~ diff --git a/pandas/core/base.py b/pandas/core/base.py index f648af85b68c5..04b431ae8cf67 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -13,7 +13,8 @@ _shared_docs = dict() -_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='') +_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='', + duplicated='IndexOpsMixin') class StringMixin(object): @@ -486,14 +487,14 @@ def searchsorted(self, key, side='left'): @Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs) def drop_duplicates(self, take_last=False, inplace=False): duplicated = self.duplicated(take_last=take_last) - result = self[~(duplicated.values).astype(bool)] + result = self[np.logical_not(duplicated)] if inplace: return self._update_inplace(result) else: return result _shared_docs['duplicated'] = ( - """Return boolean %(klass)s denoting duplicate values + """Return boolean %(duplicated)s denoting duplicate values Parameters ---------- @@ -502,7 +503,7 @@ def drop_duplicates(self, take_last=False, inplace=False): Returns ------- - duplicated : %(klass)s + duplicated : %(duplicated)s """) @Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs) @@ -513,8 +514,7 @@ def duplicated(self, take_last=False): return self._constructor(duplicated, index=self.index).__finalize__(self) except AttributeError: - from pandas.core.index import Index - return Index(duplicated) + return np.array(duplicated, dtype=bool) #---------------------------------------------------------------------- # abstracts diff --git a/pandas/core/index.py 
b/pandas/core/index.py index d0253efb180f6..d2a3093e686a7 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -33,7 +33,8 @@ _unsortable_types = frozenset(('mixed', 'mixed-integer')) -_index_doc_kwargs = dict(klass='Index', inplace='') +_index_doc_kwargs = dict(klass='Index', inplace='', + duplicated='np.array') def _try_get_item(x): diff --git a/pandas/core/series.py b/pandas/core/series.py index 081e5c50946bc..60b601a462520 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -59,7 +59,8 @@ klass='Series', axes_single_arg="{0,'index'}", inplace="""inplace : boolean, default False - If True, performs operation inplace and returns None.""" + If True, performs operation inplace and returns None.""", + duplicated='Series' ) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index be5e102691fa0..61bfeb6631d68 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -614,8 +614,10 @@ def test_duplicated_drop_duplicates(self): continue # original doesn't have duplicates - expected = Index([False] * len(original)) - tm.assert_index_equal(original.duplicated(), expected) + expected = np.array([False] * len(original), dtype=bool) + duplicated = original.duplicated() + tm.assert_numpy_array_equal(duplicated, expected) + self.assertTrue(duplicated.dtype == bool) result = original.drop_duplicates() tm.assert_index_equal(result, original) self.assertFalse(result is original) @@ -625,15 +627,19 @@ def test_duplicated_drop_duplicates(self): # create repeated values, 3rd and 5th values are duplicated idx = original[list(range(len(original))) + [5, 3]] - expected = Index([False] * len(original) + [True, True]) - tm.assert_index_equal(idx.duplicated(), expected) + expected = np.array([False] * len(original) + [True, True], dtype=bool) + duplicated = idx.duplicated() + tm.assert_numpy_array_equal(duplicated, expected) + self.assertTrue(duplicated.dtype == bool) tm.assert_index_equal(idx.drop_duplicates(), original) last_base 
= [False] * len(idx) last_base[3] = True last_base[5] = True - expected = Index(last_base) - tm.assert_index_equal(idx.duplicated(take_last=True), expected) + expected = np.array(last_base) + duplicated = idx.duplicated(take_last=True) + tm.assert_numpy_array_equal(duplicated, expected) + self.assertTrue(duplicated.dtype == bool) tm.assert_index_equal(idx.drop_duplicates(take_last=True), idx[~np.array(last_base)]) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index a10467cf7ab4a..f70d652b5b1eb 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2075,13 +2075,17 @@ def test_duplicated_drop_duplicates(self): # GH 4060 idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2 ,3], [1, 1, 1, 1, 2, 2])) - expected = Index([False, False, False, True, False, False]) - tm.assert_index_equal(idx.duplicated(), expected) + expected = np.array([False, False, False, True, False, False], dtype=bool) + duplicated = idx.duplicated() + tm.assert_numpy_array_equal(duplicated, expected) + self.assertTrue(duplicated.dtype == bool) expected = MultiIndex.from_arrays(([1, 2, 3, 2 ,3], [1, 1, 1, 2, 2])) tm.assert_index_equal(idx.drop_duplicates(), expected) - expected = Index([True, False, False, False, False, False]) - tm.assert_index_equal(idx.duplicated(take_last=True), expected) + expected = np.array([True, False, False, False, False, False]) + duplicated = idx.duplicated(take_last=True) + tm.assert_numpy_array_equal(duplicated, expected) + self.assertTrue(duplicated.dtype == bool) expected = MultiIndex.from_arrays(([2, 3, 1, 2 ,3], [1, 1, 1, 2, 2])) tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
For 2nd issue of #8875. - Change `Index.duplicated` to return `np.array`.
https://api.github.com/repos/pandas-dev/pandas/pulls/9112
2014-12-20T02:31:55Z
2014-12-20T20:10:58Z
2014-12-20T20:10:58Z
2015-02-15T03:26:28Z
TST: sql skip tests at class level if database is not available
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index c7b6b77ae7aea..dd456db0131de 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -868,24 +868,38 @@ class _TestSQLAlchemy(PandasSQLTest): """ flavor = None + @classmethod + def setUpClass(cls): + cls.setup_import() + cls.setup_driver() + + # test connection + try: + conn = cls.connect() + conn.connect() + except sqlalchemy.exc.OperationalError: + msg = "{0} - can't connect to {1} server".format(cls, cls.flavor) + raise nose.SkipTest(msg) + def setUp(self): - self.setup_import() - self.setup_driver() self.setup_connect() self._load_iris_data() self._load_raw_sql() self._load_test1_data() - def setup_import(self): + @classmethod + def setup_import(cls): # Skip this test if SQLAlchemy not available if not SQLALCHEMY_INSTALLED: raise nose.SkipTest('SQLAlchemy not installed') - def setup_driver(self): + @classmethod + def setup_driver(cls): raise NotImplementedError() - def connect(self): + @classmethod + def connect(cls): raise NotImplementedError() def setup_connect(self): @@ -1221,12 +1235,14 @@ class TestSQLiteAlchemy(_TestSQLAlchemy): """ flavor = 'sqlite' - def connect(self): + @classmethod + def connect(cls): return sqlalchemy.create_engine('sqlite:///:memory:') - def setup_driver(self): + @classmethod + def setup_driver(cls): # sqlite3 is built-in - self.driver = None + cls.driver = None def tearDown(self): # in memory so tables should not be removed explicitly @@ -1275,14 +1291,16 @@ class TestMySQLAlchemy(_TestSQLAlchemy): """ flavor = 'mysql' - def connect(self): - return sqlalchemy.create_engine( - 'mysql+{driver}://root@localhost/pandas_nosetest'.format(driver=self.driver)) + @classmethod + def connect(cls): + url = 'mysql+{driver}://root@localhost/pandas_nosetest' + return sqlalchemy.create_engine(url.format(driver=cls.driver)) - def setup_driver(self): + @classmethod + def setup_driver(cls): try: import pymysql - self.driver = 'pymysql' + cls.driver = 
'pymysql' except ImportError: raise nose.SkipTest('pymysql not installed') @@ -1347,14 +1365,16 @@ class TestPostgreSQLAlchemy(_TestSQLAlchemy): """ flavor = 'postgresql' - def connect(self): + @classmethod + def connect(cls): url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest' - return sqlalchemy.create_engine(url.format(driver=self.driver)) + return sqlalchemy.create_engine(url.format(driver=cls.driver)) - def setup_driver(self): + @classmethod + def setup_driver(cls): try: import psycopg2 - self.driver = 'psycopg2' + cls.driver = 'psycopg2' except ImportError: raise nose.SkipTest('psycopg2 not installed') @@ -1431,7 +1451,8 @@ class TestSQLiteFallback(PandasSQLTest): """ flavor = 'sqlite' - def connect(self): + @classmethod + def connect(cls): return sqlite3.connect(':memory:') def drop_table(self, table_name): @@ -1582,6 +1603,28 @@ class TestMySQLLegacy(TestSQLiteFallback): """ flavor = 'mysql' + @classmethod + def setUpClass(cls): + cls.setup_driver() + + # test connection + try: + cls.connect() + except cls.driver.err.OperationalError: + raise nose.SkipTest("{0} - can't connect to MySQL server".format(cls)) + + @classmethod + def setup_driver(cls): + try: + import pymysql + cls.driver = pymysql + except ImportError: + raise nose.SkipTest('pymysql not installed') + + @classmethod + def connect(cls): + return cls.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest') + def drop_table(self, table_name): cur = self.conn.cursor() cur.execute("DROP TABLE IF EXISTS %s" % table_name) @@ -1594,16 +1637,7 @@ def _count_rows(self, table_name): rows = cur.fetchall() return rows[0][0] - def connect(self): - return self.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest') - def setUp(self): - try: - import pymysql - self.driver = pymysql - except ImportError: - raise nose.SkipTest('pymysql not installed') - try: self.conn = self.connect() except self.driver.err.OperationalError: @@ -1941,6 +1975,35 @@ def 
clean_up(test_table_to_drop): class TestXMySQL(tm.TestCase): + @classmethod + def setUpClass(cls): + _skip_if_no_pymysql() + + # test connection + import pymysql + try: + # Try Travis defaults. + # No real user should allow root access with a blank password. + pymysql.connect(host='localhost', user='root', passwd='', + db='pandas_nosetest') + except: + pass + else: + return + try: + pymysql.connect(read_default_group='pandas') + except pymysql.ProgrammingError as e: + raise nose.SkipTest( + "Create a group of connection parameters under the heading " + "[pandas] in your system's mysql default file, " + "typically located at ~/.my.cnf or /etc/.my.cnf. ") + except pymysql.Error as e: + raise nose.SkipTest( + "Cannot connect to database. " + "Create a group of connection parameters under the heading " + "[pandas] in your system's mysql default file, " + "typically located at ~/.my.cnf or /etc/.my.cnf. ") + def setUp(self): _skip_if_no_pymysql() import pymysql
Added a `SetUpClass` to skip the full class when no connection can be made to the database, and made some of the other methods class methods, to be able to use them in `SetUpClass` Eg if you are testing locally and don't have one of the databases available, then it will test the connection and skip each individual test, which can take some time.
https://api.github.com/repos/pandas-dev/pandas/pulls/9110
2014-12-19T15:39:55Z
2014-12-20T22:05:18Z
2014-12-20T22:05:18Z
2014-12-20T22:05:18Z
SQL: fix type conversion for Timestamp object typed columns (GH9085)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 9b8382bd3f4c4..7433adaa4b738 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -58,6 +58,8 @@ Bug Fixes +- Fixed bug in ``to_sql`` when mapping a Timestamp object column (datetime + column with timezone info) to the according sqlalchemy type (:issue:`9085`). diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 3f52711f68767..50c620c044403 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -911,7 +911,7 @@ def _sqlalchemy_type(self, col): from sqlalchemy.types import (BigInteger, Float, Text, Boolean, DateTime, Date, Time) - if col_type == 'datetime64': + if col_type == 'datetime64' or col_type == 'datetime': try: tz = col.tzinfo return DateTime(timezone=True) diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index c7b6b77ae7aea..44bbb584ad946 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -792,6 +792,15 @@ def _get_index_columns(self, tbl_name): ixs = [i['column_names'] for i in ixs] return ixs + def test_sqlalchemy_type_mapping(self): + + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + df = DataFrame({'time': to_datetime(['201412120154', '201412110254'], + utc=True)}) + db = sql.SQLDatabase(self.conn) + table = sql.SQLTable("test_type", db, frame=df) + self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime)) + class TestSQLiteFallbackApi(_TestSQLApi): """ @@ -853,6 +862,24 @@ def test_uquery(self): rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn) self.assertEqual(rows, -1) + def _get_sqlite_column_type(self, schema, column): + + for col in schema.split('\n'): + if col.split()[0].strip('[]') == column: + return col.split()[1] + raise ValueError('Column %s not found' % (column)) + + def test_sqlite_type_mapping(self): + + # Test Timestamp objects (no datetime64 because of timezone) (GH9085) + df = DataFrame({'time': 
to_datetime(['201412120154', '201412110254'], + utc=True)}) + db = sql.SQLiteDatabase(self.conn, self.flavor) + table = sql.SQLiteTable("test_type", db, frame=df) + schema = table.sql_schema() + self.assertEqual(self._get_sqlite_column_type(schema, 'time'), + "TIMESTAMP") + #------------------------------------------------------------------------------ #--- Database flavor specific tests @@ -1550,7 +1577,7 @@ def test_dtype(self): # sqlite stores Boolean values as INTEGER self.assertEqual(self._get_sqlite_column_type('dtype_test', 'B'), 'INTEGER') - + self.assertEqual(self._get_sqlite_column_type('dtype_test2', 'B'), 'STRING') self.assertRaises(ValueError, df.to_sql, 'error', self.conn, dtype={'B': bool}) @@ -1558,7 +1585,7 @@ def test_dtype(self): def test_notnull_dtype(self): if self.flavor == 'mysql': raise nose.SkipTest('Not applicable to MySQL legacy') - + cols = {'Bool': Series([True,None]), 'Date': Series([datetime(2012, 5, 1), None]), 'Int' : Series([1, None], dtype='object'),
Closes #9085
https://api.github.com/repos/pandas-dev/pandas/pulls/9109
2014-12-19T15:37:22Z
2014-12-20T22:30:13Z
2014-12-20T22:30:13Z
2014-12-20T22:30:13Z
make DataFrame.to_dict(orient='list') output native python elements
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7c7872cf7b6a5..6cb64c2a5761e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -698,7 +698,7 @@ def to_dict(self, orient='dict'): if orient.lower().startswith('d'): return dict((k, v.to_dict()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('l'): - return dict((k, v.tolist()) for k, v in compat.iteritems(self)) + return dict((k, v.values.tolist()) for k, v in compat.iteritems(self)) elif orient.lower().startswith('sp'): return {'index': self.index.tolist(), 'columns': self.columns.tolist(),
https://api.github.com/repos/pandas-dev/pandas/pulls/9108
2014-12-19T00:24:30Z
2014-12-19T18:27:03Z
null
2014-12-19T18:27:03Z
Refs BUG-9099: Support simultaneous copy and dtype args in DataFrame init
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 8720774b821a2..0389b829bf6c9 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -51,3 +51,4 @@ Bug Fixes - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`) +- DataFrame now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0fc7171410152..392fda6eeebe3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -121,11 +121,11 @@ def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): mgr = mgr.reindex_axis( axe, axis=self._get_block_manager_axis(a), copy=False) - # do not copy BlockManager unless explicitly done - if copy and dtype is None: + # make a copy if explicitly requested + if copy: mgr = mgr.copy() - elif dtype is not None: - # avoid copy if we can + if dtype is not None: + # avoid further copies if we can if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: mgr = mgr.astype(dtype=dtype) return mgr diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 40823537dbc04..a19a32ea793ba 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2489,6 +2489,17 @@ def test_constructor_cast_failure(self): # this is ok df['foo2'] = np.ones((4,2)).tolist() + def test_constructor_dtype_copy(self): + orig_df = DataFrame({ + 'col1': [1.], + 'col2': [2.], + 'col3': [3.]}) + + new_df = pd.DataFrame(orig_df, dtype=float, copy=True) + + new_df['col1'] = 200. + self.assertEqual(orig_df['col1'][0], 1.) + def test_constructor_dtype_nocast_view(self): df = DataFrame([[1, 2]]) should_be_view = DataFrame(df, dtype=df[0].dtype)
closes #9099 This is a somewhat brute-force fix for supporting copy and dtype arguments in DataFrame constructor. Some data might be copied twice if both arguments are set, but if I did the operations in a different order, one other unit test would fail.
https://api.github.com/repos/pandas-dev/pandas/pulls/9105
2014-12-18T14:04:11Z
2014-12-20T20:05:52Z
2014-12-20T20:05:52Z
2014-12-20T20:06:40Z
BUG: Bug in DatetimeIndex iteration, related to (GH8890), fixed in (GH9100)
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 8720774b821a2..8122b60b736c9 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -51,3 +51,4 @@ Bug Fixes - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`) +- Bug in DatetimeIndex iteration, related to (:issue:`8890`), fixed in (:issue:`9100`) diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 9b8200e266e5a..cc81c2d8a8960 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -2332,8 +2332,18 @@ def test_iteration_preserves_tz(self): for i, ts in enumerate(index): result = ts expected = index[i] + self.assertEqual(result._repr_base, expected._repr_base) self.assertEqual(result, expected) + # 9100 + index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00','2014-12-01 04:12:34.987000-08:00']) + for i, ts in enumerate(index): + result = ts + expected = index[i] + self.assertEqual(result._repr_base, expected._repr_base) + self.assertEqual(result, expected) + + def test_misc_coverage(self): rng = date_range('1/1/2000', periods=5) result = rng.groupby(rng.day) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index e3e18b912132d..78a3a5d75cfd3 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -128,9 +128,10 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, offset=None, box=False): result[i] = NaT else: pandas_datetime_to_datetimestruct(value, PANDAS_FR_ns, &dts) - dt = func_create(value, dts, tz, offset) - if not box: - dt = dt + tz.utcoffset(dt) + dt = create_datetime_from_ts(value, dts, tz, offset) + dt = dt + tz.utcoffset(dt) + if box: + dt = Timestamp(dt) result[i] = dt else: trans, deltas, typ = _get_dst_info(tz)
closes #9100
https://api.github.com/repos/pandas-dev/pandas/pulls/9104
2014-12-18T11:28:40Z
2014-12-19T21:03:11Z
2014-12-19T21:03:11Z
2014-12-19T21:03:11Z
BUG in read_csv when using skiprows on a file with CR line endings #9079
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 8720774b821a2..6176555dea27e 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -51,3 +51,4 @@ Bug Fixes - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`) +- Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 2f211ab0381a2..d805727394f33 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -3048,6 +3048,30 @@ def test_comment_skiprows(self): df = self.read_csv(StringIO(data), comment='#', skiprows=4) tm.assert_almost_equal(df.values, expected) + def test_skiprows_lineterminator(self): + #GH #9079 + data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ', + '2007/01/01 01:00 0.2140 U M ', + '2007/01/01 02:00 0.2141 M O ', + '2007/01/01 04:00 0.2142 D M ']) + expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'], + ['2007/01/01', '02:00', 0.2141, 'M', 'O'], + ['2007/01/01', '04:00', 0.2142, 'D', 'M']], + columns=['date', 'time', 'var', 'flag', + 'oflag']) + # test with the three default lineterminators LF, CR and CRLF + df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True, + names=['date', 'time', 'var', 'flag', 'oflag']) + tm.assert_frame_equal(df, expected) + df = self.read_csv(StringIO(data.replace('\n', '\r')), + skiprows=1, delim_whitespace=True, + names=['date', 'time', 'var', 'flag', 'oflag']) + tm.assert_frame_equal(df, expected) + df = self.read_csv(StringIO(data.replace('\n', '\r\n')), + skiprows=1, delim_whitespace=True, + names=['date', 'time', 'var', 'flag', 'oflag']) + tm.assert_frame_equal(df, expected) + def test_trailing_spaces(self): data = "A B C \nrandom 
line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" expected = pd.DataFrame([[1., 2., 4.], diff --git a/pandas/src/parser/tokenizer.c b/pandas/src/parser/tokenizer.c index a64235c7c9732..f56945db87326 100644 --- a/pandas/src/parser/tokenizer.c +++ b/pandas/src/parser/tokenizer.c @@ -707,6 +707,9 @@ int tokenize_delimited(parser_t *self, size_t line_limit) // TRACE(("tokenize_delimited SKIP_LINE %c, state %d\n", c, self->state)); if (c == '\n') { END_LINE(); + } else if (c == '\r') { + self->file_lines++; + self->state = EAT_CRNL_NOP; } break; @@ -1304,6 +1307,9 @@ int tokenize_whitespace(parser_t *self, size_t line_limit) // TRACE(("tokenize_whitespace SKIP_LINE %c, state %d\n", c, self->state)); if (c == '\n') { END_LINE(); + } else if (c == '\r') { + self->file_lines++; + self->state = EAT_CRNL_NOP; } break;
closes #9079 Fix the read_csv bug introduced in 6bf83c5d when using skiprows on a file with CR line terminators
https://api.github.com/repos/pandas-dev/pandas/pulls/9102
2014-12-18T03:09:51Z
2014-12-20T20:31:45Z
2014-12-20T20:31:45Z
2014-12-20T20:31:55Z
overflow bug in multi-index when checking for duplicates
diff --git a/doc/source/api.rst b/doc/source/api.rst index d2f94c22f0335..b6fd14f425bd0 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1176,6 +1176,7 @@ Attributes Index.is_monotonic_increasing Index.is_monotonic_decreasing Index.is_unique + Index.has_duplicates Index.dtype Index.inferred_type Index.is_all_dates diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 74fcf2e1fbf06..8720774b821a2 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -50,4 +50,4 @@ Bug Fixes .. _whatsnew_0160.bug_fixes: - Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) - +- Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 0f682893490dd..d0253efb180f6 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -3,7 +3,7 @@ import warnings import operator from functools import partial -from pandas.compat import range, zip, lrange, lzip, u, reduce +from pandas.compat import range, zip, lrange, lzip, u, reduce, filter, map from pandas import compat import numpy as np @@ -600,6 +600,10 @@ def is_unique(self): """ return if the index has unique values """ return self._engine.is_unique + @property + def has_duplicates(self): + return not self.is_unique + def is_boolean(self): return self.inferred_type in ['boolean'] @@ -3218,22 +3222,47 @@ def _has_complex_internals(self): # to disable groupby tricks return True - @property - def has_duplicates(self): - """ - Return True if there are no unique groups - """ - # has duplicates - shape = [len(lev) for lev in self.levels] - group_index = np.zeros(len(self), dtype='i8') - for i in range(len(shape)): - stride = np.prod([x for x in shape[i + 1:]], dtype='i8') - group_index += self.labels[i] * stride + @cache_readonly + def is_unique(self): + from pandas.hashtable 
import Int64HashTable - if len(np.unique(group_index)) < len(group_index): - return True + def _get_group_index(labels, shape): + from pandas.core.groupby import _int64_overflow_possible, \ + _compress_group_index - return False + # how many levels can be done without overflow + pred = lambda i: not _int64_overflow_possible(shape[:i]) + nlev = next(filter(pred, range(len(shape), 0, -1))) + + # compute group indicies for the first `nlev` levels + group_index = labels[0].astype('i8', subok=False, copy=True) + stride = shape[0] + + for i in range(1, nlev): + group_index += labels[i] * stride + stride *= shape[i] + + if nlev == len(shape): + return group_index + + comp_ids, obs_ids = _compress_group_index(group_index, sort=False) + + labels = [comp_ids] + labels[nlev:] + shape = [len(obs_ids)] + shape[nlev:] + + return _get_group_index(labels, shape) + + def _maybe_lift(lab, size): # pormote nan values + return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) + + shape = map(len, self.levels) + labels = map(_ensure_int64, self.labels) + + labels, shape = map(list, zip(*map(_maybe_lift, labels, shape))) + group_index = _get_group_index(labels, shape) + + table = Int64HashTable(min(1 << 20, len(group_index))) + return len(table.unique(group_index)) == len(self) def get_value(self, series, key): # somewhat broken encapsulation diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 615346f34b5bf..be5e102691fa0 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -620,6 +620,9 @@ def test_duplicated_drop_duplicates(self): tm.assert_index_equal(result, original) self.assertFalse(result is original) + # has_duplicates + self.assertFalse(original.has_duplicates) + # create repeated values, 3rd and 5th values are duplicated idx = original[list(range(len(original))) + [5, 3]] expected = Index([False] * len(original) + [True, True]) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 
575c4e8cb4140..fd2e83e9609c5 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -3451,6 +3451,69 @@ def test_has_duplicates(self): [0, 1, 2, 0, 0, 1, 2]]) self.assertTrue(index.has_duplicates) + # GH 9075 + t = [(u'x', u'out', u'z', 5, u'y', u'in', u'z', 169), + (u'x', u'out', u'z', 7, u'y', u'in', u'z', 119), + (u'x', u'out', u'z', 9, u'y', u'in', u'z', 135), + (u'x', u'out', u'z', 13, u'y', u'in', u'z', 145), + (u'x', u'out', u'z', 14, u'y', u'in', u'z', 158), + (u'x', u'out', u'z', 16, u'y', u'in', u'z', 122), + (u'x', u'out', u'z', 17, u'y', u'in', u'z', 160), + (u'x', u'out', u'z', 18, u'y', u'in', u'z', 180), + (u'x', u'out', u'z', 20, u'y', u'in', u'z', 143), + (u'x', u'out', u'z', 21, u'y', u'in', u'z', 128), + (u'x', u'out', u'z', 22, u'y', u'in', u'z', 129), + (u'x', u'out', u'z', 25, u'y', u'in', u'z', 111), + (u'x', u'out', u'z', 28, u'y', u'in', u'z', 114), + (u'x', u'out', u'z', 29, u'y', u'in', u'z', 121), + (u'x', u'out', u'z', 31, u'y', u'in', u'z', 126), + (u'x', u'out', u'z', 32, u'y', u'in', u'z', 155), + (u'x', u'out', u'z', 33, u'y', u'in', u'z', 123), + (u'x', u'out', u'z', 12, u'y', u'in', u'z', 144)] + + index = pd.MultiIndex.from_tuples(t) + self.assertFalse(index.has_duplicates) + + # handle int64 overflow if possible + def check(nlevels, with_nulls): + labels = np.tile(np.arange(500), 2) + level = np.arange(500) + + if with_nulls: # inject some null values + labels[500] = -1 # common nan value + labels = list(labels.copy() for i in range(nlevels)) + for i in range(nlevels): + labels[i][500 + i - nlevels // 2 ] = -1 + + labels += [np.array([-1, 1]).repeat(500)] + else: + labels = [labels] * nlevels + [np.arange(2).repeat(500)] + + levels = [level] * nlevels + [[0, 1]] + + # no dups + index = MultiIndex(levels=levels, labels=labels) + self.assertFalse(index.has_duplicates) + + # with a dup + if with_nulls: + f = lambda a: np.insert(a, 1000, a[0]) + labels = list(map(f, labels)) + index = MultiIndex(levels=levels, 
labels=labels) + else: + values = index.values.tolist() + index = MultiIndex.from_tuples(values + [values[0]]) + + self.assertTrue(index.has_duplicates) + + # no overflow + check(4, False) + check(4, True) + + # overflow possible + check(8, False) + check(8, True) + def test_tolist(self): result = self.index.tolist() exp = list(self.index.values)
closes https://github.com/pydata/pandas/issues/9075
https://api.github.com/repos/pandas-dev/pandas/pulls/9101
2014-12-18T01:10:15Z
2014-12-18T11:40:22Z
2014-12-18T11:40:22Z
2014-12-21T13:52:03Z
Fixes .nth() with groupby multiple columns
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index d7de5a7ac5979..75fc44cfee33f 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -118,3 +118,4 @@ Bug Fixes - DataFrame now properly supports simultaneous ``copy`` and ``dtype`` arguments in constructor (:issue:`9099`) - Bug in read_csv when using skiprows on a file with CR line endings with the c engine. (:issue:`9079`) - isnull now detects NaT in PeriodIndex (:issue:`9129`) +- groupby .nth() method works with MultiIndex / multiple column groupby now (:issue:`8979`)` diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 888dca3914b53..cb5dedc887bca 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -859,7 +859,7 @@ def nth(self, n, dropna=None): # this is a pass-thru pass elif all([ n in ax for n in names ]): - result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names) + result.index = MultiIndex.from_arrays([self.obj[name][is_nth] for name in names]).set_names(names) elif self._group_selection is not None: result.index = self.obj._get_axis(self.axis)[is_nth] diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py index 0b439a84c0113..039bb2cd599e5 100644 --- a/pandas/tests/test_groupby.py +++ b/pandas/tests/test_groupby.py @@ -337,6 +337,35 @@ def test_nth(self): expected = DataFrame(1, columns=['a', 'b'], index=expected_dates) assert_frame_equal(result, expected) + def test_nth_multi_index(self): + # PR 9090, related to issue 8979 + # test nth on MultiIndex, should match .first() + grouped = self.three_group.groupby(['A', 'B']) + result = grouped.nth(0) + expected = grouped.first() + assert_frame_equal(result, expected) + + + def test_nth_multi_index_as_expected(self): + # PR 9090, related to issue 8979 + # test nth on MultiIndex + three_group = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', + 'bar', 'bar', 'bar', 'bar', + 'foo', 'foo', 'foo'], + 'B': ['one', 'one', 'one', 'two', + 
'one', 'one', 'one', 'two', + 'two', 'two', 'one'], + 'C': ['dull', 'dull', 'shiny', 'dull', + 'dull', 'shiny', 'shiny', 'dull', + 'shiny', 'shiny', 'shiny']}) + grouped = three_group.groupby(['A', 'B']) + result = grouped.nth(0) + expected = DataFrame({'C': ['dull', 'dull', 'dull', 'dull']}, + index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'], ['one', 'two', 'one', 'two']], + names=['A', 'B'])) + assert_frame_equal(result, expected) + + def test_grouper_index_types(self): # related GH5375 # groupby misbehaving when using a Floatlike index
closes #8979 Let me know if this makes sense Problem mentioned in issue 8979 too
https://api.github.com/repos/pandas-dev/pandas/pulls/9090
2014-12-16T00:37:56Z
2015-01-02T16:29:36Z
null
2015-01-02T16:29:36Z
Closes #8943: COMPAT: periods needs coercion to np.int64
diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index a377b9caadc0c..74fcf2e1fbf06 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -48,3 +48,6 @@ Bug Fixes ~~~~~~~~~ .. _whatsnew_0160.bug_fixes: + +- Fixed compatibility issue in ``DatetimeIndex`` affecting architectures where ``numpy.int_`` defaults to ``numpy.int32`` (:issue:`8943`) + diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index c3b1ea6d742e3..575c4e8cb4140 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1909,6 +1909,22 @@ def test_time_loc(self): # GH8667 right.iloc[i] *= -10 tm.assert_series_equal(left, right) + def test_time_overflow_for_32bit_machines(self): + # GH8943. On some machines NumPy defaults to np.int32 (for example, + # 32-bit Linux machines). In the function _generate_regular_range + # found in tseries/index.py, `periods` gets multiplied by `strides` + # (which has value 1e9) and since the max value for np.int32 is ~2e9, + # and since those machines won't promote np.int32 to np.int64, we get + # overflow. 
+ periods = np.int_(1000) + + idx1 = pd.date_range(start='2000', periods=periods, freq='S') + self.assertEqual(len(idx1), periods) + + idx2 = pd.date_range(end='2000', periods=periods, freq='S') + self.assertEqual(len(idx2), periods) + + class TestPeriodIndex(Base, tm.TestCase): _holder = PeriodIndex _multiprocess_can_split_ = True diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index edb6d34f65f5e..5e621761d63b6 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1781,11 +1781,11 @@ def _generate_regular_range(start, end, periods, offset): tz = start.tz elif start is not None: b = Timestamp(start).value - e = b + periods * stride + e = b + np.int64(periods) * stride tz = start.tz elif end is not None: e = Timestamp(end).value + stride - b = e - periods * stride + b = e - np.int64(periods) * stride tz = end.tz else: raise NotImplementedError
Let me know if this needs any work. closes #8943
https://api.github.com/repos/pandas-dev/pandas/pulls/9078
2014-12-15T08:11:15Z
2014-12-17T22:14:26Z
2014-12-17T22:14:26Z
2014-12-18T02:06:30Z
BUG: Bug in MultiIndex.has_duplicates when having many levels causes an indexer overflow (GH9075)
diff --git a/doc/source/api.rst b/doc/source/api.rst index d2f94c22f0335..b6fd14f425bd0 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1176,6 +1176,7 @@ Attributes Index.is_monotonic_increasing Index.is_monotonic_decreasing Index.is_unique + Index.has_duplicates Index.dtype Index.inferred_type Index.is_all_dates diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index a377b9caadc0c..8d7cd610c14c3 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -48,3 +48,5 @@ Bug Fixes ~~~~~~~~~ .. _whatsnew_0160.bug_fixes: + +- Bug in ``MultiIndex.has_duplicates`` when having many levels causes an indexer overflow (:issue:`9075`) diff --git a/pandas/core/index.py b/pandas/core/index.py index 0f682893490dd..e082881d8e831 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -600,6 +600,10 @@ def is_unique(self): """ return if the index has unique values """ return self._engine.is_unique + @property + def has_duplicates(self): + return not self.is_unique + def is_boolean(self): return self.inferred_type in ['boolean'] @@ -3223,12 +3227,19 @@ def has_duplicates(self): """ Return True if there are no unique groups """ - # has duplicates + + from pandas.core.groupby import _int64_overflow_possible + + # if we have a possible overflow, then fallback to safe method shape = [len(lev) for lev in self.levels] + if _int64_overflow_possible(shape): + return self.duplicated().any() + + # int64 capable group_index = np.zeros(len(self), dtype='i8') for i in range(len(shape)): stride = np.prod([x for x in shape[i + 1:]], dtype='i8') - group_index += self.labels[i] * stride + group_index += _ensure_int64(self.labels[i]) * stride if len(np.unique(group_index)) < len(group_index): return True diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 615346f34b5bf..be5e102691fa0 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -620,6 +620,9 @@ def 
test_duplicated_drop_duplicates(self): tm.assert_index_equal(result, original) self.assertFalse(result is original) + # has_duplicates + self.assertFalse(original.has_duplicates) + # create repeated values, 3rd and 5th values are duplicated idx = original[list(range(len(original))) + [5, 3]] expected = Index([False] * len(original) + [True, True]) diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index c3b1ea6d742e3..5ce5ea84dec65 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -3435,6 +3435,49 @@ def test_has_duplicates(self): [0, 1, 2, 0, 0, 1, 2]]) self.assertTrue(index.has_duplicates) + # GH 9075 + t = [(u'x', u'out', u'z', 5, u'y', u'in', u'z', 169), + (u'x', u'out', u'z', 7, u'y', u'in', u'z', 119), + (u'x', u'out', u'z', 9, u'y', u'in', u'z', 135), + (u'x', u'out', u'z', 13, u'y', u'in', u'z', 145), + (u'x', u'out', u'z', 14, u'y', u'in', u'z', 158), + (u'x', u'out', u'z', 16, u'y', u'in', u'z', 122), + (u'x', u'out', u'z', 17, u'y', u'in', u'z', 160), + (u'x', u'out', u'z', 18, u'y', u'in', u'z', 180), + (u'x', u'out', u'z', 20, u'y', u'in', u'z', 143), + (u'x', u'out', u'z', 21, u'y', u'in', u'z', 128), + (u'x', u'out', u'z', 22, u'y', u'in', u'z', 129), + (u'x', u'out', u'z', 25, u'y', u'in', u'z', 111), + (u'x', u'out', u'z', 28, u'y', u'in', u'z', 114), + (u'x', u'out', u'z', 29, u'y', u'in', u'z', 121), + (u'x', u'out', u'z', 31, u'y', u'in', u'z', 126), + (u'x', u'out', u'z', 32, u'y', u'in', u'z', 155), + (u'x', u'out', u'z', 33, u'y', u'in', u'z', 123), + (u'x', u'out', u'z', 12, u'y', u'in', u'z', 144)] + index = pd.MultiIndex.from_tuples(t) + self.assertFalse(index.has_duplicates) + + # handle int64 overflow if possible + def check(nlevels): + labels = np.tile(np.arange(500), 2) + level = np.arange(500) + + # no dups + index = MultiIndex(levels=[level] * nlevels + [[0, 1]], + labels=[labels] * nlevels + [np.arange(2).repeat(500)]) + self.assertFalse(index.has_duplicates) + + # with a dup + values = 
index.values.tolist() + index = MultiIndex.from_tuples(values + [values[0]]) + self.assertTrue(index.has_duplicates) + + # no overflow + check(4) + + # overflow possible + check(8) + def test_tolist(self): result = self.index.tolist() exp = list(self.index.values)
closes #9075
https://api.github.com/repos/pandas-dev/pandas/pulls/9077
2014-12-14T23:55:41Z
2014-12-18T11:39:07Z
null
2014-12-18T11:39:07Z