title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: ASV replace
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py index 157d5fe1e3948..6330a2b36c516 100644 --- a/asv_bench/benchmarks/replace.py +++ b/asv_bench/benchmarks/replace.py @@ -1,70 +1,58 @@ -from .pandas_vb_common import * +import numpy as np +import pandas as pd +from .pandas_vb_common import setup # noqa -class replace_fillna(object): - goal_time = 0.2 - - def setup(self): - self.N = 1000000 - try: - self.rng = date_range('1/1/2000', periods=self.N, freq='min') - except NameError: - self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute()) - self.date_range = DateRange - self.ts = Series(np.random.randn(self.N), index=self.rng) - def time_replace_fillna(self): - self.ts.fillna(0.0, inplace=True) +class FillNa(object): - -class replace_large_dict(object): goal_time = 0.2 + params = [True, False] + param_names = ['inplace'] - def setup(self): - self.n = (10 ** 6) - self.start_value = (10 ** 5) - self.to_rep = {i: self.start_value + i for i in range(self.n)} - self.s = Series(np.random.randint(self.n, size=(10 ** 3))) - - def time_replace_large_dict(self): - self.s.replace(self.to_rep, inplace=True) + def setup(self, inplace): + N = 10**6 + rng = pd.date_range('1/1/2000', periods=N, freq='min') + data = np.random.randn(N) + data[::2] = np.nan + self.ts = pd.Series(data, index=rng) + def time_fillna(self, inplace): + self.ts.fillna(0.0, inplace=inplace) -class replace_convert(object): - goal_time = 0.5 + def time_replace(self, inplace): + self.ts.replace(np.nan, 0.0, inplace=inplace) - def setup(self): - self.n = (10 ** 3) - self.to_ts = {i: pd.Timestamp(i) for i in range(self.n)} - self.to_td = {i: pd.Timedelta(i) for i in range(self.n)} - self.s = Series(np.random.randint(self.n, size=(10 ** 3))) - self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)), - 'B': np.random.randint(self.n, size=(10 ** 3))}) - def time_replace_series_timestamp(self): - self.s.replace(self.to_ts) +class ReplaceDict(object): - 
def time_replace_series_timedelta(self): - self.s.replace(self.to_td) + goal_time = 0.2 + params = [True, False] + param_names = ['inplace'] - def time_replace_frame_timestamp(self): - self.df.replace(self.to_ts) + def setup(self, inplace): + N = 10**5 + start_value = 10**5 + self.to_rep = dict(enumerate(np.arange(N) + start_value)) + self.s = pd.Series(np.random.randint(N, size=10**3)) - def time_replace_frame_timedelta(self): - self.df.replace(self.to_td) + def time_replace_series(self, inplace): + self.s.replace(self.to_rep, inplace=inplace) -class replace_replacena(object): - goal_time = 0.2 +class Convert(object): - def setup(self): - self.N = 1000000 - try: - self.rng = date_range('1/1/2000', periods=self.N, freq='min') - except NameError: - self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute()) - self.date_range = DateRange - self.ts = Series(np.random.randn(self.N), index=self.rng) - - def time_replace_replacena(self): - self.ts.replace(np.nan, 0.0, inplace=True) + goal_time = 0.5 + params = (['DataFrame', 'Series'], ['Timestamp', 'Timedelta']) + param_names = ['contructor', 'replace_data'] + + def setup(self, contructor, replace_data): + N = 10**3 + data = {'Series': pd.Series(np.random.randint(N, size=N)), + 'DataFrame': pd.DataFrame({'A': np.random.randint(N, size=N), + 'B': np.random.randint(N, size=N)})} + self.to_replace = {i: getattr(pd, replace_data) for i in range(N)} + self.data = data[contructor] + + def time_replace(self, contructor, replace_data): + self.data.replace(self.to_replace)
- Flake8'd and removed star imports For the `ReplaceDict` benchmark I reduced the number of samples from 10^6 to 10^5 because it was taking a significant amount of time, but 10^5 samples should still be representative of a large sample performance. ``` asv dev -b ^replace · Discovering benchmarks · Running 4 total benchmarks (1 commits * 1 environments * 4 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 25.00%] ··· Running replace.Convert.time_replace ok [ 25.00%] ···· ============ =========== =========== -- replace_data ------------ ----------------------- contructor Timestamp Timedelta ============ =========== =========== DataFrame 411ms 409ms Series 349ms 349ms ============ =========== =========== [ 50.00%] ··· Running replace.NaN.time_fillna ok [ 50.00%] ···· ========= ======== inplace --------- -------- True 6.42ms False 15.0ms ========= ======== [ 75.00%] ··· Running replace.NaN.time_replace ok [ 75.00%] ···· ========= ======== inplace --------- -------- True 7.09ms False 23.6ms ========= ======== [100.00%] ··· Running replace.ReplaceDict.time_replace_series ok [100.00%] ···· ========= ======= inplace --------- ------- True 7.43s False 7.48s ========= ======= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18833
2017-12-19T05:21:15Z
2017-12-21T15:06:02Z
2017-12-21T15:06:02Z
2017-12-21T17:39:57Z
Refactor _TimeOp._validate to separate datetime vs timedelta vs dateoffset
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 2fb0cbb14c225..6165eea708796 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -386,54 +386,19 @@ def __init__(self, left, right, name, na_op): self.lvalues, self.rvalues = self._convert_for_datetime(lvalues, rvalues) - def _validate(self, lvalues, rvalues, name): - # timedelta and integer mul/div - - if ((self.is_timedelta_lhs and - (self.is_integer_rhs or self.is_floating_rhs)) or - (self.is_timedelta_rhs and - (self.is_integer_lhs or self.is_floating_lhs))): - - if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'): - raise TypeError("can only operate on a timedelta and an " - "integer or a float for division and " - "multiplication, but the operator [{name}] " - "was passed".format(name=name)) - - # 2 timedeltas - elif ((self.is_timedelta_lhs and - (self.is_timedelta_rhs or self.is_offset_rhs)) or - (self.is_timedelta_rhs and - (self.is_timedelta_lhs or self.is_offset_lhs))): - - if name not in ('__div__', '__rdiv__', '__truediv__', - '__rtruediv__', '__add__', '__radd__', '__sub__', - '__rsub__'): - raise TypeError("can only operate on a timedeltas for addition" - ", subtraction, and division, but the operator" - " [{name}] was passed".format(name=name)) - - # datetime and timedelta/DateOffset - elif (self.is_datetime_lhs and - (self.is_timedelta_rhs or self.is_offset_rhs)): + def _validate_datetime(self, lvalues, rvalues, name): + # assumes self.is_datetime_lhs + if (self.is_timedelta_rhs or self.is_offset_rhs): + # datetime and timedelta/DateOffset if name not in ('__add__', '__radd__', '__sub__'): raise TypeError("can only operate on a datetime with a rhs of " "a timedelta/DateOffset for addition and " "subtraction, but the operator [{name}] was " "passed".format(name=name)) - elif (self.is_datetime_rhs and - (self.is_timedelta_lhs or self.is_offset_lhs)): - if name not in ('__add__', '__radd__', '__rsub__'): - raise TypeError("can only operate on a timedelta/DateOffset " - 
"with a rhs of a datetime for addition, " - "but the operator [{name}] was passed" - .format(name=name)) - - # 2 datetimes - elif self.is_datetime_lhs and self.is_datetime_rhs: - + elif self.is_datetime_rhs: + # 2 datetimes if name not in ('__sub__', '__rsub__'): raise TypeError("can only operate on a datetimes for" " subtraction, but the operator [{name}] was" @@ -444,18 +409,82 @@ def _validate(self, lvalues, rvalues, name): raise ValueError("Incompatible tz's on datetime subtraction " "ops") - elif ((self.is_timedelta_lhs or self.is_offset_lhs) and - self.is_datetime_rhs): + else: + raise TypeError('cannot operate on a series without a rhs ' + 'of a series/ndarray of type datetime64[ns] ' + 'or a timedelta') + + def _validate_timedelta(self, name): + # assumes self.is_timedelta_lhs + + if self.is_integer_rhs or self.is_floating_rhs: + # timedelta and integer mul/div + self._check_timedelta_with_numeric(name) + elif self.is_timedelta_rhs or self.is_offset_rhs: + # 2 timedeltas + if name not in ('__div__', '__rdiv__', '__truediv__', + '__rtruediv__', '__add__', '__radd__', '__sub__', + '__rsub__'): + raise TypeError("can only operate on a timedeltas for addition" + ", subtraction, and division, but the operator" + " [{name}] was passed".format(name=name)) + elif self.is_datetime_rhs: + if name not in ('__add__', '__radd__', '__rsub__'): + raise TypeError("can only operate on a timedelta/DateOffset " + "with a rhs of a datetime for addition, " + "but the operator [{name}] was passed" + .format(name=name)) + else: + raise TypeError('cannot operate on a series without a rhs ' + 'of a series/ndarray of type datetime64[ns] ' + 'or a timedelta') + + def _validate_offset(self, name): + # assumes self.is_offset_lhs + + if self.is_timedelta_rhs: + # 2 timedeltas + if name not in ('__div__', '__rdiv__', '__truediv__', + '__rtruediv__', '__add__', '__radd__', '__sub__', + '__rsub__'): + raise TypeError("can only operate on a timedeltas for addition" + ", subtraction, and 
division, but the operator" + " [{name}] was passed".format(name=name)) + elif self.is_datetime_rhs: if name not in ('__add__', '__radd__'): raise TypeError("can only operate on a timedelta/DateOffset " "and a datetime for addition, but the operator" " [{name}] was passed".format(name=name)) + else: raise TypeError('cannot operate on a series without a rhs ' 'of a series/ndarray of type datetime64[ns] ' 'or a timedelta') + def _validate(self, lvalues, rvalues, name): + if self.is_datetime_lhs: + return self._validate_datetime(lvalues, rvalues, name) + elif self.is_timedelta_lhs: + return self._validate_timedelta(name) + elif self.is_offset_lhs: + return self._validate_offset(name) + + if ((self.is_integer_lhs or self.is_floating_lhs) and + self.is_timedelta_rhs): + self._check_timedelta_with_numeric(name) + else: + raise TypeError('cannot operate on a series without a rhs ' + 'of a series/ndarray of type datetime64[ns] ' + 'or a timedelta') + + def _check_timedelta_with_numeric(self, name): + if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'): + raise TypeError("can only operate on a timedelta and an " + "integer or a float for division and " + "multiplication, but the operator [{name}] " + "was passed".format(name=name)) + def _convert_to_array(self, values, name=None, other=None): """converts values to ndarray""" from pandas.core.tools.timedeltas import to_timedelta diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 89a6311153d15..4adbdbca82fd2 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -960,8 +960,51 @@ def test_timedelta64_ops_nat(self): assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta) + @pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4), + Timedelta(minutes=5, seconds=4), + Timedelta('5m4s').to_timedelta64()]) + def test_operators_timedelta64_with_timedelta(self, scalar_td): + # smoke tests + td1 = 
Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 + scalar_td + scalar_td + td1 + td1 - scalar_td + scalar_td - td1 + td1 / scalar_td + scalar_td / td1 + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + pytest.param(Timedelta('5m4s'), + marks=pytest.mark.xfail(reason="Timedelta.__floordiv__ " + "bug GH#18846")), + Timedelta('5m4s').to_timedelta64()]) + def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = 'operate|unsupported|cannot' + with tm.assert_raises_regex(TypeError, pattern): + td1 * scalar_td + with tm.assert_raises_regex(TypeError, pattern): + scalar_td * td1 + with tm.assert_raises_regex(TypeError, pattern): + td1 // scalar_td + with tm.assert_raises_regex(TypeError, pattern): + scalar_td // td1 + with tm.assert_raises_regex(TypeError, pattern): + scalar_td ** td1 + with tm.assert_raises_regex(TypeError, pattern): + td1 ** scalar_td + class TestDatetimeSeriesArithmetic(object): + def test_operators_datetimelike(self): def run_ops(ops, get_ser, test_ser): @@ -976,16 +1019,6 @@ def run_ops(ops, get_ser, test_ser): # ## timedelta64 ### td1 = Series([timedelta(minutes=5, seconds=3)] * 3) td1.iloc[2] = np.nan - td2 = timedelta(minutes=5, seconds=4) - ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__', - '__rfloordiv__', '__rpow__'] - run_ops(ops, td1, td2) - td1 + td2 - td2 + td1 - td1 - td2 - td2 - td1 - td1 / td2 - td2 / td1 # ## datetime64 ### dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Trying to mirror the logic in the datetimelike index classes. Separate some of the tests in the same vein. No logic should be changed in this PR, it should be pure refactor. This _will_ have a merge conflict with #18831.
https://api.github.com/repos/pandas-dev/pandas/pulls/18832
2017-12-19T02:27:18Z
2017-12-23T20:50:23Z
2017-12-23T20:50:23Z
2018-02-11T22:00:29Z
BUG: fix Series[timedelta64] arithmetic with Timedelta scalars
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index d055c49dc4721..778db17a56b58 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -267,6 +267,14 @@ yields another ``timedelta64[ns]`` dtypes Series. td * -1 td * pd.Series([1, 2, 3, 4]) +Rounded division (floor-division) of a ``timedelta64[ns]`` Series by a scalar +``Timedelta`` gives a series of integers. + +.. ipython:: python + + td // pd.Timedelta(days=3, hours=4) + pd.Timedelta(days=3, hours=4) // td + Attributes ---------- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 0301bf0a23dd5..d5910fb078ca4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -294,6 +294,7 @@ Conversion - Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) - Bug in :class:`DatetimeIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) +- Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) Indexing diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 3a7a5e44d5a88..4fde3905c0e65 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -425,7 +425,7 @@ def _validate_timedelta(self, name): # 2 timedeltas if name not in ('__div__', '__rdiv__', '__truediv__', '__rtruediv__', '__add__', '__radd__', '__sub__', - '__rsub__'): + '__rsub__', '__floordiv__', '__rfloordiv__'): raise TypeError("can only operate on a timedeltas for addition" ", subtraction, and division, but the operator" " [{name}] was passed".format(name=name)) @@ -629,7 +629,9 @@ def _offset(lvalues, rvalues): # integer 
gets converted to timedelta in np < 1.6 if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and not self.is_integer_rhs and not self.is_integer_lhs and - self.name in ('__div__', '__truediv__')): + self.name in ('__div__', '__rdiv__', + '__truediv__', '__rtruediv__', + '__floordiv__', '__rfloordiv__')): self.dtype = 'float64' self.fill_value = np.nan lvalues = lvalues.astype(np.float64) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 433e3cf440cbd..b1d0a749246a7 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -977,9 +977,7 @@ def test_operators_timedelta64_with_timedelta(self, scalar_td): @pytest.mark.parametrize('scalar_td', [ timedelta(minutes=5, seconds=4), - pytest.param(Timedelta('5m4s'), - marks=pytest.mark.xfail(reason="Timedelta.__floordiv__ " - "bug GH#18846")), + Timedelta('5m4s'), Timedelta('5m4s').to_timedelta64()]) def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): td1 = Series([timedelta(minutes=5, seconds=3)] * 3) @@ -993,15 +991,53 @@ def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): td1 * scalar_td with tm.assert_raises_regex(TypeError, pattern): scalar_td * td1 - with tm.assert_raises_regex(TypeError, pattern): - td1 // scalar_td - with tm.assert_raises_regex(TypeError, pattern): - scalar_td // td1 with tm.assert_raises_regex(TypeError, pattern): scalar_td ** td1 with tm.assert_raises_regex(TypeError, pattern): td1 ** scalar_td + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + pytest.param(Timedelta('5m4s'), + marks=pytest.mark.xfail(reason="Timedelta.__floordiv__ " + "bug GH#18846")), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_rfloordiv(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + result = scalar_td // td1 + expected = Series([1, 1, np.nan]) + tm.assert_series_equal(result, expected) + + 
@pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_rfloordiv_explicit(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # We can test __rfloordiv__ using this syntax, + # see `test_timedelta_rfloordiv` + result = td1.__rfloordiv__(scalar_td) + expected = Series([1, 1, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_floordiv(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + result = td1 // scalar_td + expected = Series([0, 0, np.nan]) + tm.assert_series_equal(result, expected) + class TestDatetimeSeriesArithmetic(object): @pytest.mark.parametrize(
closes #18846 Before: ``` >>> ser = pd.Series([pd.Timedelta(minutes=5, seconds=3)] * 3) >>> ser.iloc[-1] = np.nan >>> scalar = pd.Timedelta(minutes=5, seconds=4) >>> ser // scalar [...] TypeError: can only operate on a timedeltas for addition, subtraction, and division, but the operator [__floordiv__] was passed >>> scalar / ser 0 00:00:00.000000 1 00:00:00.000000 2 NaT dtype: timedelta64[ns] >>> scalar.to_pytimedelta() / ser 0 00:00:00.000000 1 00:00:00.000000 2 NaT dtype: timedelta64[ns] >>> scalar.to_timedelta64() / ser 0 00:00:00.000000 1 00:00:00.000000 2 NaT dtype: timedelta64[ns] >>> scalar.to_pytimedelta() // ser [...] TypeError: can only operate on a timedeltas for addition, subtraction, and division, but the operator [__rfloordiv__] was passed >>> scalar.to_timedelta64() // ser [...] TypeError: can only operate on a timedeltas for addition, subtraction, and division, but the operator [__rfloordiv__] was passed ``` After ``` >>> ser // scalar 0 0.0 1 0.0 2 NaN dtype: float64 >>> scalar / ser 0 1.0033 1 1.0033 2 NaN dtype: float64 >>> scalar.to_pytimedelta() / ser 0 1.0033 1 1.0033 2 NaN dtype: float64 >>> scalar.to_timedelta64() / ser 0 1.0033 1 1.0033 2 NaN dtype: float64 >>> scalar.to_pytimedelta() // ser 0 1.0 1 1.0 2 NaN dtype: float64 >>> scalar.to_timedelta64() // ser 0 1.0 1 1.0 2 NaN dtype: float64 ``` Also identified a bug in `Timedelta.__floordiv__`, noted it in #18824, will address that separately. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18831
2017-12-19T02:21:05Z
2017-12-31T17:26:10Z
2017-12-31T17:26:10Z
2018-02-11T22:01:10Z
Prevent RecursionError on Timestamp comparison against object array
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ae6d0816abc41..9f517e628512c 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -346,3 +346,4 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) +- Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 478611fe9cab9..8b0719bc8b93f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -17,7 +17,7 @@ from cpython.datetime cimport (datetime, PyDateTime_IMPORT from util cimport (is_datetime64_object, is_timedelta64_object, - is_integer_object, is_string_object, + is_integer_object, is_string_object, is_array, INT64_MAX) cimport ccalendar @@ -108,6 +108,9 @@ cdef class _Timestamp(datetime): raise TypeError('Cannot compare type %r with type %r' % (type(self).__name__, type(other).__name__)) + elif is_array(other): + # avoid recursion error GH#15183 + return PyObject_RichCompare(np.array([self]), other, op) return PyObject_RichCompare(other, self, reverse_ops[op]) else: if op == Py_EQ: diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 001f6c1fdbef4..c260700c9473b 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -163,6 +163,27 @@ def test_binary_ops_with_timedelta(self): pytest.raises(TypeError, lambda: td * td) +class TestTimedeltaComparison(object): + def test_comparison_object_array(self): + # analogous to GH#15183 + td = Timedelta('2 days') + other = Timedelta('3 hours') + + arr = np.array([other, td], dtype=object) + res = arr == td + expected = np.array([False, True], dtype=bool) + assert (res == expected).all() + + # 2D case + arr = np.array([[other, td], 
+ [td, other]], + dtype=object) + res = arr != td + expected = np.array([[True, False], [False, True]], dtype=bool) + assert res.shape == expected.shape + assert (res == expected).all() + + class TestTimedeltas(object): _multiprocess_can_split_ = True diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 9c649a42fb8ee..19c09701f6106 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -969,6 +969,31 @@ def test_timestamp(self): class TestTimestampComparison(object): + def test_comparison_object_array(self): + # GH#15183 + ts = Timestamp('2011-01-03 00:00:00-0500', tz='US/Eastern') + other = Timestamp('2011-01-01 00:00:00-0500', tz='US/Eastern') + naive = Timestamp('2011-01-01 00:00:00') + + arr = np.array([other, ts], dtype=object) + res = arr == ts + expected = np.array([False, True], dtype=bool) + assert (res == expected).all() + + # 2D case + arr = np.array([[other, ts], + [ts, other]], + dtype=object) + res = arr != ts + expected = np.array([[True, False], [False, True]], dtype=bool) + assert res.shape == expected.shape + assert (res == expected).all() + + # tzaware mismatch + arr = np.array([naive], dtype=object) + with pytest.raises(TypeError): + arr < ts + def test_comparison(self): # 5-18-2012 00:00:00.000 stamp = long(1337299200000000000)
Timedelta comparison already handled this correctly but I didn't see a test for it, so added one. - [x] closes #15183 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18829
2017-12-18T22:44:18Z
2017-12-19T11:09:45Z
2017-12-19T11:09:45Z
2018-02-11T22:00:32Z
ENH: Added public accessor registrar
diff --git a/LICENSES/XARRAY_LICENSE b/LICENSES/XARRAY_LICENSE new file mode 100644 index 0000000000000..37ec93a14fdcd --- /dev/null +++ b/LICENSES/XARRAY_LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/doc/source/api.rst b/doc/source/api.rst index 02f729c89295b..88419df1880ec 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2498,6 +2498,20 @@ Scalar introspection api.types.is_re_compilable api.types.is_scalar +Extensions +---------- + +These are primarily intented for library authors looking to extend pandas +objects. + +.. currentmodule:: pandas + +.. autosummary:: + :toctree: generated/ + + api.extensions.register_dataframe_accessor + api.extensions.register_series_accessor + api.extensions.register_index_accessor .. This is to prevent warnings in the doc build. We don't want to encourage .. these methods. 
diff --git a/doc/source/developer.rst b/doc/source/developer.rst index b8bb2b2fcbe2f..5c3b114ce7299 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -140,3 +140,46 @@ As an example of fully-formed metadata: 'metadata': None} ], 'pandas_version': '0.20.0'} + +.. _developer.register-accessors: + +Registering Custom Accessors +---------------------------- + +Libraries can use the decorators +:func:`pandas.api.extensions.register_dataframe_accessor`, +:func:`pandas.api.extensions.register_series_accessor`, and +:func:`pandas.api.extensions.register_index_accessor`, to add additional "namespaces" to +pandas objects. All of these follow a similar convention: you decorate a class, providing the name of attribute to add. The +class's `__init__` method gets the object being decorated. For example: + +.. ipython:: python + + @pd.api.extensions.register_dataframe_accessor("geo") + class GeoAccessor(object): + def __init__(self, pandas_obj): + self._obj = pandas_obj + + @property + def center(self): + # return the geographic center point of this DataFarme + lon = self._obj.latitude + lat = self._obj.longitude + return (float(lon.mean()), float(lat.mean())) + + def plot(self): + # plot this array's data on a map, e.g., using Cartopy + pass + +Now users can access your methods using the `geo` namespace: + + >>> ds = pd.DataFrame({'longitude': np.linspace(0, 10), + ... 'latitude': np.linspace(0, 20)}) + >>> ds.geo.center + (5.0, 10.0) + >>> ds.geo.plot() + # plots data on a map + +This can be a convenient way to extend pandas objects without subclassing them. +If you write a custom accessor, make a pull request adding it to our +:ref:`ecosystem` page. diff --git a/doc/source/internals.rst b/doc/source/internals.rst index a321b4202296f..ee4df879d9478 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -100,6 +100,8 @@ Subclassing pandas Data Structures 2. Use *composition*. 
See `here <http://en.wikipedia.org/wiki/Composition_over_inheritance>`_. + 3. Extending by :ref:`registering an accessor <internals.register-accessors>` + This section describes how to subclass ``pandas`` data structures to meet more specific needs. There are 2 points which need attention: 1. Override constructor properties. diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6fe15133914da..952182cfdcf82 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -202,6 +202,13 @@ Other Enhancements - ``Resampler`` objects now have a functioning :attr:`~pandas.core.resample.Resampler.pipe` method. Previously, calls to ``pipe`` were diverted to the ``mean`` method (:issue:`17905`). - :func:`~pandas.api.types.is_scalar` now returns ``True`` for ``DateOffset`` objects (:issue:`18943`). +- Added :func:`pandas.api.extensions.register_dataframe_accessor`, + :func:`pandas.api.extensions.register_series_accessor`, and + :func:`pandas.api.extensions.register_index_accessor`, accessor for libraries downstream of pandas + to register custom accessors like ``.cat`` on pandas objects. See + :ref:`Registering Custom Accessors <developer.register-accessors>` for more (:issue:`14781`). + + - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py index fcbf42f6dabc4..afff059e7b601 100644 --- a/pandas/api/__init__.py +++ b/pandas/api/__init__.py @@ -1 +1,2 @@ """ public toolkit API """ +from . 
import types, extensions # noqa diff --git a/pandas/api/extensions/__init__.py b/pandas/api/extensions/__init__.py new file mode 100644 index 0000000000000..64f5e8fb939a4 --- /dev/null +++ b/pandas/api/extensions/__init__.py @@ -0,0 +1,4 @@ +"""Public API for extending panadas objects.""" +from pandas.core.accessor import (register_dataframe_accessor, # noqa + register_index_accessor, + register_series_accessor) diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 73e01fbf17205..96bf628c8d7ff 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -5,7 +5,9 @@ that can be mixed into or pinned onto other pandas classes. """ -from pandas.core.common import AbstractMethodError +import warnings + +from pandas.util._decorators import Appender class DirNamesMixin(object): @@ -37,38 +39,9 @@ def __dir__(self): return sorted(rv) -class AccessorProperty(object): - """Descriptor for implementing accessor properties like Series.str - """ - - def __init__(self, accessor_cls, construct_accessor=None): - self.accessor_cls = accessor_cls - self.construct_accessor = (construct_accessor or - accessor_cls._make_accessor) - self.__doc__ = accessor_cls.__doc__ - - def __get__(self, instance, owner=None): - if instance is None: - # this ensures that Series.str.<method> is well defined - return self.accessor_cls - return self.construct_accessor(instance) - - def __set__(self, instance, value): - raise AttributeError("can't set attribute") - - def __delete__(self, instance): - raise AttributeError("can't delete attribute") - - class PandasDelegate(object): """ an abstract base class for delegating methods/properties """ - @classmethod - def _make_accessor(cls, data): - raise AbstractMethodError("_make_accessor should be implemented" - "by subclass and return an instance" - "of `cls`.") - def _delegate_property_get(self, name, *args, **kwargs): raise TypeError("You cannot access the " "property {name}".format(name=name)) @@ -129,3 +102,138 @@ def f(self, 
*args, **kwargs): # don't overwrite existing methods/properties if overwrite or not hasattr(cls, name): setattr(cls, name, f) + + +# Ported with modifications from xarray +# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py +# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors +# 2. We use a UserWarning instead of a custom Warning + +class CachedAccessor(object): + """Custom property-like object (descriptor) for caching accessors. + + Parameters + ---------- + name : str + The namespace this will be accessed under, e.g. ``df.foo`` + accessor : cls + The class with the extension methods. The class' __init__ method + should expect one of a ``Series``, ``DataFrame`` or ``Index`` as + the single argument ``data`` + """ + def __init__(self, name, accessor): + self._name = name + self._accessor = accessor + + def __get__(self, obj, cls): + if obj is None: + # we're accessing the attribute of the class, i.e., Dataset.geo + return self._accessor + accessor_obj = self._accessor(obj) + # Replace the property with the accessor object. Inspired by: + # http://www.pydanny.com/cached-property.html + # We need to use object.__setattr__ because we overwrite __setattr__ on + # NDFrame + object.__setattr__(obj, self._name, accessor_obj) + return accessor_obj + + +def _register_accessor(name, cls): + def decorator(accessor): + if hasattr(cls, name): + warnings.warn( + 'registration of accessor {!r} under name {!r} for type ' + '{!r} is overriding a preexisting attribute with the same ' + 'name.'.format(accessor, name, cls), + UserWarning, + stacklevel=2) + setattr(cls, name, CachedAccessor(name, accessor)) + return accessor + return decorator + + +_doc = """Register a custom accessor on %(klass)s objects. + +Parameters +---------- +name : str + Name under which the accessor should be registered. A warning is issued + if this name conflicts with a preexisting attribute. 
+ +Notes +----- +When accessed, your accessor will be initialized with the pandas object +the user is interacting with. So the signature must be + +.. code-block:: python + + def __init__(self, pandas_object): + +For consistency with pandas methods, you should raise an ``AttributeError`` +if the data passed to your accessor has an incorrect dtype. + +>>> pd.Series(['a', 'b']).dt +Traceback (most recent call last): +... +AttributeError: Can only use .dt accessor with datetimelike values + +Examples +-------- + +In your library code:: + + import pandas as pd + + @pd.api.extensions.register_dataframe_accessor("geo") + class GeoAccessor(object): + def __init__(self, pandas_obj): + self._obj = pandas_obj + + @property + def center(self): + # return the geographic center point of this DataFarme + lon = self._obj.latitude + lat = self._obj.longitude + return (float(lon.mean()), float(lat.mean())) + + def plot(self): + # plot this array's data on a map, e.g., using Cartopy + pass + +Back in an interactive IPython session: + + >>> ds = pd.DataFrame({'longitude': np.linspace(0, 10), + ... 
'latitude': np.linspace(0, 20)}) + >>> ds.geo.center + (5.0, 10.0) + >>> ds.geo.plot() + # plots data on a map + +See also +-------- +%(others)s +""" + + +@Appender(_doc % dict(klass="DataFrame", + others=("register_series_accessor, " + "register_index_accessor"))) +def register_dataframe_accessor(name): + from pandas import DataFrame + return _register_accessor(name, DataFrame) + + +@Appender(_doc % dict(klass="Series", + others=("register_dataframe_accessor, " + "register_index_accessor"))) +def register_series_accessor(name): + from pandas import Series + return _register_accessor(name, Series) + + +@Appender(_doc % dict(klass="Index", + others=("register_dataframe_accessor, " + "register_series_accessor"))) +def register_index_accessor(name): + from pandas import Index + return _register_accessor(name, Index) diff --git a/pandas/core/base.py b/pandas/core/base.py index e90794c6c2e1a..4b3e74eae36b8 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -145,10 +145,14 @@ def _freeze(self): # prevent adding any attribute via s.xxx.new_attribute = ... def __setattr__(self, key, value): # _cache is used by a decorator - # dict lookup instead of getattr as getattr is false for getter - # which error - if getattr(self, "__frozen", False) and not \ - (key in type(self).__dict__ or key == "_cache"): + # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key) + # because + # 1.) getattr is false for attributes that raise errors + # 2.) cls.__dict__ doesn't traverse into base classes + if (getattr(self, "__frozen", False) and not + (key == "_cache" or + key in type(self).__dict__ or + getattr(self, key, None) is not None)): raise AttributeError("You cannot add any new attribute '{key}'". 
format(key=key)) object.__setattr__(self, key, value) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 92fcdc0f4625b..7b11e37a14b51 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2142,6 +2142,10 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): methods return new categorical data per default (but can be called with `inplace=True`). + Parameters + ---------- + data : Series or CategoricalIndex + Examples -------- >>> s.cat.categories @@ -2157,12 +2161,19 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): """ - def __init__(self, values, index, name): - self.categorical = values - self.index = index - self.name = name + def __init__(self, data): + self._validate(data) + self.categorical = data.values + self.index = data.index + self.name = data.name self._freeze() + @staticmethod + def _validate(data): + if not is_categorical_dtype(data.dtype): + raise AttributeError("Can only use .cat accessor with a " + "'category' dtype") + def _delegate_property_get(self, name): return getattr(self.categorical, name) @@ -2181,14 +2192,6 @@ def _delegate_method(self, name, *args, **kwargs): if res is not None: return Series(res, index=self.index, name=self.name) - @classmethod - def _make_accessor(cls, data): - if not is_categorical_dtype(data.dtype): - raise AttributeError("Can only use .cat accessor with a " - "'category' dtype") - return CategoricalAccessor(data.values, data.index, - getattr(data, 'name', None),) - CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=["categories", diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4d907180da00a..2c05eefa5706e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -23,6 +23,7 @@ import numpy as np import numpy.ma as ma +from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.cast import ( maybe_upcast, cast_scalar_to_array, @@ -92,7 +93,6 @@ from 
pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex -from pandas.core import accessor import pandas.core.common as com import pandas.core.nanops as nanops import pandas.core.ops as ops @@ -6003,8 +6003,7 @@ def isin(self, values): # ---------------------------------------------------------------------- # Add plotting methods to DataFrame - plot = accessor.AccessorProperty(gfx.FramePlotMethods, - gfx.FramePlotMethods) + plot = CachedAccessor("plot", gfx.FramePlotMethods) hist = gfx.hist_frame boxplot = gfx.boxplot_frame diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 116c7eb8c7958..d40230386216c 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -4,6 +4,7 @@ import numpy as np +from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.common import ( is_period_arraylike, is_datetime_arraylike, is_integer_dtype, @@ -20,81 +21,44 @@ from pandas.core.algorithms import take_1d -def is_datetimelike(data): - """ - return a boolean if we can be successfully converted to a datetimelike - """ - try: - maybe_to_datetimelike(data) - return True - except (Exception): - pass - return False +class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin): + def __init__(self, data, orig): + if not isinstance(data, ABCSeries): + raise TypeError("cannot convert an object of type {0} to a " + "datetimelike index".format(type(data))) -def maybe_to_datetimelike(data, copy=False): - """ - return a DelegatedClass of a Series that is datetimelike - (e.g. datetime64[ns],timedelta64[ns] dtype or a Series of Periods) - raise TypeError if this is not possible. 
+ self.values = data + self.orig = orig + self.name = getattr(data, 'name', None) + self.index = getattr(data, 'index', None) + self._freeze() - Parameters - ---------- - data : Series - copy : boolean, default False - copy the input data + def _get_values(self): + data = self.values + if is_datetime64_dtype(data.dtype): + return DatetimeIndex(data, copy=False, name=self.name) - Returns - ------- - DelegatedClass + elif is_datetime64tz_dtype(data.dtype): + return DatetimeIndex(data, copy=False, name=self.name) - """ - from pandas import Series + elif is_timedelta64_dtype(data.dtype): + return TimedeltaIndex(data, copy=False, name=self.name) + + else: + if is_period_arraylike(data): + return PeriodIndex(data, copy=False, name=self.name) + if is_datetime_arraylike(data): + return DatetimeIndex(data, copy=False, name=self.name) - if not isinstance(data, Series): raise TypeError("cannot convert an object of type {0} to a " "datetimelike index".format(type(data))) - index = data.index - name = data.name - orig = data if is_categorical_dtype(data) else None - if orig is not None: - data = orig.values.categories - - if is_datetime64_dtype(data.dtype): - return DatetimeProperties(DatetimeIndex(data, copy=copy), - index, name=name, orig=orig) - elif is_datetime64tz_dtype(data.dtype): - return DatetimeProperties(DatetimeIndex(data, copy=copy), - index, data.name, orig=orig) - elif is_timedelta64_dtype(data.dtype): - return TimedeltaProperties(TimedeltaIndex(data, copy=copy), index, - name=name, orig=orig) - else: - if is_period_arraylike(data): - return PeriodProperties(PeriodIndex(data, copy=copy), index, - name=name, orig=orig) - if is_datetime_arraylike(data): - return DatetimeProperties(DatetimeIndex(data, copy=copy), index, - name=name, orig=orig) - - raise TypeError("cannot convert an object of type {0} to a " - "datetimelike index".format(type(data))) - - -class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin): - - def __init__(self, values, index, name, 
orig=None): - self.values = values - self.index = index - self.name = name - self.orig = orig - self._freeze() - def _delegate_property_get(self, name): from pandas import Series + values = self._get_values() - result = getattr(self.values, name) + result = getattr(values, name) # maybe need to upcast (ints) if isinstance(result, np.ndarray): @@ -126,8 +90,9 @@ def _delegate_property_set(self, name, value, *args, **kwargs): def _delegate_method(self, name, *args, **kwargs): from pandas import Series + values = self._get_values() - method = getattr(self.values, name) + method = getattr(values, name) result = method(*args, **kwargs) if not is_list_like(result): @@ -158,11 +123,11 @@ class DatetimeProperties(Properties): """ def to_pydatetime(self): - return self.values.to_pydatetime() + return self._get_values().to_pydatetime() @property def freq(self): - return self.values.inferred_freq + return self._get_values().inferred_freq DatetimeProperties._add_delegate_accessors( @@ -189,7 +154,7 @@ class TimedeltaProperties(Properties): """ def to_pytimedelta(self): - return self.values.to_pytimedelta() + return self._get_values().to_pytimedelta() @property def components(self): @@ -202,11 +167,11 @@ def components(self): a DataFrame """ - return self.values.components.set_index(self.index) + return self._get_values().components.set_index(self.index) @property def freq(self): - return self.values.inferred_freq + return self._get_values().inferred_freq TimedeltaProperties._add_delegate_accessors( @@ -245,15 +210,38 @@ class PeriodProperties(Properties): class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties): - # This class is never instantiated, and exists solely for the benefit of - # the Series.dt class property. For Series objects, .dt will always be one - # of the more specific classes above. 
- __doc__ = DatetimeProperties.__doc__ - @classmethod - def _make_accessor(cls, data): + def __new__(cls, data): + # CombinedDatetimelikeProperties isn't really instantiated. Instead + # we need to choose which parent (datetime or timedelta) is + # appropriate. Since we're checking the dtypes anyway, we'll just + # do all the validation here. + from pandas import Series + + if not isinstance(data, Series): + raise TypeError("cannot convert an object of type {0} to a " + "datetimelike index".format(type(data))) + + orig = data if is_categorical_dtype(data) else None + if orig is not None: + data = Series(orig.values.categories, + name=orig.name, + copy=False) + try: - return maybe_to_datetimelike(data) + if is_datetime64_dtype(data.dtype): + return DatetimeProperties(data, orig) + elif is_datetime64tz_dtype(data.dtype): + return DatetimeProperties(data, orig) + elif is_timedelta64_dtype(data.dtype): + return TimedeltaProperties(data, orig) + else: + if is_period_arraylike(data): + return PeriodProperties(data, orig) + if is_datetime_arraylike(data): + return DatetimeProperties(data, orig) except Exception: - raise AttributeError("Can only use .dt accessor with " - "datetimelike values") + pass # we raise an attribute error anyway + + raise AttributeError("Can only use .dt accessor with datetimelike " + "values") diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f634d809560ee..a5949c62ad913 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -12,6 +12,7 @@ from pandas.compat.numpy import function as nv from pandas import compat +from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.generic import ( ABCSeries, ABCMultiIndex, @@ -55,8 +56,8 @@ import pandas.core.sorting as sorting from pandas.io.formats.printing import pprint_thing from pandas.core.ops import _comp_method_OBJECT_ARRAY -from pandas.core import strings, accessor from pandas.core.config import get_option +from pandas.core.strings 
import StringMethods # simplify @@ -172,9 +173,7 @@ class Index(IndexOpsMixin, PandasObject): _engine_type = libindex.ObjectEngine _accessors = frozenset(['str']) - - # String Methods - str = accessor.AccessorProperty(strings.StringMethods) + str = CachedAccessor("str", StringMethods) def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, tupleize_cols=True, **kwargs): diff --git a/pandas/core/series.py b/pandas/core/series.py index 71cded4f9c888..25c8bacd31940 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -13,6 +13,7 @@ import numpy as np import numpy.ma as ma +from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.common import ( is_categorical_dtype, is_bool, @@ -53,7 +54,6 @@ from pandas.core import generic, base from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical, CategoricalAccessor -import pandas.core.strings as strings from pandas.core.indexes.accessors import CombinedDatetimelikeProperties from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex @@ -64,7 +64,6 @@ zip, u, OrderedDict, StringIO, range, get_range_parameters) from pandas.compat.numpy import function as nv -from pandas.core import accessor import pandas.core.ops as ops import pandas.core.algorithms as algorithms @@ -77,6 +76,7 @@ from pandas._libs import index as libindex, tslib as libts, lib, iNaT from pandas.core.config import get_option +from pandas.core.strings import StringMethods import pandas.plotting._core as gfx @@ -3069,21 +3069,16 @@ def to_period(self, freq=None, copy=True): return self._constructor(new_values, index=new_index).__finalize__(self) - # ------------------------------------------------------------------------- - # Datetimelike delegation methods - dt = accessor.AccessorProperty(CombinedDatetimelikeProperties) - - # ------------------------------------------------------------------------- - # Categorical 
methods - cat = accessor.AccessorProperty(CategoricalAccessor) - - # String Methods - str = accessor.AccessorProperty(strings.StringMethods) + # ---------------------------------------------------------------------- + # Accessor Methods + # ---------------------------------------------------------------------- + str = CachedAccessor("str", StringMethods) + dt = CachedAccessor("dt", CombinedDatetimelikeProperties) + cat = CachedAccessor("cat", CategoricalAccessor) + plot = CachedAccessor("plot", gfx.SeriesPlotMethods) # ---------------------------------------------------------------------- # Add plotting methods to Series - plot = accessor.AccessorProperty(gfx.SeriesPlotMethods, - gfx.SeriesPlotMethods) hist = gfx.hist_series diff --git a/pandas/core/strings.py b/pandas/core/strings.py index e0012c25e366d..278b220753196 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1370,12 +1370,44 @@ class StringMethods(NoNewAttributesMixin): """ def __init__(self, data): + self._validate(data) self._is_categorical = is_categorical_dtype(data) self._data = data.cat.categories if self._is_categorical else data # save orig to blow up categoricals to the right type self._orig = data self._freeze() + @staticmethod + def _validate(data): + from pandas.core.index import Index + + if (isinstance(data, ABCSeries) and + not ((is_categorical_dtype(data.dtype) and + is_object_dtype(data.values.categories)) or + (is_object_dtype(data.dtype)))): + # it's neither a string series not a categorical series with + # strings inside the categories. 
+ # this really should exclude all series with any non-string values + # (instead of test for object dtype), but that isn't practical for + # performance reasons until we have a str dtype (GH 9343) + raise AttributeError("Can only use .str accessor with string " + "values, which use np.object_ dtype in " + "pandas") + elif isinstance(data, Index): + # can't use ABCIndex to exclude non-str + + # see scc/inferrence.pyx which can contain string values + allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer') + if data.inferred_type not in allowed_types: + message = ("Can only use .str accessor with string values " + "(i.e. inferred_type is 'string', 'unicode' or " + "'mixed')") + raise AttributeError(message) + if data.nlevels > 1: + message = ("Can only use .str accessor with Index, not " + "MultiIndex") + raise AttributeError(message) + def __getitem__(self, key): if isinstance(key, slice): return self.slice(start=key.start, stop=key.stop, step=key.step) @@ -1895,32 +1927,5 @@ def rindex(self, sub, start=0, end=None): @classmethod def _make_accessor(cls, data): - from pandas.core.index import Index - - if (isinstance(data, ABCSeries) and - not ((is_categorical_dtype(data.dtype) and - is_object_dtype(data.values.categories)) or - (is_object_dtype(data.dtype)))): - # it's neither a string series not a categorical series with - # strings inside the categories. 
- # this really should exclude all series with any non-string values - # (instead of test for object dtype), but that isn't practical for - # performance reasons until we have a str dtype (GH 9343) - raise AttributeError("Can only use .str accessor with string " - "values, which use np.object_ dtype in " - "pandas") - elif isinstance(data, Index): - # can't use ABCIndex to exclude non-str - - # see scc/inferrence.pyx which can contain string values - allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer') - if data.inferred_type not in allowed_types: - message = ("Can only use .str accessor with string values " - "(i.e. inferred_type is 'string', 'unicode' or " - "'mixed')") - raise AttributeError(message) - if data.nlevels > 1: - message = ("Can only use .str accessor with Index, not " - "MultiIndex") - raise AttributeError(message) + cls._validate(data) return cls(data) diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index b3d1ce31d66ae..d843126c60144 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -65,3 +65,7 @@ class MergeError(ValueError): Error raised when problems arise during merging due to problems with input data. Subclass of `ValueError`. 
""" + + +class AccessorRegistrationWarning(Warning): + """Warning for attribute conflicts in accessor registration.""" diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 8962eb90be828..821c7858c7a5c 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -122,7 +122,7 @@ def test_api(self): class TestApi(Base): - allowed = ['types'] + allowed = ['types', 'extensions'] def test_api(self): diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 8ae7feab451f9..73cc87855acbd 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -512,6 +512,7 @@ def test_cat_accessor(self): def test_cat_accessor_api(self): # GH 9322 from pandas.core.categorical import CategoricalAccessor + assert Series.cat is CategoricalAccessor s = Series(list('aabbcde')).astype('category') assert isinstance(s.cat, CategoricalAccessor) diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py new file mode 100644 index 0000000000000..fe0cf4c9b38af --- /dev/null +++ b/pandas/tests/test_register_accessor.py @@ -0,0 +1,87 @@ +import contextlib + +import pytest + +import pandas as pd +import pandas.util.testing as tm + + +@contextlib.contextmanager +def ensure_removed(obj, attr): + """Ensure that an attribute added to 'obj' during the test is + removed when we're done""" + try: + yield + finally: + try: + delattr(obj, attr) + except AttributeError: + pass + + +class MyAccessor(object): + + def __init__(self, obj): + self.obj = obj + self.item = 'item' + + @property + def prop(self): + return self.item + + def method(self): + return self.item + + +@pytest.mark.parametrize('obj, registrar', [ + (pd.Series, pd.api.extensions.register_series_accessor), + (pd.DataFrame, pd.api.extensions.register_dataframe_accessor), + (pd.Index, pd.api.extensions.register_index_accessor) +]) +def test_series_register(obj, registrar): + with ensure_removed(obj, 'mine'): + before = 
set(dir(obj)) + registrar('mine')(MyAccessor) + assert obj([]).mine.prop == 'item' + after = set(dir(obj)) + assert (before ^ after) == {'mine'} + + +def test_accessor_works(): + with ensure_removed(pd.Series, 'mine'): + pd.api.extensions.register_series_accessor('mine')(MyAccessor) + + s = pd.Series([1, 2]) + assert s.mine.obj is s + + assert s.mine.prop == 'item' + assert s.mine.method() == 'item' + + +def test_overwrite_warns(): + # Need to restore mean + mean = pd.Series.mean + try: + with tm.assert_produces_warning(UserWarning) as w: + pd.api.extensions.register_series_accessor('mean')(MyAccessor) + s = pd.Series([1, 2]) + assert s.mean.prop == 'item' + msg = str(w[0].message) + assert 'mean' in msg + assert 'MyAccessor' in msg + assert 'Series' in msg + finally: + pd.Series.mean = mean + + +def test_raises_attribute_error(): + + with ensure_removed(pd.Series, 'bad'): + + @pd.api.extensions.register_series_accessor("bad") + class Bad(object): + def __init__(self, data): + raise AttributeError("whoops") + + with tm.assert_raises_regex(AttributeError, "whoops"): + pd.Series([]).bad
Adds new methods for registing custom accessors to pandas objects. This will be helpful for implementing https://github.com/pandas-dev/pandas/issues/18767 outside of pandas. If we accept this I'm not sure it belongs in the top-level namespace where I have it currently. What would a good home be for this be? `pd.api.extensions`? Closes https://github.com/pandas-dev/pandas/issues/14781
https://api.github.com/repos/pandas-dev/pandas/pulls/18827
2017-12-18T21:17:00Z
2018-01-16T00:43:17Z
2018-01-16T00:43:16Z
2018-01-16T00:43:20Z
BUG: raise error for groupby() with invalid tuple key
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ae6d0816abc41..8c6226ec15739 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -310,6 +310,7 @@ Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`) +- Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`) - - diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index b4223ac0a177a..ced120fbdbe29 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2977,7 +2977,7 @@ def is_in_obj(gpr): def _is_label_like(val): - return (isinstance(val, compat.string_types) or + return (isinstance(val, (compat.string_types, tuple)) or (val is not None and is_scalar(val))) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 3327612b016f4..cf4a6ec1c932a 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2750,7 +2750,6 @@ def test_tuple_warns_unhashable(self): assert "Interpreting tuple 'by' as a list" in str(w[0].message) - @pytest.mark.xfail(reason="GH-18798") def test_tuple_correct_keyerror(self): # https://github.com/pandas-dev/pandas/issues/18798 df = pd.DataFrame(1, index=range(3),
- [x] closes #18798 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18826
2017-12-18T20:55:24Z
2017-12-19T11:12:24Z
2017-12-19T11:12:24Z
2017-12-19T11:17:43Z
BUG make hashtable.unique support readonly arrays
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f300deddebeb..ff3cdf60a2f8c 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -344,6 +344,7 @@ Reshaping - Bug in :func:`DataFrame.stack` which fails trying to sort mixed type levels under Python 3 (:issue:`18310`) - Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`) - Bug in :func:`Series.rank` where ``Series`` containing ``NaT`` modifies the ``Series`` inplace (:issue:`18521`) +- Bug in :func:`cut` which fails when using readonly arrays (:issue:`18773`) - Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`) diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 6e1c4397810b7..bd9dd1f9bae37 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -255,10 +255,56 @@ dtypes = [('Float64', 'float64', 'val != val', True), ('UInt64', 'uint64', 'False', False), ('Int64', 'int64', 'val == iNaT', False)] +def get_dispatch(dtypes): + for (name, dtype, null_condition, float_group) in dtypes: + unique_template = """\ + cdef: + Py_ssize_t i, n = len(values) + int ret = 0 + {dtype}_t val + khiter_t k + bint seen_na = 0 + {name}Vector uniques = {name}Vector() + {name}VectorData *ud + + ud = uniques.data + + with nogil: + for i in range(n): + val = values[i] + IF {float_group}: + if val == val: + k = kh_get_{dtype}(self.table, val) + if k == self.table.n_buckets: + kh_put_{dtype}(self.table, val, &ret) + if needs_resize(ud): + with gil: + uniques.resize() + append_data_{dtype}(ud, val) + elif not seen_na: + seen_na = 1 + if needs_resize(ud): + with gil: + uniques.resize() + append_data_{dtype}(ud, NAN) + ELSE: + k = kh_get_{dtype}(self.table, val) + if k == self.table.n_buckets: 
+ kh_put_{dtype}(self.table, val, &ret) + if needs_resize(ud): + with gil: + uniques.resize() + append_data_{dtype}(ud, val) + return uniques.to_array() + """ + + unique_template = unique_template.format(name=name, dtype=dtype, null_condition=null_condition, float_group=float_group) + + yield (name, dtype, null_condition, float_group, unique_template) }} -{{for name, dtype, null_condition, float_group in dtypes}} +{{for name, dtype, null_condition, float_group, unique_template in get_dispatch(dtypes)}} cdef class {{name}}HashTable(HashTable): @@ -450,48 +496,20 @@ cdef class {{name}}HashTable(HashTable): return np.asarray(labels), arr_uniques @cython.boundscheck(False) - def unique(self, {{dtype}}_t[:] values): - cdef: - Py_ssize_t i, n = len(values) - int ret = 0 - {{dtype}}_t val - khiter_t k - bint seen_na = 0 - {{name}}Vector uniques = {{name}}Vector() - {{name}}VectorData *ud + def unique(self, ndarray[{{dtype}}_t, ndim=1] values): + if values.flags.writeable: + # If the value is writeable (mutable) then use memview + return self.unique_memview(values) - ud = uniques.data - - with nogil: - for i in range(n): - val = values[i] - - {{if float_group}} - if val == val: - k = kh_get_{{dtype}}(self.table, val) - if k == self.table.n_buckets: - kh_put_{{dtype}}(self.table, val, &ret) - if needs_resize(ud): - with gil: - uniques.resize() - append_data_{{dtype}}(ud, val) - elif not seen_na: - seen_na = 1 - if needs_resize(ud): - with gil: - uniques.resize() - append_data_{{dtype}}(ud, NAN) - {{else}} - k = kh_get_{{dtype}}(self.table, val) - if k == self.table.n_buckets: - kh_put_{{dtype}}(self.table, val, &ret) - if needs_resize(ud): - with gil: - uniques.resize() - append_data_{{dtype}}(ud, val) - {{endif}} + # We cannot use the memoryview version on readonly-buffers due to + # a limitation of Cython's typed memoryviews. Instead we can use + # the slightly slower Cython ndarray type directly. 
+ # see https://github.com/cython/cython/issues/1605 +{{unique_template}} - return uniques.to_array() + @cython.boundscheck(False) + def unique_memview(self, {{dtype}}_t[:] values): +{{unique_template}} {{endfor}} diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index c27af7a5bf8e4..48f25112b45cf 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -512,7 +512,18 @@ def f(): tm.assert_numpy_array_equal( mask, np.array([False, True, True, True, True])) + @pytest.mark.parametrize( + "array_1_writeable, array_2_writeable", + [(True, True), (True, False), (False, False)]) + def test_cut_read_only(self, array_1_writeable, array_2_writeable): + # issue 18773 + array_1 = np.arange(0, 100, 10) + array_1.flags.writeable = array_1_writeable -def curpath(): - pth, _ = os.path.split(os.path.abspath(__file__)) - return pth + array_2 = np.arange(0, 100, 10) + array_2.flags.writeable = array_2_writeable + + hundred_elements = np.arange(100) + + tm.assert_categorical_equal(cut(hundred_elements, array_1), + cut(hundred_elements, array_2))
This problem was brought up in https://github.com/pandas-dev/pandas/issues/18773 and effectively comes down to how Cython deals with readonly arrays. While it would be ideal for Cython to fix the underlying problem in the meantime we can rely on this. - [x] closes #18773 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18825
2017-12-18T19:54:51Z
2017-12-27T20:27:03Z
2017-12-27T20:27:03Z
2017-12-27T20:27:13Z
implement is_offsetlike
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5b1335c1a834e..e2ee3deb5396e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -12,8 +12,8 @@ from .generic import (ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries, ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, - ABCIndexClass) -from .inference import is_string_like + ABCIndexClass, ABCDateOffset) +from .inference import is_string_like, is_list_like from .inference import * # noqa @@ -266,6 +266,37 @@ def is_datetimetz(arr): is_datetime64tz_dtype(arr)) +def is_offsetlike(arr_or_obj): + """ + Check if obj or all elements of list-like is DateOffset + + Parameters + ---------- + arr_or_obj : object + + Returns + ------- + boolean : Whether the object is a DateOffset or listlike of DatetOffsets + + Examples + -------- + >>> is_offsetlike(pd.DateOffset(days=1)) + True + >>> is_offsetlike('offset') + False + >>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()]) + True + >>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()])) + False + """ + if isinstance(arr_or_obj, ABCDateOffset): + return True + elif (is_list_like(arr_or_obj) and len(arr_or_obj) and + is_object_dtype(arr_or_obj)): + return all(isinstance(x, ABCDateOffset) for x in arr_or_obj) + return False + + def is_period(arr): """ Check whether an array-like is a periodical index. 
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 2fb0cbb14c225..ca6d888625873 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -30,7 +30,7 @@ is_object_dtype, is_timedelta64_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_bool_dtype, is_datetimetz, - is_list_like, + is_list_like, is_offsetlike, is_scalar, _ensure_object) from pandas.core.dtypes.cast import maybe_upcast_putmask, find_common_type @@ -38,8 +38,7 @@ ABCSeries, ABCDataFrame, ABCIndex, - ABCPeriodIndex, - ABCDateOffset) + ABCPeriodIndex) # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory @@ -363,7 +362,7 @@ def __init__(self, left, right, name, na_op): rvalues = self._convert_to_array(right, name=name, other=lvalues) # left - self.is_offset_lhs = self._is_offset(left) + self.is_offset_lhs = is_offsetlike(left) self.is_timedelta_lhs = is_timedelta64_dtype(lvalues) self.is_datetime64_lhs = is_datetime64_dtype(lvalues) self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues) @@ -373,7 +372,7 @@ def __init__(self, left, right, name, na_op): self.is_floating_lhs = left.dtype.kind == 'f' # right - self.is_offset_rhs = self._is_offset(right) + self.is_offset_rhs = is_offsetlike(right) self.is_datetime64_rhs = is_datetime64_dtype(rvalues) self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues) self.is_datetime_rhs = (self.is_datetime64_rhs or @@ -515,7 +514,7 @@ def _convert_to_array(self, values, name=None, other=None): values = np.empty(values.shape, dtype=other.dtype) values[:] = iNaT return values - elif self._is_offset(values): + elif is_offsetlike(values): return values else: raise TypeError("incompatible type [{dtype}] for a " @@ -618,15 +617,6 @@ def f(x): return lvalues, rvalues - def _is_offset(self, arr_or_obj): - """ check if obj or all elements of list-like is DateOffset """ - if isinstance(arr_or_obj, ABCDateOffset): - return True - elif (is_list_like(arr_or_obj) 
and len(arr_or_obj) and - is_object_dtype(arr_or_obj)): - return all(isinstance(x, ABCDateOffset) for x in arr_or_obj) - return False - def _align_method_SERIES(left, right, align_asobject=False): """ align lhs and rhs Series """ diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 2146704fea95f..bfec229d32b22 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -537,6 +537,19 @@ def test_is_complex_dtype(): assert com.is_complex_dtype(np.array([1 + 1j, 5])) +def test_is_offsetlike(): + assert com.is_offsetlike(np.array([pd.DateOffset(month=3), + pd.offsets.Nano()])) + assert com.is_offsetlike(pd.offsets.MonthEnd()) + assert com.is_offsetlike(pd.Index([pd.DateOffset(second=1)])) + + assert not com.is_offsetlike(pd.Timedelta(1)) + assert not com.is_offsetlike(np.array([1 + 1j, 5])) + + # mixed case + assert not com.is_offsetlike(np.array([pd.DateOffset(), pd.Timestamp(0)])) + + @pytest.mark.parametrize('input_param,result', [ (int, np.dtype(int)), ('int32', np.dtype('int32')),
This is a subset of #18817 that just implements `is_offsetlike` in `dtypes.common` (plus a nicer docstring). This needs to be implemented before some fixes can be made in indexes.datetimelike, so is worth separating from the more difficult bits of 18817. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18823
2017-12-18T16:34:49Z
2017-12-19T11:21:07Z
2017-12-19T11:21:07Z
2018-02-11T22:00:35Z
BUG: Fixed Categorical.Equals with unordered
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index bd3bee507baa3..f0083ab46d14d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -378,7 +378,7 @@ Numeric Categorical ^^^^^^^^^^^ -- +- Bug in ``Categorical.equals`` between two unordered categories with the same categories, but in a different order (:issue:`16603`) - - diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index d47cb0762447b..8b86de285b0fe 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2081,8 +2081,16 @@ def equals(self, other): ------- are_equal : boolean """ - return (self.is_dtype_equal(other) and - np.array_equal(self._codes, other._codes)) + if self.is_dtype_equal(other): + if self.categories.equals(other.categories): + # fastpath to avoid re-coding + other_codes = other._codes + else: + other_codes = _recode_for_categories(other.codes, + other.categories, + self.categories) + return np.array_equal(self._codes, other_codes) + return False def is_dtype_equal(self, other): """ diff --git a/pandas/tests/categorical/test_operators.py b/pandas/tests/categorical/test_operators.py index 09a0607b67a88..fa8bb817616e4 100644 --- a/pandas/tests/categorical/test_operators.py +++ b/pandas/tests/categorical/test_operators.py @@ -250,6 +250,13 @@ def test_compare_different_lengths(self): with tm.assert_raises_regex(TypeError, msg): c1 == c2 + def test_compare_unordered_different_order(self): + # https://github.com/pandas-dev/pandas/issues/16603#issuecomment- + # 349290078 + a = pd.Categorical(['a'], categories=['a', 'b']) + b = pd.Categorical(['b'], categories=['b', 'a']) + assert not a.equals(b) + def test_numeric_like_ops(self): df = DataFrame({'value': np.random.randint(0, 10000, 100)}) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index f7328a99195b9..d5c0494df030a 100644 --- a/pandas/tests/indexes/test_category.py +++ 
b/pandas/tests/indexes/test_category.py @@ -747,6 +747,15 @@ def test_equals_categorical(self): ordered=True)) assert ci.equals(ci.copy()) + def test_equals_categoridcal_unordered(self): + # https://github.com/pandas-dev/pandas/issues/16603 + a = pd.CategoricalIndex(['A'], categories=['A', 'B']) + b = pd.CategoricalIndex(['A'], categories=['B', 'A']) + c = pd.CategoricalIndex(['C'], categories=['B', 'A']) + assert a.equals(b) + assert not a.equals(c) + assert not b.equals(c) + def test_string_categorical_index_repr(self): # short idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
The original issue was already fixed. I added tests to verify (but no whatsnew entry). This adds tests and a fix for https://github.com/pandas-dev/pandas/issues/16603#issuecomment-349290078 about `Categorical.equals`. Closes https://github.com/pandas-dev/pandas/issues/16603
https://api.github.com/repos/pandas-dev/pandas/pulls/18822
2017-12-18T16:31:41Z
2018-01-06T17:37:43Z
2018-01-06T17:37:43Z
2018-01-06T17:37:46Z
Skipif no lzma ne
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 06e645563d51c..90f197738543a 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -97,11 +97,11 @@ def _is_py3_complex_incompat(result, expected): _good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms) +@td.skip_if_no_ne class TestEvalNumexprPandas(object): @classmethod def setup_class(cls): - tm.skip_if_no_ne() import numexpr as ne cls.ne = ne cls.engine = 'numexpr' @@ -374,7 +374,6 @@ def check_single_invert_op(self, lhs, cmp1, rhs): tm.assert_almost_equal(expected, result) for engine in self.current_engines: - tm.skip_if_no_ne(engine) tm.assert_almost_equal(result, pd.eval('~elb', engine=engine, parser=self.parser)) @@ -400,7 +399,6 @@ def check_compound_invert_op(self, lhs, cmp1, rhs): # make sure the other engines work the same as this one for engine in self.current_engines: - tm.skip_if_no_ne(engine) ev = pd.eval(ex, engine=self.engine, parser=self.parser) tm.assert_almost_equal(ev, result) @@ -731,12 +729,12 @@ def test_disallow_python_keywords(self): df.query('lambda == 0') +@td.skip_if_no_ne class TestEvalNumexprPython(TestEvalNumexprPandas): @classmethod def setup_class(cls): super(TestEvalNumexprPython, cls).setup_class() - tm.skip_if_no_ne() import numexpr as ne cls.ne = ne cls.engine = 'numexpr' @@ -1078,11 +1076,11 @@ def test_performance_warning_for_poor_alignment(self, engine, parser): # ------------------------------------ # Slightly more complex ops +@td.skip_if_no_ne class TestOperationsNumExprPandas(object): @classmethod def setup_class(cls): - tm.skip_if_no_ne() cls.engine = 'numexpr' cls.parser = 'pandas' cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms @@ -1528,6 +1526,7 @@ def test_simple_in_ops(self): parser=self.parser) +@td.skip_if_no_ne class TestOperationsNumExprPython(TestOperationsNumExprPandas): @classmethod @@ -1535,7 +1534,6 @@ def setup_class(cls): 
super(TestOperationsNumExprPython, cls).setup_class() cls.engine = 'numexpr' cls.parser = 'python' - tm.skip_if_no_ne(cls.engine) cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms cls.arith_ops = filter(lambda x: x not in ('in', 'not in'), cls.arith_ops) @@ -1623,11 +1621,11 @@ def setup_class(cls): cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms +@td.skip_if_no_ne class TestMathPythonPython(object): @classmethod def setup_class(cls): - tm.skip_if_no_ne() cls.engine = 'python' cls.parser = 'pandas' cls.unary_fns = _unary_math_ops @@ -1782,15 +1780,15 @@ def test_no_new_globals(self, engine, parser): assert gbls == gbls2 +@td.skip_if_no_ne def test_invalid_engine(): - tm.skip_if_no_ne() tm.assert_raises_regex(KeyError, 'Invalid engine \'asdf\' passed', pd.eval, 'x + y', local_dict={'x': 1, 'y': 2}, engine='asdf') +@td.skip_if_no_ne def test_invalid_parser(): - tm.skip_if_no_ne() tm.assert_raises_regex(KeyError, 'Invalid parser \'asdf\' passed', pd.eval, 'x + y', local_dict={'x': 1, 'y': 2}, parser='asdf') @@ -1803,7 +1801,6 @@ def test_invalid_parser(): @pytest.mark.parametrize('engine', _parsers) @pytest.mark.parametrize('parser', _parsers) def test_disallowed_nodes(engine, parser): - tm.skip_if_no_ne(engine) VisitorClass = _parsers[parser] uns_ops = VisitorClass.unsupported_nodes inst = VisitorClass('x + 1', engine, parser) diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index a6c36792ef074..22066d59cf14d 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -17,13 +17,14 @@ makeCustomDataframe as mkdf) import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.tests.frame.common import TestData PARSERS = 'python', 'pandas' -ENGINES = 'python', 'numexpr' +ENGINES = 'python', pytest.param('numexpr', marks=td.skip_if_no_ne) @pytest.fixture(params=PARSERS, ids=lambda x: x) @@ 
-41,13 +42,6 @@ def skip_if_no_pandas_parser(parser): pytest.skip("cannot evaluate with parser {0!r}".format(parser)) -def skip_if_no_ne(engine='numexpr'): - if engine == 'numexpr': - if not _NUMEXPR_INSTALLED: - pytest.skip("cannot query engine numexpr when numexpr not " - "installed") - - class TestCompat(object): def setup_method(self, method): @@ -175,7 +169,6 @@ def test_eval_resolvers_as_list(self): class TestDataFrameQueryWithMultiIndex(object): def test_query_with_named_multiindex(self, parser, engine): - tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) a = np.random.choice(['red', 'green'], size=10) b = np.random.choice(['eggs', 'ham'], size=10) @@ -225,7 +218,6 @@ def test_query_with_named_multiindex(self, parser, engine): assert_frame_equal(res2, exp) def test_query_with_unnamed_multiindex(self, parser, engine): - tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) a = np.random.choice(['red', 'green'], size=10) b = np.random.choice(['eggs', 'ham'], size=10) @@ -316,7 +308,6 @@ def test_query_with_unnamed_multiindex(self, parser, engine): assert_frame_equal(res2, exp) def test_query_with_partially_named_multiindex(self, parser, engine): - tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) a = np.random.choice(['red', 'green'], size=10) b = np.arange(10) @@ -370,27 +361,25 @@ def to_series(mi, level): raise AssertionError("object must be a Series or Index") def test_raise_on_panel_with_multiindex(self, parser, engine): - tm.skip_if_no_ne() p = tm.makePanel(7) p.items = tm.makeCustomIndex(len(p.items), nlevels=2) with pytest.raises(NotImplementedError): pd.eval('p + 1', parser=parser, engine=engine) def test_raise_on_panel4d_with_multiindex(self, parser, engine): - tm.skip_if_no_ne() p4d = tm.makePanel4D(7) p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2) with pytest.raises(NotImplementedError): pd.eval('p4d + 1', parser=parser, engine=engine) +@td.skip_if_no_ne class TestDataFrameQueryNumExprPandas(object): @classmethod def 
setup_class(cls): cls.engine = 'numexpr' cls.parser = 'pandas' - tm.skip_if_no_ne(cls.engine) @classmethod def teardown_class(cls): @@ -714,6 +703,7 @@ def test_inf(self): assert_frame_equal(result, expected) +@td.skip_if_no_ne class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas): @classmethod @@ -721,7 +711,6 @@ def setup_class(cls): super(TestDataFrameQueryNumExprPython, cls).setup_class() cls.engine = 'numexpr' cls.parser = 'python' - tm.skip_if_no_ne(cls.engine) cls.frame = TestData().frame def test_date_query_no_attribute_access(self): @@ -859,7 +848,6 @@ def test_query_builtin(self): class TestDataFrameQueryStrings(object): def test_str_query_method(self, parser, engine): - tm.skip_if_no_ne(engine) df = DataFrame(randn(10, 1), columns=['b']) df['strings'] = Series(list('aabbccddee')) expect = df[df.strings == 'a'] @@ -896,7 +884,6 @@ def test_str_query_method(self, parser, engine): assert_frame_equal(res, df[~df.strings.isin(['a'])]) def test_str_list_query_method(self, parser, engine): - tm.skip_if_no_ne(engine) df = DataFrame(randn(10, 1), columns=['b']) df['strings'] = Series(list('aabbccddee')) expect = df[df.strings.isin(['a', 'b'])] @@ -935,7 +922,6 @@ def test_str_list_query_method(self, parser, engine): assert_frame_equal(res, expect) def test_query_with_string_columns(self, parser, engine): - tm.skip_if_no_ne(engine) df = DataFrame({'a': list('aaaabbbbcccc'), 'b': list('aabbccddeeff'), 'c': np.random.randint(5, size=12), @@ -956,7 +942,6 @@ def test_query_with_string_columns(self, parser, engine): df.query('a in b and c < d', parser=parser, engine=engine) def test_object_array_eq_ne(self, parser, engine): - tm.skip_if_no_ne(engine) df = DataFrame({'a': list('aaaabbbbcccc'), 'b': list('aabbccddeeff'), 'c': np.random.randint(5, size=12), @@ -970,7 +955,6 @@ def test_object_array_eq_ne(self, parser, engine): assert_frame_equal(res, exp) def test_query_with_nested_strings(self, parser, engine): - tm.skip_if_no_ne(engine) 
skip_if_no_pandas_parser(parser) raw = """id event timestamp 1 "page 1 load" 1/1/2014 0:00:01 @@ -995,7 +979,6 @@ def test_query_with_nested_strings(self, parser, engine): def test_query_with_nested_special_character(self, parser, engine): skip_if_no_pandas_parser(parser) - tm.skip_if_no_ne(engine) df = DataFrame({'a': ['a', 'b', 'test & test'], 'b': [1, 2, 3]}) res = df.query('a == "test & test"', parser=parser, engine=engine) @@ -1003,7 +986,6 @@ def test_query_with_nested_special_character(self, parser, engine): assert_frame_equal(res, expec) def test_query_lex_compare_strings(self, parser, engine): - tm.skip_if_no_ne(engine=engine) import operator as opr a = Series(np.random.choice(list('abcde'), 20)) @@ -1018,7 +1000,6 @@ def test_query_lex_compare_strings(self, parser, engine): assert_frame_equal(res, expected) def test_query_single_element_booleans(self, parser, engine): - tm.skip_if_no_ne(engine) columns = 'bid', 'bidsize', 'ask', 'asksize' data = np.random.randint(2, size=(1, len(columns))).astype(bool) df = DataFrame(data, columns=columns) @@ -1027,7 +1008,6 @@ def test_query_single_element_booleans(self, parser, engine): assert_frame_equal(res, expected) def test_query_string_scalar_variable(self, parser, engine): - tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'], 'Price': [109.70, 109.72, 183.30, 183.35]}) @@ -1037,13 +1017,7 @@ def test_query_string_scalar_variable(self, parser, engine): assert_frame_equal(e, r) -class TestDataFrameEvalNumExprPandas(object): - - @classmethod - def setup_class(cls): - cls.engine = 'numexpr' - cls.parser = 'pandas' - tm.skip_if_no_ne() +class TestDataFrameEvalWithFrame(object): def setup_method(self, method): self.frame = DataFrame(randn(10, 3), columns=list('abc')) @@ -1051,49 +1025,21 @@ def setup_method(self, method): def teardown_method(self, method): del self.frame - def test_simple_expr(self): - res = self.frame.eval('a + b', 
engine=self.engine, parser=self.parser) + def test_simple_expr(self, parser, engine): + res = self.frame.eval('a + b', engine=engine, parser=parser) expect = self.frame.a + self.frame.b assert_series_equal(res, expect) - def test_bool_arith_expr(self): - res = self.frame.eval('a[a < 1] + b', engine=self.engine, - parser=self.parser) + def test_bool_arith_expr(self, parser, engine): + res = self.frame.eval('a[a < 1] + b', engine=engine, parser=parser) expect = self.frame.a[self.frame.a < 1] + self.frame.b assert_series_equal(res, expect) - def test_invalid_type_for_operator_raises(self): + def test_invalid_type_for_operator_raises(self, parser, engine): df = DataFrame({'a': [1, 2], 'b': ['c', 'd']}) ops = '+', '-', '*', '/' for op in ops: with tm.assert_raises_regex(TypeError, "unsupported operand type\(s\) " "for .+: '.+' and '.+'"): - df.eval('a {0} b'.format(op), engine=self.engine, - parser=self.parser) - - -class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas): - - @classmethod - def setup_class(cls): - super(TestDataFrameEvalNumExprPython, cls).setup_class() - cls.engine = 'numexpr' - cls.parser = 'python' - tm.skip_if_no_ne(cls.engine) - - -class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas): - - @classmethod - def setup_class(cls): - super(TestDataFrameEvalPythonPandas, cls).setup_class() - cls.engine = 'python' - cls.parser = 'pandas' - - -class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython): - - @classmethod - def setup_class(cls): - cls.engine = cls.parser = 'python' + df.eval('a {0} b'.format(op), engine=engine, parser=parser) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index ca8a0d8bda3ab..0ca25735fc03f 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -21,6 +21,7 @@ ensure_clean, makeCustomDataframe as mkdf) import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.tests.frame.common import TestData 
@@ -965,10 +966,10 @@ def test_to_csv_compression_bz2(self): for col in df.columns: assert col in text + @td.skip_if_no_lzma def test_to_csv_compression_xz(self): # GH11852 # use the compression kw in to_csv - tm._skip_if_no_lzma() df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 077752039a558..a83ec53904b28 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -3,10 +3,12 @@ import pandas as pd from pandas import compat import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.util.testing import assert_frame_equal, assert_raises_regex -COMPRESSION_TYPES = [None, 'bz2', 'gzip', 'xz'] +COMPRESSION_TYPES = [None, 'bz2', 'gzip', + pytest.param('xz', marks=td.skip_if_no_lzma)] def decompress_file(path, compression): @@ -32,9 +34,6 @@ def decompress_file(path, compression): @pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_compression_roundtrip(compression): - if compression == 'xz': - tm._skip_if_no_lzma() - df = pd.DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) @@ -74,9 +73,6 @@ def test_with_s3_url(compression): pytest.importorskip('s3fs') moto = pytest.importorskip('moto') - if compression == 'xz': - tm._skip_if_no_lzma() - df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') with moto.mock_s3(): conn = boto3.resource("s3", region_name="us-east-1") @@ -94,9 +90,6 @@ def test_with_s3_url(compression): @pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_lines_with_compression(compression): - if compression == 'xz': - tm._skip_if_no_lzma() - with tm.ensure_clean() as path: df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') df.to_json(path, orient='records', lines=True, compression=compression) @@ -107,9 
+100,6 @@ def test_lines_with_compression(compression): @pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_chunksize_with_compression(compression): - if compression == 'xz': - tm._skip_if_no_lzma() - with tm.ensure_clean() as path: df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}') df.to_json(path, orient='records', lines=True, compression=compression) diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 84db9d14eee07..4291d59123e8b 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -8,7 +8,9 @@ import pytest import pandas as pd +import pandas.compat as compat import pandas.util.testing as tm +import pandas.util._test_decorators as td class CompressionTests(object): @@ -117,8 +119,9 @@ def test_bz2(self): result = self.read_csv(path, compression='infer') tm.assert_frame_equal(result, expected) + @td.skip_if_no_lzma def test_xz(self): - lzma = tm._skip_if_no_lzma() + lzma = compat.import_lzma() with open(self.csv1, 'rb') as data_file: data = data_file.read() diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index d00d3f31ce189..2d0a23d71a2e6 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -7,6 +7,7 @@ import pytest import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas import DataFrame from pandas.io.parsers import read_csv, read_table from pandas.compat import BytesIO @@ -14,12 +15,11 @@ @pytest.mark.network @pytest.mark.parametrize( - "compression,extension", - [('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'), - pytest.param('xz', '.xz', - marks=pytest.mark.skipif(not tm._check_if_lzma(), - reason='need backports.lzma ' - 'to run'))]) + "compression,extension", [ + ('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'), + pytest.param('xz', '.xz', marks=td.skip_if_no_lzma) + ] +) @pytest.mark.parametrize('mode', 
['explicit', 'infer']) @pytest.mark.parametrize('engine', ['python', 'c']) def test_compressed_urls(salaries_table, compression, extension, mode, engine): diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index c49339f112d6a..d5bcf72488d09 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -23,6 +23,7 @@ from pandas.compat import is_platform_little_endian import pandas import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.tseries.offsets import Day, MonthEnd import shutil import sys @@ -382,12 +383,11 @@ def decompress_file(self, src_path, dest_path, compression): fh.write(f.read()) f.close() - @pytest.mark.parametrize('compression', [None, 'gzip', 'bz2', 'xz']) + @pytest.mark.parametrize('compression', [ + None, 'gzip', 'bz2', + pytest.param('xz', marks=td.skip_if_no_lzma) # issue 11666 + ]) def test_write_explicit(self, compression, get_random_path): - # issue 11666 - if compression == 'xz': - tm._skip_if_no_lzma() - base = get_random_path path1 = base + ".compressed" path2 = base + ".raw" @@ -414,11 +414,11 @@ def test_write_explicit_bad(self, compression, get_random_path): df = tm.makeDataFrame() df.to_pickle(path, compression=compression) - @pytest.mark.parametrize('ext', ['', '.gz', '.bz2', '.xz', '.no_compress']) + @pytest.mark.parametrize('ext', [ + '', '.gz', '.bz2', '.no_compress', + pytest.param('.xz', marks=td.skip_if_no_lzma) + ]) def test_write_infer(self, ext, get_random_path): - if ext == '.xz': - tm._skip_if_no_lzma() - base = get_random_path path1 = base + ext path2 = base + ".raw" @@ -442,12 +442,11 @@ def test_write_infer(self, ext, get_random_path): tm.assert_frame_equal(df, df2) - @pytest.mark.parametrize('compression', [None, 'gzip', 'bz2', 'xz', "zip"]) + @pytest.mark.parametrize('compression', [ + None, 'gzip', 'bz2', "zip", + pytest.param('xz', marks=td.skip_if_no_lzma) + ]) def test_read_explicit(self, compression, get_random_path): - # issue 
11666 - if compression == 'xz': - tm._skip_if_no_lzma() - base = get_random_path path1 = base + ".raw" path2 = base + ".compressed" @@ -466,12 +465,11 @@ def test_read_explicit(self, compression, get_random_path): tm.assert_frame_equal(df, df2) - @pytest.mark.parametrize('ext', ['', '.gz', '.bz2', '.xz', '.zip', - '.no_compress']) + @pytest.mark.parametrize('ext', [ + '', '.gz', '.bz2', '.zip', '.no_compress', + pytest.param('.xz', marks=td.skip_if_no_lzma) + ]) def test_read_infer(self, ext, get_random_path): - if ext == '.xz': - tm._skip_if_no_lzma() - base = get_random_path path1 = base + ".raw" path2 = base + ext diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 95a9a8fed42f7..0fd5648739e5c 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -28,7 +28,10 @@ def test_foo(): import locale from distutils.version import LooseVersion -from pandas.compat import is_platform_windows, is_platform_32bit, PY3 +from pandas.compat import (is_platform_windows, is_platform_32bit, PY3, + import_lzma) +from pandas.core.computation.expressions import (_USE_NUMEXPR, + _NUMEXPR_INSTALLED) def safe_import(mod_name, min_version=None): @@ -99,6 +102,13 @@ def _skip_if_no_scipy(): safe_import('scipy.interpolate')) +def _skip_if_no_lzma(): + try: + import_lzma() + except ImportError: + return True + + def skip_if_no(package, min_version=None): """ Generic function to help skip test functions when required packages are not @@ -153,3 +163,10 @@ def decorated_func(func): lang=locale.getlocale()[0])) skip_if_no_scipy = pytest.mark.skipif(_skip_if_no_scipy(), reason="Missing SciPy requirement") +skip_if_no_lzma = pytest.mark.skipif(_skip_if_no_lzma(), + reason="need backports.lzma to run") +skip_if_no_ne = pytest.mark.skipif(not _USE_NUMEXPR, + reason="numexpr enabled->{enabled}, " + "installed->{installed}".format( + enabled=_USE_NUMEXPR, + installed=_NUMEXPR_INSTALLED)) diff --git a/pandas/util/testing.py 
b/pandas/util/testing.py index 4c6e3217ed6b4..131d470053a79 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -336,32 +336,6 @@ def _skip_if_no_scipy(): pytest.importorskip("scipy.interpolate") -def _check_if_lzma(): - try: - return compat.import_lzma() - except ImportError: - return False - - -def _skip_if_no_lzma(): - import pytest - return _check_if_lzma() or pytest.skip('need backports.lzma to run') - - -def skip_if_no_ne(engine='numexpr'): - from pandas.core.computation.expressions import ( - _USE_NUMEXPR, - _NUMEXPR_INSTALLED) - - if engine == 'numexpr': - if not _USE_NUMEXPR: - import pytest - pytest.skip("numexpr enabled->{enabled}, " - "installed->{installed}".format( - enabled=_USE_NUMEXPR, - installed=_NUMEXPR_INSTALLED)) - - def _skip_if_no_mock(): try: import mock # noqa
- [ ] progress #18190 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18820
2017-12-18T15:58:50Z
2017-12-19T11:26:51Z
2017-12-19T11:26:51Z
2018-02-27T01:32:12Z
delegate (most) datetimelike Series arithmetic ops to DatetimeIndex
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..2f6b039b4e080 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -198,6 +198,7 @@ Other API Changes - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) - :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`) - :func:`wide_to_long` previously kept numeric-like suffixes as ``object`` dtype. Now they are cast to numeric if possible (:issue:`17627`) +- Subtraction of :class:`DatetimeIndex` with mis-matched timezones will now raise a ``TypeError`` instead of a ``ValueError`` (:issue:`18817`) .. _whatsnew_0220.deprecations: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8cc996285fbbd..52ad95ae9ab0d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -11,7 +11,7 @@ import numpy as np from pandas.core.dtypes.common import ( - is_integer, is_float, + is_integer, is_float, is_integer_dtype, is_bool_dtype, _ensure_int64, is_scalar, is_dtype_equal, is_list_like, is_timedelta64_dtype) @@ -650,6 +650,7 @@ def _add_datetimelike_methods(cls): def __add__(self, other): from pandas.core.index import Index from pandas.core.indexes.timedeltas import TimedeltaIndex + from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import DateOffset if is_timedelta64_dtype(other): return self._add_delta(other) @@ -664,6 +665,12 @@ def __add__(self, other): return self.shift(other) elif isinstance(other, (Index, datetime, np.datetime64)): return self._add_datelike(other) + elif (isinstance(self, DatetimeIndex) and + isinstance(other, np.ndarray) and other.size == 1 and + is_integer_dtype(other)): + # TODO: Should this be allowed if self.freq is not None? 
+ raise TypeError("cannot add {cls} and {typ}" + .format(cls=type(cls), typ=type(other))) else: # pragma: no cover return NotImplemented cls.__add__ = __add__ @@ -695,6 +702,12 @@ def __sub__(self, other): return self._sub_datelike(other) elif isinstance(other, Period): return self._sub_period(other) + elif (isinstance(self, DatetimeIndex) and + isinstance(other, np.ndarray) and other.size == 1 and + is_integer_dtype(other)): + # TODO: Should this be allowed if self.freq is not None? + raise TypeError("cannot add {cls} and {typ}" + .format(cls=type(cls), typ=type(other))) else: # pragma: no cover return NotImplemented cls.__sub__ = __sub__ diff --git a/pandas/core/ops.py b/pandas/core/ops.py index e23609b23f529..30171e3b204ac 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -490,6 +490,7 @@ def _convert_to_array(self, values, name=None, other=None): # datetime with tz elif (isinstance(ovalues, datetime.datetime) and hasattr(ovalues, 'tzinfo')): + # TODO: does this mean to say `ovalues.tzinfo is not None`? values = pd.DatetimeIndex(values) # datetime array with tz elif is_datetimetz(values): @@ -655,6 +656,15 @@ def _construct_divmod_result(left, result, index, name, dtype): ) +def _get_series_result_name(left, rvalues): + # TODO: Can we just use right instead of rvalues? + if isinstance(rvalues, ABCSeries): + name = _maybe_match_name(left, rvalues) + else: + name = left.name + return name + + def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None, construct_result=_construct_result, **eval_kwargs): """ @@ -707,6 +717,36 @@ def wrapper(left, right, name=name, na_op=na_op): if isinstance(right, ABCDataFrame): return NotImplemented + elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left): + # Dispatch to DatetimeIndex method; there are a handful of cases + # that DatetimeIndex handles differently from Series so we avoid + # dispatching. 
+ if right is pd.NaT: + # DatetimeIndex and Series handle this differently, so + # until that is resolved we need to special-case here + return construct_result(left, pd.NaT, index=left.index, + name=left.name, dtype=left.dtype) + # TODO: double-check that the tz part of the dtype + # is supposed to be retained + elif is_offsetlike(right): + # special handling for alignment + pass + elif isinstance(right, pd.PeriodIndex): + # not supported for DatetimeIndex + pass + elif (isinstance(right, np.ndarray) and right.size == 1 and + is_integer_dtype(right)): + # DatetimeIndex adds this as nanoseconds, needs fixing + pass + else: + left, right = _align_method_SERIES(left, right) + name = _get_series_result_name(left, right) + result = op(pd.DatetimeIndex(left), right) + result.name = name # Needs to be overriden if name is None + return construct_result(left, result, + index=left.index, name=name, + dtype=result.dtype) + left, right = _align_method_SERIES(left, right) converted = _Op.get_op(left, right, name, na_op) diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index a46462e91a866..3b612ef3e1f89 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -363,6 +363,20 @@ def test_datetimeindex_sub_timestamp_overflow(self): with pytest.raises(OverflowError): dtimin - variant + def test_dti_add_intarray(self, tz): + rng = pd.date_range('2000-01-01 09:00', freq='H', + periods=10, tz=tz) + other = np.array(1, dtype=np.int64) + with pytest.raises(TypeError): + rng + other + + def test_dti_sub_intarray(self, tz): + rng = pd.date_range('2000-01-01 09:00', freq='H', + periods=10, tz=tz) + other = np.array(1, dtype=np.int64) + with pytest.raises(TypeError): + rng - other + # GH 10699 @pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex], diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py 
index 89a6311153d15..c936e7cac1ad9 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -970,7 +970,7 @@ def run_ops(ops, get_ser, test_ser): # defined for op_str in ops: op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate'): + with tm.assert_raises_regex(TypeError, 'operate|cannot'): op(test_ser) # ## timedelta64 ### @@ -1203,6 +1203,21 @@ def test_datetime64_ops_nat(self): with pytest.raises(TypeError): nat_series_dtype_timestamp / 1 + def test_datetime_sub_datetime_overflow(self): + # GH#12534 + dt = pd.Timestamp('1700-01-31') + dti = pd.date_range('1999-09-30', freq='M', periods=10) + with pytest.raises(OverflowError): + dti - dt + with pytest.raises(OverflowError): + dt - dti + + ser = pd.Series(dti) + with pytest.raises(OverflowError): + ser - dt + with pytest.raises(OverflowError): + dt - ser + class TestSeriesOperators(TestData): def test_op_method(self): diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 2e3a7a6c28a11..6e711abf4491b 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -107,7 +107,7 @@ def test_shift(self): # incompat tz s2 = Series(date_range('2000-01-01 09:00:00', periods=5, tz='CET'), name='foo') - pytest.raises(ValueError, lambda: s - s2) + pytest.raises(TypeError, lambda: s - s2) def test_shift2(self): ts = Series(np.random.randn(5),
DatetimeIndex arithmetic ops do overflow checks that Series does not. Instead of re-implementing those checks (which I tried and its a PITA) this PR just delegates the appropriate operations to the DatetimeIndex implementation. There are a couple of things that are missing from the DatetimeIndex implementations, e.g. the int-array case added to indexes.datetimelike. If there's consensus that having a single implementation for these ops is the way to go, I'll port the other cases over in their own PRs. If/when these implementations get merged, ops._TimeOp can be trimmed/removed. - [x] closes #12534 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18817
2017-12-18T04:51:26Z
2017-12-28T19:57:00Z
null
2018-02-11T22:00:33Z
CLN: ASV io_bench, parser_vb
diff --git a/asv_bench/benchmarks/io/__init__.py b/asv_bench/benchmarks/io/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py new file mode 100644 index 0000000000000..bc4599436111f --- /dev/null +++ b/asv_bench/benchmarks/io/csv.py @@ -0,0 +1,249 @@ +import random +import timeit +import string + +import numpy as np +import pandas.util.testing as tm +from pandas import DataFrame, Categorical, date_range, read_csv +from pandas.compat import PY2 +from pandas.compat import cStringIO as StringIO + +from ..pandas_vb_common import setup, BaseIO # noqa + + +class ToCSV(BaseIO): + + goal_time = 0.2 + fname = '__test__.csv' + params = ['wide', 'long', 'mixed'] + param_names = ['kind'] + + def setup(self, kind): + wide_frame = DataFrame(np.random.randn(3000, 30)) + long_frame = DataFrame({'A': np.arange(50000), + 'B': np.arange(50000) + 1., + 'C': np.arange(50000) + 2., + 'D': np.arange(50000) + 3.}) + mixed_frame = DataFrame({'float': np.random.randn(5000), + 'int': np.random.randn(5000).astype(int), + 'bool': (np.arange(5000) % 2) == 0, + 'datetime': date_range('2001', + freq='s', + periods=5000), + 'object': ['foo'] * 5000}) + mixed_frame.loc[30:500, 'float'] = np.nan + data = {'wide': wide_frame, + 'long': long_frame, + 'mixed': mixed_frame} + self.df = data[kind] + + def time_frame(self, kind): + self.df.to_csv(self.fname) + + +class ToCSVDatetime(BaseIO): + + goal_time = 0.2 + fname = '__test__.csv' + + def setup(self): + rng = date_range('1/1/2000', periods=1000) + self.data = DataFrame(rng, index=rng) + + def time_frame_date_formatting(self): + self.data.to_csv(self.fname, date_format='%Y%m%d') + + +class ReadCSVDInferDatetimeFormat(object): + + goal_time = 0.2 + params = ([True, False], ['custom', 'iso8601', 'ymd']) + param_names = ['infer_datetime_format', 'format'] + + def setup(self, infer_datetime_format, format): + rng = date_range('1/1/2000', periods=1000) + formats = 
{'custom': '%m/%d/%Y %H:%M:%S.%f', + 'iso8601': '%Y-%m-%d %H:%M:%S', + 'ymd': '%Y%m%d'} + dt_format = formats[format] + self.data = StringIO('\n'.join(rng.strftime(dt_format).tolist())) + + def time_read_csv(self, infer_datetime_format, format): + read_csv(self.data, header=None, names=['foo'], parse_dates=['foo'], + infer_datetime_format=infer_datetime_format) + + +class ReadCSVSkipRows(BaseIO): + + goal_time = 0.2 + fname = '__test__.csv' + params = [None, 10000] + param_names = ['skiprows'] + + def setup(self, skiprows): + N = 20000 + index = tm.makeStringIndex(N) + df = DataFrame({'float1': np.random.randn(N), + 'float2': np.random.randn(N), + 'string1': ['foo'] * N, + 'bool1': [True] * N, + 'int1': np.random.randint(0, N, size=N)}, + index=index) + df.to_csv(self.fname) + + def time_skipprows(self, skiprows): + read_csv(self.fname, skiprows=skiprows) + + +class ReadUint64Integers(object): + + goal_time = 0.2 + + def setup(self): + self.na_values = [2**63 + 500] + arr = np.arange(10000).astype('uint64') + 2**63 + self.data1 = StringIO('\n'.join(arr.astype(str).tolist())) + arr = arr.astype(object) + arr[500] = -1 + self.data2 = StringIO('\n'.join(arr.astype(str).tolist())) + + def time_read_uint64(self): + read_csv(self.data1, header=None, names=['foo']) + + def time_read_uint64_neg_values(self): + read_csv(self.data2, header=None, names=['foo']) + + def time_read_uint64_na_values(self): + read_csv(self.data1, header=None, names=['foo'], + na_values=self.na_values) + + +class S3(object): + # Make sure that we can read part of a file from S3 without + # needing to download the entire thing. Use the timeit.default_timer + # to measure wall time instead of CPU time -- we want to see + # how long it takes to download the data. 
+ timer = timeit.default_timer + params = ([None, "gzip", "bz2"], ["python", "c"]) + param_names = ["compression", "engine"] + + def setup(self, compression, engine): + if compression == "bz2" and engine == "c" and PY2: + # The Python 2 C parser can't read bz2 from open files. + raise NotImplementedError + try: + import s3fs + except ImportError: + # Skip these benchmarks if `boto` is not installed. + raise NotImplementedError + + ext = "" + if compression == "gzip": + ext = ".gz" + elif compression == "bz2": + ext = ".bz2" + self.big_fname = "s3://pandas-test/large_random.csv" + ext + + def time_read_csv_10_rows(self, compression, engine): + # Read a small number of rows from a huge (100,000 x 50) table. + read_csv(self.big_fname, nrows=10, compression=compression, + engine=engine) + + +class ReadCSVThousands(BaseIO): + + goal_time = 0.2 + fname = '__test__.csv' + params = ([',', '|'], [None, ',']) + param_names = ['sep', 'thousands'] + + def setup(self, sep, thousands): + N = 10000 + K = 8 + data = np.random.randn(N, K) * np.random.randint(100, 10000, (N, K)) + df = DataFrame(data) + if thousands is not None: + fmt = ':{}'.format(thousands) + fmt = '{' + fmt + '}' + df = df.applymap(lambda x: fmt.format(x)) + df.to_csv(self.fname, sep=sep) + + def time_thousands(self, sep, thousands): + read_csv(self.fname, sep=sep, thousands=thousands) + + +class ReadCSVComment(object): + + goal_time = 0.2 + + def setup(self): + data = ['A,B,C'] + (['1,2,3 # comment'] * 100000) + self.s_data = StringIO('\n'.join(data)) + + def time_comment(self): + read_csv(self.s_data, comment='#', header=None, names=list('abc')) + + +class ReadCSVFloatPrecision(object): + + goal_time = 0.2 + params = ([',', ';'], ['.', '_'], [None, 'high', 'round_trip']) + param_names = ['sep', 'decimal', 'float_precision'] + + def setup(self, sep, decimal, float_precision): + floats = [''.join(random.choice(string.digits) for _ in range(28)) + for _ in range(15)] + rows = sep.join(['0{}'.format(decimal) + 
'{}'] * 3) + '\n' + data = rows * 5 + data = data.format(*floats) * 200 # 1000 x 3 strings csv + self.s_data = StringIO(data) + + def time_read_csv(self, sep, decimal, float_precision): + read_csv(self.s_data, sep=sep, header=None, names=list('abc'), + float_precision=float_precision) + + def time_read_csv_python_engine(self, sep, decimal, float_precision): + read_csv(self.s_data, sep=sep, header=None, engine='python', + float_precision=None, names=list('abc')) + + +class ReadCSVCategorical(BaseIO): + + goal_time = 0.2 + fname = '__test__.csv' + + def setup(self): + N = 100000 + group1 = ['aaaaaaaa', 'bbbbbbb', 'cccccccc', 'dddddddd', 'eeeeeeee'] + df = DataFrame(np.random.choice(group1, (N, 3)), columns=list('abc')) + df.to_csv(self.fname, index=False) + + def time_convert_post(self): + read_csv(self.fname).apply(Categorical) + + def time_convert_direct(self): + read_csv(self.fname, dtype='category') + + +class ReadCSVParseDates(object): + + goal_time = 0.2 + + def setup(self): + data = """{},19:00:00,18:56:00,0.8100,2.8100,7.2000,0.0000,280.0000\n + {},20:00:00,19:56:00,0.0100,2.2100,7.2000,0.0000,260.0000\n + {},21:00:00,20:56:00,-0.5900,2.2100,5.7000,0.0000,280.0000\n + {},21:00:00,21:18:00,-0.9900,2.0100,3.6000,0.0000,270.0000\n + {},22:00:00,21:56:00,-0.5900,1.7100,5.1000,0.0000,290.0000\n + """ + two_cols = ['KORD,19990127'] * 5 + data = data.format(*two_cols) + self.s_data = StringIO(data) + + def time_multiple_date(self): + read_csv(self.s_data, sep=',', header=None, + names=list(string.digits[:9]), parse_dates=[[1, 2], [1, 3]]) + + def time_baseline(self): + read_csv(self.s_data, sep=',', header=None, parse_dates=[1], + names=list(string.digits[:9])) diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py new file mode 100644 index 0000000000000..acfdd327c3b51 --- /dev/null +++ b/asv_bench/benchmarks/io/json.py @@ -0,0 +1,127 @@ +import numpy as np +import pandas.util.testing as tm +from pandas import DataFrame, date_range, 
timedelta_range, concat, read_json + +from ..pandas_vb_common import setup, BaseIO # noqa + + +class ReadJSON(BaseIO): + + goal_time = 0.2 + fname = "__test__.json" + params = (['split', 'index', 'records'], ['int', 'datetime']) + param_names = ['orient', 'index'] + + def setup(self, orient, index): + N = 100000 + indexes = {'int': np.arange(N), + 'datetime': date_range('20000101', periods=N, freq='H')} + df = DataFrame(np.random.randn(N, 5), + columns=['float_{}'.format(i) for i in range(5)], + index=indexes[index]) + df.to_json(self.fname, orient=orient) + + def time_read_json(self, orient, index): + read_json(self.fname, orient=orient) + + +class ReadJSONLines(BaseIO): + + goal_time = 0.2 + fname = "__test_lines__.json" + params = ['int', 'datetime'] + param_names = ['index'] + + def setup(self, index): + N = 100000 + indexes = {'int': np.arange(N), + 'datetime': date_range('20000101', periods=N, freq='H')} + df = DataFrame(np.random.randn(N, 5), + columns=['float_{}'.format(i) for i in range(5)], + index=indexes[index]) + df.to_json(self.fname, orient='records', lines=True) + + def time_read_json_lines(self, index): + read_json(self.fname, orient='records', lines=True) + + def time_read_json_lines_concat(self, index): + concat(read_json(self.fname, orient='records', lines=True, + chunksize=25000)) + + def peakmem_read_json_lines(self, index): + read_json(self.fname, orient='records', lines=True) + + def peakmem_read_json_lines_concat(self, index): + concat(read_json(self.fname, orient='records', lines=True, + chunksize=25000)) + + +class ToJSON(BaseIO): + + goal_time = 0.2 + fname = "__test__.json" + params = ['split', 'columns', 'index'] + param_names = ['orient'] + + def setup(self, lines_orient): + N = 10**5 + ncols = 5 + index = date_range('20000101', periods=N, freq='H') + timedeltas = timedelta_range(start=1, periods=N, freq='s') + datetimes = date_range(start=1, periods=N, freq='s') + ints = np.random.randint(100000000, size=N) + floats = 
np.random.randn(N) + strings = tm.makeStringIndex(N) + self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N)) + self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index) + self.df_td_int_ts = DataFrame({'td_1': timedeltas, + 'td_2': timedeltas, + 'int_1': ints, + 'int_2': ints, + 'ts_1': datetimes, + 'ts_2': datetimes}, + index=index) + self.df_int_floats = DataFrame({'int_1': ints, + 'int_2': ints, + 'int_3': ints, + 'float_1': floats, + 'float_2': floats, + 'float_3': floats}, + index=index) + self.df_int_float_str = DataFrame({'int_1': ints, + 'int_2': ints, + 'float_1': floats, + 'float_2': floats, + 'str_1': strings, + 'str_2': strings}, + index=index) + + def time_floats_with_int_index(self, orient): + self.df.to_json(self.fname, orient=orient) + + def time_floats_with_dt_index(self, orient): + self.df_date_idx.to_json(self.fname, orient=orient) + + def time_delta_int_tstamp(self, orient): + self.df_td_int_ts.to_json(self.fname, orient=orient) + + def time_float_int(self, orient): + self.df_int_floats.to_json(self.fname, orient=orient) + + def time_float_int_str(self, orient): + self.df_int_float_str.to_json(self.fname, orient=orient) + + def time_floats_with_int_idex_lines(self, orient): + self.df.to_json(self.fname, orient='records', lines=True) + + def time_floats_with_dt_index_lines(self, orient): + self.df_date_idx.to_json(self.fname, orient='records', lines=True) + + def time_delta_int_tstamp_lines(self, orient): + self.df_td_int_ts.to_json(self.fname, orient='records', lines=True) + + def time_float_int_lines(self, orient): + self.df_int_floats.to_json(self.fname, orient='records', lines=True) + + def time_float_int_str_lines(self, orient): + self.df_int_float_str.to_json(self.fname, orient='records', lines=True) diff --git a/asv_bench/benchmarks/io_bench.py b/asv_bench/benchmarks/io_bench.py deleted file mode 100644 index e8112cc41f032..0000000000000 --- a/asv_bench/benchmarks/io_bench.py +++ /dev/null @@ -1,225 +0,0 @@ -import 
os -from .pandas_vb_common import * -from pandas import concat, Timestamp, compat -try: - from StringIO import StringIO -except ImportError: - from io import StringIO -import timeit - - -class frame_to_csv(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.df = DataFrame(np.random.randn(3000, 30)) - - def time_frame_to_csv(self): - self.df.to_csv(self.fname) - - -class frame_to_csv2(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.df = DataFrame({'A': range(50000), }) - self.df['B'] = (self.df.A + 1.0) - self.df['C'] = (self.df.A + 2.0) - self.df['D'] = (self.df.A + 3.0) - - def time_frame_to_csv2(self): - self.df.to_csv(self.fname) - - -class frame_to_csv_date_formatting(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.rng = date_range('1/1/2000', periods=1000) - self.data = DataFrame(self.rng, index=self.rng) - - def time_frame_to_csv_date_formatting(self): - self.data.to_csv(self.fname, date_format='%Y%m%d') - - -class frame_to_csv_mixed(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.df_float = DataFrame(np.random.randn(5000, 5), dtype='float64', columns=self.create_cols('float')) - self.df_int = DataFrame(np.random.randn(5000, 5), dtype='int64', columns=self.create_cols('int')) - self.df_bool = DataFrame(True, index=self.df_float.index, columns=self.create_cols('bool')) - self.df_object = DataFrame('foo', index=self.df_float.index, columns=self.create_cols('object')) - self.df_dt = DataFrame(Timestamp('20010101'), index=self.df_float.index, columns=self.create_cols('date')) - self.df_float.ix[30:500, 1:3] = np.nan - self.df = concat([self.df_float, self.df_int, self.df_bool, self.df_object, self.df_dt], axis=1) - - def time_frame_to_csv_mixed(self): - self.df.to_csv(self.fname) - - def create_cols(self, name): - return [('%s%03d' % (name, i)) for i in range(5)] - - -class read_csv_infer_datetime_format_custom(object): - goal_time = 0.2 - - 
def setup(self): - self.rng = date_range('1/1/2000', periods=1000) - self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%m/%d/%Y %H:%M:%S.%f')))) - - def time_read_csv_infer_datetime_format_custom(self): - read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True) - - -class read_csv_infer_datetime_format_iso8601(object): - goal_time = 0.2 - - def setup(self): - self.rng = date_range('1/1/2000', periods=1000) - self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y-%m-%d %H:%M:%S')))) - - def time_read_csv_infer_datetime_format_iso8601(self): - read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True) - - -class read_csv_infer_datetime_format_ymd(object): - goal_time = 0.2 - - def setup(self): - self.rng = date_range('1/1/2000', periods=1000) - self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y%m%d')))) - - def time_read_csv_infer_datetime_format_ymd(self): - read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True) - - -class read_csv_skiprows(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.index = tm.makeStringIndex(20000) - self.df = DataFrame({'float1': randn(20000), 'float2': randn(20000), 'string1': (['foo'] * 20000), 'bool1': ([True] * 20000), 'int1': np.random.randint(0, 200000, size=20000), }, index=self.index) - self.df.to_csv(self.fname) - - def time_read_csv_skiprows(self): - read_csv(self.fname, skiprows=10000) - - -class read_csv_standard(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.index = tm.makeStringIndex(10000) - self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index) - self.df.to_csv(self.fname) - - def time_read_csv_standard(self): - read_csv(self.fname) - - -class 
read_parse_dates_iso8601(object): - goal_time = 0.2 - - def setup(self): - self.rng = date_range('1/1/2000', periods=1000) - self.data = '\n'.join(self.rng.map((lambda x: x.strftime('%Y-%m-%d %H:%M:%S')))) - - def time_read_parse_dates_iso8601(self): - read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo']) - - -class read_uint64_integers(object): - goal_time = 0.2 - - def setup(self): - self.na_values = [2**63 + 500] - - self.arr1 = np.arange(10000).astype('uint64') + 2**63 - self.data1 = '\n'.join(map(lambda x: str(x), self.arr1)) - - self.arr2 = self.arr1.copy().astype(object) - self.arr2[500] = -1 - self.data2 = '\n'.join(map(lambda x: str(x), self.arr2)) - - def time_read_uint64(self): - read_csv(StringIO(self.data1), header=None) - - def time_read_uint64_neg_values(self): - read_csv(StringIO(self.data2), header=None) - - def time_read_uint64_na_values(self): - read_csv(StringIO(self.data1), header=None, na_values=self.na_values) - - -class write_csv_standard(BaseIO): - goal_time = 0.2 - fname = '__test__.csv' - - def setup(self): - self.index = tm.makeStringIndex(10000) - self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index) - - def time_write_csv_standard(self): - self.df.to_csv(self.fname) - - -class read_csv_from_s3(object): - # Make sure that we can read part of a file from S3 without - # needing to download the entire thing. Use the timeit.default_timer - # to measure wall time instead of CPU time -- we want to see - # how long it takes to download the data. - timer = timeit.default_timer - params = ([None, "gzip", "bz2"], ["python", "c"]) - param_names = ["compression", "engine"] - - def setup(self, compression, engine): - if compression == "bz2" and engine == "c" and compat.PY2: - # The Python 2 C parser can't read bz2 from open files. 
- raise NotImplementedError - try: - import s3fs - except ImportError: - # Skip these benchmarks if `boto` is not installed. - raise NotImplementedError - - self.big_fname = "s3://pandas-test/large_random.csv" - - def time_read_nrows(self, compression, engine): - # Read a small number of rows from a huge (100,000 x 50) table. - ext = "" - if compression == "gzip": - ext = ".gz" - elif compression == "bz2": - ext = ".bz2" - pd.read_csv(self.big_fname + ext, nrows=10, - compression=compression, engine=engine) - - -class read_json_lines(BaseIO): - goal_time = 0.2 - fname = "__test__.json" - - def setup(self): - self.N = 100000 - self.C = 5 - self.df = DataFrame({'float{0}'.format(i): randn(self.N) for i in range(self.C)}) - self.df.to_json(self.fname,orient="records",lines=True) - - def time_read_json_lines(self): - pd.read_json(self.fname, lines=True) - - def time_read_json_lines_chunk(self): - pd.concat(pd.read_json(self.fname, lines=True, chunksize=self.N//4)) - - def peakmem_read_json_lines(self): - pd.read_json(self.fname, lines=True) - - def peakmem_read_json_lines_chunk(self): - pd.concat(pd.read_json(self.fname, lines=True, chunksize=self.N//4)) diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py index 758162f000e8d..7b6cefc56f0da 100644 --- a/asv_bench/benchmarks/packers.py +++ b/asv_bench/benchmarks/packers.py @@ -77,28 +77,6 @@ def time_packers_read_hdf_table(self): pd.read_hdf(self.f, 'df') -class packers_read_json(_Packers): - - def setup(self): - self._setup() - self.df.to_json(self.f, orient='split') - self.df.index = np.arange(self.N) - - def time_packers_read_json(self): - pd.read_json(self.f, orient='split') - - -class packers_read_json_date_index(_Packers): - - def setup(self): - self._setup() - self.remove(self.f) - self.df.to_json(self.f, orient='split') - - def time_packers_read_json_date_index(self): - pd.read_json(self.f, orient='split') - - class packers_read_pack(_Packers): def setup(self): @@ -219,46 +197,6 @@ def 
time_write_hdf_table(self): self.df2.to_hdf(self.f, 'df', table=True) -class JSON(_Packers): - - def setup(self): - self._setup() - self.df_date = self.df.copy() - self.df.index = np.arange(self.N) - self.cols = [(lambda i: ('{0}_timedelta'.format(i), [pd.Timedelta(('%d seconds' % randrange(1000000.0))) for _ in range(self.N)])), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N))), (lambda i: ('{0}_timestamp'.format(i), [pd.Timestamp((1418842918083256000 + randrange(1000000000.0, 1e+18, 200))) for _ in range(self.N)]))] - self.df_mixed = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) - - self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N)))] - self.df_mixed2 = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) - - self.cols = [(lambda i: ('{0}_float'.format(i), randn(self.N))), (lambda i: ('{0}_int'.format(i), randint(100000000.0, size=self.N))), (lambda i: ('{0}_str'.format(i), [('%08x' % randrange((16 ** 8))) for _ in range(self.N)]))] - self.df_mixed3 = DataFrame(OrderedDict([self.cols[(i % len(self.cols))](i) for i in range(self.C)]), index=self.index) - - def time_write_json(self): - self.df.to_json(self.f, orient='split') - - def time_write_json_T(self): - self.df.to_json(self.f, orient='columns') - - def time_write_json_date_index(self): - self.df_date.to_json(self.f, orient='split') - - def time_write_json_mixed_delta_int_tstamp(self): - self.df_mixed.to_json(self.f, orient='split') - - def time_write_json_mixed_float_int(self): - self.df_mixed2.to_json(self.f, orient='index') - - def time_write_json_mixed_float_int_T(self): - self.df_mixed2.to_json(self.f, orient='columns') - - def time_write_json_mixed_float_int_str(self): - self.df_mixed3.to_json(self.f, orient='split') - - def time_write_json_lines(self): - self.df.to_json(self.f, orient="records", 
lines=True) - - class MsgPack(_Packers): def setup(self): diff --git a/asv_bench/benchmarks/parser_vb.py b/asv_bench/benchmarks/parser_vb.py deleted file mode 100644 index 32bf7e50d1a89..0000000000000 --- a/asv_bench/benchmarks/parser_vb.py +++ /dev/null @@ -1,121 +0,0 @@ -from .pandas_vb_common import * -import os -from pandas import read_csv -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO - - -class read_csv1(object): - goal_time = 0.2 - - def setup(self): - self.N = 10000 - self.K = 8 - self.df = DataFrame((np.random.randn(self.N, self.K) * np.random.randint(100, 10000, (self.N, self.K)))) - self.df.to_csv('test.csv', sep='|') - - self.format = (lambda x: '{:,}'.format(x)) - self.df2 = self.df.applymap(self.format) - self.df2.to_csv('test2.csv', sep='|') - - def time_sep(self): - read_csv('test.csv', sep='|') - - def time_thousands(self): - read_csv('test.csv', sep='|', thousands=',') - - def teardown(self): - os.remove('test.csv') - os.remove('test2.csv') - - -class read_csv2(object): - goal_time = 0.2 - - def setup(self): - self.data = ['A,B,C'] - self.data = (self.data + (['1,2,3 # comment'] * 100000)) - self.data = '\n'.join(self.data) - - def time_comment(self): - read_csv(StringIO(self.data), comment='#') - - -class read_csv3(object): - goal_time = 0.2 - - def setup(self): - self.data = """0.1213700904466425978256438611,0.0525708283766902484401839501,0.4174092731488769913994474336\n -0.4096341697147408700274695547,0.1587830198973579909349496119,0.1292545832485494372576795285\n -0.8323255650024565799327547210,0.9694902427379478160318626578,0.6295047811546814475747169126\n -0.4679375305798131323697930383,0.2963942381834381301075609371,0.5268936082160610157032465394\n -0.6685382761849776311890991564,0.6721207066140679753374342908,0.6519975277021627935170045020\n""" - self.data2 = self.data.replace(',', ';').replace('.', ',') - self.data = (self.data * 200) - self.data2 = (self.data2 * 200) - - def 
time_default_converter(self): - read_csv(StringIO(self.data), sep=',', header=None, - float_precision=None) - - def time_default_converter_with_decimal(self): - read_csv(StringIO(self.data2), sep=';', header=None, - float_precision=None, decimal=',') - - def time_default_converter_python_engine(self): - read_csv(StringIO(self.data), sep=',', header=None, - float_precision=None, engine='python') - - def time_default_converter_with_decimal_python_engine(self): - read_csv(StringIO(self.data2), sep=';', header=None, - float_precision=None, decimal=',', engine='python') - - def time_precise_converter(self): - read_csv(StringIO(self.data), sep=',', header=None, - float_precision='high') - - def time_roundtrip_converter(self): - read_csv(StringIO(self.data), sep=',', header=None, - float_precision='round_trip') - - -class read_csv_categorical(object): - goal_time = 0.2 - - def setup(self): - N = 100000 - group1 = ['aaaaaaaa', 'bbbbbbb', 'cccccccc', 'dddddddd', 'eeeeeeee'] - df = DataFrame({'a': np.random.choice(group1, N).astype('object'), - 'b': np.random.choice(group1, N).astype('object'), - 'c': np.random.choice(group1, N).astype('object')}) - df.to_csv('strings.csv', index=False) - - def time_convert_post(self): - read_csv('strings.csv').apply(pd.Categorical) - - def time_convert_direct(self): - read_csv('strings.csv', dtype='category') - - def teardown(self): - os.remove('strings.csv') - - -class read_csv_dateparsing(object): - goal_time = 0.2 - - def setup(self): - self.N = 10000 - self.K = 8 - self.data = 'KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\n KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\n KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\n KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\n KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n ' - self.data = (self.data * 200) - self.data2 = 'KORD,19990127 19:00:00, 
18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\n KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\n KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\n KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\n KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n ' - self.data2 = (self.data2 * 200) - - def time_multiple_date(self): - read_csv(StringIO(self.data), sep=',', header=None, - parse_dates=[[1, 2], [1, 3]]) - - def time_baseline(self): - read_csv(StringIO(self.data2), sep=',', header=None, parse_dates=[1])
There are a lot of io benchmarks scattered among `hdfstore_bench.py`, `io_bench.py`, `parser_vb.py`, and `packer.py`. I think it would be a lot cleaner if each io method had it's own file (like `io_sql.py` which already exists) This PR creates `io_csv.py` and `io_json.py` that consolidates the csv and json benchmarks that exist. Benchmarks were flake8'd, `param`'d, and simplified where available: ``` $ asv dev -b ^io_json · Discovering benchmarks · Running 15 total benchmarks (1 commits * 1 environments * 15 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 6.67%] ··· Running io_json.ReadJSON.time_read_json ok [ 6.67%] ···· ========= ======= ========== -- index --------- ------------------ orient int datetime ========= ======= ========== split 294ms 272ms index 7.61s 7.65s records 614ms 613ms ========= ======= ========== [ 13.33%] ··· Running io_json.ReadJSONLines.peakmem_read_json_lines ok [ 13.33%] ···· ========== ====== index ---------- ------ int 192M datetime 192M ========== ====== [ 20.00%] ··· Running io_json.ReadJSONLines.peakmem_read_json_lines_concat ok [ 20.00%] ···· ========== ====== index ---------- ------ int 164M datetime 164M ========== ====== [ 26.67%] ··· Running io_json.ReadJSONLines.time_read_json_lines ok [ 26.67%] ···· ========== ======= index ---------- ------- int 755ms datetime 720ms ========== ======= [ 33.33%] ··· Running io_json.ReadJSONLines.time_read_json_lines_concat ok [ 33.33%] ···· ========== ======= index ---------- ------- int 749ms datetime 752ms ========== ======= [ 40.00%] ··· Running io_json.ToJSON.time_delta_int_tstamp ok [ 40.00%] ···· ========= ======= orient --------- ------- split 238ms columns 234ms index 397ms ========= ======= [ 46.67%] ··· Running io_json.ToJSON.time_delta_int_tstamp_lines ok [ 46.67%] ···· ========= ======= orient --------- ------- split 593ms columns 566ms index 540ms 
========= ======= [ 53.33%] ··· Running io_json.ToJSON.time_float_int ok [ 53.33%] ···· ========= ======= orient --------- ------- split 237ms columns 214ms index 371ms ========= ======= [ 60.00%] ··· Running io_json.ToJSON.time_float_int_lines ok [ 60.00%] ···· ========= ======= orient --------- ------- split 569ms columns 570ms index 570ms ========= ======= [ 66.67%] ··· Running io_json.ToJSON.time_float_int_str ok [ 66.67%] ···· ========= ======= orient --------- ------- split 238ms columns 216ms index 384ms ========= ======= [ 73.33%] ··· Running io_json.ToJSON.time_float_int_str_lines ok [ 73.33%] ···· ========= ======= orient --------- ------- split 598ms columns 614ms index 617ms ========= ======= [ 80.00%] ··· Running io_json.ToJSON.time_floats_with_dt_index ok [ 80.00%] ···· ========= ======= orient --------- ------- split 182ms columns 209ms index 208ms ========= ======= [ 86.67%] ··· Running io_json.ToJSON.time_floats_with_dt_index_lines ok [ 86.67%] ···· ========= ======= orient --------- ------- split 442ms columns 444ms index 449ms ========= ======= [ 93.33%] ··· Running io_json.ToJSON.time_floats_with_int_idex_lines ok [ 93.33%] ···· ========= ======= orient --------- ------- split 442ms columns 442ms index 441ms ========= ======= [100.00%] ··· Running io_json.ToJSON.time_floats_with_int_index ok [100.00%] ···· ========= ======= orient --------- ------- split 159ms columns 171ms index 182ms ========= ======= ``` ``` asv dev -b ^io_csv · Discovering benchmarks · Running 16 total benchmarks (1 commits * 1 environments * 16 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 6.25%] ··· Running io_csv.ReadCSVCategorical.time_convert_direct 99.1ms [ 12.50%] ··· Running io_csv.ReadCSVCategorical.time_convert_post 139ms [ 18.75%] ··· Running io_csv.ReadCSVComment.time_comment 60.7ms [ 25.00%] ··· Running 
io_csv.ReadCSVDInferDatetimeFormat.time_read_csv ok [ 25.00%] ···· ======================= ======== ========= ======== -- format ----------------------- --------------------------- infer_datetime_format custom iso8601 ymd ======================= ======== ========= ======== True 23.0ms 4.65ms 4.77ms False 619ms 3.49ms 3.18ms ======================= ======== ========= ======== [ 31.25%] ··· Running io_csv.ReadCSVFloatPrecision.time_read_csv ok [ 31.25%] ···· ===== ========== ========== ================ ========== ========== ================ -- decimal / float_precision ----- ----------------------------------------------------------------------------- sep . / None . / high . / round_trip _ / None _ / high _ / round_trip ===== ========== ========== ================ ========== ========== ================ , 3.89ms 3.73ms 5.12ms 4.17ms 4.10ms 4.16ms ; 3.87ms 3.71ms 5.23ms 4.11ms 4.18ms 4.23ms ===== ========== ========== ================ ========== ========== ================ [ 37.50%] ··· Running io_csv.ReadCSVFloatPrecision.time_read_csv_python_engine ok [ 37.50%] ···· ===== ========== ========== ================ ========== ========== ================ -- decimal / float_precision ----- ----------------------------------------------------------------------------- sep . / None . / high . 
/ round_trip _ / None _ / high _ / round_trip ===== ========== ========== ================ ========== ========== ================ , 7.84ms 7.76ms 7.80ms 6.14ms 6.24ms 6.25ms ; 7.76ms 7.80ms 8.41ms 6.10ms 6.25ms 6.14ms ===== ========== ========== ================ ========== ========== ================ [ 43.75%] ··· Running io_csv.ReadCSVParseDates.time_baseline 2.85ms [ 50.00%] ··· Running io_csv.ReadCSVParseDates.time_multiple_date 2.84ms [ 56.25%] ··· Running io_csv.ReadCSVSkipRows.time_skipprows ok [ 56.25%] ···· ========== ======== skiprows ---------- -------- None 44.8ms 10000 30.9ms ========== ======== [ 62.50%] ··· Running io_csv.ReadCSVThousands.time_thousands ok [ 62.50%] ···· ===== ======== ======== -- thousands ----- ----------------- sep None , ===== ======== ======== , 38.5ms 37.0ms | 37.8ms 39.6ms ===== ======== ======== [ 68.75%] ··· Running io_csv.ReadUint64Integers.time_read_uint64 8.99ms [ 75.00%] ··· Running io_csv.ReadUint64Integers.time_read_uint64_na_values 13.2ms [ 81.25%] ··· Running io_csv.ReadUint64Integers.time_read_uint64_neg_values 12.9ms [ 87.50%] ··· Running io_csv.S3.time_read_csv_10_rows ok [ 87.50%] ···· ============= ======== ======= -- engine ------------- ---------------- compression python c ============= ======== ======= None 6.41s 6.13s gzip 6.20s 6.60s bz2 35.1s n/a ============= ======== ======= [ 93.75%] ··· Running io_csv.ToCSV.time_frame ok [ 93.75%] ···· ======= ======== kind ------- -------- wide 84.4ms long 164ms mixed 37.4ms ======= ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18815
2017-12-18T02:49:16Z
2017-12-19T11:33:59Z
2017-12-19T11:33:59Z
2017-12-19T17:36:25Z
Removed old xarr and ipython decorators
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index 90d2427bb3bd7..9da59ca77d862 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -17,6 +17,7 @@ assert_almost_equal) import pandas.util.testing as tm +import pandas.util._test_decorators as td from .test_generic import Generic try: @@ -218,8 +219,8 @@ def test_to_xarray_index_types(self, index): assert_frame_equal(result.to_dataframe(), expected, check_index_type=False, check_categorical=False) + @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): - tm._skip_if_no_xarray() from xarray import Dataset df = DataFrame({'a': list('abc'), diff --git a/pandas/tests/generic/test_panel.py b/pandas/tests/generic/test_panel.py index b1d9af9c8b0af..1c8be94d6eac3 100644 --- a/pandas/tests/generic/test_panel.py +++ b/pandas/tests/generic/test_panel.py @@ -11,6 +11,7 @@ assert_almost_equal) import pandas.util.testing as tm +import pandas.util._test_decorators as td from .test_generic import Generic @@ -18,9 +19,8 @@ class TestPanel(Generic): _typ = Panel _comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True) + @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): - - tm._skip_if_no_xarray() from xarray import DataArray with catch_warnings(record=True): @@ -44,9 +44,8 @@ class TestPanel4D(Generic): def test_sample(self): pytest.skip("sample on Panel4D") + @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): - - tm._skip_if_no_xarray() from xarray import DataArray with catch_warnings(record=True): diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py index 701d174f3e929..3393d7704e411 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -15,6 +15,7 @@ assert_almost_equal) import pandas.util.testing as tm +import pandas.util._test_decorators as td from .test_generic import Generic try: @@ -200,9 +201,8 @@ 
def test_to_xarray_index_types(self, index): check_index_type=False, check_categorical=True) + @td.skip_if_no('xarray', min_version='0.7.0') def test_to_xarray(self): - - tm._skip_if_no_xarray() from xarray import DataArray s = Series([]) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 2a0a7c9301752..4c6e3217ed6b4 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -348,17 +348,6 @@ def _skip_if_no_lzma(): return _check_if_lzma() or pytest.skip('need backports.lzma to run') -def _skip_if_no_xarray(): - import pytest - - xarray = pytest.importorskip("xarray") - v = xarray.__version__ - - if LooseVersion(v) < LooseVersion('0.7.0'): - import pytest - pytest.skip("xarray version is too low: {version}".format(version=v)) - - def skip_if_no_ne(engine='numexpr'): from pandas.core.computation.expressions import ( _USE_NUMEXPR, @@ -383,11 +372,6 @@ def _skip_if_no_mock(): import pytest raise pytest.skip("mock is not installed") - -def _skip_if_no_ipython(): - import pytest - pytest.importorskip("IPython") - # ----------------------------------------------------------------------------- # locale utilities
- [ ] progress towards #18190 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Didn't think either of these were used enough to justify as their own decorator, so I removed them from the old ``testing`` module and just used the generic ``skip_if_no`` decorator where applicable
https://api.github.com/repos/pandas-dev/pandas/pulls/18814
2017-12-17T17:06:15Z
2017-12-18T12:12:45Z
2017-12-18T12:12:45Z
2017-12-18T13:35:37Z
DEPR: Deprecate is_copy (#18801)
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..3188b66c619ae 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -213,6 +213,7 @@ Deprecations retain the previous behavior, use a list instead of a tuple (:issue:`18314`) - ``Series.valid`` is deprecated. Use :meth:`Series.dropna` instead (:issue:`18800`). - :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`) +- The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`). .. _whatsnew_0220.prior_deprecations: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4eb7865523cc3..98d2c3b34459c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -108,7 +108,7 @@ class NDFrame(PandasObject, SelectionMixin): axes : list copy : boolean, default False """ - _internal_names = ['_data', '_cacher', '_item_cache', '_cache', 'is_copy', + _internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_is_copy', '_subtyp', '_name', '_index', '_default_kind', '_default_fill_value', '_metadata', '__array_struct__', '__array_interface__'] @@ -117,7 +117,7 @@ class NDFrame(PandasObject, SelectionMixin): _deprecations = frozenset(['as_blocks', 'blocks', 'consolidate', 'convert_objects']) _metadata = [] - is_copy = None + _is_copy = None def __init__(self, data, axes=None, copy=False, dtype=None, fastpath=False): @@ -132,10 +132,22 @@ def __init__(self, data, axes=None, copy=False, dtype=None, for i, ax in enumerate(axes): data = data.reindex_axis(ax, axis=i) - object.__setattr__(self, 'is_copy', None) + object.__setattr__(self, '_is_copy', None) object.__setattr__(self, '_data', data) object.__setattr__(self, '_item_cache', {}) + @property + def is_copy(self): + warnings.warn("Attribute 'is_copy' is deprecated and will be removed " + "in a future version.", FutureWarning, stacklevel=2) + return self._is_copy + + @is_copy.setter 
+ def is_copy(self, msg): + warnings.warn("Attribute 'is_copy' is deprecated and will be removed " + "in a future version.", FutureWarning, stacklevel=2) + self._is_copy = msg + def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same @@ -2153,7 +2165,7 @@ def _get_item_cache(self, item): res._set_as_cached(item, self) # for a chain - res.is_copy = self.is_copy + res._is_copy = self._is_copy return res def _set_as_cached(self, item, cacher): @@ -2264,12 +2276,12 @@ def _set_item(self, key, value): def _set_is_copy(self, ref=None, copy=True): if not copy: - self.is_copy = None + self._is_copy = None else: if ref is not None: - self.is_copy = weakref.ref(ref) + self._is_copy = weakref.ref(ref) else: - self.is_copy = None + self._is_copy = None def _check_is_chained_assignment_possible(self): """ @@ -2288,7 +2300,7 @@ def _check_is_chained_assignment_possible(self): self._check_setitem_copy(stacklevel=4, t='referant', force=True) return True - elif self.is_copy: + elif self._is_copy: self._check_setitem_copy(stacklevel=4, t='referant') return False @@ -2323,7 +2335,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): """ - if force or self.is_copy: + if force or self._is_copy: value = config.get_option('mode.chained_assignment') if value is None: @@ -2333,23 +2345,23 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): # the copy weakref try: gc.collect(2) - if not gc.get_referents(self.is_copy()): - self.is_copy = None + if not gc.get_referents(self._is_copy()): + self._is_copy = None return except Exception: pass # we might be a false positive try: - if self.is_copy().shape == self.shape: - self.is_copy = None + if self._is_copy().shape == self.shape: + self._is_copy = None return except Exception: pass # a custom message - if isinstance(self.is_copy, string_types): - t = self.is_copy + if isinstance(self._is_copy, string_types): + t = self._is_copy elif t == 'referant': t = ("\n" 
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 27e1006c23174..116c7eb8c7958 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -113,9 +113,9 @@ def _delegate_property_get(self, name): result = Series(result, index=self.index, name=self.name) # setting this object will show a SettingWithCopyWarning/Error - result.is_copy = ("modifications to a property of a datetimelike " - "object are not supported and are discarded. " - "Change values on the original.") + result._is_copy = ("modifications to a property of a datetimelike " + "object are not supported and are discarded. " + "Change values on the original.") return result @@ -136,9 +136,9 @@ def _delegate_method(self, name, *args, **kwargs): result = Series(result, index=self.index, name=self.name) # setting this object will show a SettingWithCopyWarning/Error - result.is_copy = ("modifications to a method of a datetimelike object " - "are not supported and are discarded. Change " - "values on the original.") + result._is_copy = ("modifications to a method of a datetimelike " + "object are not supported and are discarded. 
" + "Change values on the original.") return result diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c6642657e386e..de6713249a7c7 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -366,7 +366,7 @@ def _setitem_with_indexer(self, indexer, value): labels = index.insert(len(index), key) self.obj._data = self.obj.reindex(labels, axis=i)._data self.obj._maybe_update_cacher(clear=True) - self.obj.is_copy = None + self.obj._is_copy = None nindexer.append(labels.get_loc(key)) diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 510ca6ac83ec0..9e30ed80278e0 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -475,10 +475,10 @@ def test_set_value_keeps_names(self): columns=['one', 'two', 'three', 'four'], index=idx) df = df.sort_index() - assert df.is_copy is None + assert df._is_copy is None assert df.index.names == ('Name', 'Number') df.at[('grethe', '4'), 'one'] = 99.34 - assert df.is_copy is None + assert df._is_copy is None assert df.index.names == ('Name', 'Number') def test_copy_names(self): diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index d76c53e7f36db..0e396a3248e3f 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -136,7 +136,7 @@ def test_detect_chained_assignment(self): expected = DataFrame([[-5, 1], [-6, 3]], columns=list('AB')) df = DataFrame(np.arange(4).reshape(2, 2), columns=list('AB'), dtype='int64') - assert df.is_copy is None + assert df._is_copy is None df['A'][0] = -5 df['A'][1] = -6 @@ -145,7 +145,7 @@ def test_detect_chained_assignment(self): # test with the chaining df = DataFrame({'A': Series(range(2), dtype='int64'), 'B': np.array(np.arange(2, 4), dtype=np.float64)}) - assert df.is_copy is None + assert df._is_copy is None with pytest.raises(com.SettingWithCopyError): df['A'][0] = -5 @@ 
-153,7 +153,7 @@ def test_detect_chained_assignment(self): with pytest.raises(com.SettingWithCopyError): df['A'][1] = np.nan - assert df['A'].is_copy is None + assert df['A']._is_copy is None # Using a copy (the chain), fails df = DataFrame({'A': Series(range(2), dtype='int64'), @@ -166,7 +166,7 @@ def test_detect_chained_assignment(self): df = DataFrame({'a': ['one', 'one', 'two', 'three', 'two', 'one', 'six'], 'c': Series(range(7), dtype='int64')}) - assert df.is_copy is None + assert df._is_copy is None with pytest.raises(com.SettingWithCopyError): indexer = df.a.str.startswith('o') @@ -186,7 +186,7 @@ def test_detect_chained_assignment(self): # gh-5475: Make sure that is_copy is picked up reconstruction df = DataFrame({"A": [1, 2]}) - assert df.is_copy is None + assert df._is_copy is None with tm.ensure_clean('__tmp__pickle') as path: df.to_pickle(path) @@ -211,16 +211,16 @@ def random_text(nobs=100): # Always a copy x = df.iloc[[0, 1, 2]] - assert x.is_copy is not None + assert x._is_copy is not None x = df.iloc[[0, 1, 2, 4]] - assert x.is_copy is not None + assert x._is_copy is not None # Explicitly copy indexer = df.letters.apply(lambda x: len(x) > 10) df = df.loc[indexer].copy() - assert df.is_copy is None + assert df._is_copy is None df['letters'] = df['letters'].apply(str.lower) # Implicitly take @@ -228,7 +228,7 @@ def random_text(nobs=100): indexer = df.letters.apply(lambda x: len(x) > 10) df = df.loc[indexer] - assert df.is_copy is not None + assert df._is_copy is not None df['letters'] = df['letters'].apply(str.lower) # Implicitly take 2 @@ -236,14 +236,14 @@ def random_text(nobs=100): indexer = df.letters.apply(lambda x: len(x) > 10) df = df.loc[indexer] - assert df.is_copy is not None + assert df._is_copy is not None df.loc[:, 'letters'] = df['letters'].apply(str.lower) # Should be ok even though it's a copy! 
- assert df.is_copy is None + assert df._is_copy is None df['letters'] = df['letters'].apply(str.lower) - assert df.is_copy is None + assert df._is_copy is None df = random_text(100000) indexer = df.letters.apply(lambda x: len(x) > 10) @@ -252,7 +252,7 @@ def random_text(nobs=100): # an identical take, so no copy df = DataFrame({'a': [1]}).dropna() - assert df.is_copy is None + assert df._is_copy is None df['a'] += 1 # Inplace ops, originally from: @@ -418,3 +418,14 @@ def test_cache_updating(self): tm.assert_frame_equal(df, expected) expected = Series([0, 0, 0, 2, 0], name='f') tm.assert_series_equal(df.f, expected) + + def test_deprecate_is_copy(self): + # GH18801 + df = DataFrame({"A": [1, 2, 3]}) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + # getter + df.is_copy + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + # setter + df.is_copy = "test deprecated is_copy" diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 040c3adbcaf93..34c1ee5683183 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -607,7 +607,7 @@ def test_xs(self): # Mixed-type yields a copy. self.panel['strings'] = 'foo' result = self.panel.xs('D', axis=2) - assert result.is_copy is not None + assert result._is_copy is not None def test_getitem_fancy_labels(self): with catch_warnings(record=True): diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index b064e3c7012bc..e194136ec716d 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -510,7 +510,7 @@ def test_xs(self): with catch_warnings(record=True): result = self.panel4d.xs('D', axis=3) - assert result.is_copy is not None + assert result._is_copy is not None def test_getitem_fancy_labels(self): with catch_warnings(record=True):
Not all tests for this branch on the fork are passing, but these are the same tests that fail for the clean master and appear to be unrelated to the changes. References to is_copy in tests were changed to the internal attribute _is_copy assuming that, though is_copy is not meant to be a public attribute, checking the state of this attribute is necessary to check consistent internal behavior. Closes #18801.
https://api.github.com/repos/pandas-dev/pandas/pulls/18812
2017-12-17T14:10:36Z
2017-12-21T15:07:31Z
2017-12-21T15:07:31Z
2017-12-23T15:58:52Z
DOC: improve pd.Timestamp docs with more examples and clarify differe…
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index a058b9d7de9c4..086657e8c97b4 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -381,7 +381,7 @@ class NaTType(_NaT): Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding UTC time. @@ -407,7 +407,7 @@ class NaTType(_NaT): date corresponding to a proleptic Gregorian ordinal freq : str, DateOffset Offset which Timestamp will have - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will have. offset : str, DateOffset Deprecated, use freq @@ -430,7 +430,7 @@ class NaTType(_NaT): Parameters ---------- - tz : string / timezone object, default None + tz : str or timezone object, default None Timezone to localize to """) today = _make_nat_func('today', # noqa:E128 @@ -443,7 +443,7 @@ class NaTType(_NaT): Parameters ---------- - tz : string / timezone object, default None + tz : str or timezone object, default None Timezone to localize to """) round = _make_nat_func('round', # noqa:E128 @@ -485,7 +485,7 @@ class NaTType(_NaT): Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding UTC time. @@ -505,7 +505,7 @@ class NaTType(_NaT): Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding local time. 
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 478611fe9cab9..1bbfa1b3aa4d8 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -369,7 +369,7 @@ cdef class _Timestamp(datetime): class Timestamp(_Timestamp): """Pandas replacement for datetime.datetime - TimeStamp is the pandas equivalent of python's Datetime + Timestamp is the pandas equivalent of python's Datetime and is interchangable with it in most cases. It's the type used for the entries that make up a DatetimeIndex, and other timeseries oriented data structures in pandas. @@ -380,10 +380,12 @@ class Timestamp(_Timestamp): Value to be converted to Timestamp freq : str, DateOffset Offset which Timestamp will have - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will have. - unit : string - numpy unit used for conversion, if ts_input is int or float + unit : str + Unit used for conversion if ts_input is of type int or float. The + valid values are 'D', 'h', 'm', 's', 'ms', 'us', and 'ns'. For + example, 's' means seconds and 'ms' means milliseconds. 
offset : str, DateOffset Deprecated, use freq @@ -405,9 +407,23 @@ class Timestamp(_Timestamp): Examples -------- + Using the primary calling convention: + + This converts a datetime-like string >>> pd.Timestamp('2017-01-01T12') Timestamp('2017-01-01 12:00:00') + This converts a float representing a Unix epoch in units of seconds + >>> pd.Timestamp(1513393355.5, unit='s') + Timestamp('2017-12-16 03:02:35.500000') + + This converts an int representing a Unix-epoch in units of seconds + and for a particular timezone + >>> pd.Timestamp(1513393355, unit='s', tz='US/Pacific') + Timestamp('2017-12-15 19:02:35-0800', tz='US/Pacific') + + Using the other two forms that mimic the API for ``datetime.datetime``: + >>> pd.Timestamp(2017, 1, 1, 12) Timestamp('2017-01-01 12:00:00') @@ -429,7 +445,7 @@ class Timestamp(_Timestamp): date corresponding to a proleptic Gregorian ordinal freq : str, DateOffset Offset which Timestamp will have - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will have. offset : str, DateOffset Deprecated, use freq @@ -447,7 +463,7 @@ class Timestamp(_Timestamp): Parameters ---------- - tz : string / timezone object, default None + tz : str or timezone object, default None Timezone to localize to """ if is_string_object(tz): @@ -465,7 +481,7 @@ class Timestamp(_Timestamp): Parameters ---------- - tz : string / timezone object, default None + tz : str or timezone object, default None Timezone to localize to """ return cls.now(tz) @@ -774,7 +790,7 @@ class Timestamp(_Timestamp): Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding local time. 
@@ -828,7 +844,7 @@ class Timestamp(_Timestamp): Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will be converted to. None will remove timezone holding UTC time.
…nt forms of API usage
https://api.github.com/repos/pandas-dev/pandas/pulls/18811
2017-12-17T03:51:32Z
2017-12-20T14:23:23Z
2017-12-20T14:23:23Z
2017-12-20T14:23:26Z
BUG: pivot_table strings as aggfunc
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e82bdd7a88224..3f300deddebeb 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -344,7 +344,8 @@ Reshaping - Bug in :func:`DataFrame.stack` which fails trying to sort mixed type levels under Python 3 (:issue:`18310`) - Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`) - Bug in :func:`Series.rank` where ``Series`` containing ``NaT`` modifies the ``Series`` inplace (:issue:`18521`) -- +- Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`) + Numeric ^^^^^^^ diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index a7695bd6f732f..77babf718d78c 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -38,7 +38,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', fill_value=fill_value, aggfunc=func, margins=margins, margins_name=margins_name) pieces.append(table) - keys.append(func.__name__) + keys.append(getattr(func, '__name__', func)) + return concat(pieces, keys=keys, axis=1) keys = index + columns diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index c8b7ae044b71c..6b44a339fad73 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1109,6 +1109,51 @@ def test_pivot_margins_name_unicode(self): expected = pd.DataFrame(index=index) tm.assert_frame_equal(table, expected) + def test_pivot_string_as_func(self): + # GH #18713 + # for correctness purposes + data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', + 'bar', 'bar', 'foo', 'foo', 'foo'], + 'B': ['one', 'one', 'one', 'two', 'one', 'one', + 'one', 'two', 'two', 'two', 'one'], + 'C': range(11)}) + + result = pivot_table(data, index='A', columns='B', 
aggfunc='sum') + mi = MultiIndex(levels=[['C'], ['one', 'two']], + labels=[[0, 0], [0, 1]], names=[None, 'B']) + expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13}, + ('C', 'two'): {'bar': 7, 'foo': 20}}, + columns=mi).rename_axis('A') + tm.assert_frame_equal(result, expected) + + result = pivot_table(data, index='A', columns='B', + aggfunc=['sum', 'mean']) + mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']], + labels=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]], + names=[None, None, 'B']) + expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25}, + ('mean', 'C', 'two'): {'bar': 7.0, + 'foo': 6.666666666666667}, + ('sum', 'C', 'one'): {'bar': 15, 'foo': 13}, + ('sum', 'C', 'two'): {'bar': 7, 'foo': 20}}, + columns=mi).rename_axis('A') + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('f, f_numpy', + [('sum', np.sum), + ('mean', np.mean), + ('std', np.std), + (['sum', 'mean'], [np.sum, np.mean]), + (['sum', 'std'], [np.sum, np.std]), + (['std', 'mean'], [np.std, np.mean])]) + def test_pivot_string_func_vs_func(self, f, f_numpy): + # GH #18713 + # for consistency purposes + result = pivot_table(self.data, index='A', columns='B', aggfunc=f) + expected = pivot_table(self.data, index='A', columns='B', + aggfunc=f_numpy) + tm.assert_frame_equal(result, expected) + class TestCrosstab(object):
- [x] closes #18713 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18810
2017-12-17T01:16:48Z
2017-12-23T20:43:16Z
2017-12-23T20:43:16Z
2017-12-24T00:16:15Z
Centralize arithmetic tests for datetime/timedelta series
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 6cc866a35514f..89a6311153d15 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -28,8 +28,7 @@ from .common import TestData -class TestSeriesOperators(TestData): - +class TestSeriesComparisons(object): def test_series_comparison_scalars(self): series = Series(date_range('1/1/2000', periods=10)) @@ -63,1326 +62,1410 @@ def test_comparisons(self): assert_series_equal(s == s2, exp) assert_series_equal(s2 == s, exp) - def test_op_method(self): - def check(series, other, check_reverse=False): - simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow'] - if not compat.PY3: - simple_ops.append('div') + def test_operator_series_comparison_zerorank(self): + # GH 13006 + result = np.float64(0) > pd.Series([1, 2, 3]) + expected = 0.0 > pd.Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + result = pd.Series([1, 2, 3]) < np.float64(0) + expected = pd.Series([1, 2, 3]) < 0.0 + tm.assert_series_equal(result, expected) + result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2]) + expected = 0.0 > pd.Series([1, 2, 3]) + tm.assert_series_equal(result, expected) - for opname in simple_ops: - op = getattr(Series, opname) + def test_object_comparisons(self): + s = Series(['a', 'b', np.nan, 'c', 'a']) - if op == 'div': - alt = operator.truediv - else: - alt = getattr(operator, opname) + result = s == 'a' + expected = Series([True, False, False, False, True]) + assert_series_equal(result, expected) - result = op(series, other) - expected = alt(series, other) - assert_almost_equal(result, expected) - if check_reverse: - rop = getattr(Series, "r" + opname) - result = rop(series, other) - expected = alt(other, series) - assert_almost_equal(result, expected) + result = s < 'a' + expected = Series([False, False, False, False, False]) + assert_series_equal(result, expected) - check(self.ts, self.ts * 2) - check(self.ts, self.ts[::2]) - 
check(self.ts, 5, check_reverse=True) - check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True) + result = s != 'a' + expected = -(s == 'a') + assert_series_equal(result, expected) - def test_neg(self): - assert_series_equal(-self.series, -1 * self.series) + def test_categorical_comparisons(self): + # GH 8938 + # allow equality comparisons + a = Series(list('abc'), dtype="category") + b = Series(list('abc'), dtype="object") + c = Series(['a', 'b', 'cc'], dtype="object") + d = Series(list('acb'), dtype="object") + e = Categorical(list('abc')) + f = Categorical(list('acb')) - def test_invert(self): - assert_series_equal(-(self.series < 0), ~(self.series < 0)) + # vs scalar + assert not (a == 'a').all() + assert ((a != 'a') == ~(a == 'a')).all() - def test_div(self): - with np.errstate(all='ignore'): - # no longer do integer div for any ops, but deal with the 0's - p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) - result = p['first'] / p['second'] - expected = Series( - p['first'].values.astype(float) / p['second'].values, - dtype='float64') - expected.iloc[0:3] = np.inf - assert_series_equal(result, expected) + assert not ('a' == a).all() + assert (a == 'a')[0] + assert ('a' == a)[0] + assert not ('a' != a)[0] - result = p['first'] / 0 - expected = Series(np.inf, index=p.index, name='first') - assert_series_equal(result, expected) + # vs list-like + assert (a == a).all() + assert not (a != a).all() - p = p.astype('float64') - result = p['first'] / p['second'] - expected = Series(p['first'].values / p['second'].values) - assert_series_equal(result, expected) + assert (a == list(a)).all() + assert (a == b).all() + assert (b == a).all() + assert ((~(a == b)) == (a != b)).all() + assert ((~(b == a)) == (b != a)).all() - p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]}) - result = p['first'] / p['second'] - assert_series_equal(result, p['first'].astype('float64'), - check_names=False) - assert result.name is None - assert not 
result.equals(p['second'] / p['first']) + assert not (a == c).all() + assert not (c == a).all() + assert not (a == d).all() + assert not (d == a).all() - # inf signing - s = Series([np.nan, 1., -1.]) - result = s / 0 - expected = Series([np.nan, np.inf, -np.inf]) - assert_series_equal(result, expected) + # vs a cat-like + assert (a == e).all() + assert (e == a).all() + assert not (a == f).all() + assert not (f == a).all() - # float/integer issue - # GH 7785 - p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)}) - expected = Series([-0.01, -np.inf]) + assert ((~(a == e) == (a != e)).all()) + assert ((~(e == a) == (e != a)).all()) + assert ((~(a == f) == (a != f)).all()) + assert ((~(f == a) == (f != a)).all()) - result = p['second'].div(p['first']) - assert_series_equal(result, expected, check_names=False) + # non-equality is not comparable + pytest.raises(TypeError, lambda: a < b) + pytest.raises(TypeError, lambda: b < a) + pytest.raises(TypeError, lambda: a > b) + pytest.raises(TypeError, lambda: b > a) - result = p['second'] / p['first'] - assert_series_equal(result, expected) + def test_comparison_tuples(self): + # GH11339 + # comparisons vs tuple + s = Series([(1, 1), (1, 2)]) - # GH 9144 - s = Series([-1, 0, 1]) + result = s == (1, 2) + expected = Series([False, True]) + assert_series_equal(result, expected) - result = 0 / s - expected = Series([0.0, nan, 0.0]) - assert_series_equal(result, expected) + result = s != (1, 2) + expected = Series([True, False]) + assert_series_equal(result, expected) - result = s / 0 - expected = Series([-inf, nan, inf]) - assert_series_equal(result, expected) + result = s == (0, 0) + expected = Series([False, False]) + assert_series_equal(result, expected) - result = s // 0 - expected = Series([-inf, nan, inf]) - assert_series_equal(result, expected) + result = s != (0, 0) + expected = Series([True, True]) + assert_series_equal(result, expected) - # GH 8674 - zero_array = np.array([0] * 5) - data = np.random.randn(5) - 
expected = pd.Series([0.] * 5) - result = zero_array / pd.Series(data) - assert_series_equal(result, expected) + s = Series([(1, 1), (1, 1)]) - result = pd.Series(zero_array) / data - assert_series_equal(result, expected) + result = s == (1, 1) + expected = Series([True, True]) + assert_series_equal(result, expected) - result = pd.Series(zero_array) / pd.Series(data) - assert_series_equal(result, expected) + result = s != (1, 1) + expected = Series([False, False]) + assert_series_equal(result, expected) - def test_operators(self): - def _check_op(series, other, op, pos_only=False, - check_dtype=True): - left = np.abs(series) if pos_only else series - right = np.abs(other) if pos_only else other + s = Series([frozenset([1]), frozenset([1, 2])]) - cython_or_numpy = op(left, right) - python = left.combine(right, op) - assert_series_equal(cython_or_numpy, python, - check_dtype=check_dtype) + result = s == frozenset([1]) + expected = Series([True, False]) + assert_series_equal(result, expected) - def check(series, other): - simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod'] + def test_comparison_operators_with_nas(self): + s = Series(bdate_range('1/1/2000', periods=10), dtype=object) + s[::2] = np.nan - for opname in simple_ops: - _check_op(series, other, getattr(operator, opname)) + # test that comparisons work + ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] + for op in ops: + val = s[5] - _check_op(series, other, operator.pow, pos_only=True) + f = getattr(operator, op) + result = f(s, val) - _check_op(series, other, lambda x, y: operator.add(y, x)) - _check_op(series, other, lambda x, y: operator.sub(y, x)) - _check_op(series, other, lambda x, y: operator.truediv(y, x)) - _check_op(series, other, lambda x, y: operator.floordiv(y, x)) - _check_op(series, other, lambda x, y: operator.mul(y, x)) - _check_op(series, other, lambda x, y: operator.pow(y, x), - pos_only=True) - _check_op(series, other, lambda x, y: operator.mod(y, x)) + expected = f(s.dropna(), 
val).reindex(s.index) - check(self.ts, self.ts * 2) - check(self.ts, self.ts * 0) - check(self.ts, self.ts[::2]) - check(self.ts, 5) + if op == 'ne': + expected = expected.fillna(True).astype(bool) + else: + expected = expected.fillna(False).astype(bool) - def check_comparators(series, other, check_dtype=True): - _check_op(series, other, operator.gt, check_dtype=check_dtype) - _check_op(series, other, operator.ge, check_dtype=check_dtype) - _check_op(series, other, operator.eq, check_dtype=check_dtype) - _check_op(series, other, operator.lt, check_dtype=check_dtype) - _check_op(series, other, operator.le, check_dtype=check_dtype) + assert_series_equal(result, expected) - check_comparators(self.ts, 5) - check_comparators(self.ts, self.ts + 1, check_dtype=False) + # fffffffuuuuuuuuuuuu + # result = f(val, s) + # expected = f(val, s.dropna()).reindex(s.index) + # assert_series_equal(result, expected) - def test_divmod(self): - def check(series, other): - results = divmod(series, other) - if isinstance(other, Iterable) and len(series) != len(other): - # if the lengths don't match, this is the test where we use - # `self.ts[::2]`. Pad every other value in `other_np` with nan. 
- other_np = [] - for n in other: - other_np.append(n) - other_np.append(np.nan) - else: - other_np = other - other_np = np.asarray(other_np) - with np.errstate(all='ignore'): - expecteds = divmod(series.values, np.asarray(other_np)) + # boolean &, |, ^ should work with object arrays and propagate NAs - for result, expected in zip(results, expecteds): - # check the values, name, and index separatly - assert_almost_equal(np.asarray(result), expected) + ops = ['and_', 'or_', 'xor'] + mask = s.isna() + for bool_op in ops: + f = getattr(operator, bool_op) - assert result.name == series.name - assert_index_equal(result.index, series.index) + filled = s.fillna(s[0]) - check(self.ts, self.ts * 2) - check(self.ts, self.ts * 0) - check(self.ts, self.ts[::2]) - check(self.ts, 5) + result = f(s < s[9], s > s[3]) - def test_operators_empty_int_corner(self): - s1 = Series([], [], dtype=np.int32) - s2 = Series({'x': 0.}) - assert_series_equal(s1 * s2, Series([np.nan], index=['x'])) + expected = f(filled < filled[9], filled > filled[3]) + expected[mask] = False + assert_series_equal(result, expected) - def test_operators_timedelta64(self): + def test_comparison_object_numeric_nas(self): + s = Series(np.random.randn(10), dtype=object) + shifted = s.shift(2) - # invalid ops - pytest.raises(Exception, self.objSeries.__add__, 1) - pytest.raises(Exception, self.objSeries.__add__, - np.array(1, dtype=np.int64)) - pytest.raises(Exception, self.objSeries.__sub__, 1) - pytest.raises(Exception, self.objSeries.__sub__, - np.array(1, dtype=np.int64)) + ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] + for op in ops: + f = getattr(operator, op) - # seriese ops - v1 = date_range('2012-1-1', periods=3, freq='D') - v2 = date_range('2012-1-2', periods=3, freq='D') - rs = Series(v2) - Series(v1) - xp = Series(1e9 * 3600 * 24, - rs.index).astype('int64').astype('timedelta64[ns]') - assert_series_equal(rs, xp) - assert rs.dtype == 'timedelta64[ns]' + result = f(s, shifted) + expected = f(s.astype(float), 
shifted.astype(float)) + assert_series_equal(result, expected) - df = DataFrame(dict(A=v1)) - td = Series([timedelta(days=i) for i in range(3)]) - assert td.dtype == 'timedelta64[ns]' + def test_comparison_invalid(self): + # GH4968 + # invalid date/int comparisons + s = Series(range(5)) + s2 = Series(date_range('20010101', periods=5)) - # series on the rhs - result = df['A'] - df['A'].shift() - assert result.dtype == 'timedelta64[ns]' + for (x, y) in [(s, s2), (s2, s)]: + pytest.raises(TypeError, lambda: x == y) + pytest.raises(TypeError, lambda: x != y) + pytest.raises(TypeError, lambda: x >= y) + pytest.raises(TypeError, lambda: x > y) + pytest.raises(TypeError, lambda: x < y) + pytest.raises(TypeError, lambda: x <= y) - result = df['A'] + td - assert result.dtype == 'M8[ns]' + def test_unequal_categorical_comparison_raises_type_error(self): + # unequal comparison should raise for unordered cats + cat = Series(Categorical(list("abc"))) - # scalar Timestamp on rhs - maxa = df['A'].max() - assert isinstance(maxa, Timestamp) + def f(): + cat > "b" - resultb = df['A'] - df['A'].max() - assert resultb.dtype == 'timedelta64[ns]' + pytest.raises(TypeError, f) + cat = Series(Categorical(list("abc"), ordered=False)) - # timestamp on lhs - result = resultb + df['A'] - values = [Timestamp('20111230'), Timestamp('20120101'), - Timestamp('20120103')] - expected = Series(values, name='A') - assert_series_equal(result, expected) + def f(): + cat > "b" - # datetimes on rhs - result = df['A'] - datetime(2001, 1, 1) - expected = Series( - [timedelta(days=4017 + i) for i in range(3)], name='A') - assert_series_equal(result, expected) - assert result.dtype == 'm8[ns]' + pytest.raises(TypeError, f) - d = datetime(2001, 1, 1, 3, 4) - resulta = df['A'] - d - assert resulta.dtype == 'm8[ns]' + # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 + # and following comparisons with scalars not in categories should raise + # for unequal comps, but not for equal/not 
equal + cat = Series(Categorical(list("abc"), ordered=True)) - # roundtrip - resultb = resulta + d - assert_series_equal(df['A'], resultb) + pytest.raises(TypeError, lambda: cat < "d") + pytest.raises(TypeError, lambda: cat > "d") + pytest.raises(TypeError, lambda: "d" < cat) + pytest.raises(TypeError, lambda: "d" > cat) - # timedeltas on rhs - td = timedelta(days=1) - resulta = df['A'] + td - resultb = resulta - td - assert_series_equal(resultb, df['A']) - assert resultb.dtype == 'M8[ns]' + tm.assert_series_equal(cat == "d", Series([False, False, False])) + tm.assert_series_equal(cat != "d", Series([True, True, True])) - # roundtrip - td = timedelta(minutes=5, seconds=3) - resulta = df['A'] + td - resultb = resulta - td - assert_series_equal(df['A'], resultb) - assert resultb.dtype == 'M8[ns]' + def test_more_na_comparisons(self): + for dtype in [None, object]: + left = Series(['a', np.nan, 'c'], dtype=dtype) + right = Series(['a', np.nan, 'd'], dtype=dtype) - # inplace - value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) - rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) - assert rs[2] == value + result = left == right + expected = Series([True, False, False]) + assert_series_equal(result, expected) - def test_operator_series_comparison_zerorank(self): - # GH 13006 - result = np.float64(0) > pd.Series([1, 2, 3]) - expected = 0.0 > pd.Series([1, 2, 3]) - tm.assert_series_equal(result, expected) - result = pd.Series([1, 2, 3]) < np.float64(0) - expected = pd.Series([1, 2, 3]) < 0.0 - tm.assert_series_equal(result, expected) - result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2]) - expected = 0.0 > pd.Series([1, 2, 3]) - tm.assert_series_equal(result, expected) + result = left != right + expected = Series([False, True, True]) + assert_series_equal(result, expected) - def test_timedeltas_with_DateOffset(self): + result = left == np.nan + expected = Series([False, False, False]) + assert_series_equal(result, expected) - # GH 4532 - # operate with 
pd.offsets - s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + result = left != np.nan + expected = Series([True, True, True]) + assert_series_equal(result, expected) - result = s + pd.offsets.Second(5) - result2 = pd.offsets.Second(5) + s - expected = Series([Timestamp('20130101 9:01:05'), Timestamp( - '20130101 9:02:05')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) + def test_nat_comparisons(self): + data = [([pd.Timestamp('2011-01-01'), pd.NaT, + pd.Timestamp('2011-01-03')], + [pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]), - result = s - pd.offsets.Second(5) - result2 = -pd.offsets.Second(5) + s - expected = Series([Timestamp('20130101 9:00:55'), Timestamp( - '20130101 9:01:55')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) + ([pd.Timedelta('1 days'), pd.NaT, + pd.Timedelta('3 days')], + [pd.NaT, pd.NaT, pd.Timedelta('3 days')]), - result = s + pd.offsets.Milli(5) - result2 = pd.offsets.Milli(5) + s - expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp( - '20130101 9:02:00.005')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) + ([pd.Period('2011-01', freq='M'), pd.NaT, + pd.Period('2011-03', freq='M')], + [pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])] - result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5) - expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp( - '20130101 9:07:00.005')]) - assert_series_equal(result, expected) + # add lhs / rhs switched data + data = data + [(r, l) for l, r in data] - # operate with np.timedelta64 correctly - result = s + np.timedelta64(1, 's') - result2 = np.timedelta64(1, 's') + s - expected = Series([Timestamp('20130101 9:01:01'), Timestamp( - '20130101 9:02:01')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) + for l, r in data: + for dtype in [None, object]: + left = Series(l, dtype=dtype) - result = s + np.timedelta64(5, 'ms') - 
result2 = np.timedelta64(5, 'ms') + s - expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp( - '20130101 9:02:00.005')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) + # Series, Index + for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]: + expected = Series([False, False, True]) + assert_series_equal(left == right, expected) - # valid DateOffsets - for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', - 'Nano']: - op = getattr(pd.offsets, do) - s + op(5) - op(5) + s + expected = Series([True, True, False]) + assert_series_equal(left != right, expected) - def test_timedelta_series_ops(self): - # GH11925 + expected = Series([False, False, False]) + assert_series_equal(left < right, expected) - s = Series(timedelta_range('1 day', periods=3)) - ts = Timestamp('2012-01-01') - expected = Series(date_range('2012-01-02', periods=3)) - assert_series_equal(ts + s, expected) - assert_series_equal(s + ts, expected) + expected = Series([False, False, False]) + assert_series_equal(left > right, expected) - expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) - assert_series_equal(ts - s, expected2) - assert_series_equal(ts + (-s), expected2) + expected = Series([False, False, True]) + assert_series_equal(left >= right, expected) - def test_timedelta64_operations_with_DateOffset(self): - # GH 10699 - td = Series([timedelta(minutes=5, seconds=3)] * 3) - result = td + pd.offsets.Minute(1) - expected = Series([timedelta(minutes=6, seconds=3)] * 3) - assert_series_equal(result, expected) + expected = Series([False, False, True]) + assert_series_equal(left <= right, expected) - result = td - pd.offsets.Minute(1) - expected = Series([timedelta(minutes=4, seconds=3)] * 3) - assert_series_equal(result, expected) + def test_nat_comparisons_scalar(self): + data = [[pd.Timestamp('2011-01-01'), pd.NaT, + pd.Timestamp('2011-01-03')], - result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3), - 
pd.offsets.Hour(2)]) - expected = Series([timedelta(minutes=6, seconds=3), timedelta( - minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)]) - assert_series_equal(result, expected) + [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')], - result = td + pd.offsets.Minute(1) + pd.offsets.Second(12) - expected = Series([timedelta(minutes=6, seconds=15)] * 3) - assert_series_equal(result, expected) + [pd.Period('2011-01', freq='M'), pd.NaT, + pd.Period('2011-03', freq='M')]] - # valid DateOffsets - for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', - 'Nano']: - op = getattr(pd.offsets, do) - td + op(5) - op(5) + td - td - op(5) - op(5) - td + for l in data: + for dtype in [None, object]: + left = Series(l, dtype=dtype) - def test_timedelta64_operations_with_timedeltas(self): + expected = Series([False, False, False]) + assert_series_equal(left == pd.NaT, expected) + assert_series_equal(pd.NaT == left, expected) - # td operate with td - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td2 = timedelta(minutes=5, seconds=4) - result = td1 - td2 - expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta( - seconds=1)] * 3) - assert result.dtype == 'm8[ns]' - assert_series_equal(result, expected) + expected = Series([True, True, True]) + assert_series_equal(left != pd.NaT, expected) + assert_series_equal(pd.NaT != left, expected) - result2 = td2 - td1 - expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta( - seconds=0)] * 3)) - assert_series_equal(result2, expected) + expected = Series([False, False, False]) + assert_series_equal(left < pd.NaT, expected) + assert_series_equal(pd.NaT > left, expected) + assert_series_equal(left <= pd.NaT, expected) + assert_series_equal(pd.NaT >= left, expected) - # roundtrip - assert_series_equal(result + td2, td1) + assert_series_equal(left > pd.NaT, expected) + assert_series_equal(pd.NaT < left, expected) + assert_series_equal(left >= pd.NaT, expected) + assert_series_equal(pd.NaT 
<= left, expected) - # Now again, using pd.to_timedelta, which should build - # a Series or a scalar, depending on input. - td1 = Series(pd.to_timedelta(['00:05:03'] * 3)) - td2 = pd.to_timedelta('00:05:04') - result = td1 - td2 - expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta( - seconds=1)] * 3) - assert result.dtype == 'm8[ns]' - assert_series_equal(result, expected) + def test_comparison_different_length(self): + a = Series(['a', 'b', 'c']) + b = Series(['b', 'a']) + pytest.raises(ValueError, a.__lt__, b) - result2 = td2 - td1 - expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta( - seconds=0)] * 3)) - assert_series_equal(result2, expected) + a = Series([1, 2]) + b = Series([2, 3, 4]) + pytest.raises(ValueError, a.__eq__, b) - # roundtrip - assert_series_equal(result + td2, td1) + def test_comparison_label_based(self): - def test_timedelta64_operations_with_integers(self): + # GH 4947 + # comparisons should be label based - # GH 4521 - # divide/multiply by integers - startdate = Series(date_range('2013-01-01', '2013-01-03')) - enddate = Series(date_range('2013-03-01', '2013-03-03')) + a = Series([True, False, True], list('bca')) + b = Series([False, True, False], list('abc')) - s1 = enddate - startdate - s1[2] = np.nan - s2 = Series([2, 3, 4]) - expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]') - expected[2] = np.nan - result = s1 / s2 + expected = Series([False, True, False], list('abc')) + result = a & b assert_series_equal(result, expected) - s2 = Series([20, 30, 40]) - expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]') - expected[2] = np.nan - result = s1 / s2 + expected = Series([True, True, False], list('abc')) + result = a | b assert_series_equal(result, expected) - result = s1 / 2 - expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]') - expected[2] = np.nan + expected = Series([True, False, False], list('abc')) + result = a ^ b assert_series_equal(result, expected) - s2 = 
Series([20, 30, 40]) - expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]') - expected[2] = np.nan - result = s1 * s2 + # rhs is bigger + a = Series([True, False, True], list('bca')) + b = Series([False, True, False, True], list('abcd')) + + expected = Series([False, True, False, False], list('abcd')) + result = a & b assert_series_equal(result, expected) - for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16', - 'uint8']: - s2 = Series([20, 30, 40], dtype=dtype) - expected = Series( - s1.values.astype(np.int64) * s2.astype(np.int64), - dtype='m8[ns]') - expected[2] = np.nan - result = s1 * s2 - assert_series_equal(result, expected) + expected = Series([True, True, False, False], list('abcd')) + result = a | b + assert_series_equal(result, expected) - result = s1 * 2 - expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]') - expected[2] = np.nan + # filling + + # vs empty + result = a & Series([]) + expected = Series([False, False, False], list('bca')) assert_series_equal(result, expected) - result = s1 * -1 - expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]') - expected[2] = np.nan + result = a | Series([]) + expected = Series([True, False, True], list('bca')) assert_series_equal(result, expected) - # invalid ops - assert_series_equal(s1 / s2.astype(float), - Series([Timedelta('2 days 22:48:00'), Timedelta( - '1 days 23:12:00'), Timedelta('NaT')])) - assert_series_equal(s1 / 2.0, - Series([Timedelta('29 days 12:00:00'), Timedelta( - '29 days 12:00:00'), Timedelta('NaT')])) + # vs non-matching + result = a & Series([1], ['z']) + expected = Series([False, False, False, False], list('abcz')) + assert_series_equal(result, expected) - for op in ['__add__', '__sub__']: - sop = getattr(s1, op, None) - if sop is not None: - pytest.raises(TypeError, sop, 1) - pytest.raises(TypeError, sop, s2.values) + result = a | Series([1], ['z']) + expected = Series([True, True, False, False], list('abcz')) + 
assert_series_equal(result, expected) - def test_timedelta64_conversions(self): - startdate = Series(date_range('2013-01-01', '2013-01-03')) - enddate = Series(date_range('2013-03-01', '2013-03-03')) + # identity + # we would like s[s|e] == s to hold for any e, whether empty or not + for e in [Series([]), Series([1], ['z']), + Series(np.nan, b.index), Series(np.nan, a.index)]: + result = a[a | e] + assert_series_equal(result, a[a]) - s1 = enddate - startdate - s1[2] = np.nan + for e in [Series(['z'])]: + if compat.PY3: + with tm.assert_produces_warning(RuntimeWarning): + result = a[a | e] + else: + result = a[a | e] + assert_series_equal(result, a[a]) - for m in [1, 3, 10]: - for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']: + # vs scalars + index = list('bca') + t = Series([True, False, True]) - # op - expected = s1.apply(lambda x: x / np.timedelta64(m, unit)) - result = s1 / np.timedelta64(m, unit) - assert_series_equal(result, expected) + for v in [True, 1, 2]: + result = Series([True, False, True], index=index) | v + expected = Series([True, True, True], index=index) + assert_series_equal(result, expected) - if m == 1 and unit != 'ns': + for v in [np.nan, 'foo']: + pytest.raises(TypeError, lambda: t | v) - # astype - result = s1.astype("timedelta64[{0}]".format(unit)) - assert_series_equal(result, expected) + for v in [False, 0]: + result = Series([True, False, True], index=index) | v + expected = Series([True, False, True], index=index) + assert_series_equal(result, expected) - # reverse op - expected = s1.apply( - lambda x: Timedelta(np.timedelta64(m, unit)) / x) - result = np.timedelta64(m, unit) / s1 + for v in [True, 1]: + result = Series([True, False, True], index=index) & v + expected = Series([True, False, True], index=index) + assert_series_equal(result, expected) - # astype - s = Series(date_range('20130101', periods=3)) - result = s.astype(object) - assert isinstance(result.iloc[0], datetime) - assert result.dtype == np.object_ + for v in [False, 
0]: + result = Series([True, False, True], index=index) & v + expected = Series([False, False, False], index=index) + assert_series_equal(result, expected) + for v in [np.nan]: + pytest.raises(TypeError, lambda: t & v) - result = s1.astype(object) - assert isinstance(result.iloc[0], timedelta) - assert result.dtype == np.object_ + def test_comparison_flex_basic(self): + left = pd.Series(np.random.randn(10)) + right = pd.Series(np.random.randn(10)) - def test_timedelta64_equal_timedelta_supported_ops(self): - ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'), - Timestamp('20130228 22:00:00'), Timestamp( - '20130228 21:00:00')]) + assert_series_equal(left.eq(right), left == right) + assert_series_equal(left.ne(right), left != right) + assert_series_equal(left.le(right), left < right) + assert_series_equal(left.lt(right), left <= right) + assert_series_equal(left.gt(right), left > right) + assert_series_equal(left.ge(right), left >= right) - intervals = 'D', 'h', 'm', 's', 'us' + # axis + for axis in [0, None, 'index']: + assert_series_equal(left.eq(right, axis=axis), left == right) + assert_series_equal(left.ne(right, axis=axis), left != right) + assert_series_equal(left.le(right, axis=axis), left < right) + assert_series_equal(left.lt(right, axis=axis), left <= right) + assert_series_equal(left.gt(right, axis=axis), left > right) + assert_series_equal(left.ge(right, axis=axis), left >= right) - # TODO: unused - # npy16_mappings = {'D': 24 * 60 * 60 * 1000000, - # 'h': 60 * 60 * 1000000, - # 'm': 60 * 1000000, - # 's': 1000000, - # 'us': 1} + # + msg = 'No axis named 1 for object type' + for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']: + with tm.assert_raises_regex(ValueError, msg): + getattr(left, op)(right, axis=1) - def timedelta64(*args): - return sum(starmap(np.timedelta64, zip(args, intervals))) + def test_comparison_flex_alignment(self): + left = Series([1, 3, 2], index=list('abc')) + right = Series([2, 2, 2], index=list('bcd')) - for op, d, 
h, m, s, us in product([operator.add, operator.sub], - *([range(2)] * 5)): - nptd = timedelta64(d, h, m, s, us) - pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, - microseconds=us) - lhs = op(ser, nptd) - rhs = op(ser, pytd) + exp = pd.Series([False, False, True, False], index=list('abcd')) + assert_series_equal(left.eq(right), exp) - try: - assert_series_equal(lhs, rhs) - except: - raise AssertionError( - "invalid comparsion [op->{0},d->{1},h->{2},m->{3}," - "s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, - us, lhs, rhs)) + exp = pd.Series([True, True, False, True], index=list('abcd')) + assert_series_equal(left.ne(right), exp) - def test_operators_datetimelike(self): - def run_ops(ops, get_ser, test_ser): + exp = pd.Series([False, False, True, False], index=list('abcd')) + assert_series_equal(left.le(right), exp) - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - for op_str in ops: - op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate'): - op(test_ser) + exp = pd.Series([False, False, False, False], index=list('abcd')) + assert_series_equal(left.lt(right), exp) - # ## timedelta64 ### - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - td2 = timedelta(minutes=5, seconds=4) - ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__', - '__rfloordiv__', '__rpow__'] - run_ops(ops, td1, td2) - td1 + td2 - td2 + td1 - td1 - td2 - td2 - td1 - td1 / td2 - td2 / td1 + exp = pd.Series([False, True, True, False], index=list('abcd')) + assert_series_equal(left.ge(right), exp) - # ## datetime64 ### - dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), - Timestamp('20120103')]) - dt1.iloc[2] = np.nan - dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), - Timestamp('20120104')]) - ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__radd__', '__rmul__', '__rfloordiv__', - '__rtruediv__', 
'__rdiv__', '__rpow__'] - run_ops(ops, dt1, dt2) - dt1 - dt2 - dt2 - dt1 + exp = pd.Series([False, True, False, False], index=list('abcd')) + assert_series_equal(left.gt(right), exp) - # ## datetime64 with timetimedelta ### - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] - run_ops(ops, dt1, td1) - dt1 + td1 - td1 + dt1 - dt1 - td1 - # TODO: Decide if this ought to work. - # td1 - dt1 + def test_comparison_flex_alignment_fill(self): + left = Series([1, 3, 2], index=list('abc')) + right = Series([2, 2, 2], index=list('bcd')) - # ## timetimedelta with datetime64 ### - ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__', - '__rdiv__', '__rpow__'] - run_ops(ops, td1, dt1) - td1 + dt1 - dt1 + td1 + exp = pd.Series([False, False, True, True], index=list('abcd')) + assert_series_equal(left.eq(right, fill_value=2), exp) - # 8260, 10763 - # datetime64 with tz - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] + exp = pd.Series([True, True, False, False], index=list('abcd')) + assert_series_equal(left.ne(right, fill_value=2), exp) - tz = 'US/Eastern' - dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, - tz=tz), name='foo') - dt2 = dt1.copy() - dt2.iloc[2] = np.nan - td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) - td2 = td1.copy() - td2.iloc[1] = np.nan - run_ops(ops, dt1, td1) + exp = pd.Series([False, False, True, True], index=list('abcd')) + assert_series_equal(left.le(right, fill_value=0), exp) - result = dt1 + td1[0] - exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) + exp = pd.Series([False, False, False, True], index=list('abcd')) + assert_series_equal(left.lt(right, fill_value=0), exp) - result = dt2 + td2[0] - exp = 
(dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) + exp = pd.Series([True, True, True, False], index=list('abcd')) + assert_series_equal(left.ge(right, fill_value=0), exp) - # odd numpy behavior with scalar timedeltas - result = td1[0] + dt1 - exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) + exp = pd.Series([True, True, False, False], index=list('abcd')) + assert_series_equal(left.gt(right, fill_value=0), exp) - result = td2[0] + dt2 - exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) + def test_ne(self): + ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) + expected = [True, True, False, True, True] + assert tm.equalContents(ts.index != 5, expected) + assert tm.equalContents(~(ts.index == 5), expected) - result = dt1 - td1[0] - exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - pytest.raises(TypeError, lambda: td1[0] - dt1) + def test_comp_ops_df_compat(self): + # GH 1134 + s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') - result = dt2 - td2[0] - exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - pytest.raises(TypeError, lambda: td2[0] - dt2) + s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - result = dt1 + td1 - exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz) - assert_series_equal(result, exp) + for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: - result = dt2 + td2 - exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz) - assert_series_equal(result, exp) + msg = "Can only compare identically-labeled Series objects" + with tm.assert_raises_regex(ValueError, msg): + l == r - result = dt1 - td1 - exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz) - assert_series_equal(result, 
exp) + with tm.assert_raises_regex(ValueError, msg): + l != r - result = dt2 - td2 - exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) - assert_series_equal(result, exp) + with tm.assert_raises_regex(ValueError, msg): + l < r - pytest.raises(TypeError, lambda: td1 - dt1) - pytest.raises(TypeError, lambda: td2 - dt2) + msg = "Can only compare identically-labeled DataFrame objects" + with tm.assert_raises_regex(ValueError, msg): + l.to_frame() == r.to_frame() - def test_sub_datetime_compat(self): - # see gh-14088 - s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT]) - dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc) - exp = Series([Timedelta('1 days'), pd.NaT]) - assert_series_equal(s - dt, exp) - assert_series_equal(s - Timestamp(dt), exp) + with tm.assert_raises_regex(ValueError, msg): + l.to_frame() != r.to_frame() - def test_sub_single_tz(self): - # GH12290 - s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')]) - s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')]) - result = s1 - s2 - expected = Series([Timedelta('2days')]) - assert_series_equal(result, expected) - result = s2 - s1 - expected = Series([Timedelta('-2days')]) - assert_series_equal(result, expected) + with tm.assert_raises_regex(ValueError, msg): + l.to_frame() < r.to_frame() - def test_ops_nat(self): - # GH 11349 - timedelta_series = Series([NaT, Timedelta('1s')]) - datetime_series = Series([NaT, Timestamp('19900315')]) - nat_series_dtype_timedelta = Series( - [NaT, NaT], dtype='timedelta64[ns]') - nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') - single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') - single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') - # subtraction - assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta) - assert_series_equal(-NaT + timedelta_series, - nat_series_dtype_timedelta) +class TestSeriesArithmetic(object): + def test_divide_decimal(self): + """ resolves 
issue #9787 """ + from decimal import Decimal - assert_series_equal(timedelta_series - single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(-single_nat_dtype_timedelta + timedelta_series, - nat_series_dtype_timedelta) + expected = Series([Decimal(5)]) - assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp) - assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) + s = Series([Decimal(10)]) + s = s / Decimal(2) - assert_series_equal(datetime_series - single_nat_dtype_datetime, - nat_series_dtype_timedelta) - with pytest.raises(TypeError): - -single_nat_dtype_datetime + datetime_series + assert_series_equal(expected, s) - assert_series_equal(datetime_series - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(-single_nat_dtype_timedelta + datetime_series, - nat_series_dtype_timestamp) + s = Series([Decimal(10)]) + s = s // Decimal(2) - # without a Series wrapping the NaT, it is ambiguous - # whether it is a datetime64 or timedelta64 - # defaults to interpreting it as timedelta64 - assert_series_equal(nat_series_dtype_timestamp - NaT, - nat_series_dtype_timestamp) - assert_series_equal(-NaT + nat_series_dtype_timestamp, - nat_series_dtype_timestamp) + assert_series_equal(expected, s) - assert_series_equal(nat_series_dtype_timestamp - - single_nat_dtype_datetime, - nat_series_dtype_timedelta) - with pytest.raises(TypeError): - -single_nat_dtype_datetime + nat_series_dtype_timestamp + def test_div(self): + with np.errstate(all='ignore'): + # no longer do integer div for any ops, but deal with the 0's + p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + result = p['first'] / p['second'] + expected = Series( + p['first'].values.astype(float) / p['second'].values, + dtype='float64') + expected.iloc[0:3] = np.inf + assert_series_equal(result, expected) - assert_series_equal(nat_series_dtype_timestamp - - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - 
assert_series_equal(-single_nat_dtype_timedelta + - nat_series_dtype_timestamp, - nat_series_dtype_timestamp) + result = p['first'] / 0 + expected = Series(np.inf, index=p.index, name='first') + assert_series_equal(result, expected) - with pytest.raises(TypeError): - timedelta_series - single_nat_dtype_datetime + p = p.astype('float64') + result = p['first'] / p['second'] + expected = Series(p['first'].values / p['second'].values) + assert_series_equal(result, expected) - # addition - assert_series_equal(nat_series_dtype_timestamp + NaT, - nat_series_dtype_timestamp) - assert_series_equal(NaT + nat_series_dtype_timestamp, - nat_series_dtype_timestamp) + p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]}) + result = p['first'] / p['second'] + assert_series_equal(result, p['first'].astype('float64'), + check_names=False) + assert result.name is None + assert not result.equals(p['second'] / p['first']) - assert_series_equal(nat_series_dtype_timestamp + - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timestamp, - nat_series_dtype_timestamp) + # inf signing + s = Series([np.nan, 1., -1.]) + result = s / 0 + expected = Series([np.nan, np.inf, -np.inf]) + assert_series_equal(result, expected) - assert_series_equal(nat_series_dtype_timedelta + NaT, - nat_series_dtype_timedelta) - assert_series_equal(NaT + nat_series_dtype_timedelta, - nat_series_dtype_timedelta) + # float/integer issue + # GH 7785 + p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)}) + expected = Series([-0.01, -np.inf]) - assert_series_equal(nat_series_dtype_timedelta + - single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timedelta, - nat_series_dtype_timedelta) + result = p['second'].div(p['first']) + assert_series_equal(result, expected, check_names=False) - assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta) 
- assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta) + result = p['second'] / p['first'] + assert_series_equal(result, expected) - assert_series_equal(timedelta_series + single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(single_nat_dtype_timedelta + timedelta_series, - nat_series_dtype_timedelta) + # GH 9144 + s = Series([-1, 0, 1]) - assert_series_equal(nat_series_dtype_timestamp + NaT, - nat_series_dtype_timestamp) - assert_series_equal(NaT + nat_series_dtype_timestamp, - nat_series_dtype_timestamp) + result = 0 / s + expected = Series([0.0, nan, 0.0]) + assert_series_equal(result, expected) - assert_series_equal(nat_series_dtype_timestamp + - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timestamp, - nat_series_dtype_timestamp) + result = s / 0 + expected = Series([-inf, nan, inf]) + assert_series_equal(result, expected) - assert_series_equal(nat_series_dtype_timedelta + NaT, - nat_series_dtype_timedelta) - assert_series_equal(NaT + nat_series_dtype_timedelta, - nat_series_dtype_timedelta) + result = s // 0 + expected = Series([-inf, nan, inf]) + assert_series_equal(result, expected) - assert_series_equal(nat_series_dtype_timedelta + - single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timedelta, - nat_series_dtype_timedelta) + # GH 8674 + zero_array = np.array([0] * 5) + data = np.random.randn(5) + expected = pd.Series([0.] 
* 5) + result = zero_array / pd.Series(data) + assert_series_equal(result, expected) - assert_series_equal(nat_series_dtype_timedelta + - single_nat_dtype_datetime, - nat_series_dtype_timestamp) - assert_series_equal(single_nat_dtype_datetime + - nat_series_dtype_timedelta, - nat_series_dtype_timestamp) + result = pd.Series(zero_array) / data + assert_series_equal(result, expected) - # multiplication - assert_series_equal(nat_series_dtype_timedelta * 1.0, - nat_series_dtype_timedelta) - assert_series_equal(1.0 * nat_series_dtype_timedelta, - nat_series_dtype_timedelta) + result = pd.Series(zero_array) / pd.Series(data) + assert_series_equal(result, expected) - assert_series_equal(timedelta_series * 1, timedelta_series) - assert_series_equal(1 * timedelta_series, timedelta_series) - assert_series_equal(timedelta_series * 1.5, - Series([NaT, Timedelta('1.5s')])) - assert_series_equal(1.5 * timedelta_series, - Series([NaT, Timedelta('1.5s')])) +class TestTimedeltaSeriesArithmetic(object): + def test_timedelta_series_ops(self): + # GH11925 + s = Series(timedelta_range('1 day', periods=3)) + ts = Timestamp('2012-01-01') + expected = Series(date_range('2012-01-02', periods=3)) + assert_series_equal(ts + s, expected) + assert_series_equal(s + ts, expected) - assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta) - assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta) + expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) + assert_series_equal(ts - s, expected2) + assert_series_equal(ts + (-s), expected2) - with pytest.raises(TypeError): - datetime_series * 1 - with pytest.raises(TypeError): - nat_series_dtype_timestamp * 1 - with pytest.raises(TypeError): - datetime_series * 1.0 - with pytest.raises(TypeError): - nat_series_dtype_timestamp * 1.0 + def test_timedelta64_operations_with_integers(self): + # GH 4521 + # divide/multiply by integers + startdate = Series(date_range('2013-01-01', '2013-01-03')) + enddate = 
Series(date_range('2013-03-01', '2013-03-03')) - # division - assert_series_equal(timedelta_series / 2, - Series([NaT, Timedelta('0.5s')])) - assert_series_equal(timedelta_series / 2.0, - Series([NaT, Timedelta('0.5s')])) - assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta) - with pytest.raises(TypeError): - nat_series_dtype_timestamp / 1.0 - with pytest.raises(TypeError): - nat_series_dtype_timestamp / 1 + s1 = enddate - startdate + s1[2] = np.nan + s2 = Series([2, 3, 4]) + expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]') + expected[2] = np.nan + result = s1 / s2 + assert_series_equal(result, expected) - def test_ops_datetimelike_align(self): - # GH 7500 - # datetimelike ops need to align - dt = Series(date_range('2012-1-1', periods=3, freq='D')) - dt.iloc[2] = np.nan - dt2 = dt[::-1] + s2 = Series([20, 30, 40]) + expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]') + expected[2] = np.nan + result = s1 / s2 + assert_series_equal(result, expected) - expected = Series([timedelta(0), timedelta(0), pd.NaT]) - # name is reset - result = dt2 - dt + result = s1 / 2 + expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]') + expected[2] = np.nan assert_series_equal(result, expected) - expected = Series(expected, name=0) - result = (dt2.to_frame() - dt.to_frame())[0] + s2 = Series([20, 30, 40]) + expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]') + expected[2] = np.nan + result = s1 * s2 assert_series_equal(result, expected) - def test_object_comparisons(self): - s = Series(['a', 'b', np.nan, 'c', 'a']) + for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16', + 'uint8']: + s2 = Series([20, 30, 40], dtype=dtype) + expected = Series( + s1.values.astype(np.int64) * s2.astype(np.int64), + dtype='m8[ns]') + expected[2] = np.nan + result = s1 * s2 + assert_series_equal(result, expected) - result = s == 'a' - expected = Series([True, False, False, False, True]) + result = s1 * 2 + 
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]') + expected[2] = np.nan assert_series_equal(result, expected) - result = s < 'a' - expected = Series([False, False, False, False, False]) + result = s1 * -1 + expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]') + expected[2] = np.nan assert_series_equal(result, expected) - result = s != 'a' - expected = -(s == 'a') + # invalid ops + assert_series_equal(s1 / s2.astype(float), + Series([Timedelta('2 days 22:48:00'), Timedelta( + '1 days 23:12:00'), Timedelta('NaT')])) + assert_series_equal(s1 / 2.0, + Series([Timedelta('29 days 12:00:00'), Timedelta( + '29 days 12:00:00'), Timedelta('NaT')])) + + for op in ['__add__', '__sub__']: + sop = getattr(s1, op, None) + if sop is not None: + pytest.raises(TypeError, sop, 1) + pytest.raises(TypeError, sop, s2.values) + + def test_timedelta64_operations_with_DateOffset(self): + # GH 10699 + td = Series([timedelta(minutes=5, seconds=3)] * 3) + result = td + pd.offsets.Minute(1) + expected = Series([timedelta(minutes=6, seconds=3)] * 3) assert_series_equal(result, expected) - def test_categorical_comparisons(self): + result = td - pd.offsets.Minute(1) + expected = Series([timedelta(minutes=4, seconds=3)] * 3) + assert_series_equal(result, expected) - # GH 8938 - # allow equality comparisons - a = Series(list('abc'), dtype="category") - b = Series(list('abc'), dtype="object") - c = Series(['a', 'b', 'cc'], dtype="object") - d = Series(list('acb'), dtype="object") - e = Categorical(list('abc')) - f = Categorical(list('acb')) + result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3), + pd.offsets.Hour(2)]) + expected = Series([timedelta(minutes=6, seconds=3), timedelta( + minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)]) + assert_series_equal(result, expected) - # vs scalar - assert not (a == 'a').all() - assert ((a != 'a') == ~(a == 'a')).all() + result = td + pd.offsets.Minute(1) + pd.offsets.Second(12) + expected = 
Series([timedelta(minutes=6, seconds=15)] * 3) + assert_series_equal(result, expected) - assert not ('a' == a).all() - assert (a == 'a')[0] - assert ('a' == a)[0] - assert not ('a' != a)[0] + # valid DateOffsets + for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', + 'Nano']: + op = getattr(pd.offsets, do) + td + op(5) + op(5) + td + td - op(5) + op(5) - td - # vs list-like - assert (a == a).all() - assert not (a != a).all() + def test_timedelta64_operations_with_timedeltas(self): + # td operate with td + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td2 = timedelta(minutes=5, seconds=4) + result = td1 - td2 + expected = (Series([timedelta(seconds=0)] * 3) - + Series([timedelta(seconds=1)] * 3)) + assert result.dtype == 'm8[ns]' + assert_series_equal(result, expected) - assert (a == list(a)).all() - assert (a == b).all() - assert (b == a).all() - assert ((~(a == b)) == (a != b)).all() - assert ((~(b == a)) == (b != a)).all() + result2 = td2 - td1 + expected = (Series([timedelta(seconds=1)] * 3) - + Series([timedelta(seconds=0)] * 3)) + assert_series_equal(result2, expected) - assert not (a == c).all() - assert not (c == a).all() - assert not (a == d).all() - assert not (d == a).all() + # roundtrip + assert_series_equal(result + td2, td1) - # vs a cat-like - assert (a == e).all() - assert (e == a).all() - assert not (a == f).all() - assert not (f == a).all() + # Now again, using pd.to_timedelta, which should build + # a Series or a scalar, depending on input. 
+ td1 = Series(pd.to_timedelta(['00:05:03'] * 3)) + td2 = pd.to_timedelta('00:05:04') + result = td1 - td2 + expected = (Series([timedelta(seconds=0)] * 3) - + Series([timedelta(seconds=1)] * 3)) + assert result.dtype == 'm8[ns]' + assert_series_equal(result, expected) - assert ((~(a == e) == (a != e)).all()) - assert ((~(e == a) == (e != a)).all()) - assert ((~(a == f) == (a != f)).all()) - assert ((~(f == a) == (f != a)).all()) + result2 = td2 - td1 + expected = (Series([timedelta(seconds=1)] * 3) - + Series([timedelta(seconds=0)] * 3)) + assert_series_equal(result2, expected) - # non-equality is not comparable - pytest.raises(TypeError, lambda: a < b) - pytest.raises(TypeError, lambda: b < a) - pytest.raises(TypeError, lambda: a > b) - pytest.raises(TypeError, lambda: b > a) + # roundtrip + assert_series_equal(result + td2, td1) - def test_comparison_tuples(self): - # GH11339 - # comparisons vs tuple - s = Series([(1, 1), (1, 2)]) + def test_operators_timedelta64(self): + # series ops + v1 = date_range('2012-1-1', periods=3, freq='D') + v2 = date_range('2012-1-2', periods=3, freq='D') + rs = Series(v2) - Series(v1) + xp = Series(1e9 * 3600 * 24, + rs.index).astype('int64').astype('timedelta64[ns]') + assert_series_equal(rs, xp) + assert rs.dtype == 'timedelta64[ns]' - result = s == (1, 2) - expected = Series([False, True]) - assert_series_equal(result, expected) + df = DataFrame(dict(A=v1)) + td = Series([timedelta(days=i) for i in range(3)]) + assert td.dtype == 'timedelta64[ns]' - result = s != (1, 2) - expected = Series([True, False]) - assert_series_equal(result, expected) + # series on the rhs + result = df['A'] - df['A'].shift() + assert result.dtype == 'timedelta64[ns]' - result = s == (0, 0) - expected = Series([False, False]) + result = df['A'] + td + assert result.dtype == 'M8[ns]' + + # scalar Timestamp on rhs + maxa = df['A'].max() + assert isinstance(maxa, Timestamp) + + resultb = df['A'] - df['A'].max() + assert resultb.dtype == 'timedelta64[ns]' + 
+ # timestamp on lhs + result = resultb + df['A'] + values = [Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')] + expected = Series(values, name='A') assert_series_equal(result, expected) - result = s != (0, 0) - expected = Series([True, True]) + # datetimes on rhs + result = df['A'] - datetime(2001, 1, 1) + expected = Series( + [timedelta(days=4017 + i) for i in range(3)], name='A') assert_series_equal(result, expected) + assert result.dtype == 'm8[ns]' + + d = datetime(2001, 1, 1, 3, 4) + resulta = df['A'] - d + assert resulta.dtype == 'm8[ns]' + + # roundtrip + resultb = resulta + d + assert_series_equal(df['A'], resultb) + + # timedeltas on rhs + td = timedelta(days=1) + resulta = df['A'] + td + resultb = resulta - td + assert_series_equal(resultb, df['A']) + assert resultb.dtype == 'M8[ns]' + + # roundtrip + td = timedelta(minutes=5, seconds=3) + resulta = df['A'] + td + resultb = resulta - td + assert_series_equal(df['A'], resultb) + assert resultb.dtype == 'M8[ns]' + + # inplace + value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) + rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) + assert rs[2] == value + + def test_timedelta64_ops_nat(self): + # GH 11349 + timedelta_series = Series([NaT, Timedelta('1s')]) + nat_series_dtype_timedelta = Series([NaT, NaT], + dtype='timedelta64[ns]') + single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') + + # subtraction + assert_series_equal(timedelta_series - NaT, + nat_series_dtype_timedelta) + assert_series_equal(-NaT + timedelta_series, + nat_series_dtype_timedelta) + + assert_series_equal(timedelta_series - single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + assert_series_equal(-single_nat_dtype_timedelta + timedelta_series, + nat_series_dtype_timedelta) + + # addition + assert_series_equal(nat_series_dtype_timedelta + NaT, + nat_series_dtype_timedelta) + assert_series_equal(NaT + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) + + 
assert_series_equal(nat_series_dtype_timedelta + + single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) - s = Series([(1, 1), (1, 1)]) + assert_series_equal(timedelta_series + NaT, + nat_series_dtype_timedelta) + assert_series_equal(NaT + timedelta_series, + nat_series_dtype_timedelta) - result = s == (1, 1) - expected = Series([True, True]) - assert_series_equal(result, expected) + assert_series_equal(timedelta_series + single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + assert_series_equal(single_nat_dtype_timedelta + timedelta_series, + nat_series_dtype_timedelta) - result = s != (1, 1) - expected = Series([False, False]) - assert_series_equal(result, expected) + assert_series_equal(nat_series_dtype_timedelta + NaT, + nat_series_dtype_timedelta) + assert_series_equal(NaT + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) - s = Series([frozenset([1]), frozenset([1, 2])]) + assert_series_equal(nat_series_dtype_timedelta + + single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) - result = s == frozenset([1]) - expected = Series([True, False]) - assert_series_equal(result, expected) + # multiplication + assert_series_equal(nat_series_dtype_timedelta * 1.0, + nat_series_dtype_timedelta) + assert_series_equal(1.0 * nat_series_dtype_timedelta, + nat_series_dtype_timedelta) - def test_comparison_operators_with_nas(self): - s = Series(bdate_range('1/1/2000', periods=10), dtype=object) - s[::2] = np.nan + assert_series_equal(timedelta_series * 1, timedelta_series) + assert_series_equal(1 * timedelta_series, timedelta_series) - # test that comparisons work - ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] - for op in ops: - val = s[5] + assert_series_equal(timedelta_series * 1.5, + Series([NaT, Timedelta('1.5s')])) + 
assert_series_equal(1.5 * timedelta_series, + Series([NaT, Timedelta('1.5s')])) - f = getattr(operator, op) - result = f(s, val) + assert_series_equal(timedelta_series * nan, + nat_series_dtype_timedelta) + assert_series_equal(nan * timedelta_series, + nat_series_dtype_timedelta) - expected = f(s.dropna(), val).reindex(s.index) + # division + assert_series_equal(timedelta_series / 2, + Series([NaT, Timedelta('0.5s')])) + assert_series_equal(timedelta_series / 2.0, + Series([NaT, Timedelta('0.5s')])) + assert_series_equal(timedelta_series / nan, + nat_series_dtype_timedelta) - if op == 'ne': - expected = expected.fillna(True).astype(bool) - else: - expected = expected.fillna(False).astype(bool) - assert_series_equal(result, expected) +class TestDatetimeSeriesArithmetic(object): + def test_operators_datetimelike(self): + def run_ops(ops, get_ser, test_ser): - # fffffffuuuuuuuuuuuu - # result = f(val, s) - # expected = f(val, s.dropna()).reindex(s.index) - # assert_series_equal(result, expected) + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + for op_str in ops: + op = getattr(get_ser, op_str, None) + with tm.assert_raises_regex(TypeError, 'operate'): + op(test_ser) - # boolean &, |, ^ should work with object arrays and propagate NAs + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + td2 = timedelta(minutes=5, seconds=4) + ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__', + '__rfloordiv__', '__rpow__'] + run_ops(ops, td1, td2) + td1 + td2 + td2 + td1 + td1 - td2 + td2 - td1 + td1 / td2 + td2 / td1 - ops = ['and_', 'or_', 'xor'] - mask = s.isna() - for bool_op in ops: - f = getattr(operator, bool_op) + # ## datetime64 ### + dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')]) + dt1.iloc[2] = np.nan + dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), + Timestamp('20120104')]) + ops = ['__add__', 
'__mul__', '__floordiv__', '__truediv__', '__div__', + '__pow__', '__radd__', '__rmul__', '__rfloordiv__', + '__rtruediv__', '__rdiv__', '__rpow__'] + run_ops(ops, dt1, dt2) + dt1 - dt2 + dt2 - dt1 - filled = s.fillna(s[0]) + # ## datetime64 with timetimedelta ### + ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', + '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', + '__rpow__'] + run_ops(ops, dt1, td1) + dt1 + td1 + td1 + dt1 + dt1 - td1 + # TODO: Decide if this ought to work. + # td1 - dt1 - result = f(s < s[9], s > s[3]) + # ## timetimedelta with datetime64 ### + ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__', + '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__', + '__rdiv__', '__rpow__'] + run_ops(ops, td1, dt1) + td1 + dt1 + dt1 + td1 - expected = f(filled < filled[9], filled > filled[3]) - expected[mask] = False - assert_series_equal(result, expected) + # 8260, 10763 + # datetime64 with tz + ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', + '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', + '__rpow__'] - def test_comparison_object_numeric_nas(self): - s = Series(np.random.randn(10), dtype=object) - shifted = s.shift(2) + tz = 'US/Eastern' + dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, + tz=tz), name='foo') + dt2 = dt1.copy() + dt2.iloc[2] = np.nan + td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) + td2 = td1.copy() + td2.iloc[1] = np.nan + run_ops(ops, dt1, td1) - ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] - for op in ops: - f = getattr(operator, op) + result = dt1 + td1[0] + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + assert_series_equal(result, exp) - result = f(s, shifted) - expected = f(s.astype(float), shifted.astype(float)) - assert_series_equal(result, expected) + result = dt2 + td2[0] + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + assert_series_equal(result, exp) - def 
test_comparison_invalid(self): + # odd numpy behavior with scalar timedeltas + result = td1[0] + dt1 + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + assert_series_equal(result, exp) - # GH4968 - # invalid date/int comparisons - s = Series(range(5)) - s2 = Series(date_range('20010101', periods=5)) + result = td2[0] + dt2 + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + assert_series_equal(result, exp) - for (x, y) in [(s, s2), (s2, s)]: - pytest.raises(TypeError, lambda: x == y) - pytest.raises(TypeError, lambda: x != y) - pytest.raises(TypeError, lambda: x >= y) - pytest.raises(TypeError, lambda: x > y) - pytest.raises(TypeError, lambda: x < y) - pytest.raises(TypeError, lambda: x <= y) + result = dt1 - td1[0] + exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) + assert_series_equal(result, exp) + pytest.raises(TypeError, lambda: td1[0] - dt1) - def test_unequal_categorical_comparison_raises_type_error(self): - # unequal comparison should raise for unordered cats - cat = Series(Categorical(list("abc"))) + result = dt2 - td2[0] + exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) + assert_series_equal(result, exp) + pytest.raises(TypeError, lambda: td2[0] - dt2) - def f(): - cat > "b" + result = dt1 + td1 + exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz) + assert_series_equal(result, exp) - pytest.raises(TypeError, f) - cat = Series(Categorical(list("abc"), ordered=False)) + result = dt2 + td2 + exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz) + assert_series_equal(result, exp) - def f(): - cat > "b" + result = dt1 - td1 + exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz) + assert_series_equal(result, exp) - pytest.raises(TypeError, f) + result = dt2 - td2 + exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) + assert_series_equal(result, exp) - # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 - # and following comparisons with scalars not in categories 
should raise - # for unequal comps, but not for equal/not equal - cat = Series(Categorical(list("abc"), ordered=True)) + pytest.raises(TypeError, lambda: td1 - dt1) + pytest.raises(TypeError, lambda: td2 - dt2) - pytest.raises(TypeError, lambda: cat < "d") - pytest.raises(TypeError, lambda: cat > "d") - pytest.raises(TypeError, lambda: "d" < cat) - pytest.raises(TypeError, lambda: "d" > cat) + def test_sub_single_tz(self): + # GH12290 + s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')]) + s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')]) + result = s1 - s2 + expected = Series([Timedelta('2days')]) + assert_series_equal(result, expected) + result = s2 - s1 + expected = Series([Timedelta('-2days')]) + assert_series_equal(result, expected) - tm.assert_series_equal(cat == "d", Series([False, False, False])) - tm.assert_series_equal(cat != "d", Series([True, True, True])) + def test_sub_datetime_compat(self): + # see gh-14088 + s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT]) + dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc) + exp = Series([Timedelta('1 days'), pd.NaT]) + assert_series_equal(s - dt, exp) + assert_series_equal(s - Timestamp(dt), exp) - def test_more_na_comparisons(self): - for dtype in [None, object]: - left = Series(['a', np.nan, 'c'], dtype=dtype) - right = Series(['a', np.nan, 'd'], dtype=dtype) + def test_datetime_series_with_timedelta(self): + # scalar timedeltas/np.timedelta64 objects + # operate with np.timedelta64 correctly + s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - result = left == right - expected = Series([True, False, False]) - assert_series_equal(result, expected) + result = s + np.timedelta64(1, 's') + result2 = np.timedelta64(1, 's') + s + expected = Series([Timestamp('20130101 9:01:01'), + Timestamp('20130101 9:02:01')]) + assert_series_equal(result, expected) + assert_series_equal(result2, expected) - result = left != right - expected = Series([False, True, 
True]) - assert_series_equal(result, expected) + result = s + np.timedelta64(5, 'ms') + result2 = np.timedelta64(5, 'ms') + s + expected = Series([Timestamp('20130101 9:01:00.005'), + Timestamp('20130101 9:02:00.005')]) + assert_series_equal(result, expected) + assert_series_equal(result2, expected) - result = left == np.nan - expected = Series([False, False, False]) - assert_series_equal(result, expected) + def test_datetime_series_with_DateOffset(self): + # GH 4532 + # operate with pd.offsets + s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - result = left != np.nan - expected = Series([True, True, True]) - assert_series_equal(result, expected) + result = s + pd.offsets.Second(5) + result2 = pd.offsets.Second(5) + s + expected = Series([Timestamp('20130101 9:01:05'), + Timestamp('20130101 9:02:05')]) + assert_series_equal(result, expected) + assert_series_equal(result2, expected) - def test_nat_comparisons(self): - data = [([pd.Timestamp('2011-01-01'), pd.NaT, - pd.Timestamp('2011-01-03')], - [pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]), + result = s - pd.offsets.Second(5) + result2 = -pd.offsets.Second(5) + s + expected = Series([Timestamp('20130101 9:00:55'), + Timestamp('20130101 9:01:55')]) + assert_series_equal(result, expected) + assert_series_equal(result2, expected) - ([pd.Timedelta('1 days'), pd.NaT, - pd.Timedelta('3 days')], - [pd.NaT, pd.NaT, pd.Timedelta('3 days')]), + result = s + pd.offsets.Milli(5) + result2 = pd.offsets.Milli(5) + s + expected = Series([Timestamp('20130101 9:01:00.005'), + Timestamp('20130101 9:02:00.005')]) + assert_series_equal(result, expected) + assert_series_equal(result2, expected) - ([pd.Period('2011-01', freq='M'), pd.NaT, - pd.Period('2011-03', freq='M')], - [pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])] + result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5) + expected = Series([Timestamp('20130101 9:06:00.005'), + Timestamp('20130101 9:07:00.005')]) + assert_series_equal(result, expected) - 
# add lhs / rhs switched data - data = data + [(r, l) for l, r in data] + # valid DateOffsets + for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', + 'Nano']: + op = getattr(pd.offsets, do) + s + op(5) + op(5) + s - for l, r in data: - for dtype in [None, object]: - left = Series(l, dtype=dtype) + def test_datetime64_ops_nat(self): + # GH 11349 + datetime_series = Series([NaT, Timestamp('19900315')]) + nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') + single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') - # Series, Index - for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]: - expected = Series([False, False, True]) - assert_series_equal(left == right, expected) + # subtraction + assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp) + assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) + with pytest.raises(TypeError): + -single_nat_dtype_datetime + datetime_series - expected = Series([True, True, False]) - assert_series_equal(left != right, expected) + assert_series_equal(nat_series_dtype_timestamp - NaT, + nat_series_dtype_timestamp) + assert_series_equal(-NaT + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + with pytest.raises(TypeError): + -single_nat_dtype_datetime + nat_series_dtype_timestamp - expected = Series([False, False, False]) - assert_series_equal(left < right, expected) + # addition + assert_series_equal(nat_series_dtype_timestamp + NaT, + nat_series_dtype_timestamp) + assert_series_equal(NaT + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) - expected = Series([False, False, False]) - assert_series_equal(left > right, expected) + assert_series_equal(nat_series_dtype_timestamp + NaT, + nat_series_dtype_timestamp) + assert_series_equal(NaT + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) - expected = Series([False, False, True]) - assert_series_equal(left >= right, expected) + # multiplication + with pytest.raises(TypeError): 
+ datetime_series * 1 + with pytest.raises(TypeError): + nat_series_dtype_timestamp * 1 + with pytest.raises(TypeError): + datetime_series * 1.0 + with pytest.raises(TypeError): + nat_series_dtype_timestamp * 1.0 - expected = Series([False, False, True]) - assert_series_equal(left <= right, expected) + # division + with pytest.raises(TypeError): + nat_series_dtype_timestamp / 1.0 + with pytest.raises(TypeError): + nat_series_dtype_timestamp / 1 - def test_nat_comparisons_scalar(self): - data = [[pd.Timestamp('2011-01-01'), pd.NaT, - pd.Timestamp('2011-01-03')], - [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')], +class TestSeriesOperators(TestData): + def test_op_method(self): + def check(series, other, check_reverse=False): + simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow'] + if not compat.PY3: + simple_ops.append('div') - [pd.Period('2011-01', freq='M'), pd.NaT, - pd.Period('2011-03', freq='M')]] + for opname in simple_ops: + op = getattr(Series, opname) - for l in data: - for dtype in [None, object]: - left = Series(l, dtype=dtype) + if op == 'div': + alt = operator.truediv + else: + alt = getattr(operator, opname) - expected = Series([False, False, False]) - assert_series_equal(left == pd.NaT, expected) - assert_series_equal(pd.NaT == left, expected) + result = op(series, other) + expected = alt(series, other) + assert_almost_equal(result, expected) + if check_reverse: + rop = getattr(Series, "r" + opname) + result = rop(series, other) + expected = alt(other, series) + assert_almost_equal(result, expected) - expected = Series([True, True, True]) - assert_series_equal(left != pd.NaT, expected) - assert_series_equal(pd.NaT != left, expected) + check(self.ts, self.ts * 2) + check(self.ts, self.ts[::2]) + check(self.ts, 5, check_reverse=True) + check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True) - expected = Series([False, False, False]) - assert_series_equal(left < pd.NaT, expected) - assert_series_equal(pd.NaT > left, 
expected) - assert_series_equal(left <= pd.NaT, expected) - assert_series_equal(pd.NaT >= left, expected) + def test_neg(self): + assert_series_equal(-self.series, -1 * self.series) - assert_series_equal(left > pd.NaT, expected) - assert_series_equal(pd.NaT < left, expected) - assert_series_equal(left >= pd.NaT, expected) - assert_series_equal(pd.NaT <= left, expected) + def test_invert(self): + assert_series_equal(-(self.series < 0), ~(self.series < 0)) - def test_comparison_different_length(self): - a = Series(['a', 'b', 'c']) - b = Series(['b', 'a']) - pytest.raises(ValueError, a.__lt__, b) + def test_operators(self): + def _check_op(series, other, op, pos_only=False, + check_dtype=True): + left = np.abs(series) if pos_only else series + right = np.abs(other) if pos_only else other - a = Series([1, 2]) - b = Series([2, 3, 4]) - pytest.raises(ValueError, a.__eq__, b) + cython_or_numpy = op(left, right) + python = left.combine(right, op) + assert_series_equal(cython_or_numpy, python, + check_dtype=check_dtype) - def test_comparison_label_based(self): + def check(series, other): + simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod'] - # GH 4947 - # comparisons should be label based + for opname in simple_ops: + _check_op(series, other, getattr(operator, opname)) - a = Series([True, False, True], list('bca')) - b = Series([False, True, False], list('abc')) + _check_op(series, other, operator.pow, pos_only=True) - expected = Series([False, True, False], list('abc')) - result = a & b - assert_series_equal(result, expected) + _check_op(series, other, lambda x, y: operator.add(y, x)) + _check_op(series, other, lambda x, y: operator.sub(y, x)) + _check_op(series, other, lambda x, y: operator.truediv(y, x)) + _check_op(series, other, lambda x, y: operator.floordiv(y, x)) + _check_op(series, other, lambda x, y: operator.mul(y, x)) + _check_op(series, other, lambda x, y: operator.pow(y, x), + pos_only=True) + _check_op(series, other, lambda x, y: operator.mod(y, 
x)) - expected = Series([True, True, False], list('abc')) - result = a | b - assert_series_equal(result, expected) + check(self.ts, self.ts * 2) + check(self.ts, self.ts * 0) + check(self.ts, self.ts[::2]) + check(self.ts, 5) - expected = Series([True, False, False], list('abc')) - result = a ^ b - assert_series_equal(result, expected) + def check_comparators(series, other, check_dtype=True): + _check_op(series, other, operator.gt, check_dtype=check_dtype) + _check_op(series, other, operator.ge, check_dtype=check_dtype) + _check_op(series, other, operator.eq, check_dtype=check_dtype) + _check_op(series, other, operator.lt, check_dtype=check_dtype) + _check_op(series, other, operator.le, check_dtype=check_dtype) - # rhs is bigger - a = Series([True, False, True], list('bca')) - b = Series([False, True, False, True], list('abcd')) + check_comparators(self.ts, 5) + check_comparators(self.ts, self.ts + 1, check_dtype=False) - expected = Series([False, True, False, False], list('abcd')) - result = a & b - assert_series_equal(result, expected) + def test_divmod(self): + def check(series, other): + results = divmod(series, other) + if isinstance(other, Iterable) and len(series) != len(other): + # if the lengths don't match, this is the test where we use + # `self.ts[::2]`. Pad every other value in `other_np` with nan. 
+ other_np = [] + for n in other: + other_np.append(n) + other_np.append(np.nan) + else: + other_np = other + other_np = np.asarray(other_np) + with np.errstate(all='ignore'): + expecteds = divmod(series.values, np.asarray(other_np)) - expected = Series([True, True, False, False], list('abcd')) - result = a | b - assert_series_equal(result, expected) + for result, expected in zip(results, expecteds): + # check the values, name, and index separatly + assert_almost_equal(np.asarray(result), expected) - # filling + assert result.name == series.name + assert_index_equal(result.index, series.index) - # vs empty - result = a & Series([]) - expected = Series([False, False, False], list('bca')) - assert_series_equal(result, expected) + check(self.ts, self.ts * 2) + check(self.ts, self.ts * 0) + check(self.ts, self.ts[::2]) + check(self.ts, 5) - result = a | Series([]) - expected = Series([True, False, True], list('bca')) - assert_series_equal(result, expected) + def test_operators_empty_int_corner(self): + s1 = Series([], [], dtype=np.int32) + s2 = Series({'x': 0.}) + assert_series_equal(s1 * s2, Series([np.nan], index=['x'])) - # vs non-matching - result = a & Series([1], ['z']) - expected = Series([False, False, False, False], list('abcz')) - assert_series_equal(result, expected) + def test_invalid_ops(self): + # invalid ops + pytest.raises(Exception, self.objSeries.__add__, 1) + pytest.raises(Exception, self.objSeries.__add__, + np.array(1, dtype=np.int64)) + pytest.raises(Exception, self.objSeries.__sub__, 1) + pytest.raises(Exception, self.objSeries.__sub__, + np.array(1, dtype=np.int64)) - result = a | Series([1], ['z']) - expected = Series([True, True, False, False], list('abcz')) - assert_series_equal(result, expected) + def test_timedelta64_conversions(self): + startdate = Series(date_range('2013-01-01', '2013-01-03')) + enddate = Series(date_range('2013-03-01', '2013-03-03')) - # identity - # we would like s[s|e] == s to hold for any e, whether empty or not - for 
e in [Series([]), Series([1], ['z']), - Series(np.nan, b.index), Series(np.nan, a.index)]: - result = a[a | e] - assert_series_equal(result, a[a]) + s1 = enddate - startdate + s1[2] = np.nan - for e in [Series(['z'])]: - if compat.PY3: - with tm.assert_produces_warning(RuntimeWarning): - result = a[a | e] - else: - result = a[a | e] - assert_series_equal(result, a[a]) + for m in [1, 3, 10]: + for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']: - # vs scalars - index = list('bca') - t = Series([True, False, True]) + # op + expected = s1.apply(lambda x: x / np.timedelta64(m, unit)) + result = s1 / np.timedelta64(m, unit) + assert_series_equal(result, expected) - for v in [True, 1, 2]: - result = Series([True, False, True], index=index) | v - expected = Series([True, True, True], index=index) - assert_series_equal(result, expected) + if m == 1 and unit != 'ns': - for v in [np.nan, 'foo']: - pytest.raises(TypeError, lambda: t | v) + # astype + result = s1.astype("timedelta64[{0}]".format(unit)) + assert_series_equal(result, expected) - for v in [False, 0]: - result = Series([True, False, True], index=index) | v - expected = Series([True, False, True], index=index) - assert_series_equal(result, expected) + # reverse op + expected = s1.apply( + lambda x: Timedelta(np.timedelta64(m, unit)) / x) + result = np.timedelta64(m, unit) / s1 - for v in [True, 1]: - result = Series([True, False, True], index=index) & v - expected = Series([True, False, True], index=index) - assert_series_equal(result, expected) + # astype + s = Series(date_range('20130101', periods=3)) + result = s.astype(object) + assert isinstance(result.iloc[0], datetime) + assert result.dtype == np.object_ - for v in [False, 0]: - result = Series([True, False, True], index=index) & v - expected = Series([False, False, False], index=index) - assert_series_equal(result, expected) - for v in [np.nan]: - pytest.raises(TypeError, lambda: t & v) + result = s1.astype(object) + assert isinstance(result.iloc[0], 
timedelta) + assert result.dtype == np.object_ - def test_comparison_flex_basic(self): - left = pd.Series(np.random.randn(10)) - right = pd.Series(np.random.randn(10)) + @pytest.mark.parametrize('op', [operator.add, operator.sub]) + def test_timedelta64_equal_timedelta_supported_ops(self, op): + ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'), + Timestamp('20130228 22:00:00'), + Timestamp('20130228 21:00:00')]) - assert_series_equal(left.eq(right), left == right) - assert_series_equal(left.ne(right), left != right) - assert_series_equal(left.le(right), left < right) - assert_series_equal(left.lt(right), left <= right) - assert_series_equal(left.gt(right), left > right) - assert_series_equal(left.ge(right), left >= right) + intervals = 'D', 'h', 'm', 's', 'us' - # axis - for axis in [0, None, 'index']: - assert_series_equal(left.eq(right, axis=axis), left == right) - assert_series_equal(left.ne(right, axis=axis), left != right) - assert_series_equal(left.le(right, axis=axis), left < right) - assert_series_equal(left.lt(right, axis=axis), left <= right) - assert_series_equal(left.gt(right, axis=axis), left > right) - assert_series_equal(left.ge(right, axis=axis), left >= right) + # TODO: unused + # npy16_mappings = {'D': 24 * 60 * 60 * 1000000, + # 'h': 60 * 60 * 1000000, + # 'm': 60 * 1000000, + # 's': 1000000, + # 'us': 1} - # - msg = 'No axis named 1 for object type' - for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']: - with tm.assert_raises_regex(ValueError, msg): - getattr(left, op)(right, axis=1) + def timedelta64(*args): + return sum(starmap(np.timedelta64, zip(args, intervals))) - def test_comparison_flex_alignment(self): - left = Series([1, 3, 2], index=list('abc')) - right = Series([2, 2, 2], index=list('bcd')) + for d, h, m, s, us in product(*([range(2)] * 5)): + nptd = timedelta64(d, h, m, s, us) + pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, + microseconds=us) + lhs = op(ser, nptd) + rhs = op(ser, pytd) - exp = 
pd.Series([False, False, True, False], index=list('abcd')) - assert_series_equal(left.eq(right), exp) + try: + assert_series_equal(lhs, rhs) + except: + raise AssertionError( + "invalid comparsion [op->{0},d->{1},h->{2},m->{3}," + "s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, + us, lhs, rhs)) - exp = pd.Series([True, True, False, True], index=list('abcd')) - assert_series_equal(left.ne(right), exp) + def test_ops_nat_mixed_datetime64_timedelta64(self): + # GH 11349 + timedelta_series = Series([NaT, Timedelta('1s')]) + datetime_series = Series([NaT, Timestamp('19900315')]) + nat_series_dtype_timedelta = Series([NaT, NaT], + dtype='timedelta64[ns]') + nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') + single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') + single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') - exp = pd.Series([False, False, True, False], index=list('abcd')) - assert_series_equal(left.le(right), exp) + # subtraction + assert_series_equal(datetime_series - single_nat_dtype_datetime, + nat_series_dtype_timedelta) - exp = pd.Series([False, False, False, False], index=list('abcd')) - assert_series_equal(left.lt(right), exp) + assert_series_equal(datetime_series - single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(-single_nat_dtype_timedelta + datetime_series, + nat_series_dtype_timestamp) - exp = pd.Series([False, True, True, False], index=list('abcd')) - assert_series_equal(left.ge(right), exp) + # without a Series wrapping the NaT, it is ambiguous + # whether it is a datetime64 or timedelta64 + # defaults to interpreting it as timedelta64 + assert_series_equal(nat_series_dtype_timestamp - + single_nat_dtype_datetime, + nat_series_dtype_timedelta) - exp = pd.Series([False, True, False, False], index=list('abcd')) - assert_series_equal(left.gt(right), exp) + assert_series_equal(nat_series_dtype_timestamp - + single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + 
assert_series_equal(-single_nat_dtype_timedelta + + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) - def test_comparison_flex_alignment_fill(self): - left = Series([1, 3, 2], index=list('abc')) - right = Series([2, 2, 2], index=list('bcd')) + with pytest.raises(TypeError): + timedelta_series - single_nat_dtype_datetime - exp = pd.Series([False, False, True, True], index=list('abcd')) - assert_series_equal(left.eq(right, fill_value=2), exp) + # addition + assert_series_equal(nat_series_dtype_timestamp + + single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) - exp = pd.Series([True, True, False, False], index=list('abcd')) - assert_series_equal(left.ne(right, fill_value=2), exp) + assert_series_equal(nat_series_dtype_timestamp + + single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) - exp = pd.Series([False, False, True, True], index=list('abcd')) - assert_series_equal(left.le(right, fill_value=0), exp) + assert_series_equal(nat_series_dtype_timedelta + + single_nat_dtype_datetime, + nat_series_dtype_timestamp) + assert_series_equal(single_nat_dtype_datetime + + nat_series_dtype_timedelta, + nat_series_dtype_timestamp) - exp = pd.Series([False, False, False, True], index=list('abcd')) - assert_series_equal(left.lt(right, fill_value=0), exp) + def test_ops_datetimelike_align(self): + # GH 7500 + # datetimelike ops need to align + dt = Series(date_range('2012-1-1', periods=3, freq='D')) + dt.iloc[2] = np.nan + dt2 = dt[::-1] - exp = pd.Series([True, True, True, False], index=list('abcd')) - assert_series_equal(left.ge(right, fill_value=0), exp) + expected = Series([timedelta(0), timedelta(0), pd.NaT]) + # name is reset + result = dt2 - dt + assert_series_equal(result, expected) - exp = pd.Series([True, True, False, False], 
index=list('abcd')) - assert_series_equal(left.gt(right, fill_value=0), exp) + expected = Series(expected, name=0) + result = (dt2.to_frame() - dt.to_frame())[0] + assert_series_equal(result, expected) def test_return_dtypes_bool_op_costant(self): # gh15115 @@ -1605,36 +1688,6 @@ def test_arith_ops_df_compat(self): assert_frame_equal(s3.to_frame() + s4.to_frame(), exp) assert_frame_equal(s4.to_frame() + s3.to_frame(), exp) - def test_comp_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') - - s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - - for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: - - msg = "Can only compare identically-labeled Series objects" - with tm.assert_raises_regex(ValueError, msg): - l == r - - with tm.assert_raises_regex(ValueError, msg): - l != r - - with tm.assert_raises_regex(ValueError, msg): - l < r - - msg = "Can only compare identically-labeled DataFrame objects" - with tm.assert_raises_regex(ValueError, msg): - l.to_frame() == r.to_frame() - - with tm.assert_raises_regex(ValueError, msg): - l.to_frame() != r.to_frame() - - with tm.assert_raises_regex(ValueError, msg): - l.to_frame() < r.to_frame() - def test_bool_ops_df_compat(self): # GH 1134 s1 = pd.Series([True, False, True], index=list('ABC'), name='x') @@ -1851,12 +1904,6 @@ def _check_fill(meth, op, a, b, fill_value=0): # should accept axis=0 or axis='rows' op(a, b, axis=0) - def test_ne(self): - ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) - expected = [True, True, False, True, True] - assert tm.equalContents(ts.index != 5, expected) - assert tm.equalContents(~(ts.index == 5), expected) - def test_operators_na_handling(self): from decimal import Decimal from datetime import date @@ -1877,22 +1924,6 @@ def test_operators_na_handling(self): expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', 
np.nan]) assert_series_equal(result, expected) - def test_divide_decimal(self): - """ resolves issue #9787 """ - from decimal import Decimal - - expected = Series([Decimal(5)]) - - s = Series([Decimal(10)]) - s = s / Decimal(2) - - assert_series_equal(expected, s) - - s = Series([Decimal(10)]) - s = s // Decimal(2) - - assert_series_equal(expected, s) - def test_datetime64_with_index(self): # arithmetic integer ops with an index
No tests were harmed in the making of this PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/18809
2017-12-16T23:31:32Z
2017-12-18T12:34:10Z
2017-12-18T12:34:10Z
2017-12-18T15:20:27Z
DOC: replaced 'Good as first PR' with 'good first issue'
diff --git a/doc/README.rst b/doc/README.rst index b2c66611b68bb..efa21fdd3a2d9 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -160,8 +160,8 @@ Where to start? There are a number of issues listed under `Docs <https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_ -and `Good as first PR -<https://github.com/pandas-dev/pandas/issues?labels=Good+as+first+PR&sort=updated&state=open>`_ +and `good first issue +<https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open>`_ where you could start out. Or maybe you have an idea of your own, by using pandas, looking for something
- [NA] closes #xxxx - [NA] tests added / passed - [NA] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [NA] whatsnew entry Was browsing https://github.com/pandas-dev/pandas/tree/master/doc and it seemed the 'Good as first PR' had no open issues, whereas 'good first issue' has quite a few open issues. I'm guessing the 'Good as first PR' label was not well used/got renamed to 'good first issue' label?
https://api.github.com/repos/pandas-dev/pandas/pulls/18806
2017-12-16T16:47:58Z
2017-12-18T12:14:06Z
2017-12-18T12:14:06Z
2017-12-18T12:14:25Z
ENH: Add length attribute to Interval and IntervalIndex
diff --git a/doc/source/api.rst b/doc/source/api.rst index 3edaadba64762..64f972e52d190 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1623,6 +1623,7 @@ IntervalIndex Components IntervalIndex.right IntervalIndex.mid IntervalIndex.closed + IntervalIndex.length IntervalIndex.values IntervalIndex.is_non_overlapping_monotonic @@ -1995,6 +1996,7 @@ Properties Interval.closed_left Interval.closed_right Interval.left + Interval.length Interval.mid Interval.open_left Interval.open_right diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 40e1e2011479c..43587ee590fd4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -141,6 +141,7 @@ Other Enhancements - ``IntervalIndex.to_tuples()`` has gained the ``na_tuple`` parameter to control whether NA is returned as a tuple of NA, or NA itself (:issue:`18756`) - ``Categorical.rename_categories``, ``CategoricalIndex.rename_categories`` and :attr:`Series.cat.rename_categories` can now take a callable as their argument (:issue:`18862`) +- :class:`Interval` and :class:`IntervalIndex` have gained a ``length`` attribute (:issue:`18789`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 480ea5cb4fa80..f1da60057186c 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -54,7 +54,17 @@ cdef class IntervalMixin(object): return 0.5 * (self.left + self.right) except TypeError: # datetime safe version - return self.left + 0.5 * (self.right - self.left) + return self.left + 0.5 * self.length + + @property + def length(self): + """Return the length of the Interval""" + try: + return self.right - self.left + except TypeError: + # length not defined for some types, e.g. 
string + msg = 'cannot compute length between {left!r} and {right!r}' + raise TypeError(msg.format(left=self.left, right=self.right)) cdef _interval_like(other): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index cb786574909db..b7d3305730521 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -116,10 +116,11 @@ class IntervalIndex(IntervalMixin, Index): The indexing behaviors are provisional and may change in a future version of pandas. - Attributes + Parameters ---------- - left, right : array-like (1-dimensional) - Left and right bounds for each interval. + data : array-like (1-dimensional) + Array-like containing Interval objects from which to build the + IntervalIndex closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. @@ -127,7 +128,14 @@ class IntervalIndex(IntervalMixin, Index): Name to be stored in the index. copy : boolean, default False Copy the meta-data + + Attributes + ---------- + left + right + closed mid + length values is_non_overlapping_monotonic @@ -599,6 +607,20 @@ def closed(self): """ return self._closed + @property + def length(self): + """ + Return an Index with entries denoting the length of each Interval in + the IntervalIndex + """ + try: + return self.right - self.left + except TypeError: + # length not defined for some types, e.g. string + msg = ('IntervalIndex contains Intervals without defined length, ' + 'e.g. 
Intervals with string endpoints') + raise TypeError(msg) + def __len__(self): return len(self.left) @@ -683,11 +705,10 @@ def mid(self): Return the midpoint of each Interval in the IntervalIndex as an Index """ try: - return Index(0.5 * (self.left.values + self.right.values)) + return 0.5 * (self.left + self.right) except TypeError: # datetime safe version - delta = self.right - self.left - return self.left + 0.5 * delta + return self.left + 0.5 * self.length @cache_readonly def is_monotonic(self): diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index c809127a66ab8..74446af8b77f6 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -283,6 +283,36 @@ def test_properties(self, closed): tm.assert_numpy_array_equal(np.asarray(index), expected) tm.assert_numpy_array_equal(index.values, expected) + @pytest.mark.parametrize('breaks', [ + [1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608], + [-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf], + pd.to_datetime(['20170101', '20170202', '20170303', '20170404']), + pd.to_timedelta(['1ns', '2ms', '3s', '4M', '5H', '6D'])]) + def test_length(self, closed, breaks): + # GH 18789 + index = IntervalIndex.from_breaks(breaks, closed=closed) + result = index.length + expected = Index(iv.length for iv in index) + tm.assert_index_equal(result, expected) + + # with NA + index = index.insert(1, np.nan) + result = index.length + expected = Index(iv.length if notna(iv) else iv for iv in index) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('breaks', [ + list('abcdefgh'), + lzip(range(10), range(1, 11)), + [['A', 'B'], ['a', 'b'], ['c', 'd'], ['e', 'f']], + [Interval(0, 1), Interval(1, 2), Interval(3, 4), Interval(4, 5)]]) + def test_length_errors(self, closed, breaks): + # GH 18789 + index = IntervalIndex.from_breaks(breaks) + msg = 'IntervalIndex contains Intervals without defined length' + with 
tm.assert_raises_regex(TypeError, msg): + index.length + def test_with_nans(self, closed): index = self.create_index(closed=closed) assert not index.hasnans diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py index 533a79656f174..3db474e32c4dd 100644 --- a/pandas/tests/scalar/test_interval.py +++ b/pandas/tests/scalar/test_interval.py @@ -1,6 +1,7 @@ from __future__ import division -from pandas import Interval, Timestamp +import numpy as np +from pandas import Interval, Timestamp, Timedelta from pandas.core.common import _any_none import pytest @@ -66,6 +67,48 @@ def test_hash(self, interval): # should not raise hash(interval) + @pytest.mark.parametrize('left, right, expected', [ + (0, 5, 5), + (-2, 5.5, 7.5), + (10, 10, 0), + (10, np.inf, np.inf), + (-np.inf, -5, np.inf), + (-np.inf, np.inf, np.inf), + (Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')), + (Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')), + (Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')), + (Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))]) + def test_length(self, left, right, expected): + # GH 18789 + iv = Interval(left, right) + result = iv.length + assert result == expected + + @pytest.mark.parametrize('left, right, expected', [ + ('2017-01-01', '2017-01-06', '5 days'), + ('2017-01-01', '2017-01-01 12:00:00', '12 hours'), + ('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'), + ('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')]) + @pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern')) + def test_length_timestamp(self, tz, left, right, expected): + # GH 18789 + iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz)) + result = iv.length + expected = Timedelta(expected) + assert result == expected + + @pytest.mark.parametrize('left, right', [ + ('a', 'z'), + (('a', 'b'), ('c', 'd')), + (list('AB'), list('ab')), + (Interval(0, 1), Interval(1, 2))]) + def 
test_length_errors(self, left, right): + # GH 18789 + iv = Interval(left, right) + msg = 'cannot compute length between .* and .*' + with tm.assert_raises_regex(TypeError, msg): + iv.length + def test_math_add(self, interval): expected = Interval(1, 2) actual = interval + 1
- [X] closes #18789 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18805
2017-12-16T15:38:06Z
2017-12-23T20:36:29Z
2017-12-23T20:36:29Z
2017-12-30T21:48:13Z
CLN: Drop the as_recarray parameter in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index 1a777c3e0b15f..184767015bf93 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -143,15 +143,6 @@ usecols : array-like or callable, default ``None`` pd.read_csv(StringIO(data), usecols=lambda x: x.upper() in ['COL1', 'COL3']) Using this parameter results in much faster parsing time and lower memory usage. -as_recarray : boolean, default ``False`` - .. deprecated:: 0.18.2 - - Please call ``pd.read_csv(...).to_records()`` instead. - - Return a NumPy recarray instead of a DataFrame after parsing the data. If - set to ``True``, this option takes precedence over the ``squeeze`` parameter. - In addition, as row indices are not available in such a format, the ``index_col`` - parameter will be ignored. squeeze : boolean, default ``False`` If the parsed data only contains one column then return a Series. prefix : str, default ``None`` diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 9dc10a09378f8..7d8770723b160 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -221,6 +221,7 @@ Removal of prior version deprecations/changes and Series (deprecated since v0.18). Instead, resample before calling the methods. (:issue:18601 & :issue:18668) - ``DatetimeIndex.to_datetime``, ``Timestamp.to_datetime``, ``PeriodIndex.to_datetime``, and ``Index.to_datetime`` have been removed (:issue:`8254`, :issue:`14096`, :issue:`14113`) - :func:`read_csv` has dropped the ``skip_footer`` parameter (:issue:`13386`) +- :func:`read_csv` has dropped the ``as_recarray`` parameter (:issue:`13373`) .. 
_whatsnew_0220.performance: diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 8d4f2af19701a..c6899fa527b6e 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -91,7 +91,6 @@ except NameError: basestring = str cdef extern from "src/numpy_helper.h": - object sarr_from_data(cnp.dtype, int length, void* data) void transfer_object_column(char *dst, char *src, size_t stride, size_t length) @@ -302,7 +301,6 @@ cdef class TextReader: object delimiter, converters, delim_whitespace object na_values object memory_map - object as_recarray object header, orig_header, names, header_start, header_end object index_col object low_memory @@ -334,8 +332,6 @@ cdef class TextReader: converters=None, - as_recarray=False, - skipinitialspace=False, escapechar=None, doublequote=True, @@ -489,8 +485,6 @@ cdef class TextReader: self.converters = converters self.na_filter = na_filter - self.as_recarray = as_recarray - self.compact_ints = compact_ints self.use_unsigned = use_unsigned @@ -903,14 +897,7 @@ cdef class TextReader: # Don't care about memory usage columns = self._read_rows(rows, 1) - if self.as_recarray: - self._start_clock() - result = _to_structured_array(columns, self.header, self.usecols) - self._end_clock('Conversion to structured array') - - return result - else: - return columns + return columns cdef _read_low_memory(self, rows): cdef: @@ -999,7 +986,7 @@ cdef class TextReader: self._start_clock() columns = self._convert_column_data(rows=rows, footer=footer, - upcast_na=not self.as_recarray) + upcast_na=True) self._end_clock('Type conversion') self._start_clock() @@ -2321,77 +2308,6 @@ cdef _apply_converter(object f, parser_t *parser, int64_t col, return lib.maybe_convert_objects(result) -def _to_structured_array(dict columns, object names, object usecols): - cdef: - ndarray recs, column - cnp.dtype dt - dict fields - - object name, fnames, field_type - Py_ssize_t i, offset, nfields, length - int64_t stride, elsize - char *buf - - if 
names is None: - names = ['%d' % i for i in range(len(columns))] - else: - # single line header - names = names[0] - - if usecols is not None: - names = [n for i, n in enumerate(names) - if i in usecols or n in usecols] - - dt = np.dtype([(str(name), columns[i].dtype) - for i, name in enumerate(names)]) - fnames = dt.names - fields = dt.fields - - nfields = len(fields) - - if PY3: - length = len(list(columns.values())[0]) - else: - length = len(columns.values()[0]) - - stride = dt.itemsize - - # We own the data. - buf = <char*> malloc(length * stride) - - recs = sarr_from_data(dt, length, buf) - assert(recs.flags.owndata) - - for i in range(nfields): - # XXX - field_type = fields[fnames[i]] - - # (dtype, stride) tuple - offset = field_type[1] - elsize = field_type[0].itemsize - column = columns[i] - - _fill_structured_column(buf + offset, <char*> column.data, - elsize, stride, length, - field_type[0] == np.object_) - - return recs - - -cdef _fill_structured_column(char *dst, char* src, int64_t elsize, - int64_t stride, int64_t length, bint incref): - cdef: - int64_t i - - if incref: - transfer_object_column(dst, src, stride, length) - else: - for i in range(length): - memcpy(dst, src, elsize) - dst += stride - src += elsize - - def _maybe_encode(values): if values is None: return [] diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 8a9a05723d9fe..de3486eca3e9b 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -75,19 +75,6 @@ PANDAS_INLINE PyObject* char_to_string(char* data) { #endif } -PyObject* sarr_from_data(PyArray_Descr* descr, int length, void* data) { - PyArrayObject* result; - npy_intp dims[1] = {length}; - Py_INCREF(descr); // newfromdescr steals a reference to descr - result = (PyArrayObject*)PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, - NULL, data, 0, NULL); - - // Returned array doesn't own data by default - result->flags |= NPY_OWNDATA; - - return (PyObject*)result; -} - 
void transfer_object_column(char* dst, char* src, size_t stride, size_t length) { size_t i; @@ -105,7 +92,6 @@ void transfer_object_column(char* dst, char* src, size_t stride, } } - void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); } diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 927edbf236366..c2fca1f961222 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -108,14 +108,6 @@ example of a valid callable argument would be ``lambda x: x.upper() in ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster parsing time and lower memory usage. -as_recarray : boolean, default False - .. deprecated:: 0.19.0 - Please call `pd.read_csv(...).to_records()` instead. - - Return a NumPy recarray instead of a DataFrame after parsing the data. - If set to True, this option takes precedence over the `squeeze` parameter. - In addition, as row indices are not available in such a format, the - `index_col` parameter will be ignored. 
squeeze : boolean, default False If the parsed data only contains one column then return a Series prefix : str, default None @@ -506,7 +498,6 @@ def _read(filepath_or_buffer, kwds): _c_parser_defaults = { 'delim_whitespace': False, - 'as_recarray': False, 'na_filter': True, 'compact_ints': False, 'use_unsigned': False, @@ -532,14 +523,12 @@ def _read(filepath_or_buffer, kwds): } _deprecated_defaults = { - 'as_recarray': None, 'buffer_lines': None, 'compact_ints': None, 'use_unsigned': None, 'tupleize_cols': None } _deprecated_args = { - 'as_recarray', 'buffer_lines', 'compact_ints', 'use_unsigned', @@ -614,7 +603,6 @@ def parser_f(filepath_or_buffer, # Internal doublequote=True, delim_whitespace=False, - as_recarray=None, compact_ints=None, use_unsigned=None, low_memory=_c_parser_defaults['low_memory'], @@ -685,7 +673,6 @@ def parser_f(filepath_or_buffer, compact_ints=compact_ints, use_unsigned=use_unsigned, delim_whitespace=delim_whitespace, - as_recarray=as_recarray, warn_bad_lines=warn_bad_lines, error_bad_lines=error_bad_lines, low_memory=low_memory, @@ -971,9 +958,7 @@ def _clean_options(self, options, engine): "and will be removed in a future version." .format(arg=arg)) - if arg == 'as_recarray': - msg += ' Please call pd.to_csv(...).to_records() instead.' 
- elif arg == 'tupleize_cols': + if arg == 'tupleize_cols': msg += (' Column tuples will then ' 'always be converted to MultiIndex.') @@ -1059,9 +1044,6 @@ def read(self, nrows=None): ret = self._engine.read(nrows) - if self.options.get('as_recarray'): - return ret - # May alter columns / col_dict index, columns, col_dict = self._create_index(ret) @@ -1279,7 +1261,6 @@ def __init__(self, kwds): self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') - self.as_recarray = kwds.get('as_recarray', False) self.tupleize_cols = kwds.get('tupleize_cols', False) self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True) self.infer_datetime_format = kwds.pop('infer_datetime_format', False) @@ -1295,9 +1276,6 @@ def __init__(self, kwds): if isinstance(self.header, (list, tuple, np.ndarray)): if not all(map(is_integer, self.header)): raise ValueError("header must be integer or list of integers") - if kwds.get('as_recarray'): - raise ValueError("cannot specify as_recarray when " - "specifying a multi-index header") if kwds.get('usecols'): raise ValueError("cannot specify usecols when " "specifying a multi-index header") @@ -1900,10 +1878,6 @@ def read(self, nrows=None): # Done with first read, next time raise StopIteration self._first_chunk = False - if self.as_recarray: - # what to do if there are leading columns? 
- return data - names = self.names if self._reader.leading_cols: @@ -2306,9 +2280,6 @@ def read(self, rows=None): columns, data = self._do_date_conversions(columns, data) data = self._convert_data(data) - if self.as_recarray: - return self._to_recarray(data, columns) - index, columns = self._make_index(data, alldata, columns, indexnamerow) return index, columns, data @@ -2376,19 +2347,6 @@ def _clean_mapping(mapping): clean_na_fvalues, self.verbose, clean_conv, clean_dtypes) - def _to_recarray(self, data, columns): - dtypes = [] - o = compat.OrderedDict() - - # use the columns to "order" the keys - # in the unordered 'data' dictionary - for col in columns: - dtypes.append((str(col), data[col].dtype)) - o[col] = data[col] - - tuples = lzip(*o.values()) - return np.array(tuples, dtypes) - def _infer_columns(self): names = self.names num_original_columns = 0 diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index 7a1fca55dd51e..e0422249289b7 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -18,7 +18,6 @@ import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas import DataFrame -from pandas import compat from pandas.compat import StringIO, range, lrange @@ -161,25 +160,6 @@ def error(val): assert sum(precise_errors) <= sum(normal_errors) assert max(precise_errors) <= max(normal_errors) - def test_pass_dtype_as_recarray(self): - if compat.is_platform_windows() and self.low_memory: - pytest.skip( - "segfaults on win-64, only when all tests are run") - - data = """\ -one,two -1,2.5 -2,3.5 -3,4.5 -4,5.5""" - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - result = self.read_csv(StringIO(data), dtype={ - 'one': 'u1', 1: 'S1'}, as_recarray=True) - assert result['one'].dtype == 'u1' - assert result['two'].dtype == 'S1' - def test_usecols_dtypes(self): data = """\ 1,2,3 diff --git a/pandas/tests/io/parser/common.py 
b/pandas/tests/io/parser/common.py index 6a996213b28bb..8a1f23d203a32 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -997,23 +997,6 @@ def test_empty_with_nrows_chunksize(self): StringIO('foo,bar\n'), chunksize=10))) tm.assert_frame_equal(result, expected) - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - result = self.read_csv(StringIO('foo,bar\n'), - nrows=10, as_recarray=True) - result = DataFrame(result[2], columns=result[1], - index=result[0]) - tm.assert_frame_equal(DataFrame.from_records( - result), expected, check_index_type=False) - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - result = next(iter(self.read_csv(StringIO('foo,bar\n'), - chunksize=10, as_recarray=True))) - result = DataFrame(result[2], columns=result[1], index=result[0]) - tm.assert_frame_equal(DataFrame.from_records(result), expected, - check_index_type=False) - def test_eof_states(self): # see gh-10728, gh-10548 @@ -1431,93 +1414,6 @@ def test_compact_ints_use_unsigned(self): use_unsigned=True) tm.assert_frame_equal(out, expected) - def test_compact_ints_as_recarray(self): - data = ('0,1,0,0\n' - '1,1,0,0\n' - '0,1,0,1') - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - result = self.read_csv(StringIO(data), delimiter=',', header=None, - compact_ints=True, as_recarray=True) - ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)]) - assert result.dtype == ex_dtype - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - result = self.read_csv(StringIO(data), delimiter=',', header=None, - as_recarray=True, compact_ints=True, - use_unsigned=True) - ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)]) - assert result.dtype == ex_dtype - - def test_as_recarray(self): - # basic test - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - data = 'a,b\n1,a\n2,b' - expected = np.array([(1, 'a'), (2, 'b')], - 
dtype=[('a', '=i8'), ('b', 'O')]) - out = self.read_csv(StringIO(data), as_recarray=True) - tm.assert_numpy_array_equal(out, expected) - - # index_col ignored - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - data = 'a,b\n1,a\n2,b' - expected = np.array([(1, 'a'), (2, 'b')], - dtype=[('a', '=i8'), ('b', 'O')]) - out = self.read_csv(StringIO(data), as_recarray=True, index_col=0) - tm.assert_numpy_array_equal(out, expected) - - # respects names - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - data = '1,a\n2,b' - expected = np.array([(1, 'a'), (2, 'b')], - dtype=[('a', '=i8'), ('b', 'O')]) - out = self.read_csv(StringIO(data), names=['a', 'b'], - header=None, as_recarray=True) - tm.assert_numpy_array_equal(out, expected) - - # header order is respected even though it conflicts - # with the natural ordering of the column names - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - data = 'b,a\n1,a\n2,b' - expected = np.array([(1, 'a'), (2, 'b')], - dtype=[('b', '=i8'), ('a', 'O')]) - out = self.read_csv(StringIO(data), as_recarray=True) - tm.assert_numpy_array_equal(out, expected) - - # overrides the squeeze parameter - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - data = 'a\n1' - expected = np.array([(1,)], dtype=[('a', '=i8')]) - out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True) - tm.assert_numpy_array_equal(out, expected) - - # does data conversions before doing recarray conversion - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - data = 'a,b\n1,a\n2,b' - conv = lambda x: int(x) + 1 - expected = np.array([(2, 'a'), (3, 'b')], - dtype=[('a', '=i8'), ('b', 'O')]) - out = self.read_csv(StringIO(data), as_recarray=True, - converters={'a': conv}) - tm.assert_numpy_array_equal(out, expected) - - # filters by usecols before doing recarray conversion - with tm.assert_produces_warning( - FutureWarning, 
check_stacklevel=False): - data = 'a,b\n1,a\n2,b' - expected = np.array([(1,), (2,)], dtype=[('a', '=i8')]) - out = self.read_csv(StringIO(data), as_recarray=True, - usecols=['a']) - tm.assert_numpy_array_equal(out, expected) - def test_memory_map(self): mmap_file = os.path.join(self.dirpath, 'test_mmap.csv') expected = DataFrame({ diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py index 58dae112c59b7..3fb0650348763 100644 --- a/pandas/tests/io/parser/header.py +++ b/pandas/tests/io/parser/header.py @@ -116,13 +116,6 @@ def test_header_multi_index(self): # INVALID OPTIONS - # no as_recarray - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - pytest.raises(ValueError, self.read_csv, - StringIO(data), header=[0, 1, 2, 3], - index_col=[0, 1], as_recarray=True) - # names pytest.raises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3], diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index f66f9ccf065f7..ab4c14034cd20 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -194,33 +194,6 @@ def test_header_not_enough_lines(self): 2: np.array([3, 6], dtype=np.int64)} assert_array_dicts_equal(recs, expected) - # not enough rows - pytest.raises(parser.ParserError, TextReader, StringIO(data), - delimiter=',', header=5, as_recarray=True) - - def test_header_not_enough_lines_as_recarray(self): - data = ('skip this\n' - 'skip this\n' - 'a,b,c\n' - '1,2,3\n' - '4,5,6') - - reader = TextReader(StringIO(data), delimiter=',', - header=2, as_recarray=True) - header = reader.header - expected = [['a', 'b', 'c']] - assert header == expected - - recs = reader.read() - expected = {'a': np.array([1, 4], dtype=np.int64), - 'b': np.array([2, 5], dtype=np.int64), - 'c': np.array([3, 6], dtype=np.int64)} - assert_array_dicts_equal(expected, recs) - - # not enough rows - pytest.raises(parser.ParserError, TextReader, 
StringIO(data), - delimiter=',', header=5, as_recarray=True) - def test_escapechar(self): data = ('\\"hello world\"\n' '\\"hello world\"\n' @@ -267,25 +240,6 @@ def _make_reader(**kwds): assert (result[0] == ex_values).all() assert result[1].dtype == 'S4' - def test_numpy_string_dtype_as_recarray(self): - data = """\ -a,1 -aa,2 -aaa,3 -aaaa,4 -aaaaa,5""" - - def _make_reader(**kwds): - return TextReader(StringIO(data), delimiter=',', header=None, - **kwds) - - reader = _make_reader(dtype='S4', as_recarray=True) - result = reader.read() - assert result['0'].dtype == 'S4' - ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4') - assert (result['0'] == ex_values).all() - assert result['1'].dtype == 'S4' - def test_pass_dtype(self): data = """\ one,two diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index ab5d8a7595c96..b944322b1ed40 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -128,9 +128,7 @@ def read(self): class TestDeprecatedFeatures(object): @pytest.mark.parametrize("engine", ["c", "python"]) - @pytest.mark.parametrize("kwargs", [{"as_recarray": True}, - {"as_recarray": False}, - {"buffer_lines": True}, + @pytest.mark.parametrize("kwargs", [{"buffer_lines": True}, {"buffer_lines": False}, {"compact_ints": True}, {"compact_ints": False},
Deprecated back in 0.19.0 xref #13373.
https://api.github.com/repos/pandas-dev/pandas/pulls/18804
2017-12-16T05:38:57Z
2017-12-18T12:16:09Z
2017-12-18T12:16:09Z
2017-12-18T17:13:15Z
DEPR: Deprecate Series.valid
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 9dc10a09378f8..c4a7bab0f9406 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -203,6 +203,7 @@ Deprecations - ``Series.from_array`` and ``SparseSeries.from_array`` are deprecated. Use the normal constructor ``Series(..)`` and ``SparseSeries(..)`` instead (:issue:`18213`). - ``DataFrame.as_matrix`` is deprecated. Use ``DataFrame.values`` instead (:issue:`18458`). - ``Series.asobject``, ``DatetimeIndex.asobject``, ``PeriodIndex.asobject`` and ``TimeDeltaIndex.asobject`` have been deprecated. Use ``.astype(object)`` instead (:issue:`18572`) +- ``Series.valid`` is deprecated. Use :meth:`Series.dropna` instead (:issue:`18800`). .. _whatsnew_0220.prior_deprecations: diff --git a/pandas/core/series.py b/pandas/core/series.py index 19c84c34d7d1d..a3e7be1bfb35a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -150,7 +150,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _accessors = frozenset(['dt', 'cat', 'str']) _deprecations = generic.NDFrame._deprecations | frozenset( ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value', - 'from_csv']) + 'from_csv', 'valid']) _allow_index_ops = True def __init__(self, data=None, index=None, dtype=None, name=None, @@ -3006,8 +3006,13 @@ def dropna(self, axis=0, inplace=False, **kwargs): else: return self.copy() - valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace, - **kwargs) + def valid(self, inplace=False, **kwargs): + """DEPRECATED. Series.valid will be removed in a future version. + Use :meth:`Series.dropna` instead. + """ + warnings.warn("Method .valid will be removed in a future version. 
" + "Use .dropna instead.", FutureWarning, stacklevel=2) + return self.dropna(inplace=inplace, **kwargs) @Appender(generic._shared_docs['valid_index'] % { 'position': 'first', 'klass': 'Series'}) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index b5ce3efe9f85d..8a38b1054a1f5 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -701,7 +701,7 @@ def dropna(self, axis=0, inplace=False, **kwargs): """ # TODO: make more efficient axis = self._get_axis_number(axis or 0) - dense_valid = self.to_dense().valid() + dense_valid = self.to_dense().dropna() if inplace: raise NotImplementedError("Cannot perform inplace dropna" " operations on a SparseSeries") diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 430f3e12ae32e..430562ce727da 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -766,10 +766,10 @@ def test_combineFrame(self): added = self.frame + frame_copy - indexer = added['A'].valid().index + indexer = added['A'].dropna().index exp = (self.frame['A'] * 2).copy() - tm.assert_series_equal(added['A'].valid(), exp.loc[indexer]) + tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer]) exp.loc[~exp.index.isin(indexer)] = np.nan tm.assert_series_equal(added['A'], exp.loc[added['A'].index]) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index d6d5ccc6487c4..3af798acdede5 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -213,7 +213,7 @@ def test_shift(self): unshifted = shifted.shift(-1) tm.assert_index_equal(shifted.index, ps.index) tm.assert_index_equal(unshifted.index, ps.index) - tm.assert_numpy_array_equal(unshifted.iloc[:, 0].valid().values, + tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values) shifted2 = ps.shift(1, 'B') diff --git a/pandas/tests/generic/test_series.py 
b/pandas/tests/generic/test_series.py index e5c0708e35c51..701d174f3e929 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -222,3 +222,8 @@ def test_to_xarray(self): assert_almost_equal(list(result.coords.keys()), ['one', 'two']) assert isinstance(result, DataArray) assert_series_equal(result.to_series(), s) + + def test_valid_deprecated(self): + # GH18800 + with tm.assert_produces_warning(FutureWarning): + pd.Series([]).valid() diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 289b5c01c1263..99cf5c623646c 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -396,7 +396,7 @@ def test_cummin(self): ts = self.ts.copy() ts[::2] = np.NaN result = ts.cummin()[1::2] - expected = np.minimum.accumulate(ts.valid()) + expected = np.minimum.accumulate(ts.dropna()) tm.assert_series_equal(result, expected) @@ -406,7 +406,7 @@ def test_cummax(self): ts = self.ts.copy() ts[::2] = np.NaN result = ts.cummax()[1::2] - expected = np.maximum.accumulate(ts.valid()) + expected = np.maximum.accumulate(ts.dropna()) tm.assert_series_equal(result, expected) @@ -570,7 +570,7 @@ def _check_accum_op(self, name, check_dtype=True): ts[::2] = np.NaN result = func(ts)[1::2] - expected = func(np.array(ts.valid())) + expected = func(np.array(ts.dropna())) tm.assert_numpy_array_equal(result.values, expected, check_dtype=False) @@ -1530,7 +1530,7 @@ def test_shift_categorical(self): # GH 9416 s = pd.Series(['a', 'b', 'c', 'd'], dtype='category') - assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid()) + assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna()) sp1 = s.shift(1) assert_index_equal(s.index, sp1.index) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index b23dc37016b69..8c05888b80781 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -694,7 +694,7 @@ def 
test_valid(self): ts = self.ts.copy() ts[::2] = np.NaN - result = ts.valid() + result = ts.dropna() assert len(result) == ts.count() tm.assert_series_equal(result, ts[1::2]) tm.assert_series_equal(result, ts[pd.notna(ts)]) diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index cf5e3fe4f29b0..14a44c36c6a0c 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -17,14 +17,14 @@ class TestSeriesQuantile(TestData): def test_quantile(self): q = self.ts.quantile(0.1) - assert q == np.percentile(self.ts.valid(), 10) + assert q == np.percentile(self.ts.dropna(), 10) q = self.ts.quantile(0.9) - assert q == np.percentile(self.ts.valid(), 90) + assert q == np.percentile(self.ts.dropna(), 90) # object dtype q = Series(self.ts, dtype=object).quantile(0.9) - assert q == np.percentile(self.ts.valid(), 90) + assert q == np.percentile(self.ts.dropna(), 90) # datetime64[ns] dtype dts = self.ts.index.to_series() @@ -49,8 +49,8 @@ def test_quantile_multi(self): qs = [.1, .9] result = self.ts.quantile(qs) - expected = pd.Series([np.percentile(self.ts.valid(), 10), - np.percentile(self.ts.valid(), 90)], + expected = pd.Series([np.percentile(self.ts.dropna(), 10), + np.percentile(self.ts.dropna(), 90)], index=qs, name=self.ts.name) tm.assert_series_equal(result, expected) @@ -72,9 +72,9 @@ def test_quantile_interpolation(self): # interpolation = linear (default case) q = self.ts.quantile(0.1, interpolation='linear') - assert q == np.percentile(self.ts.valid(), 10) + assert q == np.percentile(self.ts.dropna(), 10) q1 = self.ts.quantile(0.1) - assert q1 == np.percentile(self.ts.valid(), 10) + assert q1 == np.percentile(self.ts.dropna(), 10) # test with and without interpolation keyword assert q == q1 diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 310412e53bd1c..01b4ea6eaa238 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py 
@@ -50,10 +50,10 @@ def test_sort_values(self): # ascending=False ordered = ts.sort_values(ascending=False) - expected = np.sort(ts.valid().values)[::-1] - assert_almost_equal(expected, ordered.valid().values) + expected = np.sort(ts.dropna().values)[::-1] + assert_almost_equal(expected, ordered.dropna().values) ordered = ts.sort_values(ascending=False, na_position='first') - assert_almost_equal(expected, ordered.valid().values) + assert_almost_equal(expected, ordered.dropna().values) # ascending=[False] should behave the same as ascending=False ordered = ts.sort_values(ascending=[False]) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 95410c6ea0105..2e3a7a6c28a11 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -42,7 +42,7 @@ def test_shift(self): tm.assert_index_equal(shifted.index, self.ts.index) tm.assert_index_equal(unshifted.index, self.ts.index) - tm.assert_numpy_array_equal(unshifted.valid().values, + tm.assert_numpy_array_equal(unshifted.dropna().values, self.ts.values[:-1]) offset = BDay() @@ -69,7 +69,7 @@ def test_shift(self): unshifted = shifted.shift(-1) tm.assert_index_equal(shifted.index, ps.index) tm.assert_index_equal(unshifted.index, ps.index) - tm.assert_numpy_array_equal(unshifted.valid().values, ps.values[:-1]) + tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1]) shifted2 = ps.shift(1, 'B') shifted3 = ps.shift(1, BDay()) diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 1dc1c7f1575cc..32c56263c7522 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -796,9 +796,9 @@ def _compare_all(obj): def test_dropna(self): sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0) - sp_valid = sp.valid() + sp_valid = sp.dropna() - expected = sp.to_dense().valid() + expected = sp.to_dense().dropna() expected = expected[expected != 0] exp_arr = 
pd.SparseArray(expected.values, fill_value=0, kind='block') tm.assert_sp_array_equal(sp_valid.values, exp_arr)
- [x] xref #18262 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR deprecates the ``valid`` method on ``pd.Series``. This method duplicates ``dropna`` and should be deprecated according to #18262.
https://api.github.com/repos/pandas-dev/pandas/pulls/18800
2017-12-15T22:43:45Z
2017-12-16T10:42:46Z
2017-12-16T10:42:46Z
2018-02-02T17:06:21Z
BUG: Convert data elements when dtype=str in Series constructor with …
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..1a19033ac61ee 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -277,6 +277,7 @@ Conversion - Fixed a bug where ``FY5253`` date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) - Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) - Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) +- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) Indexing diff --git a/pandas/core/series.py b/pandas/core/series.py index a3e7be1bfb35a..a1d296deae319 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3277,6 +3277,11 @@ def _try_cast(arr, take_fast_path): # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. 
if issubclass(subarr.dtype.type, compat.string_types): + # GH 16605 + # If not empty convert the data to dtype + if not isna(data).all(): + data = np.array(data, dtype=dtype, copy=False) + subarr = np.array(data, dtype=object, copy=copy) return subarr diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 610b9f7bdbf6c..21c028e634bc0 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -676,6 +676,25 @@ def test_arg_for_errors_in_astype(self): df.astype(np.int8, errors='ignore') + @pytest.mark.parametrize('input_vals', [ + ([1, 2]), + ([1.0, 2.0, np.nan]), + (['1', '2']), + (list(pd.date_range('1/1/2011', periods=2, freq='H'))), + (list(pd.date_range('1/1/2011', periods=2, freq='H', + tz='US/Eastern'))), + ([pd.Interval(left=0, right=5)]), + ]) + def test_constructor_list_str(self, input_vals): + # GH 16605 + # Ensure that data elements are converted to strings when + # dtype is str, 'str', or 'U' + + for dtype in ['str', str, 'U']: + result = DataFrame({'A': input_vals}, dtype=dtype) + expected = DataFrame({'A': input_vals}).astype({'A': dtype}) + assert_frame_equal(result, expected) + class TestDataFrameDatetimeWithTZ(TestData): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f9842514ed5e5..08416fe34efcc 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -142,6 +142,25 @@ def test_constructor_list_like(self): result = Series(obj, index=[0, 1, 2]) assert_series_equal(result, expected) + @pytest.mark.parametrize('input_vals', [ + ([1, 2]), + ([1.0, 2.0, np.nan]), + (['1', '2']), + (list(pd.date_range('1/1/2011', periods=2, freq='H'))), + (list(pd.date_range('1/1/2011', periods=2, freq='H', + tz='US/Eastern'))), + ([pd.Interval(left=0, right=5)]), + ]) + def test_constructor_list_str(self, input_vals): + # GH 16605 + # Ensure that data elements from a list are converted to strings + # when 
dtype is str, 'str', or 'U' + + for dtype in ['str', str, 'U']: + result = Series(input_vals, dtype=dtype) + expected = Series(input_vals).astype(dtype) + assert_series_equal(result, expected) + def test_constructor_generator(self): gen = (i for i in range(10))
…int/float list - [ ] closes #16605 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Not sure if my solution is correct but it seems to resolve the issue and pass the tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/18795
2017-12-15T17:08:36Z
2017-12-21T15:18:34Z
2017-12-21T15:18:34Z
2017-12-21T16:22:38Z
Skipif no scipy
diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index e0be34b14a97d..2146704fea95f 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -9,6 +9,7 @@ import pandas.core.dtypes.common as com import pandas.util.testing as tm +import pandas.util._test_decorators as td class TestPandasDtype(object): @@ -132,21 +133,22 @@ def test_is_object(): assert not com.is_object_dtype([1, 2, 3]) -def test_is_sparse(): +@pytest.mark.parametrize("check_scipy", [ + False, pytest.param(True, marks=td.skip_if_no_scipy) +]) +def test_is_sparse(check_scipy): assert com.is_sparse(pd.SparseArray([1, 2, 3])) assert com.is_sparse(pd.SparseSeries([1, 2, 3])) assert not com.is_sparse(np.array([1, 2, 3])) - # This test will only skip if the previous assertions - # pass AND scipy is not installed. - sparse = pytest.importorskip("scipy.sparse") - assert not com.is_sparse(sparse.bsr_matrix([1, 2, 3])) + if check_scipy: + import scipy.sparse + assert not com.is_sparse(scipy.sparse.bsr_matrix([1, 2, 3])) +@td.skip_if_no_scipy def test_is_scipy_sparse(): - tm._skip_if_no_scipy() - from scipy.sparse import bsr_matrix assert com.is_scipy_sparse(bsr_matrix([1, 2, 3])) @@ -501,7 +503,10 @@ def test_is_bool_dtype(): assert com.is_bool_dtype(pd.Index([True, False])) -def test_is_extension_type(): +@pytest.mark.parametrize("check_scipy", [ + False, pytest.param(True, marks=td.skip_if_no_scipy) +]) +def test_is_extension_type(check_scipy): assert not com.is_extension_type([1, 2, 3]) assert not com.is_extension_type(np.array([1, 2, 3])) assert not com.is_extension_type(pd.DatetimeIndex([1, 2, 3])) @@ -517,10 +522,9 @@ def test_is_extension_type(): s = pd.Series([], dtype=dtype) assert com.is_extension_type(s) - # This test will only skip if the previous assertions - # pass AND scipy is not installed. 
- sparse = pytest.importorskip("scipy.sparse") - assert not com.is_extension_type(sparse.bsr_matrix([1, 2, 3])) + if check_scipy: + import scipy.sparse + assert not com.is_extension_type(scipy.sparse.bsr_matrix([1, 2, 3])) def test_is_complex_dtype(): diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 577f4238c2328..e8bdd2a551a34 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -37,6 +37,7 @@ _ensure_int32, _ensure_categorical) from pandas.util import testing as tm +import pandas.util._test_decorators as td @pytest.fixture(params=[True, False], ids=str) @@ -1190,8 +1191,8 @@ def test_nan_to_nat_conversions(): assert (s[8].value == np.datetime64('NaT').astype(np.int64)) +@td.skip_if_no_scipy def test_is_scipy_sparse(spmatrix): # noqa: F811 - tm._skip_if_no_scipy() assert is_scipy_sparse(spmatrix([[0, 1]])) assert not is_scipy_sparse(np.array([1])) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 7014929db4c2d..17d711f937bf7 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -22,6 +22,7 @@ import pandas.io.formats.printing as printing import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.tests.frame.common import TestData @@ -30,22 +31,22 @@ class TestDataFrameAnalytics(TestData): # ---------------------------------------------------------------------= # Correlation and covariance + @td.skip_if_no_scipy def test_corr_pearson(self): - tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan self._check_method('pearson') + @td.skip_if_no_scipy def test_corr_kendall(self): - tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan self._check_method('kendall') + @td.skip_if_no_scipy def test_corr_spearman(self): - tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan @@ -62,8 +63,8 @@ def 
_check_method(self, method='pearson', check_minp=False): expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan tm.assert_frame_equal(result, expected) + @td.skip_if_no_scipy def test_corr_non_numeric(self): - tm._skip_if_no_scipy() self.frame['A'][:5] = nan self.frame['B'][5:10] = nan @@ -72,9 +73,8 @@ def test_corr_non_numeric(self): expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr() tm.assert_frame_equal(result, expected) + @td.skip_if_no_scipy def test_corr_nooverlap(self): - tm._skip_if_no_scipy() - # nothing in common for meth in ['pearson', 'kendall', 'spearman']: df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan], @@ -88,9 +88,8 @@ def test_corr_nooverlap(self): assert rs.loc['B', 'B'] == 1 assert isna(rs.loc['C', 'C']) + @td.skip_if_no_scipy def test_corr_constant(self): - tm._skip_if_no_scipy() - # constant --> all NA for meth in ['pearson', 'spearman']: @@ -106,9 +105,8 @@ def test_corr_int(self): df3.cov() df3.corr() + @td.skip_if_no_scipy def test_corr_int_and_boolean(self): - tm._skip_if_no_scipy() - # when dtypes of pandas series are different # then ndarray will have dtype=object, # so it need to be properly handled @@ -719,8 +717,8 @@ def test_sem(self): result = nanops.nansem(arr, axis=0) assert not (result < 0).any() + @td.skip_if_no_scipy def test_skew(self): - tm._skip_if_no_scipy() from scipy.stats import skew def alt(x): @@ -730,9 +728,8 @@ def alt(x): self._check_stat_op('skew', alt) + @td.skip_if_no_scipy def test_kurt(self): - tm._skip_if_no_scipy() - from scipy.stats import kurtosis def alt(x): diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 7e8869cbdeefb..2e4e8b9582cf6 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -16,6 +16,7 @@ from pandas.util.testing import assert_series_equal, assert_frame_equal import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.tests.frame.common import TestData, 
_check_mixed_float @@ -646,9 +647,8 @@ def test_interp_nan_idx(self): with pytest.raises(NotImplementedError): df.interpolate(method='values') + @td.skip_if_no_scipy def test_interp_various(self): - tm._skip_if_no_scipy() - df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7], 'C': [1, 2, 3, 5, 8, 13, 21]}) df = df.set_index('C') @@ -695,8 +695,8 @@ def test_interp_various(self): expected.A.loc[13] = 5 assert_frame_equal(result, expected, check_dtype=False) + @td.skip_if_no_scipy def test_interp_alt_scipy(self): - tm._skip_if_no_scipy() df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7], 'C': [1, 2, 3, 5, 8, 13, 21]}) result = df.interpolate(method='barycentric') @@ -739,8 +739,6 @@ def test_interp_rowwise(self): expected[4] = expected[4].astype(np.float64) assert_frame_equal(result, expected) - # scipy route - tm._skip_if_no_scipy() result = df.interpolate(axis=1, method='values') assert_frame_equal(result, expected) @@ -753,7 +751,10 @@ def test_rowwise_alt(self): 1: [1, 2, 3, 4, 3, 2, 1, 0, -1]}) df.interpolate(axis=0) - def test_interp_leading_nans(self): + @pytest.mark.parametrize("check_scipy", [ + False, pytest.param(True, marks=td.skip_if_no_scipy) + ]) + def test_interp_leading_nans(self, check_scipy): df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0], "B": [np.nan, -3, -3.5, np.nan, -4]}) result = df.interpolate() @@ -761,9 +762,9 @@ def test_interp_leading_nans(self): expected['B'].loc[3] = -3.75 assert_frame_equal(result, expected) - tm._skip_if_no_scipy() - result = df.interpolate(method='polynomial', order=1) - assert_frame_equal(result, expected) + if check_scipy: + result = df.interpolate(method='polynomial', order=1) + assert_frame_equal(result, expected) def test_interp_raise_on_only_mixed(self): df = DataFrame({'A': [1, 2, np.nan, 4], diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index e87c67a682d46..7f9ae7b8bb1da 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ 
b/pandas/tests/plotting/test_datetimelike.py @@ -643,10 +643,10 @@ def test_secondary_y_ts(self): assert ax.get_yaxis().get_visible() @pytest.mark.slow + @td.skip_if_no_scipy def test_secondary_kde(self): if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() ser = Series(np.random.randn(10)) diff --git a/pandas/tests/plotting/test_deprecated.py b/pandas/tests/plotting/test_deprecated.py index d2f8e13a2444b..2c2d371921d2f 100644 --- a/pandas/tests/plotting/test_deprecated.py +++ b/pandas/tests/plotting/test_deprecated.py @@ -24,9 +24,8 @@ class TestDeprecatedNameSpace(TestPlotBase): @pytest.mark.slow + @td.skip_if_no_scipy def test_scatter_plot_legacy(self): - tm._skip_if_no_scipy() - df = pd.DataFrame(randn(100, 2)) with tm.assert_produces_warning(FutureWarning): diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 3b3f6666340b8..d61b0a40380f3 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1398,8 +1398,8 @@ def test_boxplot_subplots_return_type(self): check_ax_title=False) @pytest.mark.slow + @td.skip_if_no_scipy def test_kde_df(self): - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -1422,8 +1422,8 @@ def test_kde_df(self): self._check_ax_scales(axes, yaxis='log') @pytest.mark.slow + @td.skip_if_no_scipy def test_kde_missing_vals(self): - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -1949,8 +1949,8 @@ def test_hist_colors(self): tm.close() @pytest.mark.slow + @td.skip_if_no_scipy def test_kde_colors(self): - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -1974,8 +1974,8 @@ def test_kde_colors(self): self._check_colors(ax.get_lines(), linecolors=rgba_colors) @pytest.mark.slow + 
@td.skip_if_no_scipy def test_kde_colors_and_styles_subplots(self): - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 1753bc6387d33..bb590d5232b62 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -52,8 +52,8 @@ def test_bootstrap_plot(self): @td.skip_if_no_mpl class TestDataFramePlots(TestPlotBase): + @td.skip_if_no_scipy def test_scatter_matrix_axis(self): - tm._skip_if_no_scipy() scatter_matrix = plotting.scatter_matrix with tm.RNGContext(42): diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 6dd7e1e9882b2..2458fc0dc992c 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -589,6 +589,7 @@ def test_plot_fails_with_dupe_color_and_style(self): x.plot(style='k--', color='k', ax=ax) @pytest.mark.slow + @td.skip_if_no_scipy def test_hist_kde(self): if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -602,7 +603,6 @@ def test_hist_kde(self): ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() _check_plot_works(self.ts.plot.kde) _check_plot_works(self.ts.plot.density) @@ -615,8 +615,8 @@ def test_hist_kde(self): self._check_text_labels(ylabels, [''] * len(ylabels)) @pytest.mark.slow + @td.skip_if_no_scipy def test_kde_kwargs(self): - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -633,8 +633,8 @@ def test_kde_kwargs(self): self._check_text_labels(ax.yaxis.get_label(), 'Density') @pytest.mark.slow + @td.skip_if_no_scipy def test_kde_missing_vals(self): - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -665,6 +665,7 @@ def 
test_hist_kwargs(self): tm.close() @pytest.mark.slow + @td.skip_if_no_scipy def test_hist_kde_color(self): if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") @@ -675,7 +676,6 @@ def test_hist_kde_color(self): assert len(ax.patches) == 10 self._check_colors(ax.patches, facecolors=['b'] * 10) - tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() _, ax = self.plt.subplots() ax = self.ts.plot.kde(logy=True, color='r', ax=ax) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 289b5c01c1263..b206d84c0d6f8 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -23,6 +23,7 @@ from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, assert_index_equal) import pandas.util.testing as tm +import pandas.util._test_decorators as td from .common import TestData @@ -282,9 +283,8 @@ def test_sem(self): result = s.sem(ddof=1) assert isna(result) + @td.skip_if_no_scipy def test_skew(self): - tm._skip_if_no_scipy() - from scipy.stats import skew alt = lambda x: skew(x, bias=False) self._check_stat_op('skew', alt) @@ -302,9 +302,8 @@ def test_skew(self): assert 0 == s.skew() assert (df.skew() == 0).all() + @td.skip_if_no_scipy def test_kurt(self): - tm._skip_if_no_scipy() - from scipy.stats import kurtosis alt = lambda x: kurtosis(x, bias=False) self._check_stat_op('kurt', alt) @@ -708,9 +707,8 @@ def test_modulo(self): expected = Series([nan, 0.0]) assert_series_equal(result, expected) + @td.skip_if_no_scipy def test_corr(self): - tm._skip_if_no_scipy() - import scipy.stats as stats # full overlap @@ -739,9 +737,8 @@ def test_corr(self): expected, _ = stats.pearsonr(A, B) tm.assert_almost_equal(result, expected) + @td.skip_if_no_scipy def test_corr_rank(self): - tm._skip_if_no_scipy() - import scipy import scipy.stats as stats diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 
b23dc37016b69..04fdebad6165b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -19,6 +19,7 @@ from pandas.core.series import remove_na from pandas.util.testing import assert_series_equal, assert_frame_equal import pandas.util.testing as tm +import pandas.util._test_decorators as td from .common import TestData @@ -853,8 +854,8 @@ def test_interpolate(self): non_ts[0] = np.NaN pytest.raises(ValueError, non_ts.interpolate, method='time') + @td.skip_if_no_scipy def test_interpolate_pchip(self): - tm._skip_if_no_scipy() _skip_if_no_pchip() ser = Series(np.sort(np.random.uniform(size=100))) @@ -866,8 +867,8 @@ def test_interpolate_pchip(self): # does not blow up, GH5977 interp_s[49:51] + @td.skip_if_no_scipy def test_interpolate_akima(self): - tm._skip_if_no_scipy() _skip_if_no_akima() ser = Series([10, 11, 12, 13]) @@ -881,9 +882,8 @@ def test_interpolate_akima(self): interp_s = ser.reindex(new_index).interpolate(method='akima') assert_series_equal(interp_s[1:3], expected) + @td.skip_if_no_scipy def test_interpolate_piecewise_polynomial(self): - tm._skip_if_no_scipy() - ser = Series([10, 11, 12, 13]) expected = Series([11.00, 11.25, 11.50, 11.75, @@ -896,9 +896,8 @@ def test_interpolate_piecewise_polynomial(self): method='piecewise_polynomial') assert_series_equal(interp_s[1:3], expected) + @td.skip_if_no_scipy def test_interpolate_from_derivatives(self): - tm._skip_if_no_scipy() - ser = Series([10, 11, 12, 13]) expected = Series([11.00, 11.25, 11.50, 11.75, @@ -911,19 +910,17 @@ def test_interpolate_from_derivatives(self): method='from_derivatives') assert_series_equal(interp_s[1:3], expected) - def test_interpolate_corners(self): - s = Series([np.nan, np.nan]) - assert_series_equal(s.interpolate(), s) - - s = Series([]).interpolate() - assert_series_equal(s.interpolate(), s) - - tm._skip_if_no_scipy() + @pytest.mark.parametrize("kwargs", [ + {}, + pytest.param({'method': 'polynomial', 'order': 1}, + 
marks=td.skip_if_no_scipy) + ]) + def test_interpolate_corners(self, kwargs): s = Series([np.nan, np.nan]) - assert_series_equal(s.interpolate(method='polynomial', order=1), s) + assert_series_equal(s.interpolate(**kwargs), s) s = Series([]).interpolate() - assert_series_equal(s.interpolate(method='polynomial', order=1), s) + assert_series_equal(s.interpolate(**kwargs), s) def test_interpolate_index_values(self): s = Series(np.nan, index=np.sort(np.random.rand(30))) @@ -953,17 +950,17 @@ def test_interpolate_non_ts(self): with pytest.raises(ValueError): s.interpolate(method='time') - # New interpolation tests - def test_nan_interpolate(self): + @pytest.mark.parametrize("kwargs", [ + {}, + pytest.param({'method': 'polynomial', 'order': 1}, + marks=td.skip_if_no_scipy) + ]) + def test_nan_interpolate(self, kwargs): s = Series([0, 1, np.nan, 3]) - result = s.interpolate() + result = s.interpolate(**kwargs) expected = Series([0., 1., 2., 3.]) assert_series_equal(result, expected) - tm._skip_if_no_scipy() - result = s.interpolate(method='polynomial', order=1) - assert_series_equal(result, expected) - def test_nan_irregular_index(self): s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9]) result = s.interpolate() @@ -976,16 +973,15 @@ def test_nan_str_index(self): expected = Series([0., 1., 2., 2.], index=list('abcd')) assert_series_equal(result, expected) + @td.skip_if_no_scipy def test_interp_quad(self): - tm._skip_if_no_scipy() sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4]) result = sq.interpolate(method='quadratic') expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4]) assert_series_equal(result, expected) + @td.skip_if_no_scipy def test_interp_scipy_basic(self): - tm._skip_if_no_scipy() - s = Series([1, 3, np.nan, 12, np.nan, 25]) # slinear expected = Series([1., 3., 7.5, 12., 18.5, 25.]) @@ -1147,9 +1143,8 @@ def test_interp_limit_before_ends(self): limit_direction='both') assert_series_equal(result, expected) + @td.skip_if_no_scipy def 
test_interp_all_good(self): - # scipy - tm._skip_if_no_scipy() s = Series([1, 2, 3]) result = s.interpolate(method='polynomial', order=1) assert_series_equal(result, s) @@ -1158,7 +1153,11 @@ def test_interp_all_good(self): result = s.interpolate() assert_series_equal(result, s) - def test_interp_multiIndex(self): + @pytest.mark.parametrize("check_scipy", [ + False, + pytest.param(True, marks=td.skip_if_no_scipy) + ]) + def test_interp_multiIndex(self, check_scipy): idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')]) s = Series([1, 2, np.nan], index=idx) @@ -1167,18 +1166,18 @@ def test_interp_multiIndex(self): result = s.interpolate() assert_series_equal(result, expected) - tm._skip_if_no_scipy() - with pytest.raises(ValueError): - s.interpolate(method='polynomial', order=1) + if check_scipy: + with pytest.raises(ValueError): + s.interpolate(method='polynomial', order=1) + @td.skip_if_no_scipy def test_interp_nonmono_raise(self): - tm._skip_if_no_scipy() s = Series([1, np.nan, 3], index=[0, 2, 1]) with pytest.raises(ValueError): s.interpolate(method='krogh') + @td.skip_if_no_scipy def test_interp_datetime64(self): - tm._skip_if_no_scipy() df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3)) result = df.interpolate(method='nearest') expected = Series([1., 1., 3.], @@ -1192,25 +1191,22 @@ def test_interp_limit_no_nans(self): expected = s assert_series_equal(result, expected) - def test_no_order(self): - tm._skip_if_no_scipy() + @td.skip_if_no_scipy + @pytest.mark.parametrize("method", ['polynomial', 'spline']) + def test_no_order(self, method): s = Series([0, 1, np.nan, 3]) with pytest.raises(ValueError): - s.interpolate(method='polynomial') - with pytest.raises(ValueError): - s.interpolate(method='spline') + s.interpolate(method=method) + @td.skip_if_no_scipy def test_spline(self): - tm._skip_if_no_scipy() s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) result = s.interpolate(method='spline', order=1) expected = Series([1., 2., 3., 4., 5., 6., 
7.]) assert_series_equal(result, expected) + @td.skip_if_no('scipy', min_version='0.15') def test_spline_extrapolate(self): - tm.skip_if_no_package( - 'scipy', min_version='0.15', - app='setting ext on scipy.interpolate.UnivariateSpline') s = Series([1, 2, 3, 4, np.nan, 6, np.nan]) result3 = s.interpolate(method='spline', order=1, ext=3) expected3 = Series([1., 2., 3., 4., 5., 6., 6.]) @@ -1220,25 +1216,23 @@ def test_spline_extrapolate(self): expected1 = Series([1., 2., 3., 4., 5., 6., 7.]) assert_series_equal(result1, expected1) + @td.skip_if_no_scipy def test_spline_smooth(self): - tm._skip_if_no_scipy() s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7]) assert (s.interpolate(method='spline', order=3, s=0)[5] != s.interpolate(method='spline', order=3)[5]) + @td.skip_if_no_scipy def test_spline_interpolation(self): - tm._skip_if_no_scipy() - s = Series(np.arange(10) ** 2) s[np.random.randint(0, 9, 3)] = np.nan result1 = s.interpolate(method='spline', order=1) expected1 = s.interpolate(method='spline', order=1) assert_series_equal(result1, expected1) + @td.skip_if_no_scipy def test_spline_error(self): # see gh-10633 - tm._skip_if_no_scipy() - s = pd.Series(np.arange(10) ** 2) s[np.random.randint(0, 9, 3)] = np.nan with pytest.raises(ValueError): diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 1dc1c7f1575cc..7deeedacd1b23 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -13,6 +13,7 @@ isna, compat, _np_version_under1p12) from pandas.tseries.offsets import BDay import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.compat import range from pandas.core.reshape.util import cartesian_product @@ -997,11 +998,11 @@ def test_round_trip_preserve_multiindex_names(self): check_names=True) +@td.skip_if_no_scipy class TestSparseSeriesScipyInteraction(object): # Issue 8048: add SparseSeries coo methods def setup_method(self, method): - tm._skip_if_no_scipy() import 
scipy.sparse # SparseSeries inputs used in tests, the tests rely on the order self.sparse_series = [] @@ -1108,7 +1109,6 @@ def test_from_coo_nodense_index(self): def test_from_coo_long_repr(self): # GH 13114 # test it doesn't raise error. Formatting is tested in test_format - tm._skip_if_no_scipy() import scipy.sparse sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18)) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 7ef77e4c78e10..d7fc5033bab90 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -19,6 +19,7 @@ import pandas.core.algorithms as algos from pandas.core.common import _asarray_tuplesafe import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.core.dtypes.dtypes import CategoricalDtype as CDT from pandas.compat.numpy import np_array_datetime64_compat from pandas.util.testing import assert_almost_equal @@ -1109,8 +1110,8 @@ def test_unique_label_indices(): class TestRank(object): + @td.skip_if_no_scipy def test_scipy_compat(self): - tm._skip_if_no_scipy() from scipy.stats import rankdata def _check(arr): diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index f51bf0c4e476a..040c3adbcaf93 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -26,6 +26,7 @@ makeCustomDataframe as mkdf) import pandas.core.panel as panelm import pandas.util.testing as tm +import pandas.util._test_decorators as td def make_test_panel(): @@ -104,11 +105,9 @@ def test_min(self): def test_max(self): self._check_stat_op('max', np.max) + @td.skip_if_no_scipy def test_skew(self): - try: - from scipy.stats import skew - except ImportError: - pytest.skip("no scipy.stats.skew") + from scipy.stats import skew def this_skew(x): if len(x) < 3: diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index c42bedebe2f23..b064e3c7012bc 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -16,6 +16,7 @@ from 
pandas.util.testing import (assert_frame_equal, assert_series_equal, assert_almost_equal) import pandas.util.testing as tm +import pandas.util._test_decorators as td def add_nans(panel4d): @@ -59,11 +60,9 @@ def test_min(self): def test_max(self): self._check_stat_op('max', np.max) + @td.skip_if_no_scipy def test_skew(self): - try: - from scipy.stats import skew - except ImportError: - pytest.skip("no scipy.stats.skew") + from scipy.stats import skew def this_skew(x): if len(x) < 3: diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 661c7dfcc50fc..bee925823eebe 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -17,6 +17,7 @@ from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.compat import range, zip, PY3 N, K = 100, 10 @@ -251,9 +252,8 @@ def test_count_nonnumeric_types(self): expected = df.notna().astype(float) tm.assert_frame_equal(result, expected) + @td.skip_if_no_scipy def test_window_with_args(self): - tm._skip_if_no_scipy() - # make sure that we are aggregating window functions correctly with arg r = Series(np.random.randn(100)).rolling(window=10, min_periods=1, win_type='gaussian') @@ -289,9 +289,9 @@ class TestWindow(Base): def setup_method(self, method): self._create_data() + @td.skip_if_no_scipy def test_constructor(self): # GH 12669 - tm._skip_if_no_scipy() for o in [self.series, self.frame]: c = o.rolling @@ -367,9 +367,9 @@ def test_constructor(self): with pytest.raises(ValueError): c(window=2, min_periods=1, center=w) + @td.skip_if_no_scipy def test_constructor_with_win_type(self): # GH 13383 - tm._skip_if_no_scipy() for o in [self.series, self.frame]: c = o.rolling c(0, win_type='boxcar') @@ -839,10 +839,9 @@ def test_rolling_count(self): def test_rolling_mean(self): self._check_moment_func(mom.rolling_mean, np.mean, name='mean') + @td.skip_if_no_scipy def 
test_cmov_mean(self): # GH 8238 - tm._skip_if_no_scipy() - vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818, @@ -856,10 +855,9 @@ def test_cmov_mean(self): rs = Series(vals).rolling(5, center=True).mean() tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window(self): # GH 8238 - tm._skip_if_no_scipy() - vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818, @@ -873,10 +871,9 @@ def test_cmov_window(self): rs = Series(vals).rolling(5, win_type='boxcar', center=True).mean() tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window_corner(self): # GH 8238 - tm._skip_if_no_scipy() - # all nan vals = np.empty(10, dtype=float) vals.fill(np.nan) @@ -897,10 +894,9 @@ def test_cmov_window_corner(self): assert np.isnan(rs).all() assert len(rs) == 5 + @td.skip_if_no_scipy def test_cmov_window_frame(self): # Gh 8238 - tm._skip_if_no_scipy() - vals = np.array([[12.18, 3.64], [10.18, 9.16], [13.24, 14.61], [4.51, 8.11], [6.15, 11.44], [9.14, 6.21], [11.31, 10.67], [2.94, 6.51], [9.42, 8.39], [12.44, @@ -929,9 +925,8 @@ def test_cmov_window_frame(self): rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).sum() tm.assert_frame_equal(DataFrame(xp), rs) + @td.skip_if_no_scipy def test_cmov_window_na_min_periods(self): - tm._skip_if_no_scipy() - # min_periods vals = Series(np.random.randn(10)) vals[4] = np.nan @@ -942,10 +937,9 @@ def test_cmov_window_na_min_periods(self): center=True).mean() tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window_regular(self): # GH 8238 - tm._skip_if_no_scipy() - win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman', 'blackmanharris', 'nuttall', 'barthann'] @@ -975,10 +969,9 @@ def test_cmov_window_regular(self): rs = Series(vals).rolling(5, win_type=wt, 
center=True).mean() tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window_regular_linear_range(self): # GH 8238 - tm._skip_if_no_scipy() - win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman', 'blackmanharris', 'nuttall', 'barthann'] @@ -992,10 +985,9 @@ def test_cmov_window_regular_linear_range(self): rs = Series(vals).rolling(5, win_type=wt, center=True).mean() tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window_regular_missing_data(self): # GH 8238 - tm._skip_if_no_scipy() - win_types = ['triang', 'blackman', 'hamming', 'bartlett', 'bohman', 'blackmanharris', 'nuttall', 'barthann'] @@ -1025,10 +1017,9 @@ def test_cmov_window_regular_missing_data(self): rs = Series(vals).rolling(5, win_type=wt, min_periods=3).mean() tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window_special(self): # GH 8238 - tm._skip_if_no_scipy() - win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian'] kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.}, {'width': 0.5}] @@ -1052,10 +1043,9 @@ def test_cmov_window_special(self): rs = Series(vals).rolling(5, win_type=wt, center=True).mean(**k) tm.assert_series_equal(xp, rs) + @td.skip_if_no_scipy def test_cmov_window_special_linear_range(self): # GH 8238 - tm._skip_if_no_scipy() - win_types = ['kaiser', 'gaussian', 'general_gaussian', 'slepian'] kwds = [{'beta': 1.}, {'std': 1.}, {'power': 2., 'width': 2.}, {'width': 0.5}] @@ -1259,19 +1249,15 @@ def test_rolling_var(self): self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=0), name='var', ddof=0) + @td.skip_if_no_scipy def test_rolling_skew(self): - try: - from scipy.stats import skew - except ImportError: - pytest.skip('no scipy') + from scipy.stats import skew self._check_moment_func(mom.rolling_skew, lambda x: skew(x, bias=False), name='skew') + @td.skip_if_no_scipy def test_rolling_kurt(self): - try: - from scipy.stats import kurtosis - except ImportError: - pytest.skip('no 
scipy') + from scipy.stats import kurtosis self._check_moment_func(mom.rolling_kurt, lambda x: kurtosis(x, bias=False), name='kurt') diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 0b2d50d06a66c..95a9a8fed42f7 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -94,6 +94,11 @@ def _skip_if_not_us_locale(): return True +def _skip_if_no_scipy(): + return not (safe_import('scipy.stats') and safe_import('scipy.sparse') and + safe_import('scipy.interpolate')) + + def skip_if_no(package, min_version=None): """ Generic function to help skip test functions when required packages are not @@ -119,9 +124,11 @@ def skip_if_no(package, min_version=None): The decorated function wrapped within a pytest ``skip_if`` mark """ def decorated_func(func): + msg = "Could not import '{}'".format(package) + if min_version: + msg += " satisfying a min_version of {}".format(min_version) return pytest.mark.skipif( - not safe_import(package, min_version=min_version), - reason="Could not import '{}'".format(package) + not safe_import(package, min_version=min_version), reason=msg )(func) return decorated_func @@ -144,3 +151,5 @@ def decorated_func(func): reason="Specific locale is set " "{lang}".format( lang=locale.getlocale()[0])) +skip_if_no_scipy = pytest.mark.skipif(_skip_if_no_scipy(), + reason="Missing SciPy requirement")
- [ ] progress towards #18190 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18794
2017-12-15T16:46:26Z
2017-12-15T23:54:47Z
2017-12-15T23:54:47Z
2017-12-15T23:55:53Z
DOC: standardizing docstrings to use pd.DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 753c623b2de4c..65934494b321b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2304,7 +2304,7 @@ def query(self, expr, inplace=False, **kwargs): -------- >>> from numpy.random import randn >>> from pandas import DataFrame - >>> df = DataFrame(randn(10, 2), columns=list('ab')) + >>> df = pd.DataFrame(randn(10, 2), columns=list('ab')) >>> df.query('a > b') >>> df[df.a > df.b] # same result as the previous expression """ @@ -2368,7 +2368,7 @@ def eval(self, expr, inplace=False, **kwargs): -------- >>> from numpy.random import randn >>> from pandas import DataFrame - >>> df = DataFrame(randn(10, 2), columns=list('ab')) + >>> df = pd.DataFrame(randn(10, 2), columns=list('ab')) >>> df.eval('a + b') >>> df.eval('c = a + b') """ @@ -2664,7 +2664,7 @@ def assign(self, **kwargs): Examples -------- - >>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) + >>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) Where the value is a callable, evaluated on `df`: @@ -3780,9 +3780,9 @@ def nlargest(self, n, columns, keep='first'): Examples -------- - >>> df = DataFrame({'a': [1, 10, 8, 11, -1], - ... 'b': list('abdce'), - ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) + >>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1], + ... 'b': list('abdce'), + ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) >>> df.nlargest(3, 'a') a b c 3 11 c 3 @@ -3815,9 +3815,9 @@ def nsmallest(self, n, columns, keep='first'): Examples -------- - >>> df = DataFrame({'a': [1, 10, 8, 11, -1], - ... 'b': list('abdce'), - ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) + >>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1], + ... 'b': list('abdce'), + ... 
'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) >>> df.nsmallest(3, 'a') a b c 4 -1 e 4 @@ -5818,7 +5818,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, Examples -------- - >>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), + >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), columns=['a', 'b']) >>> df.quantile(.1) a 1.3 @@ -5941,7 +5941,7 @@ def isin(self, values): -------- When ``values`` is a list: - >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) + >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) >>> df.isin([1, 3, 12, 'a']) A B 0 True True @@ -5950,7 +5950,7 @@ def isin(self, values): When ``values`` is a dict: - >>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]}) + >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]}) >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]}) A B 0 True False # Note that B didn't match the 1 here. @@ -5959,7 +5959,7 @@ def isin(self, values): When ``values`` is a Series or DataFrame: - >>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) + >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']}) >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']}) >>> df.isin(other) A B
- [ ] closes #18770 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18788
2017-12-15T03:56:07Z
2017-12-16T10:43:39Z
2017-12-16T10:43:39Z
2017-12-16T10:43:45Z
use / test modern conda recipe
diff --git a/.travis.yml b/.travis.yml index 0f43e4cf54faa..9eccf87960dd0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -51,7 +51,7 @@ matrix: - python-gtk2 - dist: trusty env: - - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" COVERAGE=true + - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true COVERAGE=true - dist: trusty env: - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true @@ -62,7 +62,7 @@ matrix: # In allow_failures - dist: trusty env: - - JOB="3.6_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true + - JOB="3.6_PIP_BUILD_TEST" TEST_ARGS="--skip-slow" PIP_BUILD_TEST=true addons: apt: packages: @@ -81,7 +81,7 @@ matrix: - JOB="2.7_SLOW" SLOW=true - dist: trusty env: - - JOB="3.6_BUILD_TEST" TEST_ARGS="--skip-slow" BUILD_TEST=true + - JOB="3.6_PIP_BUILD_TEST" TEST_ARGS="--skip-slow" PIP_BUILD_TEST=true addons: apt: packages: diff --git a/ci/install_travis.sh b/ci/install_travis.sh index dac3625cba4ba..90b9bf3f3186e 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -50,6 +50,13 @@ conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 conda update -q conda +if [ "$CONDA_BUILD_TEST" ]; then + echo + echo "[installing conda-build]" + conda install conda-build +fi + + echo echo "[add channels]" conda config --remove channels defaults || exit 1 @@ -116,7 +123,7 @@ if [ "$COVERAGE" ]; then fi echo -if [ -z "$BUILD_TEST" ]; then +if [ -z "$PIP_BUILD_TEST" ] and [ -z "$CONDA_BUILD_TEST" ]; then # build but don't install echo "[build em]" @@ -155,23 +162,34 @@ echo "[removing installed pandas]" conda remove pandas -y --force pip uninstall -y pandas -if [ "$BUILD_TEST" ]; then +echo +echo "[no installed pandas]" +conda list pandas +pip list --format columns |grep pandas - # remove any installation - pip uninstall -y pandas - conda list pandas - pip list --format columns |grep pandas +# build and 
install +echo + +if [ "$PIP_BUILD_TEST" ]; then # build & install testing - echo ["building release"] + echo "[building release]" bash scripts/build_dist_for_release.sh conda uninstall -y cython time pip install dist/*tar.gz || exit 1 +elif [ "$CONDA_BUILD_TEST" ]; then + + # build & install testing + echo "[building conda recipe]" + conda build ./conda.recipe --numpy 1.13 --python 3.5 -q --no-test + + echo "[installing]" + conda install $(conda build ./conda.recipe --numpy 1.13 --python 3.5 --output) --force + else # install our pandas - echo echo "[running setup.py develop]" python setup.py develop || exit 1 diff --git a/ci/requirements-3.5.build b/ci/requirements-3.5_CONDA_BUILD_TEST.build similarity index 77% rename from ci/requirements-3.5.build rename to ci/requirements-3.5_CONDA_BUILD_TEST.build index 76227e106e1fd..6648e3778777c 100644 --- a/ci/requirements-3.5.build +++ b/ci/requirements-3.5_CONDA_BUILD_TEST.build @@ -2,5 +2,5 @@ python=3.5* python-dateutil pytz nomkl -numpy=1.11.3 +numpy=1.13* cython diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5_CONDA_BUILD_TEST.pip similarity index 100% rename from ci/requirements-3.5.pip rename to ci/requirements-3.5_CONDA_BUILD_TEST.pip diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5_CONDA_BUILD_TEST.run similarity index 92% rename from ci/requirements-3.5.run rename to ci/requirements-3.5_CONDA_BUILD_TEST.run index 52828b5220997..19d9a91e86585 100644 --- a/ci/requirements-3.5.run +++ b/ci/requirements-3.5_CONDA_BUILD_TEST.run @@ -1,5 +1,5 @@ pytz -numpy=1.11.3 +numpy=1.13* openpyxl xlsxwriter xlrd diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5_CONDA_BUILD_TEST.sh similarity index 86% rename from ci/requirements-3.5.sh rename to ci/requirements-3.5_CONDA_BUILD_TEST.sh index d694ad3679ac1..09d6775cfc894 100644 --- a/ci/requirements-3.5.sh +++ b/ci/requirements-3.5_CONDA_BUILD_TEST.sh @@ -2,7 +2,7 @@ source activate pandas -echo "install 35" +echo "install 35 CONDA_BUILD_TEST" # pip 
install python-dateutil to get latest conda remove -n pandas python-dateutil --force diff --git a/ci/requirements-3.6_BUILD_TEST.build b/ci/requirements-3.6_PIP_BUILD_TEST.build similarity index 100% rename from ci/requirements-3.6_BUILD_TEST.build rename to ci/requirements-3.6_PIP_BUILD_TEST.build diff --git a/ci/requirements-3.6_BUILD_TEST.pip b/ci/requirements-3.6_PIP_BUILD_TEST.pip similarity index 100% rename from ci/requirements-3.6_BUILD_TEST.pip rename to ci/requirements-3.6_PIP_BUILD_TEST.pip diff --git a/ci/requirements-3.6_BUILD_TEST.sh b/ci/requirements-3.6_PIP_BUILD_TEST.sh similarity index 75% rename from ci/requirements-3.6_BUILD_TEST.sh rename to ci/requirements-3.6_PIP_BUILD_TEST.sh index 2a3adeff836ee..3a8cf673b32f2 100644 --- a/ci/requirements-3.6_BUILD_TEST.sh +++ b/ci/requirements-3.6_PIP_BUILD_TEST.sh @@ -2,6 +2,6 @@ source activate pandas -echo "install 36 BUILD_TEST" +echo "install 36 PIP_BUILD_TEST" conda install -n pandas -c conda-forge pyarrow dask pyqt qtpy diff --git a/ci/script_multi.sh b/ci/script_multi.sh index 58742552628c8..c1fa756ece965 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -18,7 +18,7 @@ fi export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))') echo PYTHONHASHSEED=$PYTHONHASHSEED -if [ "$BUILD_TEST" ]; then +if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then echo "[build-test]" echo "[env]" diff --git a/ci/script_single.sh b/ci/script_single.sh index 963ce00b4a094..005c648ee025f 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -16,7 +16,7 @@ if [ "$SLOW" ]; then TEST_ARGS="--only-slow --skip-network" fi -if [ "$BUILD_TEST" ]; then +if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then echo "We are not running pytest as this is a build test." 
elif [ "$DOC" ]; then diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 0b54980d2bc87..3510496f0b519 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -1,9 +1,9 @@ package: name: pandas - version: {{ GIT_DESCRIBE_TAG|replace("v","") }} + version: {{ environ.get('GIT_DESCRIBE_TAG','').replace('v', '', 1) }} build: - number: {{ GIT_DESCRIBE_NUMBER|int }} + number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }} {% if GIT_DESCRIBE_NUMBER|int == 0 %}string: np{{ CONDA_NPY }}py{{ CONDA_PY }}_0 {% else %}string: np{{ CONDA_NPY }}py{{ CONDA_PY }}_{{ GIT_BUILD_STR }}{% endif %} @@ -14,12 +14,14 @@ requirements: build: - python - cython - - numpy x.x + - {{ pin_compatible('numpy') }} - setuptools >=3.3 + - python-dateutil >=2.5.0 + - pytz run: - python - - numpy x.x + - {{ pin_compatible('numpy') }} - python-dateutil >=2.5.0 - pytz diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index dfd222f10d235..56e398bf3d454 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -162,8 +162,12 @@ If installed, we now require: +-----------------+-----------------+----------+ +Build Changes +^^^^^^^^^^^^^ + - Building pandas for development now requires ``cython >= 0.24`` (:issue:`18613`) - Building from source now explicity requires ``setuptools`` in ``setup.py`` (:issue:`18113`) +- Updated conda recipe to be in compliance with conda-build 3.0+ (:issue:`18002`) .. _whatsnew_0220.api:
supersedes #18592 closes #18002
https://api.github.com/repos/pandas-dev/pandas/pulls/18787
2017-12-15T02:19:05Z
2017-12-19T10:57:03Z
2017-12-19T10:57:02Z
2017-12-19T13:00:42Z
TST: xfail geopandas downstream test
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 0f0abd8cd3400..6407bee49ad15 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -92,6 +92,7 @@ def test_pandas_datareader(): pandas_datareader.get_data_google('AAPL') +@pytest.mark.xfail(reason="install not working, gh-18780") def test_geopandas(): geopandas = import_module('geopandas') # noqa
xref #18780
https://api.github.com/repos/pandas-dev/pandas/pulls/18786
2017-12-15T01:45:31Z
2017-12-15T01:45:37Z
2017-12-15T01:45:37Z
2017-12-15T01:45:37Z
Fix Series.__sub__ non-nano datetime64
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f300deddebeb..1273433730162 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -350,7 +350,7 @@ Reshaping Numeric ^^^^^^^ -- +- Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`) - - diff --git a/pandas/core/ops.py b/pandas/core/ops.py index ac9ca03c13973..05ec7f41b0c66 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -28,7 +28,7 @@ is_datetimelike_v_numeric, is_integer_dtype, is_categorical_dtype, is_object_dtype, is_timedelta64_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, + is_datetime64_dtype, is_datetime64tz_dtype, is_datetime64_ns_dtype, is_bool_dtype, is_datetimetz, is_list_like, is_offsetlike, is_scalar, @@ -527,6 +527,11 @@ def _convert_to_array(self, values, name=None, other=None): elif not (isinstance(values, (np.ndarray, ABCSeries)) and is_datetime64_dtype(values)): values = libts.array_to_datetime(values) + elif (is_datetime64_dtype(values) and + not is_datetime64_ns_dtype(values)): + # GH#7996 e.g. 
np.datetime64('2013-01-01') is datetime64[D] + values = values.astype('datetime64[ns]') + elif inferred_type in ('timedelta', 'timedelta64'): # have a timedelta, convert to to ns here values = to_timedelta(values, errors='coerce', box=False) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 4adbdbca82fd2..433e3cf440cbd 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1004,6 +1004,33 @@ def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): class TestDatetimeSeriesArithmetic(object): + @pytest.mark.parametrize( + 'box, assert_func', + [(Series, tm.assert_series_equal), + (pd.Index, tm.assert_index_equal)]) + def test_sub_datetime64_not_ns(self, box, assert_func): + # GH#7996 + dt64 = np.datetime64('2013-01-01') + assert dt64.dtype == 'datetime64[D]' + + obj = box(date_range('20130101', periods=3)) + res = obj - dt64 + expected = box([Timedelta(days=0), Timedelta(days=1), + Timedelta(days=2)]) + assert_func(res, expected) + + res = dt64 - obj + assert_func(res, -expected) + + @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano') + def test_frame_sub_datetime64_not_ns(self): + df = pd.DataFrame(date_range('20130101', periods=3)) + dt64 = np.datetime64('2013-01-01') + assert dt64.dtype == 'datetime64[D]' + res = df - dt64 + expected = pd.DataFrame([Timedelta(days=0), Timedelta(days=1), + Timedelta(days=2)]) + tm.assert_frame_equal(res, expected) def test_operators_datetimelike(self): def run_ops(ops, get_ser, test_ser):
The original bug report was for `Series`. This fixes that bug and includes a test that checks for `DatetimeIndex` while we're at it. I checked and this does _not_ fix the analogous problem in `DataFrame. I'm hoping someone else will pick up the torch on that b/c the broadcast/dispatch is still something of a mystery to me. - [x] closes #7996 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18783
2017-12-14T17:13:34Z
2017-12-28T12:31:28Z
2017-12-28T12:31:28Z
2018-02-11T22:00:25Z
COMPAT: 32-bit compat for tz-conversions
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index c12a15b71487b..11e1787cd77da 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -469,7 +469,8 @@ cdef inline void _localize_tso(_TSObject obj, object tz): """ cdef: ndarray[int64_t] trans, deltas - Py_ssize_t delta, posn + int64_t delta + Py_ssize_t posn datetime dt assert obj.tzinfo is None
xref #18777
https://api.github.com/repos/pandas-dev/pandas/pulls/18778
2017-12-14T11:09:56Z
2017-12-14T12:34:42Z
2017-12-14T12:34:42Z
2017-12-14T12:35:09Z
Construct 1d array from listlike
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index 6276dc324ca0d..65af7b077d80f 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -1,10 +1,36 @@ import numpy as np -from pandas import DataFrame, Series, Index, DatetimeIndex, Timestamp +from pandas import Series, Index, DatetimeIndex, Timestamp from .pandas_vb_common import setup # noqa -class Constructors(object): +class SeriesConstructors(object): + + goal_time = 0.2 + + param_names = ["data_fmt", "with_index"] + params = [[lambda x: x, + list, + lambda arr: list(arr.astype(str)), + lambda arr: dict(zip(range(len(arr)), arr)), + lambda arr: [(i, -i) for i in arr], + lambda arr: [[i, -i] for i in arr], + lambda arr: ([(i, -i) for i in arr][:-1] + [None]), + lambda arr: ([[i, -i] for i in arr][:-1] + [None])], + [False, True]] + + def setup(self, data_fmt, with_index): + N = 10**4 + np.random.seed(1234) + arr = np.random.randn(N) + self.data = data_fmt(arr) + self.index = np.arange(N) if with_index else None + + def time_series_constructor(self, data_fmt, with_index): + Series(self.data, index=self.index) + + +class SeriesDtypesConstructors(object): goal_time = 0.2 @@ -19,12 +45,6 @@ def setup(self): self.s = Series([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')] * N * 10) - def time_frame_from_ndarray(self): - DataFrame(self.arr) - - def time_series_from_ndarray(self): - Series(self.data, index=self.index) - def time_index_from_array_string(self): Index(self.arr_str) diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index 6761d48d25919..391a209cb2a89 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -81,3 +81,15 @@ def setup(self, nrows): def time_frame_from_records_generator(self, nrows): # issue-6700 self.df = DataFrame.from_records(self.gen, nrows=nrows) + + +class FromNDArray(object): + + goal_time = 0.2 + + def setup(self): + N = 100000 + self.data = 
np.random.randn(N) + + def time_frame_from_ndarray(self): + self.df = DataFrame(self.data) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index a39f83d5261c0..5a62203f79642 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -148,23 +148,6 @@ def item_from_zerodim(object val): return util.unbox_if_zerodim(val) -@cython.wraparound(False) -@cython.boundscheck(False) -cpdef ndarray[object] list_to_object_array(list obj): - """ - Convert list to object ndarray. Seriously can\'t believe - I had to write this function. - """ - cdef: - Py_ssize_t i, n = len(obj) - ndarray[object] arr = np.empty(n, dtype=object) - - for i in range(n): - arr[i] = obj[i] - - return arr - - @cython.wraparound(False) @cython.boundscheck(False) def fast_unique(ndarray[object] values): diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index e15b4693432d9..8bfed4fe60fed 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -349,7 +349,9 @@ def infer_dtype(object value, bint skipna=False): else: if not isinstance(value, list): value = list(value) - values = list_to_object_array(value) + from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike) + values = construct_1d_object_array_from_listlike(value) values = getattr(values, 'values', values) val = _try_infer_map(values) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 0ceb8966fd3c8..167f215b6c0ac 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -6,7 +6,8 @@ from warnings import warn, catch_warnings import numpy as np -from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.cast import ( + maybe_promote, construct_1d_object_array_from_listlike) from pandas.core.dtypes.generic import ( ABCSeries, ABCIndex, ABCIndexClass, ABCCategorical) @@ -171,7 +172,7 @@ def _ensure_arraylike(values): if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = 
list(values) - values = lib.list_to_object_array(values) + values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values @@ -401,7 +402,7 @@ def isin(comps, values): .format(values_type=type(values).__name__)) if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): - values = lib.list_to_object_array(list(values)) + values = construct_1d_object_array_from_listlike(list(values)) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/common.py b/pandas/core/common.py index 76a69030463ec..35696be5b2a03 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -21,6 +21,7 @@ from pandas.core.dtypes.missing import isna, isnull, notnull # noqa from pandas.api import types from pandas.core.dtypes import common +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike # compat from pandas.errors import ( # noqa @@ -381,7 +382,7 @@ def _asarray_tuplesafe(values, dtype=None): return values.values if isinstance(values, list) and dtype in [np.object_, object]: - return lib.list_to_object_array(values) + return construct_1d_object_array_from_listlike(values) result = np.asarray(values, dtype=dtype) @@ -389,17 +390,10 @@ def _asarray_tuplesafe(values, dtype=None): result = np.asarray(values, dtype=object) if result.ndim == 2: - if isinstance(values, list): - return lib.list_to_object_array(values) - else: - # Making a 1D array that safely contains tuples is a bit tricky - # in numpy, leading to the following - try: - result = np.empty(len(values), dtype=object) - result[:] = values - except ValueError: - # we have a list-of-list - result[:] = [tuple(x) for x in values] + # Avoid building an array of arrays: + # TODO: verify whether any path hits this except #18819 (invalid) + values = [tuple(x) for x in values] + result = construct_1d_object_array_from_listlike(values) return result diff --git a/pandas/core/dtypes/cast.py 
b/pandas/core/dtypes/cast.py index a97b84ab9cc5b..87c6fb69f33bf 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -42,7 +42,7 @@ def maybe_convert_platform(values): """ try to do platform conversion, allow ndarray or list here """ if isinstance(values, (list, tuple)): - values = lib.list_to_object_array(list(values)) + values = construct_1d_object_array_from_listlike(list(values)) if getattr(values, 'dtype', None) == np.object_: if hasattr(values, '_values'): values = values._values @@ -1162,3 +1162,28 @@ def construct_1d_arraylike_from_scalar(value, length, dtype): subarr.fill(value) return subarr + + +def construct_1d_object_array_from_listlike(values): + """ + Transform any list-like object in a 1-dimensional numpy array of object + dtype. + + Parameters + ---------- + values : any iterable which has a len() + + Raises + ------ + TypeError + * If `values` does not have a len() + + Returns + ------- + 1-dimensional numpy array of dtype object + """ + # numpy will try to interpret nested lists as further dimensions, hence + # making a 1D array that contains list-likes is a bit tricky: + result = np.empty(len(values), dtype='object') + result[:] = values + return result diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 2fb0cbb14c225..17ad5f20ce742 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -33,7 +33,9 @@ is_list_like, is_scalar, _ensure_object) -from pandas.core.dtypes.cast import maybe_upcast_putmask, find_common_type +from pandas.core.dtypes.cast import ( + maybe_upcast_putmask, find_common_type, + construct_1d_object_array_from_listlike) from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, @@ -750,7 +752,7 @@ def wrapper(left, right, name=name, na_op=na_op): def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): - y = lib.list_to_object_array(y) + y = construct_1d_object_array_from_listlike(y) if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): if not is_object_dtype(y.dtype): y = 
y.astype(np.object_) @@ -901,7 +903,7 @@ def na_op(x, y): result = op(x, y) except TypeError: if isinstance(y, list): - y = lib.list_to_object_array(y) + y = construct_1d_object_array_from_listlike(y) if isinstance(y, (np.ndarray, ABCSeries)): if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)): diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index 82a35fa711e8c..d13d781f03117 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -21,7 +21,8 @@ infer_dtype_from_array, maybe_convert_string_to_object, maybe_convert_scalar, - find_common_type) + find_common_type, + construct_1d_object_array_from_listlike) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, @@ -407,3 +408,17 @@ def test_period_dtype(self): np.dtype('datetime64[ns]'), np.object, np.int64]: assert find_common_type([dtype, dtype2]) == np.object assert find_common_type([dtype2, dtype]) == np.object + + @pytest.mark.parametrize('datum1', [1, 2., "3", (4, 5), [6, 7], None]) + @pytest.mark.parametrize('datum2', [8, 9., "10", (11, 12), [13, 14], None]) + def test_cast_1d_array(self, datum1, datum2): + data = [datum1, datum2] + result = construct_1d_object_array_from_listlike(data) + + # Direct comparison fails: https://github.com/numpy/numpy/issues/10218 + assert result.dtype == 'object' + assert list(result) == data + + @pytest.mark.parametrize('val', [1, 2., None]) + def test_cast_1d_array_invalid_scalar(self, val): + pytest.raises(TypeError, construct_1d_object_array_from_listlike, val) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 22ad2258e70bc..8be6c4875ae24 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -21,8 +21,8 @@ MultiIndex, Timedelta, Timestamp, date_range, Categorical) import pandas as pd -import pandas._libs.lib as lib import pandas.util.testing as tm +from pandas.core.dtypes.cast import 
construct_1d_object_array_from_listlike from pandas.tests.frame.common import TestData @@ -1199,7 +1199,7 @@ def test_constructor_from_items(self): DataFrame.from_items(row_items, orient='index') # orient='index', but thar be tuples - arr = lib.list_to_object_array( + arr = construct_1d_object_array_from_listlike( [('bar', 'baz')] * len(self.mixed_frame)) self.mixed_frame['foo'] = arr row_items = [(idx, list(self.mixed_frame.xs(idx))) diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 510ca6ac83ec0..7d6937592002d 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -18,7 +18,7 @@ from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.indexes.base import InvalidIndexError -from pandas._libs import lib +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas._libs.lib import Timestamp import pandas.util.testing as tm @@ -913,7 +913,7 @@ def test_from_product_invalid_input(self): def test_from_product_datetimeindex(self): dt_index = date_range('2000-01-01', periods=2) mi = pd.MultiIndex.from_product([[1, 2], dt_index]) - etalon = lib.list_to_object_array([(1, pd.Timestamp( + etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp( '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp( '2000-01-01')), (2, pd.Timestamp('2000-01-02'))]) tm.assert_numpy_array_equal(mi.values, etalon) @@ -938,11 +938,11 @@ def test_values_boxed(self): (1, pd.Timestamp('2000-01-04')), (2, pd.Timestamp('2000-01-02')), (3, pd.Timestamp('2000-01-03'))] - mi = pd.MultiIndex.from_tuples(tuples) - tm.assert_numpy_array_equal(mi.values, - lib.list_to_object_array(tuples)) + result = pd.MultiIndex.from_tuples(tuples) + expected = construct_1d_object_array_from_listlike(tuples) + tm.assert_numpy_array_equal(result.values, expected) # Check that code branches for boxed values produce 
identical results - tm.assert_numpy_array_equal(mi.values[:4], mi[:4].values) + tm.assert_numpy_array_equal(result.values[:4], result[:4].values) def test_append(self): result = self.index[:3].append(self.index[3:])
- [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` #18626 isn't converging, will need more work and I won't have much time soon, so I took away the asv and refactoring that should be straighforward to merge. asv says ``` before after ratio [96439fb1] [e72437c5] - 38.6±1ms 35.1±0.5ms 0.91 panel_ctor.Constructors3.time_panel_from_dict_same_index - 24.2±0.1μs 21.8±0.2μs 0.90 ctors.Constructors.time_index_from_array_string - 39.7±1ms 34.9±0.02ms 0.88 panel_ctor.Constructors2.time_panel_from_dict_equiv_indexes - 14.1±0.04μs 12.3±0.08μs 0.87 ctors.Constructors.time_series_from_ndarray - 12.2±0.6μs 10.1±0.05μs 0.82 ctors.Constructors.time_dtindex_from_series SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ``` ... but they are borderline, and don't always come up.
https://api.github.com/repos/pandas-dev/pandas/pulls/18769
2017-12-13T18:10:31Z
2017-12-19T14:41:05Z
2017-12-19T14:41:05Z
2017-12-19T15:02:46Z
Added pyarrow to OSX build script
diff --git a/ci/requirements-3.5_OSX.sh b/ci/requirements-3.5_OSX.sh index c2978b175968c..f22f57c38364d 100644 --- a/ci/requirements-3.5_OSX.sh +++ b/ci/requirements-3.5_OSX.sh @@ -4,4 +4,4 @@ source activate pandas echo "install 35_OSX" -conda install -n pandas -c conda-forge feather-format==0.3.1 fastparquet +conda install -n pandas -c conda-forge feather-format==0.3.1 fastparquet pyarrow=0.5.0
- [X] closes #18714 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18766
2017-12-13T15:39:09Z
2017-12-13T21:29:44Z
null
2017-12-13T21:29:51Z
TST: Skip if Decorators for Localpath and Pathlib
diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index c059f01ecf3f4..5da347e47957c 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -1,6 +1,7 @@ import pandas as pd from pandas.compat import PY2 import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.errors import EmptyDataError import os import io @@ -71,8 +72,8 @@ def test_from_iterator(self): tm.assert_frame_equal(df, df0.iloc[2:5, :]) rdr.close() + @td.skip_if_no('pathlib') def test_path_pathlib(self): - tm._skip_if_no_pathlib() from pathlib import Path for j in 0, 1: df0 = self.data[j] @@ -82,8 +83,8 @@ def test_path_pathlib(self): df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) + @td.skip_if_no('py.path') def test_path_localpath(self): - tm._skip_if_no_localpath() from py.path import local as LocalPath for j in 0, 1: df0 = self.data[j] diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 707580bfe9601..13a393d9109ae 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -8,6 +8,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.io import common from pandas.compat import is_platform_windows, StringIO, FileNotFoundError @@ -67,17 +68,15 @@ def test_expand_user_normal_path(self): assert expanded_name == filename assert os.path.expanduser(filename) == expanded_name + @td.skip_if_no('pathlib') def test_stringify_path_pathlib(self): - tm._skip_if_no_pathlib() - rel_path = common._stringify_path(Path('.')) assert rel_path == '.' 
redundant_path = common._stringify_path(Path('foo//bar')) assert redundant_path == os.path.join('foo', 'bar') + @td.skip_if_no('py.path') def test_stringify_path_localpath(self): - tm._skip_if_no_localpath() - path = os.path.join('foo', 'bar') abs_path = os.path.abspath(path) lpath = LocalPath(path) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 4efeeecf8ee4a..274d60c40e83f 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -13,6 +13,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas import DataFrame, Index, MultiIndex from pandas.compat import u, range, map, BytesIO, iteritems from pandas.core.config import set_option, get_option @@ -650,11 +651,10 @@ def test_read_from_file_url(self): tm.assert_frame_equal(url_table, local_table) + @td.skip_if_no('pathlib') def test_read_from_pathlib_path(self): # GH12655 - tm._skip_if_no_pathlib() - from pathlib import Path str_path = os.path.join(self.dirpath, 'test1' + self.ext) @@ -665,11 +665,10 @@ def test_read_from_pathlib_path(self): tm.assert_frame_equal(expected, actual) + @td.skip_if_no('py.path') def test_read_from_py_localpath(self): # GH12655 - tm._skip_if_no_localpath() - from py.path import local as LocalPath str_path = os.path.join(self.dirpath, 'test1' + self.ext) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 6df31b73da9b7..d63764e90d26e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -5119,11 +5119,10 @@ def test_read_nokey_empty(self): store.close() pytest.raises(ValueError, read_hdf, path) + @td.skip_if_no('pathlib') def test_read_from_pathlib_path(self): # GH11773 - tm._skip_if_no_pathlib() - from pathlib import Path expected = DataFrame(np.random.rand(4, 5), @@ -5137,11 +5136,10 @@ def test_read_from_pathlib_path(self): tm.assert_frame_equal(expected, actual) + @td.skip_if_no('py.path') def 
test_read_from_py_localpath(self): # GH11773 - tm._skip_if_no_localpath() - from py.path import local as LocalPath expected = DataFrame(np.random.rand(4, 5), diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 3fe4b8c3bb783..0b2d50d06a66c 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -94,6 +94,38 @@ def _skip_if_not_us_locale(): return True +def skip_if_no(package, min_version=None): + """ + Generic function to help skip test functions when required packages are not + present on the testing system. + + Intended for use as a decorator, this function will wrap the decorated + function with a pytest ``skip_if`` mark. During a pytest test suite + execution, that mark will attempt to import the specified ``package`` and + optionally ensure it meets the ``min_version``. If the import and version + check are unsuccessful, then the decorated function will be skipped. + + Parameters + ---------- + package: str + The name of the package required by the decorated function + min_version: str or None, default None + Optional minimum version of the package required by the decorated + function + + Returns + ------- + decorated_func: function + The decorated function wrapped within a pytest ``skip_if`` mark + """ + def decorated_func(func): + return pytest.mark.skipif( + not safe_import(package, min_version=min_version), + reason="Could not import '{}'".format(package) + )(func) + return decorated_func + + skip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(), reason="Missing matplotlib dependency") skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 32f8c4884c99f..2a0a7c9301752 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -359,22 +359,6 @@ def _skip_if_no_xarray(): pytest.skip("xarray version is too low: {version}".format(version=v)) -def _skip_if_no_pathlib(): - try: - from pathlib import Path # noqa - except 
ImportError: - import pytest - pytest.skip("pathlib not available") - - -def _skip_if_no_localpath(): - try: - from py.path import local as LocalPath # noqa - except ImportError: - import pytest - pytest.skip("py.path not installed") - - def skip_if_no_ne(engine='numexpr'): from pandas.core.computation.expressions import ( _USE_NUMEXPR,
- [ ] progress towards #18190 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This push should knock out the ``pathlib`` and ``localpath`` skip if functions. Rather than creating them as their own marks, I've created a generic decorator that accepts a package and optionally a min_version as its arguments. With that, it returns a function marked with the ``skip_if`` decorator that calls ``safe_import`` to validate whether or not the function should be skipped. This isn't applicable to all of the skip_if decorators because some of them have post-processing they do after import (see the matplotlib decorators) but for very generic import checks this could scale better in the future
https://api.github.com/repos/pandas-dev/pandas/pulls/18765
2017-12-13T15:10:27Z
2017-12-15T01:03:07Z
2017-12-15T01:03:07Z
2017-12-15T02:39:10Z
Implement roll_monthday, simplify SemiMonthOffset
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 933e7ed64b837..d3278e42e413f 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -523,11 +523,9 @@ def shift_quarters(int64_t[:] dtindex, int quarters, n = quarters months_since = (dts.month - q1start_month) % modby - compare_month = dts.month - months_since - compare_month = compare_month or 12 # compare_day is only relevant for comparison in the case # where months_since == 0. - compare_day = get_firstbday(dts.year, compare_month) + compare_day = get_firstbday(dts.year, dts.month) if n <= 0 and (months_since != 0 or (months_since == 0 and dts.day > compare_day)): @@ -556,11 +554,9 @@ def shift_quarters(int64_t[:] dtindex, int quarters, n = quarters months_since = (dts.month - q1start_month) % modby - compare_month = dts.month - months_since - compare_month = compare_month or 12 # compare_day is only relevant for comparison in the case # where months_since == 0. - compare_day = get_lastbday(dts.year, compare_month) + compare_day = get_lastbday(dts.year, dts.month) if n <= 0 and (months_since != 0 or (months_since == 0 and dts.day > compare_day)): @@ -827,7 +823,55 @@ cpdef int get_day_of_month(datetime other, day_opt) except? -1: raise ValueError(day_opt) -cpdef int roll_yearday(other, n, month, day_opt='start') except? -1: +cpdef int roll_convention(int other, int n, int compare): + """ + Possibly increment or decrement the number of periods to shift + based on rollforward/rollbackward conventions. + + Parameters + ---------- + other : int, generally the day component of a datetime + n : number of periods to increment, before adjusting for rolling + compare : int, generally the day component of a datetime, in the same + month as the datetime form which `other` was taken. 
+ + Returns + ------- + n : int number of periods to increment + """ + if n > 0 and other < compare: + n -= 1 + elif n <= 0 and other > compare: + # as if rolled forward already + n += 1 + return n + + +cpdef int roll_monthday(datetime other, int n, datetime compare): + """ + Possibly increment or decrement the number of periods to shift + based on rollforward/rollbackward conventions. + + Parameters + ---------- + other : datetime + n : number of periods to increment, before adjusting for rolling + compare : datetime + + Returns + ------- + n : int number of periods to increment + """ + if n > 0 and other < compare: + n -= 1 + elif n <= 0 and other > compare: + # as if rolled forward already + n += 1 + return n + + +cpdef int roll_qtrday(datetime other, int n, int month, object day_opt, + int modby=3) except? -1: """ Possibly increment or decrement the number of periods to shift based on rollforward/rollbackward conventions. @@ -836,6 +880,48 @@ cpdef int roll_yearday(other, n, month, day_opt='start') except? -1: ---------- other : datetime or Timestamp n : number of periods to increment, before adjusting for rolling + month : int reference month giving the first month of the year + day_opt : 'start', 'end', 'business_start', 'business_end' + The convention to use in finding the day in a given month against + which to compare for rollforward/rollbackward decisions. + modby : int 3 for quarters, 12 for years + + Returns + ------- + n : int number of periods to increment + """ + # TODO: Merge this with roll_yearday by setting modby=12 there? + # code de-duplication versus perf hit? 
+ # TODO: with small adjustments this could be used in shift_quarters + months_since = other.month % modby - month % modby + + if n > 0: + if months_since < 0 or (months_since == 0 and + other.day < get_day_of_month(other, + day_opt)): + # pretend to roll back if on same month but + # before compare_day + n -= 1 + else: + if months_since > 0 or (months_since == 0 and + other.day > get_day_of_month(other, + day_opt)): + # make sure to roll forward, so negate + n += 1 + return n + + +cpdef int roll_yearday(datetime other, int n, int month, + object day_opt) except? -1: + """ + Possibly increment or decrement the number of periods to shift + based on rollforward/rollbackward conventions. + + Parameters + ---------- + other : datetime or Timestamp + n : number of periods to increment, before adjusting for rolling + month : reference month giving the first month of the year day_opt : 'start', 'end' 'start': returns 1 'end': returns last day of the month @@ -846,7 +932,7 @@ cpdef int roll_yearday(other, n, month, day_opt='start') except? -1: Notes ----- - * Mirrors `roll_check` in tslib.shift_months + * Mirrors `roll_check` in shift_months Examples ------- @@ -888,7 +974,7 @@ cpdef int roll_yearday(other, n, month, day_opt='start') except? 
-1: other.day < get_day_of_month(other, day_opt)): n -= 1 - elif n <= 0: + else: if other.month > month or (other.month == month and other.day > get_day_of_month(other, day_opt)): diff --git a/pandas/tests/tseries/offsets/test_liboffsets.py b/pandas/tests/tseries/offsets/test_liboffsets.py index 8aa32bc600ee6..1e0ecc39084eb 100644 --- a/pandas/tests/tseries/offsets/test_liboffsets.py +++ b/pandas/tests/tseries/offsets/test_liboffsets.py @@ -9,6 +9,7 @@ from pandas import Timestamp import pandas._libs.tslibs.offsets as liboffsets +from pandas._libs.tslibs.offsets import roll_qtrday def test_get_lastbday(): @@ -95,3 +96,93 @@ def test_roll_yearday(): assert liboffsets.roll_yearday(other, 5, month, day_opt) == 5 assert liboffsets.roll_yearday(other, -7, month, day_opt) == -6 assert liboffsets.roll_yearday(other, 0, month, day_opt) == 1 + + +def test_roll_qtrday(): + other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday + for day_opt in ['start', 'end', 'business_start', 'business_end']: + # as long as (other.month % 3) != (month % 3), day_opt is irrelevant + # the `day_opt` doesn't matter. 
+ month = 5 # (other.month % 3) < (month % 3) + assert roll_qtrday(other, 4, month, day_opt, modby=3) == 3 + assert roll_qtrday(other, -3, month, day_opt, modby=3) == -3 + + month = 3 # (other.month % 3) > (month % 3) + assert roll_qtrday(other, 4, month, day_opt, modby=3) == 4 + assert roll_qtrday(other, -3, month, day_opt, modby=3) == -2 + + month = 2 + other = datetime(1999, 5, 31) # Monday + # has (other.month % 3) == (month % 3) + + n = 2 + assert roll_qtrday(other, n, month, 'start', modby=3) == n + assert roll_qtrday(other, n, month, 'end', modby=3) == n + assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + assert roll_qtrday(other, n, month, 'business_end', modby=3) == n + + n = -1 + assert roll_qtrday(other, n, month, 'start', modby=3) == n + 1 + assert roll_qtrday(other, n, month, 'end', modby=3) == n + assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + 1 + assert roll_qtrday(other, n, month, 'business_end', modby=3) == n + + other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday + month = 4 # (other.month % 3) == (month % 3) + n = 2 + assert roll_qtrday(other, n, month, 'start', modby=3) == n + assert roll_qtrday(other, n, month, 'end', modby=3) == n - 1 + assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - 1 + assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - 1 + + n = -1 + assert roll_qtrday(other, n, month, 'start', modby=3) == n + assert roll_qtrday(other, n, month, 'end', modby=3) == n + assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + assert roll_qtrday(other, n, month, 'business_end', modby=3) == n + + other = Timestamp(2072, 10, 3, 6, 17, 18) # First businessday + month = 4 # (other.month % 3) == (month % 3) + n = 2 + assert roll_qtrday(other, n, month, 'start', modby=3) == n + assert roll_qtrday(other, n, month, 'end', modby=3) == n - 1 + assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + assert roll_qtrday(other, n, month, 
'business_end', modby=3) == n - 1 + + n = -1 + assert roll_qtrday(other, n, month, 'start', modby=3) == n + 1 + assert roll_qtrday(other, n, month, 'end', modby=3) == n + assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + assert roll_qtrday(other, n, month, 'business_end', modby=3) == n + + +def test_roll_monthday(): + other = Timestamp('2017-12-29', tz='US/Pacific') + before = Timestamp('2017-12-01', tz='US/Pacific') + after = Timestamp('2017-12-31', tz='US/Pacific') + + n = 42 + assert liboffsets.roll_monthday(other, n, other) == n + assert liboffsets.roll_monthday(other, n, before) == n + assert liboffsets.roll_monthday(other, n, after) == n - 1 + + n = -4 + assert liboffsets.roll_monthday(other, n, other) == n + assert liboffsets.roll_monthday(other, n, before) == n + 1 + assert liboffsets.roll_monthday(other, n, after) == n + + +def test_roll_convention(): + other = 29 + before = 1 + after = 31 + + n = 42 + assert liboffsets.roll_convention(other, n, other) == n + assert liboffsets.roll_convention(other, n, before) == n + assert liboffsets.roll_convention(other, n, after) == n - 1 + + n = -4 + assert liboffsets.roll_convention(other, n, other) == n + assert liboffsets.roll_convention(other, n, before) == n + 1 + assert liboffsets.roll_convention(other, n, after) == n diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 0e6a2259274ed..4f3c24ba534ff 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -22,7 +22,7 @@ from pandas._libs.tslibs.offsets import ( ApplyTypeError, as_datetime, _is_normalized, - _get_calendar, _to_dt64, _validate_business_time, + _get_calendar, _to_dt64, _determine_offset, apply_index_wraps, roll_yearday, @@ -557,28 +557,31 @@ def get_str(td): def apply(self, other): if isinstance(other, datetime): n = self.n + wday = other.weekday() - if n == 0 and other.weekday() > 4: - n = 1 - - result = other - - # avoid slowness below - if abs(n) > 5: - k = n // 5 - result = result + 
timedelta(7 * k) - if n < 0 and result.weekday() > 4: - n += 1 - n -= 5 * k - if n == 0 and result.weekday() > 4: - n -= 1 + # avoid slowness below by operating on weeks first + weeks = n // 5 + if n <= 0 and wday > 4: + # roll forward + n += 1 - while n != 0: - k = n // abs(n) - result = result + timedelta(k) - if result.weekday() < 5: - n -= k + n -= 5 * weeks + + # n is always >= 0 at this point + if n == 0 and wday > 4: + # roll back + days = 4 - wday + elif wday > 4: + # roll forward + days = (7 - wday) + (n - 1) + elif wday + n <= 4: + # shift by n days without leaving the current week + days = n + else: + # shift by n days plus 2 to get past the weekend + days = n + 2 + result = other + timedelta(days=7 * weeks + days) if self.offset: result = result + self.offset return result @@ -614,8 +617,8 @@ class BusinessHourMixin(BusinessMixin): def __init__(self, start='09:00', end='17:00', offset=timedelta(0)): # must be validated here to equality check kwds = {'offset': offset} - self.start = kwds['start'] = _validate_business_time(start) - self.end = kwds['end'] = _validate_business_time(end) + self.start = kwds['start'] = liboffsets._validate_business_time(start) + self.end = kwds['end'] = liboffsets._validate_business_time(end) self.kwds.update(kwds) self._offset = offset @@ -969,15 +972,8 @@ def onOffset(self, dt): @apply_wraps def apply(self, other): - n = self.n compare_day = self._get_offset_day(other) - - if n > 0 and other.day < compare_day: - n -= 1 - elif n <= 0 and other.day > compare_day: - # as if rolled forward already - n += 1 - + n = liboffsets.roll_convention(other.day, self.n, compare_day) return shift_month(other, n, self._day_opt) @apply_index_wraps @@ -1063,22 +1059,12 @@ class CustomBusinessMonthEnd(_CustomBusinessMonth): @apply_wraps def apply(self, other): - n = self.n - # First move to month offset cur_mend = self.m_offset.rollforward(other) # Find this custom month offset - cur_cmend = self.cbday.rollback(cur_mend) - - # handle zero 
case. arbitrarily rollforward - if n == 0 and other != cur_cmend: - n += 1 - - if other < cur_cmend and n >= 1: - n -= 1 - elif other > cur_cmend and n <= -1: - n += 1 + compare_date = self.cbday.rollback(cur_mend) + n = liboffsets.roll_monthday(other, self.n, compare_date) new = cur_mend + n * self.m_offset result = self.cbday.rollback(new) @@ -1091,23 +1077,12 @@ class CustomBusinessMonthBegin(_CustomBusinessMonth): @apply_wraps def apply(self, other): - n = self.n - dt_in = other - # First move to month offset - cur_mbegin = self.m_offset.rollback(dt_in) + cur_mbegin = self.m_offset.rollback(other) # Find this custom month offset - cur_cmbegin = self.cbday.rollforward(cur_mbegin) - - # handle zero case. arbitrarily rollforward - if n == 0 and dt_in != cur_cmbegin: - n += 1 - - if dt_in > cur_cmbegin and n <= -1: - n += 1 - elif dt_in < cur_cmbegin and n >= 1: - n -= 1 + compare_date = self.cbday.rollforward(cur_mbegin) + n = liboffsets.roll_monthday(other, self.n, compare_date) new = cur_mbegin + n * self.m_offset result = self.cbday.rollforward(new) @@ -1147,21 +1122,21 @@ def rule_code(self): @apply_wraps def apply(self, other): - n = self.n - if not self.onOffset(other): - _, days_in_month = tslib.monthrange(other.year, other.month) - if 1 < other.day < self.day_of_month: - other = other.replace(day=self.day_of_month) - if n > 0: - # rollforward so subtract 1 - n -= 1 - elif self.day_of_month < other.day < days_in_month: - other = other.replace(day=self.day_of_month) - if n < 0: - # rollforward in the negative direction so add 1 - n += 1 - elif n == 0: - n = 1 + # shift `other` to self.day_of_month, incrementing `n` if necessary + n = liboffsets.roll_convention(other.day, self.n, self.day_of_month) + + days_in_month = tslib.monthrange(other.year, other.month)[1] + + # For SemiMonthBegin on other.day == 1 and + # SemiMonthEnd on other.day == days_in_month, + # shifting `other` to `self.day_of_month` _always_ requires + # incrementing/decrementing `n`, 
regardless of whether it is + # initially positive. + if type(self) is SemiMonthBegin and (self.n <= 0 and other.day == 1): + n -= 1 + elif type(self) is SemiMonthEnd and (self.n > 0 and + other.day == days_in_month): + n += 1 return self._apply(n, other) @@ -1231,12 +1206,6 @@ def onOffset(self, dt): return dt.day in (self.day_of_month, days_in_month) def _apply(self, n, other): - # if other.day is not day_of_month move to day_of_month and update n - if n > 0 and other.day < self.day_of_month: - n -= 1 - elif other.day > self.day_of_month: - n += 1 - months = n // 2 day = 31 if n % 2 else self.day_of_month return shift_month(other, months, day) @@ -1282,12 +1251,6 @@ def onOffset(self, dt): return dt.day in (1, self.day_of_month) def _apply(self, n, other): - # if other.day is not day_of_month move to day_of_month and update n - if other.day < self.day_of_month: - n -= 1 - elif n <= 0 and other.day > self.day_of_month: - n += 1 - months = n // 2 + n % 2 day = 1 if n % 2 else self.day_of_month return shift_month(other, months, day) @@ -1564,7 +1527,8 @@ class QuarterOffset(DateOffset): _from_name_startingMonth = None _adjust_dst = True # TODO: Consider combining QuarterOffset and YearOffset __init__ at some - # point + # point. 
Also apply_index, onOffset, rule_code if + # startingMonth vs month attr names are resolved def __init__(self, n=1, normalize=False, startingMonth=None): self.n = self._validate_n(n) @@ -1595,26 +1559,22 @@ def rule_code(self): @apply_wraps def apply(self, other): - n = self.n - compare_day = self._get_offset_day(other) - - months_since = (other.month - self.startingMonth) % 3 - - if n <= 0 and (months_since != 0 or - (months_since == 0 and other.day > compare_day)): - # make sure to roll forward, so negate - n += 1 - elif n > 0 and (months_since == 0 and other.day < compare_day): - # pretend to roll back if on same month but before compare_day - n -= 1 - - return shift_month(other, 3 * n - months_since, self._day_opt) + # months_since: find the calendar quarter containing other.month, + # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep]. + # Then find the month in that quarter containing an onOffset date for + # self. `months_since` is the number of months to shift other.month + # to get to this on-offset month. + months_since = other.month % 3 - self.startingMonth % 3 + qtrs = liboffsets.roll_qtrday(other, self.n, self.startingMonth, + day_opt=self._day_opt, modby=3) + months = qtrs * 3 - months_since + return shift_month(other, months, self._day_opt) def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False - modMonth = (dt.month - self.startingMonth) % 3 - return modMonth == 0 and dt.day == self._get_offset_day(dt) + mod_month = (dt.month - self.startingMonth) % 3 + return mod_month == 0 and dt.day == self._get_offset_day(dt) @apply_index_wraps def apply_index(self, dtindex): @@ -2142,6 +2102,7 @@ def apply(self, other): n -= 1 elif n < 0 and other > current_easter: n += 1 + # TODO: Why does this handle the 0 case the opposite of others? # NOTE: easter returns a datetime.date so we have to convert to type of # other
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18762
2017-12-13T06:44:31Z
2017-12-30T12:37:09Z
2017-12-30T12:37:09Z
2018-01-23T04:40:48Z
[WIP] implement tests using hypothesis
diff --git a/.gitignore b/.gitignore index b1748ae72b8ba..8a4b2574b3dfb 100644 --- a/.gitignore +++ b/.gitignore @@ -61,6 +61,9 @@ dist coverage.xml coverage_html_report +# hypothesis test database +.hypothesis/ + # OS generated files # ###################### .directory diff --git a/pandas/tests/tseries/offsets/test_behavior.py b/pandas/tests/tseries/offsets/test_behavior.py new file mode 100644 index 0000000000000..0ff4ef1a29c77 --- /dev/null +++ b/pandas/tests/tseries/offsets/test_behavior.py @@ -0,0 +1,334 @@ +# -*- coding: utf-8 -*- +""" +Behavioral based tests for offsets and date_range. +""" +from datetime import timedelta + +import pytest +from hypothesis import given, assume +import hypothesis.strategies as st +import hypothesis.extra.numpy as hen +import hypothesis.extra.pytz as hepytz # hypothesis[pytz] + +import pandas as pd + +from pandas.tseries.offsets import (Hour, Minute, Second, Milli, Micro, Nano, + MonthEnd, MonthBegin, + BMonthEnd, BMonthBegin, + QuarterEnd, QuarterBegin, + BQuarterEnd, BQuarterBegin, + YearEnd, YearBegin, + BYearEnd, BYearBegin, + Week, LastWeekOfMonth, WeekOfMonth, + SemiMonthBegin, SemiMonthEnd, + Easter, + FY5253, FY5253Quarter, + DateOffset) +# TODO: +# BusinessDay, BusinessHour, CustomBusinessDay, CustomBusinessHour, +# CustomBusinessMonthEnd, CustomBusinessMonthBegin + + +tick_classes = [Hour, Minute, Second, Milli, Micro, Nano] +yqm_classes = [MonthBegin, MonthEnd, BMonthBegin, BMonthEnd, + QuarterBegin, QuarterEnd, BQuarterBegin, BQuarterEnd, + YearBegin, YearEnd, BYearBegin, BYearEnd] +offset_types = [Week, LastWeekOfMonth, WeekOfMonth, SemiMonthEnd, + SemiMonthBegin, FY5253Quarter, FY5253, + Easter, DateOffset] + tick_classes + yqm_classes + +# ---------------------------------------------------------------- +# Helpers for generating random data + +dt_max = pd.Timestamp.max.replace(nanosecond=0).to_pydatetime() +td_max = timedelta(106751, 85636, 854775) +td_min = -td_max - timedelta(microseconds=1) + +n_strategy = 
st.integers(min_value=-999, max_value=999) +# TODO: Choose these bounds systematically. (-999, 999) is arbitrarily chosen +# to get rid of OverflowErrors in development +month_strategy = st.integers(min_value=1, max_value=12) +weekday_strategy = st.integers(min_value=0, max_value=6) + + +def gen_dst_crossing(): + # Generate either a pair of Timestamps or a date_range that is known + # to cross a DST transition + raise NotImplementedError + + +def gen_date_range_freq(): + # return a freq str or offset object suitable for passing as + # `freq` kwarg to date_range + return st.sampled_from(['Y', 'Q', 'M', 'D', 'H', + 'T', 's', 'ms', 'us', 'ns']) + # TODO: Add the rest; business, multiples, ... + + +@st.composite +def gen_random_date_range(draw): + # TODO: Choose the min/max values more systematically + start = st.datetimes(min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(), + max_value=pd.Timestamp(2100, 1, 1).to_pydatetime()) + periods = st.integers(min_value=10, max_value=100) + freq = gen_date_range_freq() + tz = gen_random_tz() + + dti = pd.date_range(start=draw(start), tz=draw(tz), + freq=draw(freq), periods=draw(periods)) + return dti + + +def gen_random_tz(): + # Allows None + return st.one_of(st.none(), hepytz.timezones()) + # TODO: Weighting between naive and timezones? + # TODO: Get datetuil timezones? + + +gen_random_datetime = st.datetimes(min_value=pd.Timestamp.min.to_pydatetime(), + max_value=pd.Timestamp.max.to_pydatetime(), + timezones=gen_random_tz()) + + +def gen_random_timestamp(): + nano = st.integers(min_value=0, max_value=999) + dt = st.datetimes(min_value=pd.Timestamp.min.to_pydatetime(), + max_value=pd.Timestamp.max.to_pydatetime(), + timezones=gen_random_tz()) + ts = pd.Timestamp(dt) + + if dt != dt_max: + ts.replace(nanosecond=nano) + else: + ts = ts.replace(nanosecond=min(nano, pd.Timestamp.max.nanosecond)) + + # TODO: worry about timezones near min/max? 
+ return ts + + +def gen_random_datelike(): + # py_dates = st.dates() + py_datetimes = gen_random_datetime + + # dt64_dtypes = hen.datetime64_dtypes() + # np_dates = hen.arrays(dtype=dt64_dtypes, shape=()) + # TODO: Allow for non-scalar versions? + # FIXME: dt64.__add__(offset) does not get dispatched to + # offset.__radd__(dt64), just raises TypeError + + any_dates = st.one_of(py_datetimes) + return any_dates + + +def gen_timedeltalike(): + py_timedeltas = st.timedeltas(min_value=td_min, max_value=td_max) + pd_timedeltas = py_timedeltas.map(pd.Timedelta) + # TODO: get those last few nanoseconds? + + td64_dtypes = hen.timedelta64_dtypes() + np_timedeltas = hen.arrays(dtype=td64_dtypes, shape=()) + # TODO: Allow for non-scalar versions? + + # TODO: Week + # TODO: Tick + any_tds = st.one_of(py_timedeltas, pd_timedeltas, np_timedeltas) + return any_tds + + +@st.composite +def gen_random_relativedelta_DateOffset(draw): + relativedelta_kwds = set([ + 'years', 'months', 'weeks', 'days', + 'year', 'month', 'week', 'day', 'weekday', + 'hour', 'minute', 'second', 'microsecond', + 'nanosecond', 'nanoseconds', + 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds']) + kwargs = {kwd: st.integers() for kwd in relativedelta_kwds} + kwargs['n'] = st.integers() + kwargs['normalize'] = st.booleans() + kwargs = {key: draw(kwargs[key]) for key in kwargs} + return DateOffset(**kwargs) + + +@st.composite +def gen_random_offset(draw, cls): + # Note: `draw` is a dummy argument that gets supplied by the composite + # decorator + n = n_strategy + normalize = st.booleans() + + if cls in tick_classes + [MonthBegin, MonthEnd, BMonthBegin, BMonthEnd, + Easter]: + n = n.filter(lambda x: abs(x) < 100) # TODO: avoid arbitrary cutoff + tup = st.tuples(n, normalize) + + elif cls in [QuarterBegin, QuarterEnd, BQuarterBegin, BQuarterEnd]: + n = n.filter(lambda x: abs(x) < 25) # TODO: avoid arbitrary cutoff + startingMonth = month_strategy + tup = st.tuples(n, normalize, startingMonth) + + 
elif cls in [YearBegin, YearEnd, BYearBegin, BYearEnd]: + n = n.filter(lambda x: abs(x) < 6) # TODO: avoid arbitrary cutoff + month = month_strategy + tup = st.tuples(n, normalize, month) + + elif cls == Week: + n = n.filter(lambda x: abs(x) < 400) # TODO: avoid arbitrary cutoff + weekday = st.sampled_from([None, 0, 1, 2, 3, 4, 5, 6]) + tup = st.tuples(n, normalize, weekday) + + elif cls == LastWeekOfMonth: + n = n.filter(lambda x: abs(x) < 400) # TODO: avoid arbitrary cutoff + n = n.filter(lambda x: x != 0) + weekday = weekday_strategy + tup = st.tuples(n, normalize, weekday) + + elif cls == WeekOfMonth: + n = n.filter(lambda x: abs(x) < 400) # TODO: avoid arbitrary cutoff + n = n.filter(lambda x: x != 0) + week = st.integers(min_value=0, max_value=3) + weekday = weekday_strategy + tup = st.tuples(n, normalize, week, weekday) + + elif cls in [SemiMonthBegin, SemiMonthEnd]: + n = n.filter(lambda x: abs(x) < 800) # TODO: avoid arbitrary cutoff + day_of_month = st.integers(min_value=cls._min_day_of_month, + max_value=27) + tup = st.tuples(n, normalize, day_of_month) + + elif cls is FY5253: + n = n.filter(lambda x: abs(x) < 6) # TODO: avoid arbitrary cutoff + n = n.filter(lambda x: x != 0) + weekday = weekday_strategy + startingMonth = month_strategy + variation = st.sampled_from(["nearest", "last"]) + tup = st.tuples(n, normalize, weekday, startingMonth, variation) + + elif cls is FY5253Quarter: + n = n.filter(lambda x: abs(x) < 24) # TODO: avoid arbitrary cutoff + n = n.filter(lambda x: x != 0) + weekday = weekday_strategy + startingMonth = month_strategy + qtr_with_extra_week = st.integers(min_value=1, max_value=4) + variation = st.sampled_from(["nearest", "last"]) + tup = st.tuples(n, normalize, weekday, startingMonth, + qtr_with_extra_week, variation) + + elif cls is DateOffset: + # klass = cls(days=value, normalize=normalize) + return gen_random_relativedelta_DateOffset() + + else: + raise NotImplementedError(cls) + + args = draw(tup) + return cls(*args) + +# 
---------------------------------------------------------------- +# Tick-specific behavior tests + + +@given(n=n_strategy, m=n_strategy) +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_add_sub(cls, n, m): + # For all Tick subclasses and all integers n, m, we should have + # tick(n) + tick(m) == tick(n+m) + # tick(n) - tick(m) == tick(n-m) + left = cls(n) + right = cls(m) + expected = cls(n + m) + + assert left + right == expected + assert left.apply(right) == expected + + expected = cls(n - m) + assert left - right == expected + + +@given(n=n_strategy, m=n_strategy) +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_equality(cls, n, m): + # tick == tock iff tick.n == tock.n + left = cls(n) + right = cls(m) + if n == m: + assert left == right + assert not (left != right) + else: + assert left != right + assert not (left == right) + + +# ---------------------------------------------------------------- + +@given(dt=gen_random_datelike(), data=st.data()) +@pytest.mark.parametrize('cls', offset_types) +def test_on_offset_implementations(cls, dt, data): + # check that the class-specific implementations of onOffset match + # the general case definition: + # (dt + offset) - offset == dt + + offset = data.draw(gen_random_offset(cls), label='offset') + # TODO: Is there a more performant way to do this? 
+ + assume(not offset.normalize) + compare = (dt + offset) - offset + expected = compare == dt + + res = offset.onOffset(dt) + assert res == expected + + +@given(data=st.data()) +@pytest.mark.parametrize('cls', yqm_classes) +def test_apply_index_implementations(cls, data): + # offset.apply_index(dti)[i] should match dti[i] + offset + + offset = data.draw(gen_random_offset(cls), label='offset') + assume(offset.n != 0) # TODO: test for that case separately + + # rng = pd.date_range(start='1/1/2000', periods=100000, freq='T') + rng = data.draw(gen_random_date_range(), label='rng') + ser = pd.Series(rng) + + res = rng + offset + res_v2 = offset.apply_index(rng) + assert (res == res_v2).all() + + assert res[0] == rng[0] + offset + assert res[-1] == rng[-1] + offset + res2 = ser + offset + # apply_index is only for indexes, not series, so no res2_v2 + assert res2.iloc[0] == ser.iloc[0] + offset + assert res2.iloc[-1] == ser.iloc[-1] + offset + # TODO: Check randomly assorted entries, not just first/last + + +@given(freq=gen_date_range_freq()) +def test_range_matches_addition(freq): + + raise pytest.skip('Need to generate date_range args') + dr = pd.date_range('2016-10-30 12:00:00', freq=freq, + periods=20, tz='US/Eastern') + assert dr[-1] > pd.Timestamp('2016-11-10') # DST transition is crossed + + res = dr + freq + assert res[:-1].equals(dr[1:]) + + +@given(data=st.data()) +@pytest.mark.parametrize('cls', yqm_classes) +def test_shift_across_dst(cls, data): + # GH#18319 check that 1) timezone is correctly normalized and + # 2) that hour is not incorrectly changed by this normalization + + raise pytest.skip('Need to generate date_range args') + offset = data.draw(gen_random_offset(cls), label='offset') + dti = pd.date_range(start='2017-10-30 12:00:00', end='2017-11-06', + freq='D', tz='US/Eastern') + # dti includes a transition across DST boundary + assert (dti.hour == 12).all() # we haven't screwed up yet + + res = dti + offset + assert (res.hour == 12).all()
[skipci] Related: #17978 Exposes a whole bunch of broken cases. Many of them look like they are not currently caught because the large majority of offset tests use tz-naive pydatetime inputs. Note this does _not_ fix these bugs. The goal here is a Proof Of Concept for using hypothesis and discuss if/how we can make it useful. It also doesn't add hypothesis to the requirements because I have no idea what the appropriate file is for that.
https://api.github.com/repos/pandas-dev/pandas/pulls/18761
2017-12-13T06:41:50Z
2017-12-18T23:00:36Z
null
2018-02-11T22:00:35Z
CLN: ASV inference benchmark
diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py index dc1d6de73f8ae..16d9e7cd73cbb 100644 --- a/asv_bench/benchmarks/inference.py +++ b/asv_bench/benchmarks/inference.py @@ -1,77 +1,80 @@ -from .pandas_vb_common import * -import pandas as pd +import numpy as np +import pandas.util.testing as tm +from pandas import DataFrame, Series, to_numeric +from .pandas_vb_common import numeric_dtypes, lib, setup # noqa -class DtypeInfer(object): - goal_time = 0.2 +class NumericInferOps(object): # from GH 7332 + goal_time = 0.2 + params = numeric_dtypes + param_names = ['dtype'] + + def setup(self, dtype): + N = 5 * 10**5 + self.df = DataFrame({'A': np.arange(N).astype(dtype), + 'B': np.arange(N).astype(dtype)}) + + def time_add(self, dtype): + self.df['A'] + self.df['B'] + + def time_subtract(self, dtype): + self.df['A'] - self.df['B'] - def setup(self): - self.N = 500000 - self.df_int64 = DataFrame(dict(A=np.arange(self.N, dtype='int64'), - B=np.arange(self.N, dtype='int64'))) - self.df_int32 = DataFrame(dict(A=np.arange(self.N, dtype='int32'), - B=np.arange(self.N, dtype='int32'))) - self.df_uint32 = DataFrame(dict(A=np.arange(self.N, dtype='uint32'), - B=np.arange(self.N, dtype='uint32'))) - self.df_float64 = DataFrame(dict(A=np.arange(self.N, dtype='float64'), - B=np.arange(self.N, dtype='float64'))) - self.df_float32 = DataFrame(dict(A=np.arange(self.N, dtype='float32'), - B=np.arange(self.N, dtype='float32'))) - self.df_datetime64 = DataFrame(dict(A=pd.to_datetime(np.arange(self.N, dtype='int64'), unit='ms'), - B=pd.to_datetime(np.arange(self.N, dtype='int64'), unit='ms'))) - self.df_timedelta64 = DataFrame(dict(A=(self.df_datetime64['A'] - self.df_datetime64['B']), - B=self.df_datetime64['B'])) + def time_multiply(self, dtype): + self.df['A'] * self.df['B'] - def time_int64(self): - (self.df_int64['A'] + self.df_int64['B']) + def time_divide(self, dtype): + self.df['A'] / self.df['B'] - def time_int32(self): - (self.df_int32['A'] + 
self.df_int32['B']) + def time_modulo(self, dtype): + self.df['A'] % self.df['B'] - def time_uint32(self): - (self.df_uint32['A'] + self.df_uint32['B']) - def time_float64(self): - (self.df_float64['A'] + self.df_float64['B']) +class DateInferOps(object): + # from GH 7332 + goal_time = 0.2 + + def setup_cache(self): + N = 5 * 10**5 + df = DataFrame({'datetime64': np.arange(N).astype('datetime64[ms]')}) + df['timedelta'] = df['datetime64'] - df['datetime64'] + return df - def time_float32(self): - (self.df_float32['A'] + self.df_float32['B']) + def time_subtract_datetimes(self, df): + df['datetime64'] - df['datetime64'] - def time_datetime64(self): - (self.df_datetime64['A'] - self.df_datetime64['B']) + def time_timedelta_plus_datetime(self, df): + df['timedelta'] + df['datetime64'] - def time_timedelta64_1(self): - (self.df_timedelta64['A'] + self.df_timedelta64['B']) + def time_add_timedeltas(self, df): + df['timedelta'] + df['timedelta'] - def time_timedelta64_2(self): - (self.df_timedelta64['A'] + self.df_timedelta64['A']) +class ToNumeric(object): -class to_numeric(object): goal_time = 0.2 + params = ['ignore', 'coerce'] + param_names = ['errors'] - def setup(self): - self.n = 10000 - self.float = Series(np.random.randn(self.n * 100)) + def setup(self, errors): + N = 10000 + self.float = Series(np.random.randn(N)) self.numstr = self.float.astype('str') - self.str = Series(tm.makeStringIndex(self.n)) + self.str = Series(tm.makeStringIndex(N)) - def time_from_float(self): - pd.to_numeric(self.float) + def time_from_float(self, errors): + to_numeric(self.float, errors=errors) - def time_from_numeric_str(self): - pd.to_numeric(self.numstr) + def time_from_numeric_str(self, errors): + to_numeric(self.numstr, errors=errors) - def time_from_str_ignore(self): - pd.to_numeric(self.str, errors='ignore') + def time_from_str(self, errors): + to_numeric(self.str, errors=errors) - def time_from_str_coerce(self): - pd.to_numeric(self.str, errors='coerce') - -class 
to_numeric_downcast(object): +class ToNumericDowncast(object): param_names = ['dtype', 'downcast'] params = [['string-float', 'string-int', 'string-nint', 'datetime64', @@ -81,37 +84,30 @@ class to_numeric_downcast(object): N = 500000 N2 = int(N / 2) - data_dict = { - 'string-int': (['1'] * N2) + ([2] * N2), - 'string-nint': (['-1'] * N2) + ([2] * N2), - 'datetime64': np.repeat(np.array(['1970-01-01', '1970-01-02'], - dtype='datetime64[D]'), N), - 'string-float': (['1.1'] * N2) + ([2] * N2), - 'int-list': ([1] * N2) + ([2] * N2), - 'int32': np.repeat(np.int32(1), N) - } + data_dict = {'string-int': ['1'] * N2 + [2] * N2, + 'string-nint': ['-1'] * N2 + [2] * N2, + 'datetime64': np.repeat(np.array(['1970-01-01', '1970-01-02'], + dtype='datetime64[D]'), N), + 'string-float': ['1.1'] * N2 + [2] * N2, + 'int-list': [1] * N2 + [2] * N2, + 'int32': np.repeat(np.int32(1), N)} def setup(self, dtype, downcast): self.data = self.data_dict[dtype] def time_downcast(self, dtype, downcast): - pd.to_numeric(self.data, downcast=downcast) + to_numeric(self.data, downcast=downcast) class MaybeConvertNumeric(object): - def setup(self): - n = 1000000 - arr = np.repeat([2**63], n) - arr = arr + np.arange(n).astype('uint64') - arr = np.array([arr[i] if i%2 == 0 else - str(arr[i]) for i in range(n)], - dtype=object) - - arr[-1] = -1 - self.data = arr - self.na_values = set() - - def time_convert(self): - lib.maybe_convert_numeric(self.data, self.na_values, - coerce_numeric=False) + def setup_cache(self): + N = 10**6 + arr = np.repeat([2**63], N) + np.arange(N).astype('uint64') + data = arr.astype(object) + data[1::2] = arr[1::2].astype(str) + data[-1] = -1 + return data + + def time_convert(self, data): + lib.maybe_convert_numeric(data, set(), coerce_numeric=False) diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index b7040bfdb9397..4de87ddcb0683 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ 
b/asv_bench/benchmarks/pandas_vb_common.py @@ -14,6 +14,10 @@ except ImportError: pass +numeric_dtypes = [np.int64, np.int32, np.uint32, np.uint64, np.float32, + np.float64, np.int16, np.int8, np.uint16, np.uint8] +datetime_dtypes = [np.datetime64, np.timedelta64] + # This function just needs to be imported into each benchmark file in order to # sets up the random seed before each function. # http://asv.readthedocs.io/en/latest/writing_benchmarks.html @@ -39,7 +43,7 @@ def remove(self, f): def teardown(self, *args, **kwargs): self.remove(self.fname) -# try em until it works! +# Compatability import for lib for imp in ['pandas._libs.lib', 'pandas.lib', 'pandas_tseries']: try: lib = import_module(imp)
- Flake8 and removed star imports - Used `params` and `setup_cache` where possible ``` asv dev -b ^inference · Discovering benchmarks · Running 13 total benchmarks (1 commits * 1 environments * 13 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/inference.py:105 [ 7.69%] ··· Running inference.MaybeConvertNumeric.time_convert 2.55s [ 15.38%] ··· Running inference.NumericInferOps.time_add ok [ 15.38%] ···· ========= ======== dtype --------- -------- int64 7.27ms int32 4.09ms uint32 2.33ms float32 3.98ms float64 7.20ms ========= ======== [ 23.08%] ··· Running inference.NumericInferOps.time_divide ok [ 23.08%] ···· ========= ======== dtype --------- -------- int64 9.22ms int32 7.77ms uint32 6.34ms float32 4.00ms float64 7.25ms ========= ======== [ 30.77%] ··· Running inference.NumericInferOps.time_modulo ok [ 30.77%] ···· ========= ======== dtype --------- -------- int64 17.3ms int32 9.96ms uint32 10.3ms float32 8.27ms float64 8.37ms ========= ======== [ 38.46%] ··· Running inference.NumericInferOps.time_multiply ok [ 38.46%] ···· ========= ======== dtype --------- -------- int64 7.18ms int32 4.12ms uint32 2.42ms float32 4.08ms float64 7.15ms ========= ======== [ 46.15%] ··· Running inference.NumericInferOps.time_subtract ok [ 46.15%] ···· ========= ======== dtype --------- -------- int64 7.23ms int32 3.96ms uint32 2.36ms float32 3.96ms float64 7.12ms ========= ======== [ 53.85%] ··· Running inference.ToNumeric.time_from_float ok [ 53.85%] ···· ======== ======= errors -------- ------- ignore 157μs coerce 158μs ======== ======= [ 61.54%] ··· Running inference.ToNumeric.time_from_numeric_str ok [ 61.54%] ···· ======== ======== errors -------- -------- ignore 8.08ms coerce 8.05ms ======== ======== [ 69.23%] ··· Running inference.ToNumeric.time_from_str ok [ 69.23%] 
···· ======== ======== errors -------- -------- ignore 365μs coerce 24.7ms ======== ======== [ 76.92%] ··· Running inference.ToNumericDowncast.time_downcast ok [ 76.92%] ···· ============== ======== ========= ======== ========== ======== -- downcast -------------- ----------------------------------------------- dtype None integer signed unsigned float ============== ======== ========= ======== ========== ======== string-float 264ms 270ms 272ms 267ms 268ms string-int 597ms 621ms 640ms 612ms 605ms string-nint 613ms 622ms 631ms 597ms 610ms datetime64 5.17ms 72.0ms 72.3ms 74.3ms 8.87ms int-list 65.0ms 92.7ms 92.5ms 94.5ms 66.8ms int32 26.2μs 27.2ms 26.9ms 27.9ms 1.35ms ============== ======== ========= ======== ========== ======== [ 76.92%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/inference.py:40 [ 84.62%] ··· Running inference.DateInferOps.time_add_timedeltas 29.1ms [ 92.31%] ··· Running inference.DateInferOps.time_subtract_datetimes 23.6ms [100.00%] ··· Running inference.DateInferOps.time_timedelta_plus_datetime 157ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18759
2017-12-13T06:27:28Z
2017-12-18T13:00:00Z
2017-12-18T13:00:00Z
2017-12-18T18:51:46Z
CLN: ASV index_object benchmark
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index 65af7b077d80f..3f9016787aab4 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -1,7 +1,8 @@ import numpy as np -from pandas import Series, Index, DatetimeIndex, Timestamp +import pandas.util.testing as tm +from pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex -from .pandas_vb_common import setup # noqa +from .pandas_vb_common import setup # noqa class SeriesConstructors(object): @@ -21,7 +22,6 @@ class SeriesConstructors(object): def setup(self, data_fmt, with_index): N = 10**4 - np.random.seed(1234) arr = np.random.randn(N) self.data = data_fmt(arr) self.index = np.arange(N) if with_index else None @@ -35,21 +35,32 @@ class SeriesDtypesConstructors(object): goal_time = 0.2 def setup(self): - N = 10**2 + N = 10**4 self.arr = np.random.randn(N, N) self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object) - - self.data = np.random.randn(N) - self.index = Index(np.arange(N)) - self.s = Series([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')] * N * 10) def time_index_from_array_string(self): Index(self.arr_str) + def time_index_from_array_floats(self): + Index(self.arr) + def time_dtindex_from_series(self): DatetimeIndex(self.s) def time_dtindex_from_index_with_series(self): Index(self.s) + + +class MultiIndexConstructor(object): + + goal_time = 0.2 + + def setup(self): + N = 10**4 + self.iterables = [tm.makeStringIndex(N), range(20)] + + def time_multiindex_from_iterables(self): + MultiIndex.from_product(self.iterables) diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index a607168ea0457..d73b216478ad5 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -1,207 +1,151 @@ -from .pandas_vb_common import * +import numpy as np +import pandas.util.testing as tm +from pandas import (Series, date_range, DatetimeIndex, Index, MultiIndex, + 
RangeIndex) + +from .pandas_vb_common import setup # noqa class SetOperations(object): + + goal_time = 0.2 + params = (['datetime', 'date_string', 'int', 'strings'], + ['intersection', 'union', 'symmetric_difference']) + param_names = ['dtype', 'method'] + + def setup(self, dtype, method): + N = 10**5 + dates_left = date_range('1/1/2000', periods=N, freq='T') + fmt = '%Y-%m-%d %H:%M:%S' + date_str_left = Index(dates_left.strftime(fmt)) + int_left = Index(np.arange(N)) + str_left = tm.makeStringIndex(N) + data = {'datetime': {'left': dates_left, 'right': dates_left[:-1]}, + 'date_string': {'left': date_str_left, + 'right': date_str_left[:-1]}, + 'int': {'left': int_left, 'right': int_left[:-1]}, + 'strings': {'left': str_left, 'right': str_left[:-1]}} + self.left = data[dtype]['left'] + self.right = data[dtype]['right'] + + def time_operation(self, dtype, method): + getattr(self.left, method)(self.right) + + +class SetDisjoint(object): + goal_time = 0.2 def setup(self): - self.rng = date_range('1/1/2000', periods=10000, freq='T') - self.rng2 = self.rng[:(-1)] - - # object index with datetime values - if (self.rng.dtype == object): - self.idx_rng = self.rng.view(Index) - else: - self.idx_rng = self.rng.astype(object) - self.idx_rng2 = self.idx_rng[:(-1)] - - # other datetime - N = 100000 - A = N - 20000 + N = 10**5 B = N + 20000 - self.dtidx1 = DatetimeIndex(range(N)) - self.dtidx2 = DatetimeIndex(range(A, B)) - self.dtidx3 = DatetimeIndex(range(N, B)) - - # integer - self.N = 1000000 - self.options = np.arange(self.N) - self.left = Index( - self.options.take(np.random.permutation(self.N)[:(self.N // 2)])) - self.right = Index( - self.options.take(np.random.permutation(self.N)[:(self.N // 2)])) - - # strings - N = 10000 - strs = tm.rands_array(10, N) - self.leftstr = Index(strs[:N * 2 // 3]) - self.rightstr = Index(strs[N // 3:]) - - def time_datetime_intersection(self): - self.rng.intersection(self.rng2) - - def time_datetime_union(self): - self.rng.union(self.rng2) 
- - def time_datetime_difference(self): - self.dtidx1.difference(self.dtidx2) + self.datetime_left = DatetimeIndex(range(N)) + self.datetime_right = DatetimeIndex(range(N, B)) def time_datetime_difference_disjoint(self): - self.dtidx1.difference(self.dtidx3) - - def time_datetime_symmetric_difference(self): - self.dtidx1.symmetric_difference(self.dtidx2) - - def time_index_datetime_intersection(self): - self.idx_rng.intersection(self.idx_rng2) - - def time_index_datetime_union(self): - self.idx_rng.union(self.idx_rng2) - - def time_int64_intersection(self): - self.left.intersection(self.right) - - def time_int64_union(self): - self.left.union(self.right) - - def time_int64_difference(self): - self.left.difference(self.right) - - def time_int64_symmetric_difference(self): - self.left.symmetric_difference(self.right) - - def time_str_difference(self): - self.leftstr.difference(self.rightstr) - - def time_str_symmetric_difference(self): - self.leftstr.symmetric_difference(self.rightstr) + self.datetime_left.difference(self.datetime_right) class Datetime(object): + goal_time = 0.2 def setup(self): - self.dr = pd.date_range('20000101', freq='D', periods=10000) + self.dr = date_range('20000101', freq='D', periods=10000) def time_is_dates_only(self): self.dr._is_dates_only -class Float64(object): - goal_time = 0.2 - - def setup(self): - self.idx = tm.makeFloatIndex(1000000) - self.mask = ((np.arange(self.idx.size) % 3) == 0) - self.series_mask = Series(self.mask) - - self.baseidx = np.arange(1000000.0) - - def time_boolean_indexer(self): - self.idx[self.mask] - - def time_boolean_series_indexer(self): - self.idx[self.series_mask] - - def time_construct(self): - Index(self.baseidx) - - def time_div(self): - (self.idx / 2) - - def time_get(self): - self.idx[1] +class Ops(object): - def time_mul(self): - (self.idx * 2) + sample_time = 0.2 + params = ['float', 'int'] + param_names = ['dtype'] - def time_slice_indexer_basic(self): - self.idx[:(-1)] + def setup(self, dtype): + 
N = 10**6 + indexes = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'} + self.index = getattr(tm, indexes[dtype])(N) - def time_slice_indexer_even(self): - self.idx[::2] + def time_add(self, dtype): + self.index + 2 + def time_subtract(self, dtype): + self.index - 2 -class StringIndex(object): - goal_time = 0.2 + def time_multiply(self, dtype): + self.index * 2 - def setup(self): - self.idx = tm.makeStringIndex(1000000) - self.mask = ((np.arange(1000000) % 3) == 0) - self.series_mask = Series(self.mask) - - def time_boolean_indexer(self): - self.idx[self.mask] - - def time_boolean_series_indexer(self): - self.idx[self.series_mask] + def time_divide(self, dtype): + self.index / 2 - def time_slice_indexer_basic(self): - self.idx[:(-1)] + def time_modulo(self, dtype): + self.index % 2 - def time_slice_indexer_even(self): - self.idx[::2] +class Duplicated(object): -class Multi1(object): goal_time = 0.2 def setup(self): - (n, k) = (200, 5000) - self.levels = [np.arange(n), tm.makeStringIndex(n).values, (1000 + np.arange(n))] - self.labels = [np.random.choice(n, (k * n)) for lev in self.levels] - self.mi = MultiIndex(levels=self.levels, labels=self.labels) - - self.iterables = [tm.makeStringIndex(10000), range(20)] + n, k = 200, 5000 + levels = [np.arange(n), + tm.makeStringIndex(n).values, + 1000 + np.arange(n)] + labels = [np.random.choice(n, (k * n)) for lev in levels] + self.mi = MultiIndex(levels=levels, labels=labels) def time_duplicated(self): self.mi.duplicated() - def time_from_product(self): - MultiIndex.from_product(self.iterables) +class Sortlevel(object): -class Multi2(object): goal_time = 0.2 def setup(self): - self.n = ((((3 * 5) * 7) * 11) * (1 << 10)) - (low, high) = (((-1) << 12), (1 << 12)) - self.f = (lambda k: np.repeat(np.random.randint(low, high, (self.n // k)), k)) - self.i = np.random.permutation(self.n) - self.mi = MultiIndex.from_arrays([self.f(11), self.f(7), self.f(5), self.f(3), self.f(1)])[self.i] + n = 1182720 + low, high = -4096, 4096 + 
arrs = [np.repeat(np.random.randint(low, high, (n // k)), k) + for k in [11, 7, 5, 3, 1]] + self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)] - self.a = np.repeat(np.arange(100), 1000) - self.b = np.tile(np.arange(1000), 100) - self.midx2 = MultiIndex.from_arrays([self.a, self.b]) - self.midx2 = self.midx2.take(np.random.permutation(np.arange(100000))) + a = np.repeat(np.arange(100), 1000) + b = np.tile(np.arange(1000), 100) + self.mi = MultiIndex.from_arrays([a, b]) + self.mi = self.mi.take(np.random.permutation(np.arange(100000))) def time_sortlevel_int64(self): - self.mi.sortlevel() + self.mi_int.sortlevel() def time_sortlevel_zero(self): - self.midx2.sortlevel(0) + self.mi.sortlevel(0) def time_sortlevel_one(self): - self.midx2.sortlevel(1) + self.mi.sortlevel(1) -class Multi3(object): +class MultiIndexValues(object): + goal_time = 0.2 - def setup(self): - self.level1 = range(1000) - self.level2 = date_range(start='1/1/2012', periods=100) - self.mi = MultiIndex.from_product([self.level1, self.level2]) + def setup_cache(self): - def time_datetime_level_values_full(self): - self.mi.copy().values + level1 = range(1000) + level2 = date_range(start='1/1/2012', periods=100) + mi = MultiIndex.from_product([level1, level2]) + return mi - def time_datetime_level_values_sliced(self): - self.mi[:10].values + def time_datetime_level_values_copy(self, mi): + mi.copy().values + + def time_datetime_level_values_sliced(self, mi): + mi[:10].values class Range(object): + goal_time = 0.2 def setup(self): @@ -221,20 +165,60 @@ def time_min_trivial(self): self.idx_inc.min() -class IndexOps(object): +class IndexAppend(object): + goal_time = 0.2 def setup(self): + N = 10000 - self.ridx = [RangeIndex(i * 100, (i + 1) * 100) for i in range(N)] - self.iidx = [idx.astype(int) for idx in self.ridx] - self.oidx = [idx.astype(str) for idx in self.iidx] + self.range_idx = RangeIndex(0, 100) + self.int_idx = self.range_idx.astype(int) + self.obj_idx = 
self.int_idx.astype(str) + self.range_idxs = [] + self.int_idxs = [] + self.object_idxs = [] + for i in range(1, N): + r_idx = RangeIndex(i * 100, (i + 1) * 100) + self.range_idxs.append(r_idx) + i_idx = r_idx.astype(int) + self.int_idxs.append(i_idx) + o_idx = i_idx.astype(str) + self.object_idxs.append(o_idx) + + def time_append_range_list(self): + self.range_idx.append(self.range_idxs) + + def time_append_int_list(self): + self.int_idx.append(self.int_idxs) + + def time_append_obj_list(self): + self.obj_idx.append(self.object_idxs) + + +class Indexing(object): + + goal_time = 0.2 + params = ['String', 'Float', 'Int'] + param_names = ['dtype'] + + def setup(self, dtype): + N = 10**6 + self.idx = getattr(tm, 'make{}Index'.format(dtype))(N) + self.array_mask = (np.arange(N) % 3) == 0 + self.series_mask = Series(self.array_mask) - def time_concat_range(self): - self.ridx[0].append(self.ridx[1:]) + def time_boolean_array(self, dtype): + self.idx[self.array_mask] - def time_concat_int(self): - self.iidx[0].append(self.iidx[1:]) + def time_boolean_series(self, dtype): + self.idx[self.series_mask] - def time_concat_obj(self): - self.oidx[0].append(self.oidx[1:]) + def time_get(self, dtype): + self.idx[1] + + def time_slice(self, dtype): + self.idx[:-1] + + def time_slice_step(self, dtype): + self.idx[::2] diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index f271b82c758ee..5b12f6ea89614 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -367,5 +367,3 @@ def time_assign_with_setitem(self): np.random.seed(1234) for i in range(100): self.df[i] = np.random.randn(self.N) - -
- Remove star imports and flake8 checked - Moved some index constructor benchmarks to `ctors.py` (probably should rename this file in the future) - Moved some index indexing benchmarks to `indexing.py` ``` asv dev -b ^index_object · Discovering benchmarks · Running 32 total benchmarks (1 commits * 1 environments * 32 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/index_object.py:160 [ 3.12%] ··· Running index_object.MultiIndexValues.time_datetime_level_values_copy 30.1ms [ 6.25%] ··· Running index_object.MultiIndexValues.time_datetime_level_values_sliced 544μs [ 9.38%] ··· Running index_object.Datetime.time_is_dates_only 357μs [ 12.50%] ··· Running index_object.Duplicated.time_duplicated 228ms [ 15.62%] ··· Running index_object.IndexAppend.time_append_int_list 285ms [ 18.75%] ··· Running index_object.IndexAppend.time_append_obj_list 299ms [ 21.88%] ··· Running index_object.IndexAppend.time_append_range_list 310ms [ 25.00%] ··· Running index_object.Ops.time_add ok [ 25.00%] ···· ======= ======== dtype ------- -------- float 6.22ms int 5.53ms ======= ======== [ 28.12%] ··· Running index_object.Ops.time_divide ok [ 28.12%] ···· ======= ======== dtype ------- -------- float 5.59ms int 20.7ms ======= ======== [ 31.25%] ··· Running index_object.Ops.time_modulo ok [ 31.25%] ···· ======= ======== dtype ------- -------- float 15.8ms int 19.1ms ======= ======== [ 34.38%] ··· Running index_object.Ops.time_multiply ok [ 34.38%] ···· ======= ======== dtype ------- -------- float 5.55ms int 5.82ms ======= ======== [ 37.50%] ··· Running index_object.Ops.time_subtract ok [ 37.50%] ···· ======= ======== dtype ------- -------- float 5.53ms int 5.51ms ======= ======== [ 40.62%] ··· Running index_object.Range.time_max 65.6ms [ 43.75%] ··· Running 
index_object.Range.time_max_trivial 61.5ms [ 46.88%] ··· Running index_object.Range.time_min 62.4ms [ 50.00%] ··· Running index_object.Range.time_min_trivial 63.3ms [ 53.12%] ··· Running index_object.SetOperations.time_datetime_difference 14.2ms [ 56.25%] ··· Running index_object.SetOperations.time_datetime_difference_disjoint 8.42ms [ 59.38%] ··· Running index_object.SetOperations.time_datetime_intersection 1.35ms [ 62.50%] ··· Running index_object.SetOperations.time_datetime_symmetric_difference 18.5ms [ 65.62%] ··· Running index_object.SetOperations.time_datetime_union 895μs [ 68.75%] ··· Running index_object.SetOperations.time_index_datetime_intersection 6.96ms [ 71.88%] ··· Running index_object.SetOperations.time_index_datetime_union 6.91ms [ 75.00%] ··· Running index_object.SetOperations.time_int64_difference 13.6ms [ 78.12%] ··· Running index_object.SetOperations.time_int64_intersection 6.33ms [ 81.25%] ··· Running index_object.SetOperations.time_int64_symmetric_difference 20.0ms [ 84.38%] ··· Running index_object.SetOperations.time_int64_union 12.1ms [ 87.50%] ··· Running index_object.SetOperations.time_str_difference 6.12ms [ 90.62%] ··· Running index_object.SetOperations.time_str_symmetric_difference 11.8ms [ 93.75%] ··· Running index_object.Sortlevel.time_sortlevel_int64 775ms [ 96.88%] ··· Running index_object.Sortlevel.time_sortlevel_one 18.2ms [100.00%] ··· Running index_object.Sortlevel.time_sortlevel_zero 20.5ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18758
2017-12-13T05:06:10Z
2017-12-26T07:55:24Z
2017-12-26T07:55:24Z
2017-12-31T04:49:26Z
BUG: Fix IntervalIndex.to_tuples() with NA values
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 37032ff6bc313..9dc10a09378f8 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -138,6 +138,7 @@ Other Enhancements - :func:`Series` / :func:`DataFrame` tab completion also returns identifiers in the first level of a :func:`MultiIndex`. (:issue:`16326`) - :func:`read_excel()` has gained the ``nrows`` parameter (:issue:`16645`) - :func:``DataFrame.to_json`` and ``Series.to_json`` now accept an ``index`` argument which allows the user to exclude the index from the JSON output (:issue:`17394`) +- ``IntervalIndex.to_tuples()`` has gained the ``na_tuple`` parameter to control whether NA is returned as a tuple of NA, or NA itself (:issue:`18756`) .. _whatsnew_0220.api_breaking: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 292b0f638f821..cb786574909db 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -544,9 +544,31 @@ def from_tuples(cls, data, closed='right', name=None, copy=False): return cls.from_arrays(left, right, closed, name=name, copy=False) - def to_tuples(self): - """Return an Index of tuples of the form (left, right)""" - return Index(_asarray_tuplesafe(zip(self.left, self.right))) + def to_tuples(self, na_tuple=True): + """ + Return an Index of tuples of the form (left, right) + + Parameters + ---------- + na_tuple : boolean, default True + Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA + value itself if False, ``nan``. 
+ + ..versionadded:: 0.22.0 + + Examples + -------- + >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3]) + >>> idx.to_tuples() + Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object') + >>> idx.to_tuples(na_tuple=False) + Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') + """ + tuples = _asarray_tuplesafe(zip(self.left, self.right)) + if not na_tuple: + # GH 18756 + tuples = np.where(~self._isnan, tuples, np.nan) + return Index(tuples) @cache_readonly def _multiindex(self): diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index abad930793d7f..c809127a66ab8 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -7,6 +7,7 @@ Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp, Timedelta, compat, date_range, timedelta_range, DateOffset) from pandas.compat import lzip +from pandas.core.common import _asarray_tuplesafe from pandas.tseries.offsets import Day from pandas._libs.interval import IntervalTree from pandas.tests.indexes.common import Base @@ -1072,6 +1073,45 @@ def test_is_non_overlapping_monotonic(self, closed): idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is True + @pytest.mark.parametrize('tuples', [ + lzip(range(10), range(1, 11)), + lzip(date_range('20170101', periods=10), + date_range('20170101', periods=10)), + lzip(timedelta_range('0 days', periods=10), + timedelta_range('1 day', periods=10))]) + def test_to_tuples(self, tuples): + # GH 18756 + idx = IntervalIndex.from_tuples(tuples) + result = idx.to_tuples() + expected = Index(_asarray_tuplesafe(tuples)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('tuples', [ + lzip(range(10), range(1, 11)) + [np.nan], + lzip(date_range('20170101', periods=10), + date_range('20170101', periods=10)) + [np.nan], + lzip(timedelta_range('0 days', periods=10), + timedelta_range('1 day', periods=10)) + [np.nan]]) + 
@pytest.mark.parametrize('na_tuple', [True, False]) + def test_to_tuples_na(self, tuples, na_tuple): + # GH 18756 + idx = IntervalIndex.from_tuples(tuples) + result = idx.to_tuples(na_tuple=na_tuple) + + # check the non-NA portion + expected_notna = Index(_asarray_tuplesafe(tuples[:-1])) + result_notna = result[:-1] + tm.assert_index_equal(result_notna, expected_notna) + + # check the NA portion + result_na = result[-1] + if na_tuple: + assert isinstance(result_na, tuple) + assert len(result_na) == 2 + assert all(isna(x) for x in result_na) + else: + assert isna(result_na) + class TestIntervalRange(object):
- [X] closes #18756 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18757
2017-12-13T03:56:50Z
2017-12-15T11:32:04Z
2017-12-15T11:32:03Z
2017-12-16T18:33:47Z
CLN: factor apply out of frame.py
diff --git a/pandas/core/apply.py b/pandas/core/apply.py new file mode 100644 index 0000000000000..2f43087f7dff9 --- /dev/null +++ b/pandas/core/apply.py @@ -0,0 +1,301 @@ +import numpy as np +from pandas import compat +from pandas._libs import lib +from pandas.core.dtypes.common import ( + is_extension_type, + is_sequence) + +from pandas.io.formats.printing import pprint_thing + + +def frame_apply(obj, func, axis=0, broadcast=False, + raw=False, reduce=None, args=(), **kwds): + """ construct and return a row or column based frame apply object """ + + axis = obj._get_axis_number(axis) + if axis == 0: + klass = FrameRowApply + elif axis == 1: + klass = FrameColumnApply + + return klass(obj, func, broadcast=broadcast, + raw=raw, reduce=reduce, args=args, kwds=kwds) + + +class FrameApply(object): + + def __init__(self, obj, func, broadcast, raw, reduce, args, kwds): + self.obj = obj + self.broadcast = broadcast + self.raw = raw + self.reduce = reduce + self.args = args + + self.ignore_failures = kwds.pop('ignore_failures', False) + self.kwds = kwds + + # curry if needed + if kwds or args and not isinstance(func, np.ufunc): + def f(x): + return func(x, *args, **kwds) + else: + f = func + + self.f = f + + @property + def columns(self): + return self.obj.columns + + @property + def index(self): + return self.obj.index + + @property + def values(self): + return self.obj.values + + @property + def agg_axis(self): + return self.obj._get_agg_axis(self.axis) + + def get_result(self): + """ compute the results """ + + # all empty + if len(self.columns) == 0 and len(self.index) == 0: + return self.apply_empty_result() + + # string dispatch + if isinstance(self.f, compat.string_types): + if self.axis: + self.kwds['axis'] = self.axis + return getattr(self.obj, self.f)(*self.args, **self.kwds) + + # ufunc + elif isinstance(self.f, np.ufunc): + with np.errstate(all='ignore'): + results = self.f(self.values) + return self.obj._constructor(data=results, index=self.index, + 
columns=self.columns, copy=False) + + # broadcasting + if self.broadcast: + return self.apply_broadcast() + + # one axis empty + if not all(self.obj.shape): + return self.apply_empty_result() + + # raw + if self.raw and not self.obj._is_mixed_type: + return self.apply_raw() + + return self.apply_standard() + + def apply_empty_result(self): + from pandas import Series + reduce = self.reduce + + if reduce is None: + reduce = False + + EMPTY_SERIES = Series([]) + try: + r = self.f(EMPTY_SERIES, *self.args, **self.kwds) + reduce = not isinstance(r, Series) + except Exception: + pass + + if reduce: + return Series(np.nan, index=self.agg_axis) + else: + return self.obj.copy() + + def apply_raw(self): + try: + result = lib.reduce(self.values, self.f, axis=self.axis) + except Exception: + result = np.apply_along_axis(self.f, self.axis, self.values) + + # TODO: mixed type case + from pandas import DataFrame, Series + if result.ndim == 2: + return DataFrame(result, index=self.index, columns=self.columns) + else: + return Series(result, index=self.agg_axis) + + def apply_standard(self): + from pandas import Series + + reduce = self.reduce + if reduce is None: + reduce = True + + # try to reduce first (by default) + # this only matters if the reduction in values is of different dtype + # e.g. 
if we want to apply to a SparseFrame, then can't directly reduce + if reduce: + values = self.values + + # we cannot reduce using non-numpy dtypes, + # as demonstrated in gh-12244 + if not is_extension_type(values): + + # Create a dummy Series from an empty array + index = self.obj._get_axis(self.axis) + empty_arr = np.empty(len(index), dtype=values.dtype) + + dummy = Series(empty_arr, index=index, dtype=values.dtype) + + try: + labels = self.agg_axis + result = lib.reduce(values, self.f, + axis=self.axis, + dummy=dummy, + labels=labels) + return Series(result, index=labels) + except Exception: + pass + + # compute the result using the series generator + results, res_index, res_columns = self._apply_series_generator() + + # wrap results + return self.wrap_results(results, res_index, res_columns) + + def _apply_series_generator(self): + series_gen = self.series_generator + res_index = self.result_index + res_columns = self.result_columns + + i = None + keys = [] + results = {} + if self.ignore_failures: + successes = [] + for i, v in enumerate(series_gen): + try: + results[i] = self.f(v) + keys.append(v.name) + successes.append(i) + except Exception: + pass + + # so will work with MultiIndex + if len(successes) < len(res_index): + res_index = res_index.take(successes) + + else: + try: + for i, v in enumerate(series_gen): + results[i] = self.f(v) + keys.append(v.name) + except Exception as e: + if hasattr(e, 'args'): + + # make sure i is defined + if i is not None: + k = res_index[i] + e.args = e.args + ('occurred at index %s' % + pprint_thing(k), ) + raise + + return results, res_index, res_columns + + def wrap_results(self, results, res_index, res_columns): + from pandas import Series + + if len(results) > 0 and is_sequence(results[0]): + if not isinstance(results[0], Series): + index = res_columns + else: + index = None + + result = self.obj._constructor(data=results, index=index) + result.columns = res_index + + if self.axis == 1: + result = result.T + result = 
result._convert( + datetime=True, timedelta=True, copy=False) + + else: + + result = Series(results) + result.index = res_index + + return result + + def _apply_broadcast(self, target): + result_values = np.empty_like(target.values) + columns = target.columns + for i, col in enumerate(columns): + result_values[:, i] = self.f(target[col]) + + result = self.obj._constructor(result_values, index=target.index, + columns=target.columns) + return result + + +class FrameRowApply(FrameApply): + axis = 0 + + def get_result(self): + + # dispatch to agg + if isinstance(self.f, (list, dict)): + return self.obj.aggregate(self.f, axis=self.axis, + *self.args, **self.kwds) + + return super(FrameRowApply, self).get_result() + + def apply_broadcast(self): + return self._apply_broadcast(self.obj) + + @property + def series_generator(self): + return (self.obj._ixs(i, axis=1) + for i in range(len(self.columns))) + + @property + def result_index(self): + return self.columns + + @property + def result_columns(self): + return self.index + + +class FrameColumnApply(FrameApply): + axis = 1 + + def __init__(self, obj, func, broadcast, raw, reduce, args, kwds): + super(FrameColumnApply, self).__init__(obj, func, broadcast, + raw, reduce, args, kwds) + + # skip if we are mixed datelike and trying reduce across axes + # GH6125 + if self.reduce: + if self.obj._is_mixed_type and self.obj._is_datelike_mixed_type: + self.reduce = False + + def apply_broadcast(self): + return self._apply_broadcast(self.obj.T).T + + @property + def series_generator(self): + from pandas import Series + dtype = object if self.obj._is_mixed_type else None + return (Series._from_array(arr, index=self.columns, name=name, + dtype=dtype) + for i, (arr, name) in enumerate(zip(self.values, + self.index))) + + @property + def result_index(self): + return self.index + + @property + def result_columns(self): + return self.columns diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5f323d0f040bc..753c623b2de4c 100644 
--- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4810,8 +4810,7 @@ def aggregate(self, func, axis=0, *args, **kwargs): def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): - """ - Applies function along input axis of DataFrame. + """Applies function along input axis of DataFrame. Objects passed to functions are Series objects having index either the DataFrame's index (axis=0) or the columns (axis=1). @@ -4870,194 +4869,15 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, ------- applied : Series or DataFrame """ - axis = self._get_axis_number(axis) - ignore_failures = kwds.pop('ignore_failures', False) - - # dispatch to agg - if axis == 0 and isinstance(func, (list, dict)): - return self.aggregate(func, axis=axis, *args, **kwds) - - if len(self.columns) == 0 and len(self.index) == 0: - return self._apply_empty_result(func, axis, reduce, *args, **kwds) - - # if we are a string, try to dispatch - if isinstance(func, compat.string_types): - if axis: - kwds['axis'] = axis - return getattr(self, func)(*args, **kwds) - - if kwds or args and not isinstance(func, np.ufunc): - def f(x): - return func(x, *args, **kwds) - else: - f = func - - if isinstance(f, np.ufunc): - with np.errstate(all='ignore'): - results = f(self.values) - return self._constructor(data=results, index=self.index, - columns=self.columns, copy=False) - else: - if not broadcast: - if not all(self.shape): - return self._apply_empty_result(func, axis, reduce, *args, - **kwds) - - if raw and not self._is_mixed_type: - return self._apply_raw(f, axis) - else: - if reduce is None: - reduce = True - return self._apply_standard( - f, axis, - reduce=reduce, - ignore_failures=ignore_failures) - else: - return self._apply_broadcast(f, axis) - - def _apply_empty_result(self, func, axis, reduce, *args, **kwds): - if reduce is None: - reduce = False - try: - reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds), - Series) - except Exception: - 
pass - - if reduce: - return Series(np.nan, index=self._get_agg_axis(axis)) - else: - return self.copy() - - def _apply_raw(self, func, axis): - try: - result = lib.reduce(self.values, func, axis=axis) - except Exception: - result = np.apply_along_axis(func, axis, self.values) - - # TODO: mixed type case - if result.ndim == 2: - return DataFrame(result, index=self.index, columns=self.columns) - else: - return Series(result, index=self._get_agg_axis(axis)) - - def _apply_standard(self, func, axis, ignore_failures=False, reduce=True): - - # skip if we are mixed datelike and trying reduce across axes - # GH6125 - if (reduce and axis == 1 and self._is_mixed_type and - self._is_datelike_mixed_type): - reduce = False - - # try to reduce first (by default) - # this only matters if the reduction in values is of different dtype - # e.g. if we want to apply to a SparseFrame, then can't directly reduce - if reduce: - values = self.values - - # we cannot reduce using non-numpy dtypes, - # as demonstrated in gh-12244 - if not is_extension_type(values): - # Create a dummy Series from an empty array - index = self._get_axis(axis) - empty_arr = np.empty(len(index), dtype=values.dtype) - dummy = Series(empty_arr, index=self._get_axis(axis), - dtype=values.dtype) - - try: - labels = self._get_agg_axis(axis) - result = lib.reduce(values, func, axis=axis, dummy=dummy, - labels=labels) - return Series(result, index=labels) - except Exception: - pass - - dtype = object if self._is_mixed_type else None - if axis == 0: - series_gen = (self._ixs(i, axis=1) - for i in range(len(self.columns))) - res_index = self.columns - res_columns = self.index - elif axis == 1: - res_index = self.index - res_columns = self.columns - values = self.values - series_gen = (Series._from_array(arr, index=res_columns, name=name, - dtype=dtype) - for i, (arr, name) in enumerate(zip(values, - res_index))) - else: # pragma : no cover - raise AssertionError('Axis must be 0 or 1, got %s' % str(axis)) - - i = None - 
keys = [] - results = {} - if ignore_failures: - successes = [] - for i, v in enumerate(series_gen): - try: - results[i] = func(v) - keys.append(v.name) - successes.append(i) - except Exception: - pass - # so will work with MultiIndex - if len(successes) < len(res_index): - res_index = res_index.take(successes) - else: - try: - for i, v in enumerate(series_gen): - results[i] = func(v) - keys.append(v.name) - except Exception as e: - if hasattr(e, 'args'): - # make sure i is defined - if i is not None: - k = res_index[i] - e.args = e.args + ('occurred at index %s' % - pprint_thing(k), ) - raise - - if len(results) > 0 and is_sequence(results[0]): - if not isinstance(results[0], Series): - index = res_columns - else: - index = None - - result = self._constructor(data=results, index=index) - result.columns = res_index - - if axis == 1: - result = result.T - result = result._convert(datetime=True, timedelta=True, copy=False) - - else: - - result = Series(results) - result.index = res_index - - return result - - def _apply_broadcast(self, func, axis): - if axis == 0: - target = self - elif axis == 1: - target = self.T - else: # pragma: no cover - raise AssertionError('Axis must be 0 or 1, got %s' % axis) - - result_values = np.empty_like(target.values) - columns = target.columns - for i, col in enumerate(columns): - result_values[:, i] = func(target[col]) - - result = self._constructor(result_values, index=target.index, - columns=target.columns) - - if axis == 1: - result = result.T - - return result + from pandas.core.apply import frame_apply + op = frame_apply(self, + func=func, + axis=axis, + broadcast=broadcast, + raw=raw, + reduce=reduce, + args=args, **kwds) + return op.get_result() def applymap(self, func): """ @@ -6189,8 +6009,6 @@ def isin(self, values): ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) -_EMPTY_SERIES = Series([]) - def _arrays_to_mgr(arrays, arr_names, 
index, columns, dtype=None): """ diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 36a18d8f8b4a0..05f39a8caa6f6 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -861,11 +861,17 @@ def apply(self, func, axis=0, broadcast=False, reduce=False): new_series, index=self.index, columns=self.columns, default_fill_value=self._default_fill_value, default_kind=self._default_kind).__finalize__(self) - else: - if not broadcast: - return self._apply_standard(func, axis, reduce=reduce) - else: - return self._apply_broadcast(func, axis) + + from pandas.core.apply import frame_apply + op = frame_apply(self, + func=func, + axis=axis, + reduce=reduce) + + if broadcast: + return op.apply_broadcast() + + return op.apply_standard() def applymap(self, func): """ diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ab2e810d77634..65dd166e1f6a8 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -13,6 +13,7 @@ Timestamp, compat) import pandas as pd from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.apply import frame_apply from pandas.util.testing import (assert_series_equal, assert_frame_equal) import pandas.util.testing as tm @@ -153,8 +154,9 @@ def test_apply_axis1(self): assert tapplied[d] == np.mean(self.frame.xs(d)) def test_apply_ignore_failures(self): - result = self.mixed_frame._apply_standard(np.mean, 0, - ignore_failures=True) + result = frame_apply(self.mixed_frame, + np.mean, 0, + ignore_failures=True).apply_standard() expected = self.mixed_frame._get_numeric_data().apply(np.mean) assert_series_equal(result, expected)
xref #18577
https://api.github.com/repos/pandas-dev/pandas/pulls/18754
2017-12-13T01:43:58Z
2017-12-14T11:36:50Z
2017-12-14T11:36:50Z
2017-12-14T12:10:16Z
DOC: read_excel doc - fixed formatting and added examples
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 0301bf0a23dd5..0f6660d2f4125 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -205,6 +205,8 @@ Other API Changes - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) - :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`) - :func:`wide_to_long` previously kept numeric-like suffixes as ``object`` dtype. Now they are cast to numeric if possible (:issue:`17627`) +- In :func:`read_excel`, the ``comment`` argument is now exposed as a named parameter (:issue:`18735`) +- Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) .. _whatsnew_0230.deprecations: diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 97a739b349a98..4f0655cff9b57 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -137,7 +137,7 @@ na_values : scalar, str, list-like, or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted - as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70) + """'. + as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to. @@ -148,6 +148,10 @@ this parameter is only necessary for columns stored as TEXT in Excel, any numeric columns will automatically be parsed, regardless of display format. +comment : str, default None + Comments out remainder of line. Pass a character or characters to this + argument to indicate comments in the input file. Any data between the + comment string and the end of the current line is ignored. skip_footer : int, default 0 .. 
deprecated:: 0.23.0 @@ -164,6 +168,77 @@ parsed : DataFrame or Dict of DataFrames DataFrame from the passed in Excel file. See notes in sheet_name argument for more information on when a Dict of Dataframes is returned. + +Examples +-------- + +An example DataFrame written to a local file + +>>> df_out = pd.DataFrame([('string1', 1), +... ('string2', 2), +... ('string3', 3)], +... columns=['Name', 'Value']) +>>> df_out + Name Value +0 string1 1 +1 string2 2 +2 string3 3 +>>> df_out.to_excel('tmp.xlsx') + +The file can be read using the file name as string or an open file object: + +>>> pd.read_excel('tmp.xlsx') + Name Value +0 string1 1 +1 string2 2 +2 string3 3 + +>>> pd.read_excel(open('tmp.xlsx','rb')) + Name Value +0 string1 1 +1 string2 2 +2 string3 3 + +Index and header can be specified via the `index_col` and `header` arguments + +>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) + 0 1 2 +0 NaN Name Value +1 0.0 string1 1 +2 1.0 string2 2 +3 2.0 string3 3 + +Column types are inferred but can be explicitly specified + +>>> pd.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float}) + Name Value +0 string1 1.0 +1 string2 2.0 +2 string3 3.0 + +True, False, and NA values, and thousands separators have defaults, +but can be explicitly specified, too. Supply the values you would like +as strings or lists of strings! + +>>> pd.read_excel('tmp.xlsx', +... 
na_values=['string1', 'string2']) + Name Value +0 NaN 1 +1 NaN 2 +2 string3 3 + +Comment lines in the excel input file can be skipped using the `comment` kwarg + +>>> df = pd.DataFrame({'a': ['1', '#2'], 'b': ['2', '3']}) +>>> df.to_excel('tmp.xlsx', index=False) +>>> pd.read_excel('tmp.xlsx') + a b +0 1 2 +1 #2 3 + +>>> pd.read_excel('tmp.xlsx', comment='#') + a b +0 1 2 """ @@ -223,6 +298,7 @@ def read_excel(io, parse_dates=False, date_parser=None, thousands=None, + comment=None, skipfooter=0, convert_float=True, **kwds): @@ -256,6 +332,7 @@ def read_excel(io, parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, + comment=comment, skipfooter=skipfooter, convert_float=convert_float, **kwds) @@ -338,6 +415,7 @@ def parse(self, parse_dates=False, date_parser=None, thousands=None, + comment=None, skipfooter=0, convert_float=True, **kwds): @@ -363,6 +441,7 @@ def parse(self, parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, + comment=comment, skipfooter=skipfooter, convert_float=convert_float, **kwds) @@ -417,6 +496,7 @@ def _parse_excel(self, parse_dates=False, date_parser=None, thousands=None, + comment=None, skipfooter=0, convert_float=True, **kwds): @@ -591,6 +671,7 @@ def _parse_cell(cell_contents, cell_typ): parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, + comment=comment, skipfooter=skipfooter, **kwds) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 71677322329f5..168144d78b3be 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -1858,6 +1858,68 @@ def test_invalid_columns(self): with pytest.raises(KeyError): write_frame.to_excel(path, 'test1', columns=['C', 'D']) + def test_comment_arg(self): + # Re issue #18735 + # Test the comment argument functionality to read_excel + with ensure_clean(self.ext) as path: + + # Create file to read in + df = DataFrame({'A': ['one', '#one', 'one'], + 'B': ['two', 'two', '#two']}) + df.to_excel(path, 
'test_c') + + # Read file without comment arg + result1 = read_excel(path, 'test_c') + result1.iloc[1, 0] = None + result1.iloc[1, 1] = None + result1.iloc[2, 1] = None + result2 = read_excel(path, 'test_c', comment='#') + tm.assert_frame_equal(result1, result2) + + def test_comment_default(self): + # Re issue #18735 + # Test the comment argument default to read_excel + with ensure_clean(self.ext) as path: + + # Create file to read in + df = DataFrame({'A': ['one', '#one', 'one'], + 'B': ['two', 'two', '#two']}) + df.to_excel(path, 'test_c') + + # Read file with default and explicit comment=None + result1 = read_excel(path, 'test_c') + result2 = read_excel(path, 'test_c', comment=None) + tm.assert_frame_equal(result1, result2) + + def test_comment_used(self): + # Re issue #18735 + # Test the comment argument is working as expected when used + with ensure_clean(self.ext) as path: + + # Create file to read in + df = DataFrame({'A': ['one', '#one', 'one'], + 'B': ['two', 'two', '#two']}) + df.to_excel(path, 'test_c') + + # Test read_frame_comment against manually produced expected output + expected = DataFrame({'A': ['one', None, 'one'], + 'B': ['two', None, None]}) + result = read_excel(path, 'test_c', comment='#') + tm.assert_frame_equal(result, expected) + + def test_comment_emptyline(self): + # Re issue #18735 + # Test that read_excel ignores commented lines at the end of file + with ensure_clean(self.ext) as path: + + df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']}) + df.to_excel(path, index=False) + + # Test that all-comment lines at EoF are ignored + expected = DataFrame({'a': [1], 'b': [2]}) + result = read_excel(path, comment='#') + tm.assert_frame_equal(result, expected) + def test_datetimes(self): # Test writing and reading datetimes. For issue #9139. (xref #9185)
Fixes a formatting bug in the `read_excel` docs that caused a line break and bold print in list of `_NA_VALUES`. Adds examples in the `read_excel` docstring. - [x] closes #18735 - [x] tests added & passed - [x] passes `git diff master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18753
2017-12-12T23:36:07Z
2017-12-30T13:33:45Z
2017-12-30T13:33:45Z
2017-12-30T13:33:48Z
CLN: remove doc/plots directory (dead code)
diff --git a/doc/plots/stats/moment_plots.py b/doc/plots/stats/moment_plots.py deleted file mode 100644 index 9e3a902592c6b..0000000000000 --- a/doc/plots/stats/moment_plots.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy as np - -import matplotlib.pyplot as plt -import pandas.util.testing as t -import pandas.stats.moments as m - - -def test_series(n=1000): - t.N = n - s = t.makeTimeSeries() - return s - - -def plot_timeseries(*args, **kwds): - n = len(args) - - fig, axes = plt.subplots(n, 1, figsize=kwds.get('size', (10, 5)), - sharex=True) - titles = kwds.get('titles', None) - - for k in range(1, n + 1): - ax = axes[k - 1] - ts = args[k - 1] - ax.plot(ts.index, ts.values) - - if titles: - ax.set_title(titles[k - 1]) - - fig.autofmt_xdate() - fig.subplots_adjust(bottom=0.10, top=0.95) diff --git a/doc/plots/stats/moments_ewma.py b/doc/plots/stats/moments_ewma.py deleted file mode 100644 index 3e521ed60bb8f..0000000000000 --- a/doc/plots/stats/moments_ewma.py +++ /dev/null @@ -1,15 +0,0 @@ -import matplotlib.pyplot as plt -import pandas.util.testing as t -import pandas.stats.moments as m - -t.N = 200 -s = t.makeTimeSeries().cumsum() - -plt.figure(figsize=(10, 5)) -plt.plot(s.index, s.values) -plt.plot(s.index, m.ewma(s, 20, min_periods=1).values) -f = plt.gcf() -f.autofmt_xdate() - -plt.show() -plt.close('all') diff --git a/doc/plots/stats/moments_ewmvol.py b/doc/plots/stats/moments_ewmvol.py deleted file mode 100644 index 093f62868fc4e..0000000000000 --- a/doc/plots/stats/moments_ewmvol.py +++ /dev/null @@ -1,23 +0,0 @@ -import matplotlib.pyplot as plt -import pandas.util.testing as t -import pandas.stats.moments as m - -t.N = 500 -ts = t.makeTimeSeries() -ts[::100] = 20 - -s = ts.cumsum() - - -plt.figure(figsize=(10, 5)) -plt.plot(s.index, m.ewmvol(s, span=50, min_periods=1).values, color='b') -plt.plot(s.index, m.rolling_std(s, 50, min_periods=1).values, color='r') - -plt.title('Exp-weighted std with shocks') -plt.legend(('Exp-weighted', 'Equal-weighted')) - -f 
= plt.gcf() -f.autofmt_xdate() - -plt.show() -plt.close('all') diff --git a/doc/plots/stats/moments_expw.py b/doc/plots/stats/moments_expw.py deleted file mode 100644 index 5fff419b3a940..0000000000000 --- a/doc/plots/stats/moments_expw.py +++ /dev/null @@ -1,35 +0,0 @@ -from moment_plots import * - -np.random.seed(1) - -ts = test_series(500) * 10 - -# ts[::100] = 20 - -s = ts.cumsum() - -fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True) - -ax0, ax1, ax2 = axes - -ax0.plot(s.index, s.values) -ax0.set_title('time series') - -ax1.plot(s.index, m.ewma(s, span=50, min_periods=1).values, color='b') -ax1.plot(s.index, m.rolling_mean(s, 50, min_periods=1).values, color='r') -ax1.set_title('rolling_mean vs. ewma') - -line1 = ax2.plot( - s.index, m.ewmstd(s, span=50, min_periods=1).values, color='b') -line2 = ax2.plot( - s.index, m.rolling_std(s, 50, min_periods=1).values, color='r') -ax2.set_title('rolling_std vs. ewmstd') - -fig.legend((line1, line2), - ('Exp-weighted', 'Equal-weighted'), - loc='upper right') -fig.autofmt_xdate() -fig.subplots_adjust(bottom=0.10, top=0.95) - -plt.show() -plt.close('all') diff --git a/doc/plots/stats/moments_rolling.py b/doc/plots/stats/moments_rolling.py deleted file mode 100644 index 30a6c5f53e20c..0000000000000 --- a/doc/plots/stats/moments_rolling.py +++ /dev/null @@ -1,24 +0,0 @@ -from moment_plots import * - -ts = test_series() -s = ts.cumsum() - -s[20:50] = np.NaN -s[120:150] = np.NaN -plot_timeseries(s, - m.rolling_count(s, 50), - m.rolling_sum(s, 50, min_periods=10), - m.rolling_mean(s, 50, min_periods=10), - m.rolling_std(s, 50, min_periods=10), - m.rolling_skew(s, 50, min_periods=10), - m.rolling_kurt(s, 50, min_periods=10), - size=(10, 12), - titles=('time series', - 'rolling_count', - 'rolling_sum', - 'rolling_mean', - 'rolling_std', - 'rolling_skew', - 'rolling_kurt')) -plt.show() -plt.close('all') diff --git a/doc/plots/stats/moments_rolling_binary.py b/doc/plots/stats/moments_rolling_binary.py deleted file mode 
100644 index ab6b7b1c8ff49..0000000000000 --- a/doc/plots/stats/moments_rolling_binary.py +++ /dev/null @@ -1,30 +0,0 @@ -from moment_plots import * - -np.random.seed(1) - -ts = test_series() -s = ts.cumsum() -ts2 = test_series() -s2 = ts2.cumsum() - -s[20:50] = np.NaN -s[120:150] = np.NaN -fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True) - -ax0, ax1, ax2 = axes - -ax0.plot(s.index, s.values) -ax0.plot(s2.index, s2.values) -ax0.set_title('time series') - -ax1.plot(s.index, m.rolling_corr(s, s2, 50, min_periods=1).values) -ax1.set_title('rolling_corr') - -ax2.plot(s.index, m.rolling_cov(s, s2, 50, min_periods=1).values) -ax2.set_title('rolling_cov') - -fig.autofmt_xdate() -fig.subplots_adjust(bottom=0.10, top=0.95) - -plt.show() -plt.close('all')
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry As part of my work on #18723, I've stumbled upon the ``doc/plots`` directory. I've searched for usages of files in this directory and have turned up blanks only, so this whole directory seems to be dead code. I assume the dir existed and had uses from the time before ``@savefig`` could be used in ipython directives, and now is outdated. As the pandas docs now relies on ``@savefig`` decorators, I propose just removing this directory.
https://api.github.com/repos/pandas-dev/pandas/pulls/18751
2017-12-12T22:54:31Z
2017-12-13T01:57:45Z
2017-12-13T01:57:44Z
2017-12-13T19:56:30Z
DOC: copied over the shared transform documentation from Series/DataF…
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index a5d8cc254cd93..bba2b5b8bc468 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -816,6 +816,9 @@ def _python_apply_general(self, f): def _iterate_slices(self): yield self._selection_name, self._selected_obj + @Appender(_shared_docs['transform'] % dict( + klass='GroupBy', + versionadded='')) def transform(self, func, *args, **kwargs): raise AbstractMethodError(self)
…rame.transform to core.GroupBy.transform. (#6257) ~- [ ] closes #xxxx~ - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18750
2017-12-12T21:43:20Z
2017-12-13T02:01:12Z
null
2023-05-11T01:16:56Z
DOC: Adding example to head and tail method (#16416)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index da550dccc9c89..4eb7865523cc3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3563,6 +3563,44 @@ def head(self, n=5): ------- obj_head : type of caller The first n rows of the caller object. + + See Also + -------- + pandas.DataFrame.tail + + Examples + -------- + >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', + ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) + >>> df + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + + Viewing the first 5 lines + + >>> df.head() + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + + Viewing the first n lines (three in this case) + + >>> df.head(3) + animal + 0 alligator + 1 bee + 2 falcon """ return self.iloc[:n] @@ -3580,6 +3618,44 @@ def tail(self, n=5): ------- obj_tail : type of caller The last n rows of the caller object. + + See Also + -------- + pandas.DataFrame.head + + Examples + -------- + >>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', + ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) + >>> df + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + + Viewing the last 5 lines + + >>> df.tail() + animal + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + + Viewing the last n lines (three in this case) + + >>> df.tail(3) + animal + 6 shark + 7 whale + 8 zebra """ if n == 0:
closes #18691 - [X] closes #16416 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18749
2017-12-12T21:37:11Z
2017-12-15T23:56:55Z
2017-12-15T23:56:55Z
2017-12-15T23:56:58Z
Move tests that dont belong in test_offsets
diff --git a/pandas/tests/scalar/test_parsing.py b/pandas/tests/scalar/test_parsing.py index 8ae858d791a97..bff0de649ac5e 100644 --- a/pandas/tests/scalar/test_parsing.py +++ b/pandas/tests/scalar/test_parsing.py @@ -7,11 +7,51 @@ import pytest from dateutil.parser import parse +import pandas as pd import pandas.util._test_decorators as td from pandas.conftest import is_dateutil_le_261, is_dateutil_gt_261 from pandas import compat from pandas.util import testing as tm from pandas._libs.tslibs import parsing +from pandas._libs.tslibs.parsing import parse_time_string + + +def test_to_datetime1(): + actual = pd.to_datetime(datetime(2008, 1, 15)) + assert actual == datetime(2008, 1, 15) + + actual = pd.to_datetime('20080115') + assert actual == datetime(2008, 1, 15) + + # unparseable + s = 'Month 1, 1999' + assert pd.to_datetime(s, errors='ignore') == s + + +class TestParseQuarters(object): + + def test_parse_time_string(self): + (date, parsed, reso) = parse_time_string('4Q1984') + (date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984') + assert date == date_lower + assert parsed == parsed_lower + assert reso == reso_lower + + def test_parse_time_quarter_w_dash(self): + # https://github.com/pandas-dev/pandas/issue/9688 + pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988')] + + for dashed, normal in pairs: + (date_dash, parsed_dash, reso_dash) = parse_time_string(dashed) + (date, parsed, reso) = parse_time_string(normal) + + assert date_dash == date + assert parsed_dash == parsed + assert reso_dash == reso + + pytest.raises(parsing.DateParseError, parse_time_string, "-2Q1992") + pytest.raises(parsing.DateParseError, parse_time_string, "2-Q1992") + pytest.raises(parsing.DateParseError, parse_time_string, "4-4Q1992") class TestDatetimeParsingWrappers(object): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 84e811301ab4b..5b4c2f9d86674 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ 
b/pandas/tests/tseries/offsets/test_offsets.py @@ -29,9 +29,7 @@ QuarterEnd, BusinessMonthEnd, FY5253, Nano, Easter, FY5253Quarter, LastWeekOfMonth) -from pandas.core.tools.datetimes import ( - format, ole2datetime, parse_time_string, - to_datetime, DateParseError) +from pandas.core.tools.datetimes import format, ole2datetime import pandas.tseries.offsets as offsets from pandas.io.pickle import read_pickle from pandas._libs.tslibs import timezones @@ -67,18 +65,6 @@ def test_ole2datetime(): ole2datetime(60) -def test_to_datetime1(): - actual = to_datetime(datetime(2008, 1, 15)) - assert actual == datetime(2008, 1, 15) - - actual = to_datetime('20080115') - assert actual == datetime(2008, 1, 15) - - # unparseable - s = 'Month 1, 1999' - assert to_datetime(s, errors='ignore') == s - - def test_normalize_date(): actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10)) assert actual == datetime(2007, 10, 1) @@ -2800,32 +2786,6 @@ def test_get_offset_legacy(): get_offset(name) -class TestParseTimeString(object): - - def test_parse_time_string(self): - (date, parsed, reso) = parse_time_string('4Q1984') - (date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984') - assert date == date_lower - assert parsed == parsed_lower - assert reso == reso_lower - - def test_parse_time_quarter_w_dash(self): - # https://github.com/pandas-dev/pandas/issue/9688 - pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988'), ] - - for dashed, normal in pairs: - (date_dash, parsed_dash, reso_dash) = parse_time_string(dashed) - (date, parsed, reso) = parse_time_string(normal) - - assert date_dash == date - assert parsed_dash == parsed - assert reso_dash == reso - - pytest.raises(DateParseError, parse_time_string, "-2Q1992") - pytest.raises(DateParseError, parse_time_string, "2-Q1992") - pytest.raises(DateParseError, parse_time_string, "4-4Q1992") - - def test_get_standard_freq(): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): fstr = get_standard_freq('W')
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18747
2017-12-12T20:25:08Z
2017-12-13T14:12:33Z
2017-12-13T14:12:33Z
2018-02-11T22:00:37Z
Add example to `Dataframe.head()` method's docstring.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index da550dccc9c89..071dda090fddd 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2966,8 +2966,8 @@ def add_suffix(self, suffix): Examples -------- >>> df = pd.DataFrame({ - ... 'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'], - ... 'col2' : [2, 1, 9, 8, 7, 4], + ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], + ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df @@ -3563,6 +3563,42 @@ def head(self, n=5): ------- obj_head : type of caller The first n rows of the caller object. + + Examples + -------- + >>> df = pd.DataFrame({ + ... 'col1': ['A', 'A', 'B', 'B', 'D', 'C', 'E', 'E', 'F'], + ... 'col2': [2, 1, 9, 8, 7, 4, 0, 2, 2], + ... 'col3': [0, 1, 9, 4, 2, 3, 5, 7, 3], + ... }) + >>> df + col1 col2 col3 + 0 A 2 0 + 1 A 1 1 + 2 B 9 9 + 3 B 8 4 + 4 D 7 2 + 5 C 4 3 + 6 E 0 5 + 7 E 2 7 + 8 F 2 3 + + Viewing the first 5 lines (the default) + >>> df.head() + col1 col2 col3 + 0 A 2 0 + 1 A 1 1 + 2 B 9 9 + 3 B 8 4 + 4 D 7 2 + + Viewing the first n lines (three in this case) + >>> df.head(3) + col1 col2 col3 + 0 A 2 0 + 1 A 1 1 + 2 B 9 9 + """ return self.iloc[:n]
closes #18691 closes #16416
https://api.github.com/repos/pandas-dev/pandas/pulls/18746
2017-12-12T19:42:37Z
2017-12-15T23:58:03Z
null
2017-12-16T13:07:25Z
Created decorators for skip_if locale functions
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index a94865d8e9657..c89e3ddbfc5d0 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -22,7 +22,7 @@ from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.util import testing as tm import pandas.util._test_decorators as td -from pandas.util.testing import assert_series_equal, _skip_if_has_locale +from pandas.util.testing import assert_series_equal from pandas import (isna, to_datetime, Timestamp, Series, DataFrame, Index, DatetimeIndex, NaT, date_range, compat) @@ -144,11 +144,10 @@ def test_to_datetime_format_time(self, cache): for s, format, dt in data: assert to_datetime(s, format=format, cache=cache) == dt + @td.skip_if_has_locale @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_with_non_exact(self, cache): # GH 10834 - tm._skip_if_has_locale() - # 8904 # exact kw if sys.version_info < (2, 7): @@ -830,11 +829,10 @@ def test_to_datetime_with_space_in_series(self, cache): result_ignore = to_datetime(s, errors='ignore', cache=cache) tm.assert_series_equal(result_ignore, s) + @td.skip_if_has_locale @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_with_apply(self, cache): # this is only locale tested with US/None locales - tm._skip_if_has_locale() - # GH 5195 # with a format and coerce a single item to_datetime fails td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3]) @@ -1023,9 +1021,9 @@ def test_dayfirst(self, cache): class TestGuessDatetimeFormat(object): + @td.skip_if_not_us_locale @is_dateutil_le_261 def test_guess_datetime_format_for_array(self): - tm._skip_if_not_us_locale() expected_format = '%Y-%m-%d %H:%M:%S.%f' dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format) @@ -1044,9 +1042,9 @@ def test_guess_datetime_format_for_array(self): [np.nan, np.nan, np.nan], dtype='O')) assert format_for_string_of_nans is 
None + @td.skip_if_not_us_locale @is_dateutil_gt_261 def test_guess_datetime_format_for_array_gt_261(self): - tm._skip_if_not_us_locale() expected_format = '%Y-%m-%d %H:%M:%S.%f' dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format) @@ -1393,9 +1391,9 @@ def test_parsers_timestring(self, cache): assert result4 == exp_now assert result5 == exp_now + @td.skip_if_has_locale def test_parsers_time(self): # GH11818 - _skip_if_has_locale() strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500", "2:15:00pm", "021500pm", time(14, 15)] expected = time(14, 15) diff --git a/pandas/tests/scalar/test_parsing.py b/pandas/tests/scalar/test_parsing.py index 70961755ceec9..8ae858d791a97 100644 --- a/pandas/tests/scalar/test_parsing.py +++ b/pandas/tests/scalar/test_parsing.py @@ -6,6 +6,8 @@ import numpy as np import pytest from dateutil.parser import parse + +import pandas.util._test_decorators as td from pandas.conftest import is_dateutil_le_261, is_dateutil_gt_261 from pandas import compat from pandas.util import testing as tm @@ -66,6 +68,7 @@ def test_parsers_monthfreq(self): class TestGuessDatetimeFormat(object): + @td.skip_if_not_us_locale @is_dateutil_le_261 @pytest.mark.parametrize( "string, format", @@ -79,11 +82,10 @@ class TestGuessDatetimeFormat(object): '%Y-%m-%d %H:%M:%S.%f')]) def test_guess_datetime_format_with_parseable_formats( self, string, format): - tm._skip_if_not_us_locale() - result = parsing._guess_datetime_format(string) assert result == format + @td.skip_if_not_us_locale @is_dateutil_gt_261 @pytest.mark.parametrize( "string", @@ -92,8 +94,6 @@ def test_guess_datetime_format_with_parseable_formats( '2011-12-30 00:00:00.000000']) def test_guess_datetime_format_with_parseable_formats_gt_261( self, string): - tm._skip_if_not_us_locale() - result = parsing._guess_datetime_format(string) assert result is None @@ -118,6 +118,7 @@ def test_guess_datetime_format_with_dayfirst_gt_261(self, dayfirst): ambiguous_string, 
dayfirst=dayfirst) assert result is None + @td.skip_if_has_locale @is_dateutil_le_261 @pytest.mark.parametrize( "string, format", @@ -127,13 +128,10 @@ def test_guess_datetime_format_with_dayfirst_gt_261(self, dayfirst): ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S')]) def test_guess_datetime_format_with_locale_specific_formats( self, string, format): - # The month names will vary depending on the locale, in which - # case these wont be parsed properly (dateutil can't parse them) - tm._skip_if_has_locale() - result = parsing._guess_datetime_format(string) assert result == format + @td.skip_if_has_locale @is_dateutil_gt_261 @pytest.mark.parametrize( "string", @@ -143,10 +141,6 @@ def test_guess_datetime_format_with_locale_specific_formats( '30/Dec/2011 00:00:00']) def test_guess_datetime_format_with_locale_specific_formats_gt_261( self, string): - # The month names will vary depending on the locale, in which - # case these wont be parsed properly (dateutil can't parse them) - tm._skip_if_has_locale() - result = parsing._guess_datetime_format(string) assert result is None diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index b0d0e2a51b5f4..95410c6ea0105 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -8,6 +8,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas._libs.tslib import iNaT from pandas.compat import lrange, StringIO, product from pandas.core.indexes.timedeltas import TimedeltaIndex @@ -17,7 +18,7 @@ Timestamp, to_datetime, offsets, timedelta_range) from pandas.util.testing import (assert_series_equal, assert_almost_equal, - assert_frame_equal, _skip_if_has_locale) + assert_frame_equal) from pandas.tests.series.common import TestData @@ -738,10 +739,9 @@ def test_between_time_types(self): pytest.raises(ValueError, series.between_time, datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + 
@td.skip_if_has_locale def test_between_time_formats(self): # GH11818 - _skip_if_has_locale() - rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 6d15f360bcbe8..3fe4b8c3bb783 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -25,6 +25,7 @@ def test_foo(): """ import pytest +import locale from distutils.version import LooseVersion from pandas.compat import is_platform_windows, is_platform_32bit, PY3 @@ -81,6 +82,18 @@ def _skip_if_mpl_1_5(): mod.use("Agg", warn=False) +def _skip_if_has_locale(): + lang, _ = locale.getlocale() + if lang is not None: + return True + + +def _skip_if_not_us_locale(): + lang, _ = locale.getlocale() + if lang != 'en_US': + return True + + skip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(), reason="Missing matplotlib dependency") skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), @@ -92,3 +105,10 @@ def _skip_if_mpl_1_5(): skip_if_windows_python_3 = pytest.mark.skipif(is_platform_windows() and PY3, reason=("not used on python3/" "win32")) +skip_if_has_locale = pytest.mark.skipif(_skip_if_has_locale(), + reason="Specific locale is set {lang}" + .format(lang=locale.getlocale()[0])) +skip_if_not_us_locale = pytest.mark.skipif(_skip_if_not_us_locale(), + reason="Specific locale is set " + "{lang}".format( + lang=locale.getlocale()[0])) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 81f84ea646c86..32f8c4884c99f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -389,22 +389,6 @@ def skip_if_no_ne(engine='numexpr'): installed=_NUMEXPR_INSTALLED)) -def _skip_if_has_locale(): - import locale - lang, _ = locale.getlocale() - if lang is not None: - import pytest - pytest.skip("Specific locale is set {lang}".format(lang=lang)) - - -def _skip_if_not_us_locale(): - import locale - lang, _ = locale.getlocale() - if lang != 
'en_US': - import pytest - pytest.skip("Specific locale is set {lang}".format(lang=lang)) - - def _skip_if_no_mock(): try: import mock # noqa
- [ ] progress towards #18190 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18745
2017-12-12T16:18:31Z
2017-12-13T01:47:42Z
2017-12-13T01:47:42Z
2017-12-13T01:55:29Z
DOC: Fix sphinx warning
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 14bb77be5d77c..a7dde5d6ee410 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -123,7 +123,7 @@ I/O - Bug in :func:`read_csv` when reading numeric category fields with high cardinality (:issue:`18186`) - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) - Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). -- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) +- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the ``numpy.bool_`` datatype (:issue:`18390`) - Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) - Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) - Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`)
[ci skip] It interpreted that as a link.
https://api.github.com/repos/pandas-dev/pandas/pulls/18741
2017-12-12T03:19:31Z
2017-12-12T09:35:07Z
2017-12-12T09:35:07Z
2017-12-20T16:11:31Z
DOC: Add date to whatsnew
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index a250505adc409..14bb77be5d77c 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -1,7 +1,7 @@ .. _whatsnew_0211: -v0.21.1 -------- +v0.21.1 (December 12, 2017) +--------------------------- This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes, bug fixes and performance improvements.
[ci skip]
https://api.github.com/repos/pandas-dev/pandas/pulls/18740
2017-12-12T03:02:50Z
2017-12-12T03:03:23Z
2017-12-12T03:03:23Z
2017-12-12T03:03:28Z
DOC: Update relase notes
diff --git a/doc/source/release.rst b/doc/source/release.rst index a3289b1144863..0298eda2c78ab 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,82 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: http://pypi.python.org/pypi/pandas * Documentation: http://pandas.pydata.org +pandas 0.21.1 +------------- + +**Release date:** December 12, 2017 + +This is a minor bug-fix release in the 0.21.x series and includes some small +regression fixes, bug fixes and performance improvements. We recommend that all +users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who relied implicitly on pandas to plot datetimes + with matplotlib. See :ref:`here <whatsnew_0211.special>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + +See the :ref:`v0.21.1 Whatsnew <whatsnew_0211>` overview for an extensive list +of all the changes for 0.21.1. + +Thanks +~~~~~~ + +A total of 46 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +Contributors +============ + +* Aaron Critchley + +* Alex Rychyk +* Alexander Buchkovsky + +* Alexander Michael Schade + +* Chris Mazzullo +* Cornelius Riemenschneider + +* Dave Hirschfeld + +* David Fischer + +* David Stansby + +* Dror Atariah + +* Eric Kisslinger + +* Hans + +* Ingolf Becker + +* Jan Werkmann + +* Jeff Reback +* Joris Van den Bossche +* Jörg Döpfert + +* Kevin Kuhl + +* Krzysztof Chomski + +* Leif Walsh +* Licht Takeuchi +* Manraj Singh + +* Matt Braymer-Hayes + +* Michael Waskom + +* Mie~~~ + +* Peter Hoffmann + +* Robert Meyer + +* Sam Cohan + +* Sietse Brouwer + +* Sven + +* Tim Swast +* Tom Augspurger +* Wes Turner +* William Ayd + +* Yee Mey + +* bolkedebruin + +* cgohlke +* derestle-htwg + +* fjdiod + +* gabrielclow + +* gfyoung +* ghasemnaddaf + +* jbrockmendel +* jschendel +* miker985 + +* topper-123 + pandas 0.21.0 ------------- diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 5d7950a667a2f..a250505adc409 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -10,8 +10,8 @@ We recommend that all users upgrade to this version. Highlights include: - Temporarily restore matplotlib datetime plotting functionality. This should - resolve issues for users who relied implicitly on pandas to plot datetimes - with matplotlib. See :ref:`here <whatsnew_0211.special>`. + resolve issues for users who implicitly relied on pandas to plot datetimes + with matplotlib. See :ref:`here <whatsnew_0211.converters>`. - Improvements to the Parquet IO functions introduced in 0.21.0. See :ref:`here <whatsnew_0211.enhancements.parquet>`. @@ -21,7 +21,7 @@ Highlights include: :backlinks: none -.. _whatsnew_0211.special: +.. _whatsnew_0211.converters: Restore Matplotlib datetime Converter Registration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -34,8 +34,8 @@ pandas``. In pandas 0.21.0, we required users to explicitly register the converter. 
This caused problems for some users who relied on those converters being present for regular ``matplotlib.pyplot`` plotting methods, so we're -temporarily reverting that change; pandas will again register the converters on -import. +temporarily reverting that change; pandas 0.21.1 again registers the converters on +import, just like before 0.21.0. We've added a new option to control the converters: ``pd.options.plotting.matplotlib.register_converters``. By default, they are
[ci skip] Updating the release doc, and some wording issues.
https://api.github.com/repos/pandas-dev/pandas/pulls/18739
2017-12-12T02:56:41Z
2017-12-12T02:58:33Z
2017-12-12T02:58:33Z
2017-12-12T02:58:36Z
Implement missing offset comparison methods
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index b37e5dc620260..6b0dbedbbdfc6 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -567,7 +567,13 @@ cdef class _Timedelta(timedelta): return PyObject_RichCompare(np.array([self]), other, op) return PyObject_RichCompare(other, self, reverse_ops[op]) else: - if op == Py_EQ: + if (getattr(other, "_typ", "") == "dateoffset" and + hasattr(other, "delta")): + # offsets.Tick; we catch this fairly late as it is a + # relatively infrequent case + ots = other.delta + return cmp_scalar(self.value, ots.value, op) + elif op == Py_EQ: return False elif op == Py_NE: return True diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index c260700c9473b..a68d3af889e7b 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -9,9 +9,24 @@ from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series, to_timedelta, compat) +from pandas.tseries.frequencies import to_offset from pandas._libs.tslib import iNaT, NaT +class TestTimedeltaComparisons(object): + @pytest.mark.parametrize('freq', ['D', 'H', 'T', 's', 'ms', 'us', 'ns']) + def test_tick_comparison(self, freq): + offset = to_offset(freq) * 2 + delta = offset._inc + assert isinstance(delta, Timedelta) + assert delta < offset + assert delta <= offset + assert not delta == offset + assert delta != offset + assert not delta > offset + assert not delta >= offset + + class TestTimedeltaArithmetic(object): _multiprocess_can_split_ = True diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 5b4c2f9d86674..bd2f51ff875e1 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3147,3 +3147,52 @@ def test_require_integers(offset_types): 
cls = offset_types with pytest.raises(ValueError): cls(n=1.5) + + +def test_comparisons(offset_types): + cls = offset_types + + if cls is WeekOfMonth: + # TODO: The default values for week and weekday should be non-raising + off = cls(n=1, week=1, weekday=2) + elif cls is LastWeekOfMonth: + # TODO: The default value for weekday should be non-raising + off = cls(n=1, weekday=4) + else: + off = cls(n=1) + + if cls is Week: + assert off < timedelta(days=8) + assert off > timedelta(days=6) + assert off <= Day(n=7) + elif issubclass(cls, offsets.Tick): + pass + else: + with pytest.raises(TypeError): + off < timedelta(days=8) + with pytest.raises(TypeError): + off > timedelta(days=6) + with pytest.raises(TypeError): + off <= Day(n=7) + with pytest.raises(TypeError): + off < DateOffset(month=7) + + +def test_week_comparison(): + # Only Week with weekday == None is special + off = Week(weekday=3) + with pytest.raises(TypeError): + off < timedelta(days=8) + with pytest.raises(TypeError): + off > timedelta(days=6) + with pytest.raises(TypeError): + off <= Day(n=7) + + +@pytest.mark.parametrize('opname', ['__eq__', '__ne__', + '__lt__', '__le__', + '__gt__', '__ge__']) +def test_comparison_names(offset_types, opname): + cls = offset_types + method = getattr(cls, opname) + assert method.__name__ == opname diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index 24033d4ff6cbd..d82062c96408b 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -9,7 +9,8 @@ from pandas import Timedelta, Timestamp from pandas.tseries import offsets -from pandas.tseries.offsets import Hour, Minute, Second, Milli, Micro, Nano +from pandas.tseries.offsets import (Hour, Minute, Second, Milli, Micro, Nano, + Week) from .common import assert_offset_equal @@ -35,6 +36,24 @@ def test_delta_to_tick(): assert (tick == offsets.Day(3)) +@pytest.mark.parametrize('cls', tick_classes) +def 
test_tick_comparisons(cls): + off = cls(n=2) + with pytest.raises(TypeError): + off < 3 + + # Unfortunately there is no good way to make the reverse inequality work + assert off > timedelta(-1) + assert off >= timedelta(-1) + assert off < off._inc * 3 # Timedelta object + assert off <= off._inc * 3 # Timedelta object + assert off == off.delta + assert off.delta == off + assert off != -1 * off + + assert off < Week() + + # --------------------------------------------------------------------- diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 8b12b2f3ad2ce..2937279bdf995 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -112,6 +112,24 @@ def wrapper(self, other): return wrapper +def _make_cmp_func(op): + assert op not in [operator.eq, operator.ne] + # __eq__ and __ne__ have slightly different behavior, returning + # False and True, respectively, instead of raising + + def cmp_func(self, other): + if type(self) == Week and self.weekday is None: + # Week without weekday behaves like a Tick + tick = Day(n=7 * self.n, normalize=self.normalize) + return op(tick, other) + else: + raise TypeError('Cannot compare type {self} with {other}' + .format(self=self.__class__.__name__, + other=other.__class__.__name__)) + + cmp_func.__name__ = '__{name}__'.format(name=op.__name__) + return cmp_func + # --------------------------------------------------------------------- # DateOffset @@ -299,6 +317,11 @@ def _repr_attrs(self): def name(self): return self.rule_code + __lt__ = _make_cmp_func(operator.lt) + __le__ = _make_cmp_func(operator.le) + __ge__ = _make_cmp_func(operator.ge) + __gt__ = _make_cmp_func(operator.gt) + def __eq__(self, other): if other is None: return False @@ -320,9 +343,7 @@ def __hash__(self): return hash(self._params()) def __add__(self, other): - if isinstance(other, (ABCDatetimeIndex, ABCSeries)): - return other + self - elif isinstance(other, ABCPeriod): + if isinstance(other, (ABCDatetimeIndex, ABCSeries, 
ABCPeriod)): return other + self try: return self.apply(other) @@ -2146,8 +2167,41 @@ def onOffset(self, dt): def _tick_comp(op): def f(self, other): - return op(self.delta, other.delta) + if isinstance(other, Tick): + # Note we cannot just try/except other.delta because Tick.delta + # returns a Timedelta while Timedelta.delta returns an int + other_delta = other.delta + elif isinstance(other, (timedelta, np.timedelta64)): + other_delta = other + elif isinstance(other, Week) and other.weekday is None: + other_delta = timedelta(weeks=other.n) + elif isinstance(other, compat.string_types): + from pandas.tseries.frequencies import to_offset + other = to_offset(other) + if isinstance(other, DateOffset): + return f(self, other) + else: + if op == operator.eq: + return False + elif op == operator.ne: + return True + raise TypeError('Cannot compare type {self} and {other}' + .format(self=self.__class__.__name__, + other=other.__class__.__name__)) + elif op == operator.eq: + # TODO: Consider changing this older behavior for + # __eq__ and __ne__to match other comparisons + return False + elif op == operator.ne: + return True + else: + raise TypeError('Cannot compare type {self} and {other}' + .format(self=self.__class__.__name__, + other=other.__class__.__name__)) + + return op(self.delta, other_delta) + f.__name__ = '__{name}__'.format(name=op.__name__) return f @@ -2184,34 +2238,11 @@ def __add__(self, other): raise OverflowError("the add operation between {self} and {other} " "will overflow".format(self=self, other=other)) - def __eq__(self, other): - if isinstance(other, compat.string_types): - from pandas.tseries.frequencies import to_offset - - other = to_offset(other) - - if isinstance(other, Tick): - return self.delta == other.delta - else: - # TODO: Are there cases where this should raise TypeError? - return False - - # This is identical to DateOffset.__hash__, but has to be redefined here - # for Python 3, because we've redefined __eq__. 
def __hash__(self): - return hash(self._params()) - - def __ne__(self, other): - if isinstance(other, compat.string_types): - from pandas.tseries.frequencies import to_offset - - other = to_offset(other) - - if isinstance(other, Tick): - return self.delta != other.delta - else: - # TODO: Are there cases where this should raise TypeError? - return True + # This is identical to DateOffset.__hash__, but has to be redefined + # here for Python 3, because we've redefined __eq__. + tup = (str(self.__class__), ('n', self.n)) + return hash(tup) @property def delta(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Not sure if it closes #8386, but its most of the way there. Fix some missing comparisons, including Timedelta with Tick and Tick with (timedelta|Timedelta|Week) Attach correct names for comparison methods. If we can resolve #18510, we can make `Week(weekday=None)` just return a `Tick`, avoid some special casing.
https://api.github.com/repos/pandas-dev/pandas/pulls/18738
2017-12-12T02:19:26Z
2017-12-28T19:49:08Z
null
2020-04-05T17:39:59Z
Update parsers.py usecols description
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 927edbf236366..ff7c4972bc502 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -108,6 +108,8 @@ example of a valid callable argument would be ``lambda x: x.upper() in ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster parsing time and lower memory usage. + Note that indexes in usecols is automatically sorted in ascending order. (ie. [1,0] + will return same dataframe as [0,1] using indexes) as_recarray : boolean, default False .. deprecated:: 0.19.0 Please call `pd.read_csv(...).to_records()` instead.
Added note on the description of usecols in parsers.py file to clarify that order of parsers.py doesn't matter in the read_csv() method. Regardless of what order usecols is inputted in, read_csv() will return a DataFrame of the columns in ascending order.
https://api.github.com/repos/pandas-dev/pandas/pulls/18737
2017-12-12T00:16:35Z
2017-12-12T01:11:23Z
null
2023-05-11T01:16:55Z
Backports round 2
diff --git a/.gitignore b/.gitignore index ff0a6aef47163..b1748ae72b8ba 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,4 @@ doc/build/html/index.html doc/tmp.sv doc/source/styled.xlsx doc/source/templates/ +env/ diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 0cdfec63fd696..cd3cc282a8010 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -348,7 +348,7 @@ The following methods are available: The weights used in the window are specified by the ``win_type`` keyword. The list of recognized types are the `scipy.signal window functions - <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: +<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: - ``boxcar`` - ``triang`` diff --git a/doc/source/io.rst b/doc/source/io.rst index 4024414610a82..ba33c449e701f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4548,11 +4548,8 @@ dtypes, including extension dtypes such as datetime with tz. Several caveats. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the - ``DataFrame`` and will raise an error if a non-default one is provided. You - can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to - ignore it. - Duplicate column names and non-string columns names are not supported +- Index level names, if specified, must be strings - Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. diff --git a/doc/source/options.rst b/doc/source/options.rst index db3380bd4a3e7..505a5ade68de0 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -433,7 +433,7 @@ compute.use_numexpr True Use the numexpr library to computation if it is installed. plotting.matplotlib.register_converters True Register custom converters with matplotlib. 
Set to False to de-register. -======================================= ============ ======================================== +======================================= ============ ================================== .. _basics.console_output: diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 00726a4606cf7..206dabd1142ae 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -3,9 +3,23 @@ v0.21.1 ------- -This is a minor release from 0.21.1 and includes a number of deprecations, new -features, enhancements, and performance improvements along with a large number -of bug fixes. We recommend that all users upgrade to this version. +This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes, +bug fixes and performance improvements. +We recommend that all users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who relied implicitly on pandas to plot datetimes + with matplotlib. See :ref:`here <whatsnew_0211.special>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + + +.. contents:: What's new in v0.21.1 + :local: + :backlinks: none + .. _whatsnew_0211.special: @@ -42,9 +56,16 @@ registering them when they want them. New features ~~~~~~~~~~~~ -- -- -- +.. _whatsnew_0211.enhancements.parquet: + +Improvements to the Parquet IO functionality +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- :func:`DataFrame.to_parquet` will now write non-default indexes when the + underlying engine supports it. The indexes will be preserved when reading + back in with :func:`read_parquet` (:issue:`18581`). 
+- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) +- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) .. _whatsnew_0211.enhancements.other: @@ -53,7 +74,6 @@ Other Enhancements - :meth:`Timestamp.timestamp` is now available in Python 2.7. (:issue:`17329`) - :class:`Grouper` and :class:`TimeGrouper` now have a friendly repr output (:issue:`18203`). -- .. _whatsnew_0211.deprecations: @@ -69,17 +89,6 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of plotting large series/dataframes (:issue:`18236`). -- -- - -.. _whatsnew_0211.docs: - -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ - -- -- -- .. _whatsnew_0211.bug_fixes: @@ -94,7 +103,7 @@ Conversion - Bug in :meth:`IntervalIndex.copy` when copying and ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`) - Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising``TypeError` (:issue:`18372`) - Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an err if ``end.tzinfo`` is None (:issue:`18431`) -- +- Bug in :meth:`Series.fillna` which raised when passed a long integer on Python 2 (:issue:`18159`). 
Indexing ^^^^^^^^ @@ -104,7 +113,6 @@ Indexing - Bug in :class:`IntervalIndex` constructor when a list of intervals is passed with non-default ``closed`` (:issue:`18334`) - Bug in ``Index.putmask`` when an invalid mask passed (:issue:`18368`) - Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`) -- I/O ^^^ @@ -114,21 +122,19 @@ I/O - Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) - Bug in :func:`read_csv` when reading numeric category fields with high cardinality (:issue:`18186`) - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) -- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) -- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) - Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). 
- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) - Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) - Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) - Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) +- Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`) +- Bug in :meth:`DataFrame.to_latex` with ``longtable=True`` where a latex multicolumn always spanned over three columns (:issue:`17959`) Plotting ^^^^^^^^ - Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`) -- -- Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -137,15 +143,6 @@ Groupby/Resample/Rolling - Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequecy is 12h or higher (:issue:`15549`) - Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) - Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array (:issue:`18430`) -- -- - -Sparse -^^^^^^ - -- -- -- Reshaping ^^^^^^^^^ @@ -159,9 +156,8 @@ Numeric ^^^^^^^ - Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all equal values has floating issue (:issue:`18044`) -- -- -- +- Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) +- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) Categorical ^^^^^^^^^^^ @@ -177,9 +173,3 @@ String ^^^^^^ - :meth:`Series.str.split()` will now propogate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) - -Other -^^^^^ - -- -- diff --git 
a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 71de6c7c3e8cf..4e9b2b9a2e922 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -681,7 +681,7 @@ def __sub__(self, other): return self._add_delta(-other) elif is_integer(other): return self.shift(-other) - elif isinstance(other, datetime): + elif isinstance(other, (datetime, np.datetime64)): return self._sub_datelike(other) elif isinstance(other, Period): return self._sub_period(other) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 50085889ad88f..3c518017a8808 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -31,6 +31,7 @@ import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning from pandas.core.common import _values_from_object, _maybe_box +from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.numeric import Int64Index, Float64Index @@ -762,7 +763,7 @@ def _sub_datelike(self, other): raise TypeError("DatetimeIndex subtraction must have the same " "timezones or no timezones") result = self._sub_datelike_dti(other) - elif isinstance(other, datetime): + elif isinstance(other, (datetime, np.datetime64)): other = Timestamp(other) if other is libts.NaT: result = self._nat_new(box=False) @@ -772,7 +773,8 @@ def _sub_datelike(self, other): "timezones or no timezones") else: i8 = self.asi8 - result = i8 - other.value + result = checked_add_with_arr(i8, -other.value, + arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=libts.iNaT) else: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 445adb6bd3b18..0cc35300f0d17 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -381,7 +381,8 @@ def _add_datelike(self, other): else: other = Timestamp(other) i8 = 
self.asi8 - result = checked_add_with_arr(i8, other.value) + result = checked_add_with_arr(i8, other.value, + arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=iNaT) return DatetimeIndex(result, name=self.name, copy=False) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index b929dfd5a9d0b..3b7cd1d02e1d3 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1837,8 +1837,10 @@ def _can_hold_element(self, element): if tipo is not None: return (issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(tipo.type, (np.datetime64, np.timedelta64))) - return (isinstance(element, (float, int, np.floating, np.int_)) and - not isinstance(element, (bool, np.bool_, datetime, timedelta, + return ( + isinstance( + element, (float, int, np.floating, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))) def to_native_types(self, slicer=None, na_rep='', float_format=None, @@ -1886,9 +1888,11 @@ def _can_hold_element(self, element): if tipo is not None: return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating)) - return (isinstance(element, - (float, int, complex, np.float_, np.int_)) and - not isinstance(element, (bool, np.bool_))) + return ( + isinstance( + element, + (float, int, complex, np.float_, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_))) def should_store(self, value): return issubclass(value.dtype.type, np.complexfloating) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 24eeb1dd94c18..bac5ac762400d 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -950,8 +950,8 @@ def get_col_type(dtype): if self.longtable: buf.write('\\endhead\n') buf.write('\\midrule\n') - buf.write('\\multicolumn{3}{r}{{Continued on next ' - 'page}} \\\\\n') + buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next ' + 'page}}}} \\\\\n'.format(n=len(row))) 
buf.write('\\midrule\n') buf.write('\\endfoot\n\n') buf.write('\\bottomrule\n') diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4a13d2c9db944..eaaa14e756e22 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -3,7 +3,8 @@ from warnings import catch_warnings from distutils.version import LooseVersion from pandas import DataFrame, RangeIndex, Int64Index, get_option -from pandas.compat import range +from pandas.compat import string_types +from pandas.core.common import AbstractMethodError from pandas.io.common import get_filepath_or_buffer @@ -25,6 +26,11 @@ def get_engine(engine): except ImportError: pass + raise ImportError("Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "pyarrow or fastparquet is required for parquet " + "support") + if engine not in ['pyarrow', 'fastparquet']: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") @@ -34,37 +40,75 @@ def get_engine(engine): return FastParquetImpl() -class PyArrowImpl(object): +class BaseImpl(object): + + api = None # module + + @staticmethod + def validate_dataframe(df): + + if not isinstance(df, DataFrame): + raise ValueError("to_parquet only supports IO with DataFrames") + + # must have value column names (strings only) + if df.columns.inferred_type not in {'string', 'unicode'}: + raise ValueError("parquet must have string column names") + + # index level names must be strings + valid_names = all( + isinstance(name, string_types) + for name in df.index.names + if name is not None + ) + if not valid_names: + raise ValueError("Index level names must be strings") + + def write(self, df, path, compression, **kwargs): + raise AbstractMethodError(self) + + def read(self, path, columns=None, **kwargs): + raise AbstractMethodError(self) + + +class PyArrowImpl(BaseImpl): def __init__(self): # since pandas is a dependency of pyarrow # we need to import on first use - try: import pyarrow import pyarrow.parquet except ImportError: - raise 
ImportError("pyarrow is required for parquet support\n\n" - "you can install via conda\n" - "conda install pyarrow -c conda-forge\n" - "\nor via pip\n" - "pip install -U pyarrow\n") - + raise ImportError( + "pyarrow is required for parquet support\n\n" + "you can install via conda\n" + "conda install pyarrow -c conda-forge\n" + "\nor via pip\n" + "pip install -U pyarrow\n" + ) if LooseVersion(pyarrow.__version__) < '0.4.1': - raise ImportError("pyarrow >= 0.4.1 is required for parquet" - "support\n\n" - "you can install via conda\n" - "conda install pyarrow -c conda-forge\n" - "\nor via pip\n" - "pip install -U pyarrow\n") - - self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0' - self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0' + raise ImportError( + "pyarrow >= 0.4.1 is required for parquet support\n\n" + "you can install via conda\n" + "conda install pyarrow -c conda-forge\n" + "\nor via pip\n" + "pip install -U pyarrow\n" + ) + + self._pyarrow_lt_060 = ( + LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0')) + self._pyarrow_lt_070 = ( + LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0')) + self.api = pyarrow def write(self, df, path, compression='snappy', coerce_timestamps='ms', **kwargs): + self.validate_dataframe(df) + if self._pyarrow_lt_070: + self._validate_write_lt_070(df) path, _, _ = get_filepath_or_buffer(path) + if self._pyarrow_lt_060: table = self.api.Table.from_pandas(df, timestamps_to_ms=True) self.api.parquet.write_table( @@ -78,36 +122,75 @@ def write(self, df, path, compression='snappy', def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) + if self._pyarrow_lt_070: + return self.api.parquet.read_pandas(path, columns=columns, + **kwargs).to_pandas() + kwargs['use_pandas_metadata'] = True return self.api.parquet.read_table(path, columns=columns, **kwargs).to_pandas() - -class FastParquetImpl(object): + def _validate_write_lt_070(self, df): + # Compatibility shim for 
pyarrow < 0.7.0 + # TODO: Remove in pandas 0.22.0 + from pandas.core.indexes.multi import MultiIndex + if isinstance(df.index, MultiIndex): + msg = ( + "Multi-index DataFrames are only supported " + "with pyarrow >= 0.7.0" + ) + raise ValueError(msg) + # Validate index + if not isinstance(df.index, Int64Index): + msg = ( + "pyarrow < 0.7.0 does not support serializing {} for the " + "index; you can .reset_index() to make the index into " + "column(s), or install the latest version of pyarrow or " + "fastparquet." + ) + raise ValueError(msg.format(type(df.index))) + if not df.index.equals(RangeIndex(len(df))): + raise ValueError( + "pyarrow < 0.7.0 does not support serializing a non-default " + "index; you can .reset_index() to make the index into " + "column(s), or install the latest version of pyarrow or " + "fastparquet." + ) + if df.index.name is not None: + raise ValueError( + "pyarrow < 0.7.0 does not serialize indexes with a name; you " + "can set the index.name to None or install the latest version " + "of pyarrow or fastparquet." 
+ ) + + +class FastParquetImpl(BaseImpl): def __init__(self): # since pandas is a dependency of fastparquet # we need to import on first use - try: import fastparquet except ImportError: - raise ImportError("fastparquet is required for parquet support\n\n" - "you can install via conda\n" - "conda install fastparquet -c conda-forge\n" - "\nor via pip\n" - "pip install -U fastparquet") - + raise ImportError( + "fastparquet is required for parquet support\n\n" + "you can install via conda\n" + "conda install fastparquet -c conda-forge\n" + "\nor via pip\n" + "pip install -U fastparquet" + ) if LooseVersion(fastparquet.__version__) < '0.1.0': - raise ImportError("fastparquet >= 0.1.0 is required for parquet " - "support\n\n" - "you can install via conda\n" - "conda install fastparquet -c conda-forge\n" - "\nor via pip\n" - "pip install -U fastparquet") - + raise ImportError( + "fastparquet >= 0.1.0 is required for parquet " + "support\n\n" + "you can install via conda\n" + "conda install fastparquet -c conda-forge\n" + "\nor via pip\n" + "pip install -U fastparquet" + ) self.api = fastparquet def write(self, df, path, compression='snappy', **kwargs): + self.validate_dataframe(df) # thriftpy/protocol/compact.py:339: # DeprecationWarning: tostring() is deprecated. # Use tobytes() instead. 
@@ -118,7 +201,8 @@ def write(self, df, path, compression='snappy', **kwargs): def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.ParquetFile(path).to_pandas(columns=columns, **kwargs) + parquet_file = self.api.ParquetFile(path) + return parquet_file.to_pandas(columns=columns, **kwargs) def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): @@ -139,43 +223,7 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): kwargs Additional keyword arguments passed to the engine """ - impl = get_engine(engine) - - if not isinstance(df, DataFrame): - raise ValueError("to_parquet only support IO with DataFrames") - - valid_types = {'string', 'unicode'} - - # validate index - # -------------- - - # validate that we have only a default index - # raise on anything else as we don't serialize the index - - if not isinstance(df.index, Int64Index): - raise ValueError("parquet does not support serializing {} " - "for the index; you can .reset_index()" - "to make the index into column(s)".format( - type(df.index))) - - if not df.index.equals(RangeIndex.from_range(range(len(df)))): - raise ValueError("parquet does not support serializing a " - "non-default index for the index; you " - "can .reset_index() to make the index " - "into column(s)") - - if df.index.name is not None: - raise ValueError("parquet does not serialize index meta-data on a " - "default index") - - # validate columns - # ---------------- - - # must have value column names (strings only) - if df.columns.inferred_type not in valid_types: - raise ValueError("parquet must have string column names") - return impl.write(df, path, compression=compression, **kwargs) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 40955c50f6b5f..2a1aaf2f66469 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2137,10 +2137,17 @@ def convert(self, values, nan_rep, encoding): # if we have stored a NaN in the categories # 
then strip it; in theory we could have BOTH # -1s in the codes and nulls :< - mask = isna(categories) - if mask.any(): - categories = categories[~mask] - codes[codes != -1] -= mask.astype(int).cumsum().values + if categories is None: + # Handle case of NaN-only categorical columns in which case + # the categories are an empty array; when this is stored, + # pytables cannot write a zero-len array, so on readback + # the categories would be None and `read_hdf()` would fail. + categories = Index([], dtype=np.float64) + else: + mask = isna(categories) + if mask.any(): + categories = categories[~mask] + codes[codes != -1] -= mask.astype(int).cumsum().values self.data = Categorical.from_codes(codes, categories=categories, diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 8d9ac59cf9883..20a9916ad6bc4 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -211,6 +211,40 @@ def test_ufunc_coercions(self): tm.assert_index_equal(result, exp) assert result.freq == 'D' + def test_datetimeindex_sub_timestamp_overflow(self): + dtimax = pd.to_datetime(['now', pd.Timestamp.max]) + dtimin = pd.to_datetime(['now', pd.Timestamp.min]) + + tsneg = Timestamp('1950-01-01') + ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + + tspos = Timestamp('1980-01-01') + ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + + for variant in ts_neg_variants: + with pytest.raises(OverflowError): + dtimax - variant + + expected = pd.Timestamp.max.value - tspos.value + for variant in ts_pos_variants: + res = dtimax - variant + assert res[1].value == expected + + expected = pd.Timestamp.min.value - tsneg.value + for variant in ts_neg_variants: + res = dtimin - variant + assert 
res[1].value == expected + + for variant in ts_pos_variants: + with pytest.raises(OverflowError): + dtimin - variant + def test_week_of_month_frequency(self): # GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise d1 = date(2002, 9, 1) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index f4f669ee1d087..3cf56dc5115c2 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1282,3 +1282,23 @@ def test_add_overflow(self): result = (to_timedelta([pd.NaT, '5 days', '1 hours']) + to_timedelta(['7 seconds', pd.NaT, '4 hours'])) tm.assert_index_equal(result, exp) + + def test_timedeltaindex_add_timestamp_nat_masking(self): + # GH17991 checking for overflow-masking with NaT + tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + + tsneg = Timestamp('1950-01-01') + ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + + tspos = Timestamp('1980-01-01') + ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + + for variant in ts_neg_variants + ts_pos_variants: + res = tdinat + variant + assert res[1] is pd.NaT diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index c182db35c0c89..4e59779cb9b47 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1245,7 +1245,9 @@ class TestCanHoldElement(object): @pytest.mark.parametrize('value, dtype', [ (1, 'i8'), (1.0, 'f8'), + (2**63, 'f8'), (1j, 'complex128'), + (2**63, 'complex128'), (True, 'bool'), (np.timedelta64(20, 'ns'), '<m8[ns]'), (np.datetime64(20, 'ns'), '<M8[ns]'), diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index c0b7d4cee384a..5504ac942f688 100644 --- 
a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -91,6 +91,29 @@ def test_to_latex_format(self, frame): assert withindex_result == withindex_expected + def test_to_latex_empty(self): + df = DataFrame() + result = df.to_latex() + expected = r"""\begin{tabular}{l} +\toprule +Empty DataFrame +Columns: Index([], dtype='object') +Index: Index([], dtype='object') \\ +\bottomrule +\end{tabular} +""" + assert result == expected + + result = df.to_latex(longtable=True) + expected = r"""\begin{longtable}{l} +\toprule +Empty DataFrame +Columns: Index([], dtype='object') +Index: Index([], dtype='object') \\ +\end{longtable} +""" + assert result == expected + def test_to_latex_with_formatters(self): df = DataFrame({'int': [1, 2, 3], 'float': [1.0, 2.0, 3.0], @@ -377,7 +400,7 @@ def test_to_latex_longtable(self, frame): 1 & 2 & b2 \\ \end{longtable} """ - + open("expected.txt", "w").write(withindex_result) assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, longtable=True) @@ -387,7 +410,7 @@ def test_to_latex_longtable(self, frame): \midrule \endhead \midrule -\multicolumn{3}{r}{{Continued on next page}} \\ +\multicolumn{2}{r}{{Continued on next page}} \\ \midrule \endfoot @@ -400,6 +423,14 @@ def test_to_latex_longtable(self, frame): assert withoutindex_result == withoutindex_expected + df = DataFrame({'a': [1, 2]}) + with1column_result = df.to_latex(index=False, longtable=True) + assert "\multicolumn{1}" in with1column_result + + df = DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) + with3columns_result = df.to_latex(index=False, longtable=True) + assert "\multicolumn{3}" in with3columns_result + def test_to_latex_escape_special_chars(self): special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^', '\\'] diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index e7bcff22371b7..8c88cf076319b 100644 --- a/pandas/tests/io/test_parquet.py +++ 
b/pandas/tests/io/test_parquet.py @@ -181,15 +181,14 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): class Base(object): def check_error_on_write(self, df, engine, exc): - # check that we are raising the exception - # on writing - + # check that we are raising the exception on writing with pytest.raises(exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) def check_round_trip(self, df, engine, expected=None, - write_kwargs=None, read_kwargs=None): + write_kwargs=None, read_kwargs=None, + check_names=True): if write_kwargs is None: write_kwargs = {} if read_kwargs is None: @@ -200,7 +199,7 @@ def check_round_trip(self, df, engine, expected=None, if expected is None: expected = df - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_names=check_names) # repeat to_parquet(df, path, engine, **write_kwargs) @@ -208,7 +207,7 @@ def check_round_trip(self, df, engine, expected=None, if expected is None: expected = df - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_names=check_names) class TestBasic(Base): @@ -247,33 +246,6 @@ def test_columns_dtypes_invalid(self, engine): datetime.datetime(2011, 1, 1, 1, 1)] self.check_error_on_write(df, engine, ValueError) - def test_write_with_index(self, engine): - - df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, write_kwargs={'compression': None}) - - # non-default index - for index in [[2, 3, 4], - pd.date_range('20130101', periods=3), - list('abc'), - [1, 3, 4], - pd.MultiIndex.from_tuples([('a', 1), ('a', 2), - ('b', 1)]), - ]: - - df.index = index - self.check_error_on_write(df, engine, ValueError) - - # index with meta-data - df.index = [0, 1, 2] - df.index.name = 'foo' - self.check_error_on_write(df, engine, ValueError) - - # column multi-index - df.index = [0, 1, 2] - df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]), - self.check_error_on_write(df, engine, 
ValueError) - @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli']) def test_compression(self, engine, compression): @@ -297,6 +269,72 @@ def test_read_columns(self, engine): write_kwargs={'compression': None}, read_kwargs={'columns': ['string']}) + def test_write_index(self, engine): + check_names = engine != 'fastparquet' + + if engine == 'pyarrow': + import pyarrow + if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'): + pytest.skip("pyarrow is < 0.7.0") + + df = pd.DataFrame({'A': [1, 2, 3]}) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) + + indexes = [ + [2, 3, 4], + pd.date_range('20130101', periods=3), + list('abc'), + [1, 3, 4], + ] + # non-default index + for index in indexes: + df.index = index + self.check_round_trip( + df, engine, + write_kwargs={'compression': None}, + check_names=check_names) + + # index with meta-data + df.index = [0, 1, 2] + df.index.name = 'foo' + self.check_round_trip(df, engine, write_kwargs={'compression': None}) + + def test_write_multiindex(self, pa_ge_070): + # Not suppoprted in fastparquet as of 0.1.3 or older pyarrow version + engine = pa_ge_070 + + df = pd.DataFrame({'A': [1, 2, 3]}) + index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) + df.index = index + self.check_round_trip(df, engine, write_kwargs={'compression': None}) + + def test_write_column_multiindex(self, engine): + # column multi-index + mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) + df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns) + self.check_error_on_write(df, engine, ValueError) + + def test_multiindex_with_columns(self, pa_ge_070): + + engine = pa_ge_070 + dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS') + df = pd.DataFrame(np.random.randn(2 * len(dates), 3), + columns=list('ABC')) + index1 = pd.MultiIndex.from_product( + [['Level1', 'Level2'], dates], + names=['level', 'date']) + index2 = index1.copy(names=None) + for index in 
[index1, index2]: + df.index = index + with tm.ensure_clean() as path: + df.to_parquet(path, engine) + result = read_parquet(path, engine) + expected = df + tm.assert_frame_equal(result, expected) + result = read_parquet(path, engine, columns=['A', 'B']) + expected = df[['A', 'B']] + tm.assert_frame_equal(result, expected) + class TestParquetPyArrow(Base): @@ -322,14 +360,12 @@ def test_basic(self, pa): self.check_round_trip(df, pa) def test_duplicate_columns(self, pa): - # not currently able to handle duplicate columns df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('aaa')).copy() self.check_error_on_write(df, pa, ValueError) def test_unsupported(self, pa): - # period df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)}) self.check_error_on_write(df, pa, ValueError) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index a97747b93369f..a7cc6b711802e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -4928,6 +4928,25 @@ def test_categorical_conversion(self): result = read_hdf(path, 'df', where='obsids=B') tm.assert_frame_equal(result, expected) + def test_categorical_nan_only_columns(self): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. 
+ df = pd.DataFrame({ + 'a': ['a', 'b', 'c', np.nan], + 'b': [np.nan, np.nan, np.nan, np.nan], + 'c': [1, 2, 3, 4], + 'd': pd.Series([None] * 4, dtype=object) + }) + df['a'] = df.a.astype('category') + df['b'] = df.b.astype('category') + df['d'] = df.b.astype('category') + expected = df + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df') + tm.assert_frame_equal(result, expected) + def test_duplicate_column_name(self): df = DataFrame(columns=["a", "a"], data=[[0, 0]]) diff --git a/setup.cfg b/setup.cfg index 0123078523b6f..7a88ee8557dc7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,7 +12,7 @@ tag_prefix = v parentdir_prefix = pandas- [flake8] -ignore = E731,E402 +ignore = E731,E402,W503 max-line-length = 79 [yapf]
https://api.github.com/repos/pandas-dev/pandas/pulls/18732
2017-12-11T20:42:07Z
2017-12-12T02:37:19Z
2017-12-12T02:37:19Z
2017-12-12T02:38:31Z
COMPAT: Emit warning when groupby by a tuple
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 316244b583aa2..552ddabb7359a 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -1091,7 +1091,7 @@ You can also select multiple rows from each group by specifying multiple nth val business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B') df = pd.DataFrame(1, index=business_dates, columns=['a', 'b']) # get the first, 4th, and last date index for each month - df.groupby((df.index.year, df.index.month)).nth([0, 3, -1]) + df.groupby([df.index.year, df.index.month]).nth([0, 3, -1]) Enumerate group items ~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index dfd222f10d235..ae6d0816abc41 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -203,6 +203,10 @@ Deprecations - ``Series.from_array`` and ``SparseSeries.from_array`` are deprecated. Use the normal constructor ``Series(..)`` and ``SparseSeries(..)`` instead (:issue:`18213`). - ``DataFrame.as_matrix`` is deprecated. Use ``DataFrame.values`` instead (:issue:`18458`). - ``Series.asobject``, ``DatetimeIndex.asobject``, ``PeriodIndex.asobject`` and ``TimeDeltaIndex.asobject`` have been deprecated. Use ``.astype(object)`` instead (:issue:`18572`) +- Grouping by a tuple of keys now emits a ``FutureWarning`` and is deprecated. + In the future, a tuple passed to ``'by'`` will always refer to a single key + that is the actual tuple, instead of treating the tuple as multiple keys. To + retain the previous behavior, use a list instead of a tuple (:issue:`18314`) - ``Series.valid`` is deprecated. Use :meth:`Series.dropna` instead (:issue:`18800`). .. 
_whatsnew_0220.prior_deprecations: diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index a5d8cc254cd93..b4223ac0a177a 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -28,6 +28,7 @@ is_bool_dtype, is_scalar, is_list_like, + is_hashable, needs_i8_conversion, _ensure_float64, _ensure_platform_int, @@ -2850,7 +2851,27 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, elif isinstance(key, BaseGrouper): return key, [], obj - # Everything which is not a list is a key (including tuples): + # In the future, a tuple key will always mean an actual key, + # not an iterable of keys. In the meantime, we attempt to provide + # a warning. We can assume that the user wanted a list of keys when + # the key is not in the index. We just have to be careful with + # unhashble elements of `key`. Any unhashable elements implies that + # they wanted a list of keys. + # https://github.com/pandas-dev/pandas/issues/18314 + is_tuple = isinstance(key, tuple) + all_hashable = is_tuple and is_hashable(key) + + if is_tuple: + if ((all_hashable and key not in obj and set(key).issubset(obj)) + or not all_hashable): + # column names ('a', 'b') -> ['a', 'b'] + # arrays like (a, b) -> [a, b] + msg = ("Interpreting tuple 'by' as a list of keys, rather than " + "a single key. Use 'by=[...]' instead of 'by=(...)'. 
In " + "the future, a tuple will always mean a single key.") + warnings.warn(msg, FutureWarning, stacklevel=5) + key = list(key) + if not isinstance(key, list): keys = [key] match_axis_length = False diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 3436dd9169081..3327612b016f4 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2727,6 +2727,38 @@ def test_empty_dataframe_groupby(self): assert_frame_equal(result, expected) + def test_tuple_warns(self): + # https://github.com/pandas-dev/pandas/issues/18314 + df = pd.DataFrame({('a', 'b'): [1, 1, 2, 2], 'a': [1, 1, 1, 2], + 'b': [1, 2, 2, 2], 'c': [1, 1, 1, 1]}) + with tm.assert_produces_warning(FutureWarning) as w: + df[['a', 'b', 'c']].groupby(('a', 'b')).c.mean() + + assert "Interpreting tuple 'by' as a list" in str(w[0].message) + + with tm.assert_produces_warning(None): + df.groupby(('a', 'b')).c.mean() + + def test_tuple_warns_unhashable(self): + # https://github.com/pandas-dev/pandas/issues/18314 + business_dates = date_range(start='4/1/2014', end='6/30/2014', + freq='B') + df = DataFrame(1, index=business_dates, columns=['a', 'b']) + + with tm.assert_produces_warning(FutureWarning) as w: + df.groupby((df.index.year, df.index.month)).nth([0, 3, -1]) + + assert "Interpreting tuple 'by' as a list" in str(w[0].message) + + @pytest.mark.xfail(reason="GH-18798") + def test_tuple_correct_keyerror(self): + # https://github.com/pandas-dev/pandas/issues/18798 + df = pd.DataFrame(1, index=range(3), + columns=pd.MultiIndex.from_product([[1, 2], + [3, 4]])) + with tm.assert_raises_regex(KeyError, "(7, 8)"): + df.groupby((7, 8)).mean() + def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values)
Closes https://github.com/pandas-dev/pandas/issues/18314
https://api.github.com/repos/pandas-dev/pandas/pulls/18731
2017-12-11T19:05:39Z
2017-12-18T18:37:45Z
2017-12-18T18:37:45Z
2017-12-18T18:37:49Z
DOC: fix options table
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 49ac516af6d37..a6bc9431d3bcc 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -340,7 +340,7 @@ The following methods are available: The weights used in the window are specified by the ``win_type`` keyword. The list of recognized types are the `scipy.signal window functions - <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: +<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: - ``boxcar`` - ``triang`` diff --git a/doc/source/options.rst b/doc/source/options.rst index db3380bd4a3e7..505a5ade68de0 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -433,7 +433,7 @@ compute.use_numexpr True Use the numexpr library to computation if it is installed. plotting.matplotlib.register_converters True Register custom converters with matplotlib. Set to False to de-register. -======================================= ============ ======================================== +======================================= ============ ================================== .. _basics.console_output:
https://api.github.com/repos/pandas-dev/pandas/pulls/18730
2017-12-11T18:58:09Z
2017-12-11T21:03:51Z
2017-12-11T21:03:51Z
2017-12-11T21:03:56Z
DOC: read_csv usecols element order is ignored
diff --git a/doc/source/io.rst b/doc/source/io.rst index 54e7a11c5f2b1..01ac9517cddc0 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -131,7 +131,8 @@ usecols : array-like or callable, default ``None`` be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid array-like - `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. + `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element + order is ignored, so usecols=[0,1] is the same as [1, 0]. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True: @@ -2822,11 +2823,11 @@ to be parsed. If `usecols` is a list of integers, then it is assumed to be the file column indices to be parsed. - .. code-block:: python read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) +Element order is ignored, so usecols=[0,1] is the same as [1,0]. Parsing Dates +++++++++++++ diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a04d77de08950..adfcaf43a92a5 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -101,7 +101,8 @@ be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid array-like - `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. + `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element + order is ignored, so usecols=[1,0] is the same as [0,1]. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True. An
- [x] closes #18673 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18729
2017-12-11T17:30:17Z
2017-12-21T15:24:08Z
2017-12-21T15:24:08Z
2017-12-22T12:50:53Z
centralize and de-privatize month/day name constants
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index a68ecbd2e8629..d7edae865911a 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -33,6 +33,17 @@ cdef int32_t* _month_offset = [ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366] +# Canonical location for other modules to find name constants +MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', + 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] +MONTH_NUMBERS = {name: num for num, name in enumerate(MONTHS)} +MONTH_ALIASES = {(num + 1): name for num, name in enumerate(MONTHS)} +MONTH_TO_CAL_NUM = {name: num + 1 for num, name in enumerate(MONTHS)} + +DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] +int_to_weekday = {num: name for num, name in enumerate(DAYS)} +weekday_to_int = {int_to_weekday[key]: key for key in int_to_weekday} + # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 950677b3b53db..18101c834c737 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -139,8 +139,8 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, int mo_off, dom, doy, dow, ldom _month_offset = np.array( - [[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], - [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]], + [[0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], + [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]], dtype=np.int32) count = len(dtindex) @@ -380,8 +380,8 @@ def get_date_field(ndarray[int64_t] dtindex, object field): int mo_off, doy, dow _month_offset = np.array( - [[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ], - [ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 ]], + [[0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], + [0, 31, 60, 91, 121, 152, 
182, 213, 244, 274, 305, 335, 366]], dtype=np.int32 ) count = len(dtindex) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 29e14103dfe20..933e7ed64b837 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -17,6 +17,7 @@ np.import_array() from util cimport is_string_object, is_integer_object +from ccalendar import MONTHS, DAYS from conversion cimport tz_convert_single, pydt_to_i8 from frequencies cimport get_freq_code from nattype cimport NPY_NAT @@ -27,14 +28,9 @@ from np_datetime cimport (pandas_datetimestruct, # --------------------------------------------------------------------- # Constants -# Duplicated in tslib -_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', - 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] -_int_to_month = {(k + 1): v for k, v in enumerate(_MONTHS)} -_month_to_int = {v: k for k, v in _int_to_month.items()} - class WeekDay(object): + # TODO: Remove: This is not used outside of tests MON = 0 TUE = 1 WED = 2 @@ -44,18 +40,6 @@ class WeekDay(object): SUN = 6 -_int_to_weekday = { - WeekDay.MON: 'MON', - WeekDay.TUE: 'TUE', - WeekDay.WED: 'WED', - WeekDay.THU: 'THU', - WeekDay.FRI: 'FRI', - WeekDay.SAT: 'SAT', - WeekDay.SUN: 'SUN'} - -_weekday_to_int = {_int_to_weekday[key]: key for key in _int_to_weekday} - - _offset_to_period_map = { 'WEEKDAY': 'D', 'EOM': 'M', @@ -88,17 +72,16 @@ _offset_to_period_map = { need_suffix = ['QS', 'BQ', 'BQS', 'YS', 'AS', 'BY', 'BA', 'BYS', 'BAS'] for __prefix in need_suffix: - for _m in _MONTHS: + for _m in MONTHS: key = '%s-%s' % (__prefix, _m) _offset_to_period_map[key] = _offset_to_period_map[__prefix] for __prefix in ['A', 'Q']: - for _m in _MONTHS: + for _m in MONTHS: _alias = '%s-%s' % (__prefix, _m) _offset_to_period_map[_alias] = _alias -_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] -for _d in _days: +for _d in DAYS: _offset_to_period_map['W-%s' % _d] = 'W-%s' % _d diff --git a/pandas/_libs/tslibs/parsing.pyx 
b/pandas/_libs/tslibs/parsing.pyx index 8ce1d9cdf2158..a9a5500cd7447 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -41,6 +41,9 @@ from dateutil.relativedelta import relativedelta from dateutil.parser import DEFAULTPARSER from dateutil.parser import parse as du_parse +from ccalendar import MONTH_NUMBERS +from nattype import nat_strings + # ---------------------------------------------------------------------- # Constants @@ -49,14 +52,8 @@ class DateParseError(ValueError): pass -_nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) - _DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0, second=0, microsecond=0) -_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', - 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] -_MONTH_NUMBERS = {k: i for i, k in enumerate(_MONTHS)} -_MONTH_ALIASES = {(k + 1): v for k, v in enumerate(_MONTHS)} cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])') @@ -213,7 +210,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default, # len(date_string) == 0 # should be NaT??? 
- if date_string in _nat_strings: + if date_string in nat_strings: return NAT_SENTINEL, NAT_SENTINEL, '' date_string = date_string.upper() @@ -267,7 +264,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default, if freq is not None: # hack attack, #1228 try: - mnum = _MONTH_NUMBERS[_get_rule_month(freq)] + 1 + mnum = MONTH_NUMBERS[_get_rule_month(freq)] + 1 except (KeyError, ValueError): msg = ('Unable to retrieve month information from given ' 'freq: {0}').format(freq) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index cf73257caf227..42570e355e2bf 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -32,8 +32,9 @@ from timestamps import Timestamp from timezones cimport is_utc, is_tzlocal, get_utcoffset, get_dst_info from timedeltas cimport delta_to_nanoseconds +from ccalendar import MONTH_NUMBERS from parsing import (parse_time_string, NAT_SENTINEL, - _get_rule_month, _MONTH_NUMBERS) + _get_rule_month) from frequencies cimport get_freq_code from resolution import resolution, Resolution from nattype import nat_strings, NaT, iNaT @@ -1148,7 +1149,7 @@ def _quarter_to_myear(year, quarter, freq): if quarter <= 0 or quarter > 4: raise ValueError('Quarter must be 1 <= q <= 4') - mnum = _MONTH_NUMBERS[_get_rule_month(freq)] + 1 + mnum = MONTH_NUMBERS[_get_rule_month(freq)] + 1 month = (mnum + (quarter - 1) * 3) % 12 + 1 if month > mnum: year -= 1 diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index d2b518c74a1e3..9cb2c450524fb 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -17,13 +17,13 @@ from pandas._libs.khash cimport (khiter_t, from cpython.datetime cimport datetime -from np_datetime cimport (pandas_datetimestruct, - dtstruct_to_dt64, dt64_to_dtstruct) +from np_datetime cimport pandas_datetimestruct, dt64_to_dtstruct from frequencies cimport get_freq_code from timezones cimport (is_utc, is_tzlocal, 
maybe_get_tz, get_dst_info, get_utcoffset) from fields import build_field_sarray from conversion import tz_convert +from ccalendar import DAYS, MONTH_ALIASES, int_to_weekday from pandas._libs.properties import cache_readonly from pandas._libs.tslib import Timestamp @@ -50,13 +50,6 @@ _ONE_MINUTE = 60 * _ONE_SECOND _ONE_HOUR = 60 * _ONE_MINUTE _ONE_DAY = 24 * _ONE_HOUR -DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] -_weekday_rule_aliases = {k: v for k, v in enumerate(DAYS)} - -_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', - 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] -_MONTH_ALIASES = {(k + 1): v for k, v in enumerate(_MONTHS)} - # ---------------------------------------------------------------------- cpdef resolution(ndarray[int64_t] stamps, tz=None): @@ -354,7 +347,7 @@ class Resolution(object): # Frequency Inference -# TODO: this is non performiant logic here (and duplicative) and this +# TODO: this is non performant logic here (and duplicative) and this # simply should call unique_1d directly # plus no reason to depend on khash directly cdef unique_deltas(ndarray[int64_t] arr): @@ -537,7 +530,7 @@ class _FrequencyInferer(object): annual_rule = self._get_annual_rule() if annual_rule: nyears = self.ydiffs[0] - month = _MONTH_ALIASES[self.rep_stamp.month] + month = MONTH_ALIASES[self.rep_stamp.month] alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month) return _maybe_add_count(alias, nyears) @@ -545,7 +538,7 @@ class _FrequencyInferer(object): if quarterly_rule: nquarters = self.mdiffs[0] / 3 mod_dict = {0: 12, 2: 11, 1: 10} - month = _MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] + month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] alias = '{prefix}-{month}'.format(prefix=quarterly_rule, month=month) return _maybe_add_count(alias, nquarters) @@ -558,7 +551,7 @@ class _FrequencyInferer(object): days = self.deltas[0] / _ONE_DAY if days % 7 == 0: # Weekly - day = _weekday_rule_aliases[self.rep_stamp.weekday()] + day = 
int_to_weekday[self.rep_stamp.weekday()] return _maybe_add_count('W-{day}'.format(day=day), days / 7) else: return _maybe_add_count('D', days) @@ -630,7 +623,7 @@ class _FrequencyInferer(object): # get which week week = week_of_months[0] + 1 - wd = _weekday_rule_aliases[weekdays[0]] + wd = int_to_weekday[weekdays[0]] return 'WOM-{week}{weekday}'.format(week=week, weekday=wd) @@ -642,7 +635,7 @@ class _TimedeltaFrequencyInferer(_FrequencyInferer): days = self.deltas[0] / _ONE_DAY if days % 7 == 0: # Weekly - wd = _weekday_rule_aliases[self.rep_stamp.weekday()] + wd = int_to_weekday[self.rep_stamp.weekday()] alias = 'W-{weekday}'.format(weekday=wd) return _maybe_add_count(alias, days / 7) else: diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 9cbcfa4f46008..9df23948ae627 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -6,7 +6,8 @@ import pandas.core.indexes.period as period from pandas.compat import lrange from pandas.tseries.frequencies import get_freq -from pandas._libs.tslibs.resolution import _MONTHS as MONTHS + +from pandas._libs.tslibs.ccalendar import MONTHS from pandas._libs.tslibs.period import period_ordinal, period_asfreq from pandas import (PeriodIndex, Period, DatetimeIndex, Timestamp, Series, date_range, to_datetime, period_range) diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index eb6363689cca0..792eb0d49077f 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -12,9 +12,9 @@ from pandas._libs import tslib from pandas._libs.tslibs import period as libperiod +from pandas._libs.tslibs.ccalendar import DAYS, MONTHS from pandas._libs.tslibs.parsing import DateParseError from pandas import Period, Timestamp, offsets -from pandas._libs.tslibs.resolution import DAYS, _MONTHS as MONTHS class TestPeriodProperties(object): diff --git a/pandas/tests/test_resample.py 
b/pandas/tests/test_resample.py index 1fd6befd64f57..f00fa07d868a1 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -22,7 +22,7 @@ from pandas.core.base import SpecificationError, AbstractMethodError from pandas.errors import UnsupportedFunctionCall from pandas.core.groupby import DataError -from pandas._libs.tslibs.resolution import DAYS, _MONTHS as MONTHS + from pandas.tseries.frequencies import to_offset from pandas.core.indexes.datetimes import date_range from pandas.tseries.offsets import Minute, BDay @@ -33,6 +33,7 @@ from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, assert_index_equal) from pandas._libs.tslibs.period import IncompatibleFrequency +from pandas._libs.tslibs.ccalendar import DAYS, MONTHS bday = BDay() diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 460ad3f5591fc..f6e3d1f271036 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -25,7 +25,8 @@ from pandas._libs.tslibs.resolution import (Resolution, _FrequencyInferer, _TimedeltaFrequencyInferer) -from pandas._libs.tslibs.parsing import _get_rule_month, _MONTH_NUMBERS +from pandas._libs.tslibs.parsing import _get_rule_month +from pandas._libs.tslibs.ccalendar import MONTH_NUMBERS from pytz import AmbiguousTimeError @@ -496,8 +497,8 @@ def _is_annual(rule): def _quarter_months_conform(source, target): - snum = _MONTH_NUMBERS[source] - tnum = _MONTH_NUMBERS[target] + snum = MONTH_NUMBERS[source] + tnum = MONTH_NUMBERS[target] return snum % 3 == tnum % 3 diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index dd5f01a36a43e..8b12b2f3ad2ce 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -16,13 +16,13 @@ from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta from pandas.util._decorators import cache_readonly +from pandas._libs.tslibs import ccalendar from pandas._libs.tslibs.timedeltas import 
delta_to_nanoseconds import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import ( ApplyTypeError, as_datetime, _is_normalized, _get_calendar, _to_dt64, _validate_business_time, - _int_to_weekday, _weekday_to_int, _determine_offset, apply_index_wraps, roll_yearday, @@ -933,7 +933,7 @@ def name(self): if self.isAnchored: return self.rule_code else: - month = liboffsets._int_to_month[self.n] + month = ccalendar.MONTH_ALIASES[self.n] return "{code}-{month}".format(code=self.rule_code, month=month) @@ -1348,7 +1348,8 @@ def onOffset(self, dt): def rule_code(self): suffix = '' if self.weekday is not None: - suffix = '-{weekday}'.format(weekday=_int_to_weekday[self.weekday]) + weekday = ccalendar.int_to_weekday[self.weekday] + suffix = '-{weekday}'.format(weekday=weekday) return self._prefix + suffix @classmethod @@ -1356,7 +1357,7 @@ def _from_name(cls, suffix=None): if not suffix: weekday = None else: - weekday = _weekday_to_int[suffix] + weekday = ccalendar.weekday_to_int[suffix] return cls(weekday=weekday) @@ -1430,7 +1431,7 @@ def onOffset(self, dt): @property def rule_code(self): - weekday = _int_to_weekday.get(self.weekday, '') + weekday = ccalendar.int_to_weekday.get(self.weekday, '') return '{prefix}-{week}{weekday}'.format(prefix=self._prefix, week=self.week + 1, weekday=weekday) @@ -1443,7 +1444,7 @@ def _from_name(cls, suffix=None): # TODO: handle n here... # only one digit weeks (1 --> week 0, 2 --> week 1, etc.) week = int(suffix[0]) - 1 - weekday = _weekday_to_int[suffix[1:]] + weekday = ccalendar.weekday_to_int[suffix[1:]] return cls(week=week, weekday=weekday) @@ -1509,7 +1510,7 @@ def onOffset(self, dt): @property def rule_code(self): - weekday = _int_to_weekday.get(self.weekday, '') + weekday = ccalendar.int_to_weekday.get(self.weekday, '') return '{prefix}-{weekday}'.format(prefix=self._prefix, weekday=weekday) @@ -1519,7 +1520,7 @@ def _from_name(cls, suffix=None): raise ValueError("Prefix {prefix!r} requires a suffix." 
.format(prefix=cls._prefix)) # TODO: handle n here... - weekday = _weekday_to_int[suffix] + weekday = ccalendar.weekday_to_int[suffix] return cls(weekday=weekday) # --------------------------------------------------------------------- @@ -1550,7 +1551,7 @@ def isAnchored(self): def _from_name(cls, suffix=None): kwargs = {} if suffix: - kwargs['startingMonth'] = liboffsets._month_to_int[suffix] + kwargs['startingMonth'] = ccalendar.MONTH_TO_CAL_NUM[suffix] else: if cls._from_name_startingMonth is not None: kwargs['startingMonth'] = cls._from_name_startingMonth @@ -1558,7 +1559,7 @@ def _from_name(cls, suffix=None): @property def rule_code(self): - month = liboffsets._int_to_month[self.startingMonth] + month = ccalendar.MONTH_ALIASES[self.startingMonth] return '{prefix}-{month}'.format(prefix=self._prefix, month=month) @apply_wraps @@ -1681,12 +1682,12 @@ def __init__(self, n=1, normalize=False, month=None): def _from_name(cls, suffix=None): kwargs = {} if suffix: - kwargs['month'] = liboffsets._month_to_int[suffix] + kwargs['month'] = ccalendar.MONTH_TO_CAL_NUM[suffix] return cls(**kwargs) @property def rule_code(self): - month = liboffsets._int_to_month[self.month] + month = ccalendar.MONTH_ALIASES[self.month] return '{prefix}-{month}'.format(prefix=self._prefix, month=month) @@ -1906,8 +1907,8 @@ def _get_suffix_prefix(self): def get_rule_code_suffix(self): prefix = self._get_suffix_prefix() - month = liboffsets._int_to_month[self.startingMonth] - weekday = _int_to_weekday[self.weekday] + month = ccalendar.MONTH_ALIASES[self.startingMonth] + weekday = ccalendar.int_to_weekday[self.weekday] return '{prefix}-{month}-{weekday}'.format(prefix=prefix, month=month, weekday=weekday) @@ -1921,8 +1922,8 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code): raise ValueError("Unable to parse varion_code: " "{code}".format(code=varion_code)) - startingMonth = liboffsets._month_to_int[startingMonth_code] - weekday = _weekday_to_int[weekday_code] + 
startingMonth = ccalendar.MONTH_TO_CAL_NUM[startingMonth_code] + weekday = ccalendar.weekday_to_int[weekday_code] return {"weekday": weekday, "startingMonth": startingMonth,
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18728
2017-12-11T17:01:37Z
2017-12-12T10:55:19Z
2017-12-12T10:55:19Z
2018-01-23T04:40:57Z
bugfix for plot for string x values
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index b15c5271ae321..3e186b7e4aaee 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -561,12 +561,14 @@ def _get_xticks(self, convert_period=False): if convert_period and isinstance(index, PeriodIndex): self.data = self.data.reindex(index=index.sort_values()) x = self.data.index.to_timestamp()._mpl_repr() - elif index.is_numeric(): + elif (index.is_numeric() or + index.inferred_type in ['string', 'unicode']): """ Matplotlib supports numeric values or datetime objects as xaxis values. Taking LBYL approach here, by the time matplotlib raises exception when using non numeric/datetime values for xaxis, several actions are already taken by plt. + Matplotlib also supports strings as xaxis values. """ x = index._mpl_repr() elif is_datetype: diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 3d25b0b51e052..6dcd342b94a47 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -139,6 +139,28 @@ def test_plot(self): result = ax.get_axes() # deprecated assert result is axes[0] + # GH 18726 + def test_two_plots_with_x_str(self): + x1, y1 = ['a', 'b'], [1, 2] + x2, y2 = ['b', 'a'], [4, 3] + df1 = pd.DataFrame(y1, index=x1) + df2 = pd.DataFrame(y2, index=x2) + ax = None + ax = df1.plot(ax=ax) + ax = df2.plot(ax=ax) + + line1, line2 = ax.lines + + # xdata should not be touched (Earlier it was [0, 1]) + tm.assert_numpy_array_equal(line1.get_xdata(), np.array(x1), + check_dtype=False) + tm.assert_numpy_array_equal(line1.get_ydata(), np.array(y1), + check_dtype=False) + tm.assert_numpy_array_equal(line2.get_xdata(), np.array(x2), + check_dtype=False) + tm.assert_numpy_array_equal(line2.get_ydata(), np.array(y2), + check_dtype=False) + # GH 15516 def test_mpl2_color_cycle_str(self): # test CN mpl 2.0 color cycle
- [ ] closes #18687 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Matplotlib can handle string data, so `_get_xticks` does not need to convert strings to int
https://api.github.com/repos/pandas-dev/pandas/pulls/18726
2017-12-11T12:50:23Z
2018-10-11T01:59:30Z
null
2018-10-11T01:59:30Z
CLN: ASV rolling benchmark
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 654e5d3bfec0e..7d63d78084270 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -180,19 +180,35 @@ def setup(self, method): raise NotImplementedError win = 100 arr = np.random.rand(100000) - rolling = {'rolling_median': rolling_median, - 'rolling_mean': rolling_mean, - 'rolling_min': rolling_min, - 'rolling_max': rolling_max, - 'rolling_var': rolling_var, - 'rolling_skew': rolling_skew, - 'rolling_kurt': rolling_kurt, - 'rolling_std': rolling_std} - - @test_parallel(num_threads=2) - def parallel_rolling(): - rolling[method](arr, win) - self.parallel_rolling = parallel_rolling + if hasattr(DataFrame, 'rolling'): + rolling = {'rolling_median': 'median', + 'rolling_mean': 'mean', + 'rolling_min': 'min', + 'rolling_max': 'max', + 'rolling_var': 'var', + 'rolling_skew': 'skew', + 'rolling_kurt': 'kurt', + 'rolling_std': 'std'} + df = DataFrame(arr).rolling(win) + + @test_parallel(num_threads=2) + def parallel_rolling(): + getattr(df, rolling[method])() + self.parallel_rolling = parallel_rolling + else: + rolling = {'rolling_median': rolling_median, + 'rolling_mean': rolling_mean, + 'rolling_min': rolling_min, + 'rolling_max': rolling_max, + 'rolling_var': rolling_var, + 'rolling_skew': rolling_skew, + 'rolling_kurt': rolling_kurt, + 'rolling_std': rolling_std} + + @test_parallel(num_threads=2) + def parallel_rolling(): + rolling[method](arr, win) + self.parallel_rolling = parallel_rolling def time_rolling(self, method): self.parallel_rolling() diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 899349cd21f84..45142c53dcd01 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -1,185 +1,41 @@ -from .pandas_vb_common import * import pandas as pd import numpy as np +from .pandas_vb_common import setup # noqa -class DataframeRolling(object): - goal_time = 0.2 - def setup(self): - self.N = 100000 - self.Ns = 
10000 - self.df = pd.DataFrame({'a': np.random.random(self.N)}) - self.dfs = pd.DataFrame({'a': np.random.random(self.Ns)}) - self.wins = 10 - self.winl = 1000 +class Methods(object): - def time_rolling_quantile_0(self): - (self.df.rolling(self.wins).quantile(0.0)) + sample_time = 0.2 + params = (['DataFrame', 'Series'], + [10, 1000], + ['int', 'float'], + ['median', 'mean', 'max', 'min', 'std', 'count', 'skew', 'kurt', + 'sum', 'corr', 'cov']) + param_names = ['contructor', 'window', 'dtype', 'method'] - def time_rolling_quantile_1(self): - (self.df.rolling(self.wins).quantile(1.0)) + def setup(self, contructor, window, dtype, method): + N = 10**5 + arr = np.random.random(N).astype(dtype) + self.roll = getattr(pd, contructor)(arr).rolling(window) - def time_rolling_quantile_median(self): - (self.df.rolling(self.wins).quantile(0.5)) + def time_rolling(self, contructor, window, dtype, method): + getattr(self.roll, method)() - def time_rolling_median(self): - (self.df.rolling(self.wins).median()) - def time_rolling_mean(self): - (self.df.rolling(self.wins).mean()) +class Quantile(object): - def time_rolling_max(self): - (self.df.rolling(self.wins).max()) + sample_time = 0.2 + params = (['DataFrame', 'Series'], + [10, 1000], + ['int', 'float'], + [0, 0.5, 1]) + param_names = ['contructor', 'window', 'dtype', 'percentile'] - def time_rolling_min(self): - (self.df.rolling(self.wins).min()) + def setup(self, contructor, window, dtype, percentile): + N = 10**5 + arr = np.random.random(N).astype(dtype) + self.roll = getattr(pd, contructor)(arr).rolling(window) - def time_rolling_std(self): - (self.df.rolling(self.wins).std()) - - def time_rolling_count(self): - (self.df.rolling(self.wins).count()) - - def time_rolling_skew(self): - (self.df.rolling(self.wins).skew()) - - def time_rolling_kurt(self): - (self.df.rolling(self.wins).kurt()) - - def time_rolling_sum(self): - (self.df.rolling(self.wins).sum()) - - def time_rolling_corr(self): - 
(self.dfs.rolling(self.wins).corr()) - - def time_rolling_cov(self): - (self.dfs.rolling(self.wins).cov()) - - def time_rolling_quantile_0_l(self): - (self.df.rolling(self.winl).quantile(0.0)) - - def time_rolling_quantile_1_l(self): - (self.df.rolling(self.winl).quantile(1.0)) - - def time_rolling_quantile_median_l(self): - (self.df.rolling(self.winl).quantile(0.5)) - - def time_rolling_median_l(self): - (self.df.rolling(self.winl).median()) - - def time_rolling_mean_l(self): - (self.df.rolling(self.winl).mean()) - - def time_rolling_max_l(self): - (self.df.rolling(self.winl).max()) - - def time_rolling_min_l(self): - (self.df.rolling(self.winl).min()) - - def time_rolling_std_l(self): - (self.df.rolling(self.wins).std()) - - def time_rolling_count_l(self): - (self.df.rolling(self.wins).count()) - - def time_rolling_skew_l(self): - (self.df.rolling(self.wins).skew()) - - def time_rolling_kurt_l(self): - (self.df.rolling(self.wins).kurt()) - - def time_rolling_sum_l(self): - (self.df.rolling(self.wins).sum()) - - -class SeriesRolling(object): - goal_time = 0.2 - - def setup(self): - self.N = 100000 - self.Ns = 10000 - self.df = pd.DataFrame({'a': np.random.random(self.N)}) - self.dfs = pd.DataFrame({'a': np.random.random(self.Ns)}) - self.sr = self.df.a - self.srs = self.dfs.a - self.wins = 10 - self.winl = 1000 - - def time_rolling_quantile_0(self): - (self.sr.rolling(self.wins).quantile(0.0)) - - def time_rolling_quantile_1(self): - (self.sr.rolling(self.wins).quantile(1.0)) - - def time_rolling_quantile_median(self): - (self.sr.rolling(self.wins).quantile(0.5)) - - def time_rolling_median(self): - (self.sr.rolling(self.wins).median()) - - def time_rolling_mean(self): - (self.sr.rolling(self.wins).mean()) - - def time_rolling_max(self): - (self.sr.rolling(self.wins).max()) - - def time_rolling_min(self): - (self.sr.rolling(self.wins).min()) - - def time_rolling_std(self): - (self.sr.rolling(self.wins).std()) - - def time_rolling_count(self): - 
(self.sr.rolling(self.wins).count()) - - def time_rolling_skew(self): - (self.sr.rolling(self.wins).skew()) - - def time_rolling_kurt(self): - (self.sr.rolling(self.wins).kurt()) - - def time_rolling_sum(self): - (self.sr.rolling(self.wins).sum()) - - def time_rolling_corr(self): - (self.srs.rolling(self.wins).corr()) - - def time_rolling_cov(self): - (self.srs.rolling(self.wins).cov()) - - def time_rolling_quantile_0_l(self): - (self.sr.rolling(self.winl).quantile(0.0)) - - def time_rolling_quantile_1_l(self): - (self.sr.rolling(self.winl).quantile(1.0)) - - def time_rolling_quantile_median_l(self): - (self.sr.rolling(self.winl).quantile(0.5)) - - def time_rolling_median_l(self): - (self.sr.rolling(self.winl).median()) - - def time_rolling_mean_l(self): - (self.sr.rolling(self.winl).mean()) - - def time_rolling_max_l(self): - (self.sr.rolling(self.winl).max()) - - def time_rolling_min_l(self): - (self.sr.rolling(self.winl).min()) - - def time_rolling_std_l(self): - (self.sr.rolling(self.wins).std()) - - def time_rolling_count_l(self): - (self.sr.rolling(self.wins).count()) - - def time_rolling_skew_l(self): - (self.sr.rolling(self.wins).skew()) - - def time_rolling_kurt_l(self): - (self.sr.rolling(self.wins).kurt()) - - def time_rolling_sum_l(self): - (self.sr.rolling(self.wins).sum()) + def time_quantile(self, contructor, window, dtype, percentile): + self.roll.quantile(percentile)
The benchmark was simplified by parameterizing over the various statistical methods and constructors (Series and DataFrame). I made a separate class for `Quantile` since various quantiles were benchmarked. I figured benchmarking 10^4 points and 10^5 points is a little redundant, so I am just benchmarking 10^5 here.
https://api.github.com/repos/pandas-dev/pandas/pulls/18725
2017-12-11T04:16:36Z
2017-12-13T14:42:14Z
2017-12-13T14:42:14Z
2017-12-13T18:00:16Z
CLN: Drop the skip_footer parameter in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index 54e7a11c5f2b1..370d9a96ee9ae 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -198,10 +198,6 @@ skiprows : list-like or integer, default ``None`` skipfooter : int, default ``0`` Number of lines at bottom of file to skip (unsupported with engine='c'). -skip_footer : int, default ``0`` - .. deprecated:: 0.19.0 - - Use the ``skipfooter`` parameter instead, as they are identical nrows : int, default ``None`` Number of rows of file to read. Useful for reading pieces of large files. diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 2ea44722d343d..c2da0c420f643 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -219,6 +219,7 @@ Removal of prior version deprecations/changes - The ``freq`` and ``how`` parameters have been removed from the ``rolling``/``expanding``/``ewm`` methods of DataFrame and Series (deprecated since v0.18). Instead, resample before calling the methods. (:issue:18601 & :issue:18668) - ``DatetimeIndex.to_datetime``, ``Timestamp.to_datetime``, ``PeriodIndex.to_datetime``, and ``Index.to_datetime`` have been removed (:issue:`8254`, :issue:`14096`, :issue:`14113`) +- :func:`read_csv` has dropped the ``skip_footer`` parameter (:issue:`13386`) .. _whatsnew_0220.performance: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a04d77de08950..927edbf236366 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -148,9 +148,6 @@ An example of a valid callable argument would be ``lambda x: x in [0, 2]``. skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with engine='c') -skip_footer : int, default 0 - .. deprecated:: 0.19.0 - Use the `skipfooter` parameter instead, as they are identical nrows : int, default None Number of rows of file to read. 
Useful for reading pieces of large files na_values : scalar, str, list-like, or dict, default None @@ -613,7 +610,6 @@ def parser_f(filepath_or_buffer, warn_bad_lines=True, skipfooter=0, - skip_footer=0, # deprecated # Internal doublequote=True, @@ -641,13 +637,6 @@ def parser_f(filepath_or_buffer, engine = 'c' engine_specified = False - if skip_footer != 0: - warnings.warn("The 'skip_footer' argument has " - "been deprecated and will be removed " - "in a future version. Please use the " - "'skipfooter' argument instead.", - FutureWarning, stacklevel=2) - kwds = dict(delimiter=delimiter, engine=engine, dialect=dialect, @@ -682,7 +671,7 @@ def parser_f(filepath_or_buffer, nrows=nrows, iterator=iterator, chunksize=chunksize, - skipfooter=skipfooter or skip_footer, + skipfooter=skipfooter, converters=converters, dtype=dtype, usecols=usecols, diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 189a113bb6abb..ab5d8a7595c96 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -137,16 +137,11 @@ class TestDeprecatedFeatures(object): {"use_unsigned": True}, {"use_unsigned": False}, {"tupleize_cols": True}, - {"tupleize_cols": False}, - {"skip_footer": 1}]) + {"tupleize_cols": False}]) def test_deprecated_args(self, engine, kwargs): data = "1,2,3" arg, _ = list(kwargs.items())[0] - if engine == "c" and arg == "skip_footer": - # unsupported --> exception is raised - return - if engine == "python" and arg == "buffer_lines": # unsupported --> exception is raised return
Deprecated back in 0.19.0. xref #13386.
https://api.github.com/repos/pandas-dev/pandas/pulls/18724
2017-12-11T04:15:56Z
2017-12-11T21:40:57Z
2017-12-11T21:40:57Z
2017-12-12T05:03:03Z
DEPR/CLN: Remove pd.rolling_*, pd.expanding* and pd.ewm*
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 06afa440aa26c..a64542fa71705 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -209,19 +209,12 @@ Window Functions .. currentmodule:: pandas.core.window -.. warning:: - - Prior to version 0.18.0, ``pd.rolling_*``, ``pd.expanding_*``, and ``pd.ewm*`` were module level - functions and are now deprecated. These are replaced by using the :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expanding` and :class:`~pandas.core.window.EWM`. objects and a corresponding method call. - - The deprecation warning will show the new syntax, see an example :ref:`here <whatsnew_0180.window_deprecations>`. - -For working with data, a number of windows functions are provided for +For working with data, a number of window functions are provided for computing common *window* or *rolling* statistics. Among these are count, sum, mean, median, correlation, variance, covariance, standard deviation, skewness, and kurtosis. -Starting in version 0.18.1, the ``rolling()`` and ``expanding()`` +The ``rolling()`` and ``expanding()`` functions can be used directly from DataFrameGroupBy objects, see the :ref:`groupby docs <groupby.transform.window_resample>`. diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 592c0788070a1..2bd2bb199bf1f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -361,6 +361,8 @@ Removal of prior version deprecations/changes - The ``labels`` attribute of the ``Categorical`` class has been removed in favor of :attribute:`Categorical.codes` (:issue:`7768`) - The ``flavor`` parameter have been removed from func:`to_sql` method (:issue:`13611`) - The modules `pandas.tools.hashing` and `pandas.util.hashing` have been removed (:issue:`16223`) +- The top-level functions ``pd.rolling_*``, ``pd.expanding_*`` and ``pd.ewm*`` have been removed (Deprecated since v0.18). 
+ Instead, use the DataFrame/Series methods :attr:`~DataFrame.rolling`, :attr:`~DataFrame.expanding` and :attr:`~DataFrame.ewm` (:issue:`18723`) .. _whatsnew_0230.performance: diff --git a/pandas/__init__.py b/pandas/__init__.py index 78501620d780b..97ae73174c09c 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -41,7 +41,6 @@ from pandas.core.api import * from pandas.core.sparse.api import * -from pandas.stats.api import * from pandas.tseries.api import * from pandas.core.computation.api import * from pandas.core.reshape.api import * diff --git a/pandas/stats/__init__.py b/pandas/stats/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/stats/api.py b/pandas/stats/api.py deleted file mode 100644 index 2a11456d4f9e5..0000000000000 --- a/pandas/stats/api.py +++ /dev/null @@ -1,7 +0,0 @@ -""" -Common namespace of statistical functions -""" - -# flake8: noqa - -from pandas.stats.moments import * diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py deleted file mode 100644 index 1cd98feb05ea0..0000000000000 --- a/pandas/stats/moments.py +++ /dev/null @@ -1,855 +0,0 @@ -""" -Provides rolling statistical moments and related descriptive -statistics implemented in Cython -""" -from __future__ import division - -import warnings -import numpy as np -from pandas.core.dtypes.common import is_scalar -from pandas.core.api import DataFrame, Series -from pandas.util._decorators import Substitution, Appender - -__all__ = ['rolling_count', 'rolling_max', 'rolling_min', - 'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov', - 'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt', - 'rolling_quantile', 'rolling_median', 'rolling_apply', - 'rolling_window', - 'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov', - 'expanding_count', 'expanding_max', 'expanding_min', - 'expanding_sum', 'expanding_mean', 'expanding_std', - 'expanding_cov', 'expanding_corr', 'expanding_var', - 'expanding_skew', 
'expanding_kurt', 'expanding_quantile', - 'expanding_median', 'expanding_apply'] - -# ----------------------------------------------------------------------------- -# Docs - -# The order of arguments for the _doc_template is: -# (header, args, kwargs, returns, notes) - -_doc_template = """ -%s - -Parameters ----------- -%s%s -Returns -------- -%s -%s -""" - -_roll_kw = """window : int - Size of the moving window. This is the number of observations used for - calculating the statistic. -min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). -freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. -center : boolean, default False - Set the labels at the center of the window. -how : string, default '%s' - Method for down- or re-sampling -""" - -_roll_notes = r""" -Notes ------ -By default, the result is set to the right edge of the window. This can be -changed to the center of the window by setting ``center=True``. - -The `freq` keyword is used to conform time series data to a specified -frequency by resampling the data. This is done with the default parameters -of :meth:`~pandas.Series.resample` (i.e. using the `mean`). -""" - - -_ewm_kw = r"""com : float, optional - Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0` -span : float, optional - Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1` -halflife : float, optional - Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0` -alpha : float, optional - Specify smoothing factor :math:`\alpha` directly, - :math:`0 < \alpha \leq 1` - - .. 
versionadded:: 0.18.0 - -min_periods : int, default 0 - Minimum number of observations in window required to have a value - (otherwise result is NA). -freq : None or string alias / date offset object, default=None - Frequency to conform to before computing statistic -adjust : boolean, default True - Divide by decaying adjustment factor in beginning periods to account for - imbalance in relative weightings (viewing EWMA as a moving average) -how : string, default 'mean' - Method for down- or re-sampling -ignore_na : boolean, default False - Ignore missing values when calculating weights; - specify True to reproduce pre-0.15.0 behavior -""" - -_ewm_notes = r""" -Notes ------ -Exactly one of center of mass, span, half-life, and alpha must be provided. -Allowed values and relationship between the parameters are specified in the -parameter descriptions above; see the link at the end of this section for -a detailed explanation. - -When adjust is True (default), weighted averages are calculated using weights - (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. - -When adjust is False, weighted averages are calculated recursively as: - weighted_average[0] = arg[0]; - weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. - -When ignore_na is False (default), weights are based on absolute positions. -For example, the weights of x and y used in calculating the final weighted -average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and -(1-alpha)**2 and alpha (if adjust is False). - -When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on -relative positions. For example, the weights of x and y used in calculating -the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is -True), and 1-alpha and alpha (if adjust is False). 
- -More details can be found at -http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows -""" - -_expanding_kw = """min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). -freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the statistic. Specified - as a frequency string or DateOffset object. -""" - - -_type_of_input_retval = "y : type of input argument" - -_flex_retval = """y : type depends on inputs - DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise) - DataFrame / Series -> Computes result for each column - Series / Series -> Series""" - -_pairwise_retval = "y : Panel whose items are df1.index values" - -_unary_arg = "arg : Series, DataFrame\n" - -_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray -arg2 : Series, DataFrame, or ndarray, optional - if not supplied then will default to arg1 and produce pairwise output -""" - -_binary_arg = """arg1 : Series, DataFrame, or ndarray -arg2 : Series, DataFrame, or ndarray -""" - -_pairwise_arg = """df1 : DataFrame -df2 : DataFrame -""" - -_pairwise_kw = """pairwise : bool, default False - If False then only matching columns between arg1 and arg2 will be used and - the output will be a DataFrame. - If True then all pairwise combinations will be calculated and the output - will be a Panel in the case of DataFrame inputs. In the case of missing - elements, only complete pairwise observations will be used. -""" - -_ddof_kw = """ddof : int, default 1 - Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. 
-""" - -_bias_kw = r"""bias : boolean, default False - Use a standard estimation bias correction -""" - - -def ensure_compat(dispatch, name, arg, func_kw=None, *args, **kwargs): - """ - wrapper function to dispatch to the appropriate window functions - wraps/unwraps ndarrays for compat - - can be removed when ndarray support is removed - """ - is_ndarray = isinstance(arg, np.ndarray) - if is_ndarray: - if arg.ndim == 1: - arg = Series(arg) - elif arg.ndim == 2: - arg = DataFrame(arg) - else: - raise AssertionError("cannot support ndim > 2 for ndarray compat") - - warnings.warn("pd.{dispatch}_{name} is deprecated for ndarrays and " - "will be removed " - "in a future version" - .format(dispatch=dispatch, name=name), - FutureWarning, stacklevel=3) - - # get the functional keywords here - if func_kw is None: - func_kw = [] - kwds = {} - for k in func_kw: - value = kwargs.pop(k, None) - if value is not None: - kwds[k] = value - - # TODO: the below is only in place temporary until this module is removed. 
- kwargs.pop('freq', None) # freq removed in 0.23 - # how is a keyword that if not-None should be in kwds - how = kwargs.pop('how', None) - if how is not None: - kwds['how'] = how - - r = getattr(arg, dispatch)(**kwargs) - - if not is_ndarray: - - # give a helpful deprecation message - # with copy-pastable arguments - pargs = ','.join("{a}={b}".format(a=a, b=b) - for a, b in kwargs.items() if b is not None) - aargs = ','.join(args) - if len(aargs): - aargs += ',' - - def f(a, b): - if is_scalar(b): - return "{a}={b}".format(a=a, b=b) - return "{a}=<{b}>".format(a=a, b=type(b).__name__) - aargs = ','.join(f(a, b) for a, b in kwds.items() if b is not None) - warnings.warn("pd.{dispatch}_{name} is deprecated for {klass} " - "and will be removed in a future version, replace with " - "\n\t{klass}.{dispatch}({pargs}).{name}({aargs})" - .format(klass=type(arg).__name__, pargs=pargs, - aargs=aargs, dispatch=dispatch, name=name), - FutureWarning, stacklevel=3) - - result = getattr(r, name)(*args, **kwds) - - if is_ndarray: - result = result.values - return result - - -def rolling_count(arg, window, **kwargs): - """ - Rolling count of number of non-NaN observations inside provided window. - - Parameters - ---------- - arg : DataFrame or numpy ndarray-like - window : int - Size of the moving window. This is the number of observations used for - calculating the statistic. - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. Specified as a frequency string or DateOffset object. - center : boolean, default False - Whether the label should correspond with center of window - how : string, default 'mean' - Method for down- or re-sampling - - Returns - ------- - rolling_count : type of caller - - Notes - ----- - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`). - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. - """ - return ensure_compat('rolling', 'count', arg, window=window, **kwargs) - - -@Substitution("Unbiased moving covariance.", _binary_arg_flex, - _roll_kw % 'None' + _pairwise_kw + _ddof_kw, _flex_retval, - _roll_notes) -@Appender(_doc_template) -def rolling_cov(arg1, arg2=None, window=None, pairwise=None, **kwargs): - if window is None and isinstance(arg2, (int, float)): - window = arg2 - arg2 = arg1 - pairwise = True if pairwise is None else pairwise # only default unset - elif arg2 is None: - arg2 = arg1 - pairwise = True if pairwise is None else pairwise # only default unset - return ensure_compat('rolling', - 'cov', - arg1, - other=arg2, - window=window, - pairwise=pairwise, - func_kw=['other', 'pairwise', 'ddof'], - **kwargs) - - -@Substitution("Moving sample correlation.", _binary_arg_flex, - _roll_kw % 'None' + _pairwise_kw, _flex_retval, _roll_notes) -@Appender(_doc_template) -def rolling_corr(arg1, arg2=None, window=None, pairwise=None, **kwargs): - if window is None and isinstance(arg2, (int, float)): - window = arg2 - arg2 = arg1 - pairwise = True if pairwise is None else pairwise # only default unset - elif arg2 is None: - arg2 = arg1 - pairwise = True if pairwise is None else pairwise # only default unset - return ensure_compat('rolling', - 'corr', - arg1, - other=arg2, - window=window, - pairwise=pairwise, - func_kw=['other', 'pairwise'], - **kwargs) - - -# ----------------------------------------------------------------------------- -# Exponential moving moments - - -@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw, - _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) -def ewma(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, - freq=None, adjust=True, how=None, ignore_na=False): - return ensure_compat('ewm', - 'mean', - arg, 
- com=com, - span=span, - halflife=halflife, - alpha=alpha, - min_periods=min_periods, - freq=freq, - adjust=adjust, - how=how, - ignore_na=ignore_na) - - -@Substitution("Exponentially-weighted moving variance", _unary_arg, - _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) -def ewmvar(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, - bias=False, freq=None, how=None, ignore_na=False, adjust=True): - return ensure_compat('ewm', - 'var', - arg, - com=com, - span=span, - halflife=halflife, - alpha=alpha, - min_periods=min_periods, - freq=freq, - adjust=adjust, - how=how, - ignore_na=ignore_na, - bias=bias, - func_kw=['bias']) - - -@Substitution("Exponentially-weighted moving std", _unary_arg, - _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) -def ewmstd(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, - bias=False, freq=None, how=None, ignore_na=False, adjust=True): - return ensure_compat('ewm', - 'std', - arg, - com=com, - span=span, - halflife=halflife, - alpha=alpha, - min_periods=min_periods, - freq=freq, - adjust=adjust, - how=how, - ignore_na=ignore_na, - bias=bias, - func_kw=['bias']) - - -ewmvol = ewmstd - - -@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex, - _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) -def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None, - min_periods=0, bias=False, freq=None, pairwise=None, how=None, - ignore_na=False, adjust=True): - if arg2 is None: - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - elif isinstance(arg2, (int, float)) and com is None: - com = arg2 - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - - return ensure_compat('ewm', - 'cov', - arg1, - other=arg2, - com=com, - span=span, - halflife=halflife, - alpha=alpha, - min_periods=min_periods, - bias=bias, - freq=freq, - how=how, - 
ignore_na=ignore_na, - adjust=adjust, - pairwise=pairwise, - func_kw=['other', 'pairwise', 'bias']) - - -@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex, - _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) -def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None, - min_periods=0, freq=None, pairwise=None, how=None, ignore_na=False, - adjust=True): - if arg2 is None: - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - elif isinstance(arg2, (int, float)) and com is None: - com = arg2 - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - return ensure_compat('ewm', - 'corr', - arg1, - other=arg2, - com=com, - span=span, - halflife=halflife, - alpha=alpha, - min_periods=min_periods, - freq=freq, - how=how, - ignore_na=ignore_na, - adjust=adjust, - pairwise=pairwise, - func_kw=['other', 'pairwise']) - -# --------------------------------------------------------------------- -# Python interface to Cython functions - - -def _rolling_func(name, desc, how=None, func_kw=None, additional_kw=''): - if how is None: - how_arg_str = 'None' - else: - how_arg_str = "'{how}".format(how=how) - - @Substitution(desc, _unary_arg, _roll_kw % how_arg_str + additional_kw, - _type_of_input_retval, _roll_notes) - @Appender(_doc_template) - def f(arg, window, min_periods=None, freq=None, center=False, - **kwargs): - - return ensure_compat('rolling', - name, - arg, - window=window, - min_periods=min_periods, - freq=freq, - center=center, - func_kw=func_kw, - **kwargs) - return f - - -rolling_max = _rolling_func('max', 'Moving maximum.', how='max') -rolling_min = _rolling_func('min', 'Moving minimum.', how='min') -rolling_sum = _rolling_func('sum', 'Moving sum.') -rolling_mean = _rolling_func('mean', 'Moving mean.') -rolling_median = _rolling_func('median', 'Moving median.', how='median') -rolling_std = _rolling_func('std', 'Moving standard deviation.', - func_kw=['ddof'], - 
additional_kw=_ddof_kw) -rolling_var = _rolling_func('var', 'Moving variance.', - func_kw=['ddof'], - additional_kw=_ddof_kw) -rolling_skew = _rolling_func('skew', 'Unbiased moving skewness.') -rolling_kurt = _rolling_func('kurt', 'Unbiased moving kurtosis.') - - -def rolling_quantile(arg, window, quantile, min_periods=None, freq=None, - center=False): - """Moving quantile. - - Parameters - ---------- - arg : Series, DataFrame - window : int - Size of the moving window. This is the number of observations used for - calculating the statistic. - quantile : float - 0 <= quantile <= 1 - min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. Specified as a frequency string or DateOffset object. - center : boolean, default False - Whether the label should correspond with center of window - - Returns - ------- - y : type of input argument - - Notes - ----- - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. - - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. using the `mean`). - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. - """ - return ensure_compat('rolling', - 'quantile', - arg, - window=window, - freq=freq, - center=center, - min_periods=min_periods, - func_kw=['quantile'], - quantile=quantile) - - -def rolling_apply(arg, window, func, min_periods=None, freq=None, - center=False, args=(), kwargs={}): - """Generic moving function application. 
- - Parameters - ---------- - arg : Series, DataFrame - window : int - Size of the moving window. This is the number of observations used for - calculating the statistic. - func : function - Must produce a single value from an ndarray input - min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. Specified as a frequency string or DateOffset object. - center : boolean, default False - Whether the label should correspond with center of window - args : tuple - Passed on to func - kwargs : dict - Passed on to func - - Returns - ------- - y : type of input argument - - Notes - ----- - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. - - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. using the `mean`). - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. - """ - return ensure_compat('rolling', - 'apply', - arg, - window=window, - freq=freq, - center=center, - min_periods=min_periods, - func_kw=['func', 'args', 'kwargs'], - func=func, - args=args, - kwargs=kwargs) - - -def rolling_window(arg, window=None, win_type=None, min_periods=None, - freq=None, center=False, mean=True, - axis=0, how=None, **kwargs): - """ - Applies a moving window of type ``window_type`` and size ``window`` - on the data. - - Parameters - ---------- - arg : Series, DataFrame - window : int or ndarray - Weighting window specification. 
If the window is an integer, then it is - treated as the window length and win_type is required - win_type : str, default None - Window type (see Notes) - min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. Specified as a frequency string or DateOffset object. - center : boolean, default False - Whether the label should correspond with center of window - mean : boolean, default True - If True computes weighted mean, else weighted sum - axis : {0, 1}, default 0 - how : string, default 'mean' - Method for down- or re-sampling - - Returns - ------- - y : type of input argument - - Notes - ----- - The recognized window types are: - - * ``boxcar`` - * ``triang`` - * ``blackman`` - * ``hamming`` - * ``bartlett`` - * ``parzen`` - * ``bohman`` - * ``blackmanharris`` - * ``nuttall`` - * ``barthann`` - * ``kaiser`` (needs beta) - * ``gaussian`` (needs std) - * ``general_gaussian`` (needs power, width) - * ``slepian`` (needs width). - - By default, the result is set to the right edge of the window. This can be - changed to the center of the window by setting ``center=True``. - - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. using the `mean`). - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
- """ - func = 'mean' if mean else 'sum' - return ensure_compat('rolling', - func, - arg, - window=window, - win_type=win_type, - freq=freq, - center=center, - min_periods=min_periods, - axis=axis, - func_kw=kwargs.keys(), - **kwargs) - - -def _expanding_func(name, desc, func_kw=None, additional_kw=''): - @Substitution(desc, _unary_arg, _expanding_kw + additional_kw, - _type_of_input_retval, "") - @Appender(_doc_template) - def f(arg, min_periods=1, freq=None, **kwargs): - return ensure_compat('expanding', - name, - arg, - min_periods=min_periods, - func_kw=func_kw, - **kwargs) - return f - - -expanding_max = _expanding_func('max', 'Expanding maximum.') -expanding_min = _expanding_func('min', 'Expanding minimum.') -expanding_sum = _expanding_func('sum', 'Expanding sum.') -expanding_mean = _expanding_func('mean', 'Expanding mean.') -expanding_median = _expanding_func('median', 'Expanding median.') - -expanding_std = _expanding_func('std', 'Expanding standard deviation.', - func_kw=['ddof'], - additional_kw=_ddof_kw) -expanding_var = _expanding_func('var', 'Expanding variance.', - func_kw=['ddof'], - additional_kw=_ddof_kw) -expanding_skew = _expanding_func('skew', 'Unbiased expanding skewness.') -expanding_kurt = _expanding_func('kurt', 'Unbiased expanding kurtosis.') - - -def expanding_count(arg, freq=None): - """ - Expanding count of number of non-NaN observations. - - Parameters - ---------- - arg : DataFrame or numpy ndarray-like - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. Specified as a frequency string or DateOffset object. - - Returns - ------- - expanding_count : type of caller - - Notes - ----- - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. using the `mean`). 
- - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. - """ - return ensure_compat('expanding', 'count', arg, freq=freq) - - -def expanding_quantile(arg, quantile, min_periods=1, freq=None): - """Expanding quantile. - - Parameters - ---------- - arg : Series, DataFrame - quantile : float - 0 <= quantile <= 1 - min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. Specified as a frequency string or DateOffset object. - - Returns - ------- - y : type of input argument - - Notes - ----- - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. using the `mean`). - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. 
- """ - return ensure_compat('expanding', - 'quantile', - arg, - freq=freq, - min_periods=min_periods, - func_kw=['quantile'], - quantile=quantile) - - -@Substitution("Unbiased expanding covariance.", _binary_arg_flex, - _expanding_kw + _pairwise_kw + _ddof_kw, _flex_retval, "") -@Appender(_doc_template) -def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, - pairwise=None, ddof=1): - if arg2 is None: - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - elif isinstance(arg2, (int, float)) and min_periods is None: - min_periods = arg2 - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - return ensure_compat('expanding', - 'cov', - arg1, - other=arg2, - min_periods=min_periods, - pairwise=pairwise, - freq=freq, - ddof=ddof, - func_kw=['other', 'pairwise', 'ddof']) - - -@Substitution("Expanding sample correlation.", _binary_arg_flex, - _expanding_kw + _pairwise_kw, _flex_retval, "") -@Appender(_doc_template) -def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None): - if arg2 is None: - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - elif isinstance(arg2, (int, float)) and min_periods is None: - min_periods = arg2 - arg2 = arg1 - pairwise = True if pairwise is None else pairwise - return ensure_compat('expanding', - 'corr', - arg1, - other=arg2, - min_periods=min_periods, - pairwise=pairwise, - freq=freq, - func_kw=['other', 'pairwise', 'ddof']) - - -def expanding_apply(arg, func, min_periods=1, freq=None, - args=(), kwargs={}): - """Generic expanding function application. - - Parameters - ---------- - arg : Series, DataFrame - func : function - Must produce a single value from an ndarray input - min_periods : int, default None - Minimum number of observations in window required to have a value - (otherwise result is NA). - freq : string or DateOffset object, optional (default None) - Frequency to conform the data to before computing the - statistic. 
Specified as a frequency string or DateOffset object. - args : tuple - Passed on to func - kwargs : dict - Passed on to func - - Returns - ------- - y : type of input argument - - Notes - ----- - The `freq` keyword is used to conform time series data to a specified - frequency by resampling the data. This is done with the default parameters - of :meth:`~pandas.Series.resample` (i.e. using the `mean`). - - To learn more about the frequency strings, please see `this link - <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. - """ - return ensure_compat('expanding', - 'apply', - arg, - freq=freq, - min_periods=min_periods, - func_kw=['func', 'args', 'kwargs'], - func=func, - args=args, - kwargs=kwargs) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index c20767b09178c..ea6c250420b13 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -35,8 +35,7 @@ class TestPDApi(Base): 'util', 'options', 'io'] # these are already deprecated; awaiting removal - deprecated_modules = ['stats', 'datetools', 'parser', - 'json', 'lib', 'tslib'] + deprecated_modules = ['datetools', 'parser', 'json', 'lib', 'tslib'] # misc misc = ['IndexSlice', 'NaT'] @@ -91,19 +90,7 @@ class TestPDApi(Base): deprecated_funcs_in_future = [] # these are already deprecated; awaiting removal - deprecated_funcs = ['ewma', 'ewmcorr', 'ewmcov', 'ewmstd', 'ewmvar', - 'ewmvol', 'expanding_apply', 'expanding_corr', - 'expanding_count', 'expanding_cov', 'expanding_kurt', - 'expanding_max', 'expanding_mean', 'expanding_median', - 'expanding_min', 'expanding_quantile', - 'expanding_skew', 'expanding_std', 'expanding_sum', - 'expanding_var', 'rolling_apply', - 'rolling_corr', 'rolling_count', 'rolling_cov', - 'rolling_kurt', 'rolling_max', 'rolling_mean', - 'rolling_median', 'rolling_min', 'rolling_quantile', - 'rolling_skew', 'rolling_std', 'rolling_sum', - 'rolling_var', 'rolling_window', - 'pnow', 'match', 'groupby', 'get_store', + 
deprecated_funcs = ['pnow', 'match', 'groupby', 'get_store', 'plot_params', 'scatter_matrix'] def test_api(self): diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 6f9e872526d0a..22526d14a7168 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -1,6 +1,5 @@ from itertools import product import pytest -import sys import warnings from warnings import catch_warnings @@ -9,16 +8,15 @@ import numpy as np import pandas as pd -from pandas import (Series, DataFrame, bdate_range, isna, - notna, concat, Timestamp, Index) -import pandas.stats.moments as mom +from pandas import (Series, DataFrame, bdate_range, + isna, notna, concat, Timestamp, Index) import pandas.core.window as rwindow import pandas.tseries.offsets as offsets from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall import pandas.util.testing as tm import pandas.util._test_decorators as td -from pandas.compat import range, zip, PY3 +from pandas.compat import range, zip N, K = 100, 10 @@ -610,19 +608,6 @@ def test_numpy_compat(self): getattr(e, func), dtype=np.float64) -class TestDeprecations(Base): - """ test that we are catching deprecation warnings """ - - def setup_method(self, method): - self._create_data() - - def test_deprecations(self): - - with catch_warnings(record=True): - mom.rolling_mean(np.ones(10), 3, center=True, axis=0) - mom.rolling_mean(Series(np.ones(10)), 3, center=True, axis=0) - - # gh-12373 : rolling functions error on float32 data # make sure rolling functions works for different dtypes # @@ -863,72 +848,55 @@ def test_centered_axis_validation(self): .rolling(window=3, center=True, axis=2).mean()) def test_rolling_sum(self): - self._check_moment_func(mom.rolling_sum, np.nansum, name='sum', + self._check_moment_func(np.nansum, name='sum', zero_min_periods_equal=False) def test_rolling_count(self): counter = lambda x: np.isfinite(x).astype(float).sum() - self._check_moment_func(mom.rolling_count, 
counter, name='count', - has_min_periods=False, preserve_nan=False, + self._check_moment_func(counter, name='count', has_min_periods=False, fill_value=0) def test_rolling_mean(self): - self._check_moment_func(mom.rolling_mean, np.mean, name='mean') + self._check_moment_func(np.mean, name='mean') @td.skip_if_no_scipy def test_cmov_mean(self): # GH 8238 vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) - xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818, - 12.952, np.nan, np.nan]) - - with catch_warnings(record=True): - rs = mom.rolling_mean(vals, 5, center=True) - tm.assert_almost_equal(xp, rs) - - xp = Series(rs) - rs = Series(vals).rolling(5, center=True).mean() - tm.assert_series_equal(xp, rs) + result = Series(vals).rolling(5, center=True).mean() + expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, + 12.818, 12.952, np.nan, np.nan]) + tm.assert_series_equal(expected, result) @td.skip_if_no_scipy def test_cmov_window(self): # GH 8238 vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) - xp = np.array([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, 12.818, - 12.952, np.nan, np.nan]) - - with catch_warnings(record=True): - rs = mom.rolling_window(vals, 5, 'boxcar', center=True) - tm.assert_almost_equal(xp, rs) - - xp = Series(rs) - rs = Series(vals).rolling(5, win_type='boxcar', center=True).mean() - tm.assert_series_equal(xp, rs) + result = Series(vals).rolling(5, win_type='boxcar', center=True).mean() + expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516, + 12.818, 12.952, np.nan, np.nan]) + tm.assert_series_equal(expected, result) @td.skip_if_no_scipy def test_cmov_window_corner(self): # GH 8238 # all nan - vals = np.empty(10, dtype=float) - vals.fill(np.nan) - with catch_warnings(record=True): - rs = mom.rolling_window(vals, 5, 'boxcar', center=True) - assert np.isnan(rs).all() + vals = pd.Series([np.nan] * 10) + result = vals.rolling(5, center=True, 
win_type='boxcar').mean() + assert np.isnan(result).all() # empty - vals = np.array([]) - with catch_warnings(record=True): - rs = mom.rolling_window(vals, 5, 'boxcar', center=True) - assert len(rs) == 0 + vals = pd.Series([]) + result = vals.rolling(5, center=True, win_type='boxcar').mean() + assert len(result) == 0 # shorter than window - vals = np.random.randn(5) - with catch_warnings(record=True): - rs = mom.rolling_window(vals, 10, 'boxcar') - assert np.isnan(rs).all() - assert len(rs) == 5 + vals = pd.Series(np.random.randn(5)) + result = vals.rolling(10, win_type='boxcar').mean() + assert np.isnan(result).all() + assert len(result) == 5 @td.skip_if_no_scipy def test_cmov_window_frame(self): @@ -1097,38 +1065,31 @@ def test_cmov_window_special_linear_range(self): tm.assert_series_equal(xp, rs) def test_rolling_median(self): - with catch_warnings(record=True): - self._check_moment_func(mom.rolling_median, np.median, - name='median') + self._check_moment_func(np.median, name='median') def test_rolling_min(self): + self._check_moment_func(np.min, name='min') - with catch_warnings(record=True): - self._check_moment_func(mom.rolling_min, np.min, name='min') - - with catch_warnings(record=True): - a = np.array([1, 2, 3, 4, 5]) - b = mom.rolling_min(a, window=100, min_periods=1) - tm.assert_almost_equal(b, np.ones(len(a))) + a = pd.Series([1, 2, 3, 4, 5]) + result = a.rolling(window=100, min_periods=1).min() + expected = pd.Series(np.ones(len(a))) + tm.assert_series_equal(result, expected) - pytest.raises(ValueError, mom.rolling_min, np.array([1, 2, 3]), - window=3, min_periods=5) + with pytest.raises(ValueError): + pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min() def test_rolling_max(self): + self._check_moment_func(np.max, name='max') - with catch_warnings(record=True): - self._check_moment_func(mom.rolling_max, np.max, name='max') + a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64) + b = a.rolling(window=100, min_periods=1).max() + 
tm.assert_almost_equal(a, b) - with catch_warnings(record=True): - a = np.array([1, 2, 3, 4, 5], dtype=np.float64) - b = mom.rolling_max(a, window=100, min_periods=1) - tm.assert_almost_equal(a, b) - - pytest.raises(ValueError, mom.rolling_max, np.array([1, 2, 3]), - window=3, min_periods=5) + with pytest.raises(ValueError): + pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max() - def test_rolling_quantile(self): - qs = [0.0, .1, .5, .9, 1.0] + @pytest.mark.parametrize('q', [0.0, .1, .5, .9, 1.0]) + def test_rolling_quantile(self, q): def scoreatpercentile(a, per): values = np.sort(a, axis=0) @@ -1147,18 +1108,11 @@ def scoreatpercentile(a, per): return retval - for q in qs: - - def f(x, window, quantile, min_periods=None, freq=None, - center=False): - return mom.rolling_quantile(x, window, quantile, - min_periods=min_periods, freq=freq, - center=center) + def quantile_func(x): + return scoreatpercentile(x, q) - def alt(x): - return scoreatpercentile(x, q) - - self._check_moment_func(f, alt, name='quantile', quantile=q) + self._check_moment_func(quantile_func, name='quantile', + quantile=q) def test_rolling_quantile_np_percentile(self): # #9413: Tests that rolling window's quantile default behavior @@ -1207,15 +1161,10 @@ def test_rolling_apply(self): tm.assert_series_equal(ser, ser.rolling(10).apply(lambda x: x.mean())) - f = lambda x: x[np.isfinite(x)].mean() - - def roll_mean(x, window, min_periods=None, freq=None, center=False, - **kwargs): - return mom.rolling_apply(x, window, func=f, - min_periods=min_periods, freq=freq, - center=center) + def f(x): + return x[np.isfinite(x)].mean() - self._check_moment_func(roll_mean, np.mean, name='apply', func=f) + self._check_moment_func(np.mean, name='apply', func=f) # GH 8080 s = Series([None, None, None]) @@ -1228,39 +1177,34 @@ def roll_mean(x, window, min_periods=None, freq=None, center=False, def test_rolling_apply_out_of_bounds(self): # #1850 - arr = np.arange(4) + vals = pd.Series([1, 2, 3, 4]) - # it 
works! - with catch_warnings(record=True): - result = mom.rolling_apply(arr, 10, np.sum) - assert isna(result).all() + result = vals.rolling(10).apply(np.sum) + assert result.isna().all() - with catch_warnings(record=True): - result = mom.rolling_apply(arr, 10, np.sum, min_periods=1) - tm.assert_almost_equal(result, result) + result = vals.rolling(10, min_periods=1).apply(np.sum) + expected = pd.Series([1, 3, 6, 10], dtype=float) + tm.assert_almost_equal(result, expected) def test_rolling_std(self): - self._check_moment_func(mom.rolling_std, lambda x: np.std(x, ddof=1), + self._check_moment_func(lambda x: np.std(x, ddof=1), name='std') - self._check_moment_func(mom.rolling_std, lambda x: np.std(x, ddof=0), + self._check_moment_func(lambda x: np.std(x, ddof=0), name='std', ddof=0) def test_rolling_std_1obs(self): - with catch_warnings(record=True): - result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]), - 1, min_periods=1) - expected = np.array([np.nan] * 5) - tm.assert_almost_equal(result, expected) + vals = pd.Series([1., 2., 3., 4., 5.]) - with catch_warnings(record=True): - result = mom.rolling_std(np.array([1., 2., 3., 4., 5.]), - 1, min_periods=1, ddof=0) - expected = np.zeros(5) - tm.assert_almost_equal(result, expected) + result = vals.rolling(1, min_periods=1).std() + expected = pd.Series([np.nan] * 5) + tm.assert_series_equal(result, expected) - with catch_warnings(record=True): - result = mom.rolling_std(np.array([np.nan, np.nan, 3., 4., 5.]), - 3, min_periods=2) + result = vals.rolling(1, min_periods=1).std(ddof=0) + expected = pd.Series([0.] * 5) + tm.assert_series_equal(result, expected) + + result = (pd.Series([np.nan, np.nan, 3, 4, 5]) + .rolling(3, min_periods=2).std()) assert np.isnan(result[2]) def test_rolling_std_neg_sqrt(self): @@ -1268,208 +1212,53 @@ def test_rolling_std_neg_sqrt(self): # Test move_nanstd for neg sqrt. 
- a = np.array([0.0011448196318903589, 0.00028718669878572767, - 0.00028718669878572767, 0.00028718669878572767, - 0.00028718669878572767]) - with catch_warnings(record=True): - b = mom.rolling_std(a, window=3) + a = pd.Series([0.0011448196318903589, 0.00028718669878572767, + 0.00028718669878572767, 0.00028718669878572767, + 0.00028718669878572767]) + b = a.rolling(window=3).std() assert np.isfinite(b[2:]).all() - with catch_warnings(record=True): - b = mom.ewmstd(a, span=3) + b = a.ewm(span=3).std() assert np.isfinite(b[2:]).all() def test_rolling_var(self): - self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=1), - test_stable=True, name='var') - self._check_moment_func(mom.rolling_var, lambda x: np.var(x, ddof=0), + self._check_moment_func(lambda x: np.var(x, ddof=1), + name='var') + self._check_moment_func(lambda x: np.var(x, ddof=0), name='var', ddof=0) @td.skip_if_no_scipy def test_rolling_skew(self): from scipy.stats import skew - self._check_moment_func(mom.rolling_skew, - lambda x: skew(x, bias=False), name='skew') + self._check_moment_func(lambda x: skew(x, bias=False), name='skew') @td.skip_if_no_scipy def test_rolling_kurt(self): from scipy.stats import kurtosis - self._check_moment_func(mom.rolling_kurt, - lambda x: kurtosis(x, bias=False), name='kurt') - - def test_fperr_robustness(self): - # TODO: remove this once python 2.5 out of picture - if PY3: - pytest.skip("doesn't work on python 3") - - # #2114 - data = 
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? 
\x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>' # noqa - - arr = np.frombuffer(data, dtype='<f8') - if sys.byteorder != "little": - arr = arr.byteswap().newbyteorder() - - with catch_warnings(record=True): - result = mom.rolling_sum(arr, 2) - assert (result[1:] >= 0).all() - - with catch_warnings(record=True): - result = mom.rolling_mean(arr, 2) - assert (result[1:] >= 0).all() - - with catch_warnings(record=True): - result = mom.rolling_var(arr, 2) - assert (result[1:] >= 0).all() + self._check_moment_func(lambda x: kurtosis(x, bias=False), + name='kurt') - # #2527, ugh - arr = np.array([0.00012456, 0.0003, 0]) - with catch_warnings(record=True): - result = mom.rolling_mean(arr, 1) - assert result[-1] >= 0 - - with catch_warnings(record=True): - result = mom.rolling_mean(-arr, 1) - assert result[-1] <= 0 - - def _check_moment_func(self, f, static_comp, name=None, window=50, - has_min_periods=True, has_center=True, - has_time_rule=True, preserve_nan=True, - fill_value=None, test_stable=False, - zero_min_periods_equal=True, + def _check_moment_func(self, static_comp, name, has_min_periods=True, + has_center=True, has_time_rule=True, + fill_value=None, zero_min_periods_equal=True, **kwargs): - with warnings.catch_warnings(record=True): - self._check_ndarray(f, static_comp, window=window, - has_min_periods=has_min_periods, - preserve_nan=preserve_nan, - has_center=has_center, fill_value=fill_value, - test_stable=test_stable, - zero_min_periods_equal=zero_min_periods_equal, - **kwargs) - - with warnings.catch_warnings(record=True): - self._check_structures(f, static_comp, - has_min_periods=has_min_periods, - has_time_rule=has_time_rule, - fill_value=fill_value, - has_center=has_center, **kwargs) - - # new API - if name is not None: - self._check_structures(f, static_comp, name=name, - has_min_periods=has_min_periods, - 
has_time_rule=has_time_rule, - fill_value=fill_value, - has_center=has_center, **kwargs) - - def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True, - preserve_nan=True, has_center=True, fill_value=None, - test_stable=False, test_window=True, - zero_min_periods_equal=True, **kwargs): - def get_result(arr, window, min_periods=None, center=False): - return f(arr, window, min_periods=min_periods, center=center, ** - kwargs) - - result = get_result(self.arr, window) - tm.assert_almost_equal(result[-1], static_comp(self.arr[-50:])) - - if preserve_nan: - assert (np.isnan(result[self._nan_locs]).all()) - - # excluding NaNs correctly - arr = randn(50) - arr[:10] = np.NaN - arr[-10:] = np.NaN - - if has_min_periods: - result = get_result(arr, 50, min_periods=30) - tm.assert_almost_equal(result[-1], static_comp(arr[10:-10])) - - # min_periods is working correctly - result = get_result(arr, 20, min_periods=15) - assert np.isnan(result[23]) - assert not np.isnan(result[24]) - - assert not np.isnan(result[-6]) - assert np.isnan(result[-5]) - - arr2 = randn(20) - result = get_result(arr2, 10, min_periods=5) - assert isna(result[3]) - assert notna(result[4]) - - if zero_min_periods_equal: - # min_periods=0 may be equivalent to min_periods=1 - result0 = get_result(arr, 20, min_periods=0) - result1 = get_result(arr, 20, min_periods=1) - tm.assert_almost_equal(result0, result1) - else: - result = get_result(arr, 50) - tm.assert_almost_equal(result[-1], static_comp(arr[10:-10])) - - # GH 7925 - if has_center: - if has_min_periods: - result = get_result(arr, 20, min_periods=15, center=True) - expected = get_result( - np.concatenate((arr, np.array([np.NaN] * 9))), 20, - min_periods=15)[9:] - else: - result = get_result(arr, 20, center=True) - expected = get_result( - np.concatenate((arr, np.array([np.NaN] * 9))), 20)[9:] - - tm.assert_numpy_array_equal(result, expected) - - if test_stable: - result = get_result(self.arr + 1e9, window) - 
tm.assert_almost_equal(result[-1], - static_comp(self.arr[-50:] + 1e9)) - - # Test window larger than array, #7297 - if test_window: - if has_min_periods: - for minp in (0, len(self.arr) - 1, len(self.arr)): - result = get_result(self.arr, len(self.arr) + 1, - min_periods=minp) - expected = get_result(self.arr, len(self.arr), - min_periods=minp) - nan_mask = np.isnan(result) - tm.assert_numpy_array_equal(nan_mask, np.isnan(expected)) - - nan_mask = ~nan_mask - tm.assert_almost_equal(result[nan_mask], - expected[nan_mask]) - else: - result = get_result(self.arr, len(self.arr) + 1) - expected = get_result(self.arr, len(self.arr)) - nan_mask = np.isnan(result) - tm.assert_numpy_array_equal(nan_mask, np.isnan(expected)) - - nan_mask = ~nan_mask - tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) - - def _check_structures(self, f, static_comp, name=None, - has_min_periods=True, has_time_rule=True, - has_center=True, fill_value=None, **kwargs): def get_result(obj, window, min_periods=None, center=False): - - # check via the API calls if name is provided - if name is not None: - r = obj.rolling(window=window, min_periods=min_periods, - center=center) - return getattr(r, name)(**kwargs) - - # check via the moments API - with catch_warnings(record=True): - return f(obj, window=window, min_periods=min_periods, - center=center, **kwargs) + r = obj.rolling(window=window, min_periods=min_periods, + center=center) + return getattr(r, name)(**kwargs) series_result = get_result(self.series, window=50) - frame_result = get_result(self.frame, window=50) - assert isinstance(series_result, Series) - assert type(frame_result) == DataFrame + tm.assert_almost_equal(series_result.iloc[-1], + static_comp(self.series[-50:])) + + frame_result = get_result(self.frame, window=50) + assert isinstance(frame_result, DataFrame) + tm.assert_series_equal(frame_result.iloc[-1, :], + self.frame.iloc[-50:, :].apply(static_comp, + axis=0), + check_names=False) # check time_rule works if 
has_time_rule: @@ -1500,8 +1289,72 @@ def get_result(obj, window, min_periods=None, center=False): trunc_frame.apply(static_comp), check_names=False) - # GH 7925 + # excluding NaNs correctly + obj = Series(randn(50)) + obj[:10] = np.NaN + obj[-10:] = np.NaN + if has_min_periods: + result = get_result(obj, 50, min_periods=30) + tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10])) + + # min_periods is working correctly + result = get_result(obj, 20, min_periods=15) + assert isna(result.iloc[23]) + assert not isna(result.iloc[24]) + + assert not isna(result.iloc[-6]) + assert isna(result.iloc[-5]) + + obj2 = Series(randn(20)) + result = get_result(obj2, 10, min_periods=5) + assert isna(result.iloc[3]) + assert notna(result.iloc[4]) + + if zero_min_periods_equal: + # min_periods=0 may be equivalent to min_periods=1 + result0 = get_result(obj, 20, min_periods=0) + result1 = get_result(obj, 20, min_periods=1) + tm.assert_almost_equal(result0, result1) + else: + result = get_result(obj, 50) + tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10])) + + # window larger than series length (#7297) + if has_min_periods: + for minp in (0, len(self.series) - 1, len(self.series)): + result = get_result(self.series, len(self.series) + 1, + min_periods=minp) + expected = get_result(self.series, len(self.series), + min_periods=minp) + nan_mask = isna(result) + tm.assert_series_equal(nan_mask, isna(expected)) + + nan_mask = ~nan_mask + tm.assert_almost_equal(result[nan_mask], + expected[nan_mask]) + else: + result = get_result(self.series, len(self.series) + 1) + expected = get_result(self.series, len(self.series)) + nan_mask = isna(result) + tm.assert_series_equal(nan_mask, isna(expected)) + + nan_mask = ~nan_mask + tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) + + # check center=True if has_center: + if has_min_periods: + result = get_result(obj, 20, min_periods=15, center=True) + expected = get_result( + pd.concat([obj, Series([np.NaN] * 9)]), 
20, + min_periods=15)[9:].reset_index(drop=True) + else: + result = get_result(obj, 20, center=True) + expected = get_result( + pd.concat([obj, Series([np.NaN] * 9)]), + 20)[9:].reset_index(drop=True) + + tm.assert_series_equal(result, expected) # shifter index s = ['x%d' % x for x in range(12)] @@ -1541,12 +1394,11 @@ def get_result(obj, window, min_periods=None, center=False): tm.assert_frame_equal(frame_xp, frame_rs) def test_ewma(self): - self._check_ew(mom.ewma, name='mean') + self._check_ew(name='mean') - arr = np.zeros(1000) - arr[5] = 1 - with catch_warnings(record=True): - result = mom.ewma(arr, span=100, adjust=False).sum() + vals = pd.Series(np.zeros(1000)) + vals[5] = 1 + result = vals.ewm(span=100, adjust=False).mean().sum() assert np.abs(result - 1) < 1e-2 s = Series([1.0, 2.0, 4.0, 8.0]) @@ -1626,55 +1478,34 @@ def simple_wma(s, w): tm.assert_series_equal(result, expected) def test_ewmvar(self): - self._check_ew(mom.ewmvar, name='var') + self._check_ew(name='var') def test_ewmvol(self): - self._check_ew(mom.ewmvol, name='vol') + self._check_ew(name='vol') def test_ewma_span_com_args(self): - with catch_warnings(record=True): - A = mom.ewma(self.arr, com=9.5) - B = mom.ewma(self.arr, span=20) - tm.assert_almost_equal(A, B) + A = self.series.ewm(com=9.5).mean() + B = self.series.ewm(span=20).mean() + tm.assert_almost_equal(A, B) - pytest.raises(ValueError, mom.ewma, self.arr, com=9.5, span=20) - pytest.raises(ValueError, mom.ewma, self.arr) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, span=20) + with pytest.raises(ValueError): + self.series.ewm().mean() def test_ewma_halflife_arg(self): - with catch_warnings(record=True): - A = mom.ewma(self.arr, com=13.932726172912965) - B = mom.ewma(self.arr, halflife=10.0) - tm.assert_almost_equal(A, B) - - pytest.raises(ValueError, mom.ewma, self.arr, span=20, - halflife=50) - pytest.raises(ValueError, mom.ewma, self.arr, com=9.5, - halflife=50) - pytest.raises(ValueError, mom.ewma, self.arr, 
com=9.5, span=20, - halflife=50) - pytest.raises(ValueError, mom.ewma, self.arr) - - def test_ewma_alpha_old_api(self): - # GH 10789 - with catch_warnings(record=True): - a = mom.ewma(self.arr, alpha=0.61722699889169674) - b = mom.ewma(self.arr, com=0.62014947789973052) - c = mom.ewma(self.arr, span=2.240298955799461) - d = mom.ewma(self.arr, halflife=0.721792864318) - tm.assert_numpy_array_equal(a, b) - tm.assert_numpy_array_equal(a, c) - tm.assert_numpy_array_equal(a, d) - - def test_ewma_alpha_arg_old_api(self): - # GH 10789 - with catch_warnings(record=True): - pytest.raises(ValueError, mom.ewma, self.arr) - pytest.raises(ValueError, mom.ewma, self.arr, - com=10.0, alpha=0.5) - pytest.raises(ValueError, mom.ewma, self.arr, - span=10.0, alpha=0.5) - pytest.raises(ValueError, mom.ewma, self.arr, - halflife=10.0, alpha=0.5) + A = self.series.ewm(com=13.932726172912965).mean() + B = self.series.ewm(halflife=10.0).mean() + tm.assert_almost_equal(A, B) + + with pytest.raises(ValueError): + self.series.ewm(span=20, halflife=50) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, halflife=50) + with pytest.raises(ValueError): + self.series.ewm(com=9.5, span=20, halflife=50) + with pytest.raises(ValueError): + self.series.ewm() def test_ewm_alpha(self): # GH 10789 @@ -1689,11 +1520,15 @@ def test_ewm_alpha(self): def test_ewm_alpha_arg(self): # GH 10789 - s = Series(self.arr) - pytest.raises(ValueError, s.ewm) - pytest.raises(ValueError, s.ewm, com=10.0, alpha=0.5) - pytest.raises(ValueError, s.ewm, span=10.0, alpha=0.5) - pytest.raises(ValueError, s.ewm, halflife=10.0, alpha=0.5) + s = self.series + with pytest.raises(ValueError): + s.ewm() + with pytest.raises(ValueError): + s.ewm(com=10.0, alpha=0.5) + with pytest.raises(ValueError): + s.ewm(span=10.0, alpha=0.5) + with pytest.raises(ValueError): + s.ewm(halflife=10.0, alpha=0.5) def test_ewm_domain_checks(self): # GH 12492 @@ -1719,24 +1554,25 @@ def test_ewm_domain_checks(self): s.ewm(alpha=1.0) 
pytest.raises(ValueError, s.ewm, alpha=1.1) - def test_ew_empty_arrays(self): - arr = np.array([], dtype=np.float64) + def test_ew_empty_series(self): + vals = pd.Series([], dtype=np.float64) - funcs = [mom.ewma, mom.ewmvol, mom.ewmvar] + ewm = vals.ewm(3) + funcs = ['mean', 'vol', 'var'] for f in funcs: - with catch_warnings(record=True): - result = f(arr, 3) - tm.assert_almost_equal(result, arr) + result = getattr(ewm, f)() + tm.assert_almost_equal(result, vals) - def _check_ew(self, func, name=None): - with catch_warnings(record=True): - self._check_ew_ndarray(func, name=name) - self._check_ew_structures(func, name=name) + def _check_ew(self, name=None, preserve_nan=False): + series_result = getattr(self.series.ewm(com=10), name)() + assert isinstance(series_result, Series) + + frame_result = getattr(self.frame.ewm(com=10), name)() + assert type(frame_result) == DataFrame - def _check_ew_ndarray(self, func, preserve_nan=False, name=None): - result = func(self.arr, com=10) + result = getattr(self.series.ewm(com=10), name)() if preserve_nan: - assert (np.isnan(result[self._nan_locs]).all()) + assert result[self._nan_locs].isna().all() # excluding NaNs correctly arr = randn(50) @@ -1746,45 +1582,40 @@ def _check_ew_ndarray(self, func, preserve_nan=False, name=None): # check min_periods # GH 7898 - result = func(s, 50, min_periods=2) - assert np.isnan(result.values[:11]).all() - assert not np.isnan(result.values[11:]).any() + result = getattr(s.ewm(com=50, min_periods=2), name)() + assert result[:11].isna().all() + assert not result[11:].isna().any() for min_periods in (0, 1): - result = func(s, 50, min_periods=min_periods) - if func == mom.ewma: - assert np.isnan(result.values[:10]).all() - assert not np.isnan(result.values[10:]).any() + result = getattr(s.ewm(com=50, min_periods=min_periods), name)() + if name == 'mean': + assert result[:10].isna().all() + assert not result[10:].isna().any() else: - # ewmstd, ewmvol, ewmvar (with bias=False) require at least two - 
# values - assert np.isnan(result.values[:11]).all() - assert not np.isnan(result.values[11:]).any() + # ewm.std, ewm.vol, ewm.var (with bias=False) require at least + # two values + assert result[:11].isna().all() + assert not result[11:].isna().any() # check series of length 0 - result = func(Series([]), 50, min_periods=min_periods) - tm.assert_series_equal(result, Series([])) + result = getattr(Series().ewm(com=50, min_periods=min_periods), + name)() + tm.assert_series_equal(result, Series()) # check series of length 1 - result = func(Series([1.]), 50, min_periods=min_periods) - if func == mom.ewma: + result = getattr(Series([1.]).ewm(50, min_periods=min_periods), + name)() + if name == 'mean': tm.assert_series_equal(result, Series([1.])) else: - # ewmstd, ewmvol, ewmvar with bias=False require at least two - # values + # ewm.std, ewm.vol, ewm.var with bias=False require at least + # two values tm.assert_series_equal(result, Series([np.NaN])) # pass in ints - result2 = func(np.arange(50), span=10) + result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() assert result2.dtype == np.float_ - def _check_ew_structures(self, func, name): - series_result = getattr(self.series.ewm(com=10), name)() - assert isinstance(series_result, Series) - - frame_result = getattr(self.frame.ewm(com=10), name)() - assert type(frame_result) == DataFrame - class TestPairwise(object): @@ -2021,9 +1852,6 @@ class TestMomentsConsistency(Base): # lambda v: Series(v).skew(), 3, 'skew'), # (lambda v: Series(v).kurt(), 4, 'kurt'), - # (lambda x, min_periods: mom.expanding_quantile(x, 0.3, - # min_periods=min_periods, 'quantile'), - # restore once GH 8084 is fixed # lambda v: Series(v).quantile(0.3), None, 'quantile'), @@ -2585,22 +2413,6 @@ def func(A, B, com, **kwargs): pytest.raises(Exception, func, A, randn(50), 20, min_periods=5) - def test_expanding_apply(self): - ser = Series([]) - tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean())) - - def expanding_mean(x, 
min_periods=1): - return mom.expanding_apply(x, lambda x: x.mean(), - min_periods=min_periods) - - self._check_expanding(expanding_mean, np.mean) - - # GH 8080 - s = Series([None, None, None]) - result = s.expanding(min_periods=0).apply(lambda x: len(x)) - expected = Series([1., 2., 3.]) - tm.assert_series_equal(result, expected) - def test_expanding_apply_args_kwargs(self): def mean_w_arg(x, const): return np.mean(x) + const @@ -2648,9 +2460,6 @@ def test_expanding_cov(self): tm.assert_almost_equal(rolling_result, result) - def test_expanding_max(self): - self._check_expanding(mom.expanding_max, np.max, preserve_nan=False) - def test_expanding_cov_pairwise(self): result = self.frame.expanding().corr() @@ -2980,55 +2789,73 @@ def test_rolling_kurt_eq_value_fperr(self): a = Series([1.1] * 15).rolling(window=10).kurt() assert np.isnan(a).all() - def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True, - has_time_rule=True, preserve_nan=True): - result = func(self.arr) + @pytest.mark.parametrize('func,static_comp', [('sum', np.sum), + ('mean', np.mean), + ('max', np.max), + ('min', np.min)], + ids=['sum', 'mean', 'max', 'min']) + def test_expanding_func(self, func, static_comp): + def expanding_func(x, min_periods=1, center=False, axis=0): + exp = x.expanding(min_periods=min_periods, + center=center, axis=axis) + return getattr(exp, func)() + self._check_expanding(expanding_func, static_comp, preserve_nan=False) + + def test_expanding_apply(self): + + def expanding_mean(x, min_periods=1): + exp = x.expanding(min_periods=min_periods) + return exp.apply(lambda x: x.mean()) + + self._check_expanding(expanding_mean, np.mean) + + ser = Series([]) + tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean())) - tm.assert_almost_equal(result[10], static_comp(self.arr[:11])) + # GH 8080 + s = Series([None, None, None]) + result = s.expanding(min_periods=0).apply(lambda x: len(x)) + expected = Series([1., 2., 3.]) + tm.assert_series_equal(result, 
expected) + + def _check_expanding(self, func, static_comp, has_min_periods=True, + has_time_rule=True, preserve_nan=True): + + series_result = func(self.series) + assert isinstance(series_result, Series) + frame_result = func(self.frame) + assert isinstance(frame_result, DataFrame) + + result = func(self.series) + tm.assert_almost_equal(result[10], static_comp(self.series[:11])) if preserve_nan: - assert (np.isnan(result[self._nan_locs]).all()) + assert result.iloc[self._nan_locs].isna().all() - arr = randn(50) + ser = Series(randn(50)) if has_min_periods: - result = func(arr, min_periods=30) - assert (np.isnan(result[:29]).all()) - tm.assert_almost_equal(result[-1], static_comp(arr[:50])) + result = func(ser, min_periods=30) + assert result[:29].isna().all() + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) # min_periods is working correctly - result = func(arr, min_periods=15) - assert np.isnan(result[13]) - assert not np.isnan(result[14]) + result = func(ser, min_periods=15) + assert isna(result.iloc[13]) + assert notna(result.iloc[14]) - arr2 = randn(20) - result = func(arr2, min_periods=5) + ser2 = Series(randn(20)) + result = func(ser2, min_periods=5) assert isna(result[3]) assert notna(result[4]) # min_periods=0 - result0 = func(arr, min_periods=0) - result1 = func(arr, min_periods=1) + result0 = func(ser, min_periods=0) + result1 = func(ser, min_periods=1) tm.assert_almost_equal(result0, result1) else: - result = func(arr) - tm.assert_almost_equal(result[-1], static_comp(arr[:50])) - - def _check_expanding_structures(self, func): - series_result = func(self.series) - assert isinstance(series_result, Series) - frame_result = func(self.frame) - assert type(frame_result) == DataFrame - - def _check_expanding(self, func, static_comp, has_min_periods=True, - has_time_rule=True, preserve_nan=True): - with warnings.catch_warnings(record=True): - self._check_expanding_ndarray(func, static_comp, - has_min_periods=has_min_periods, - 
has_time_rule=has_time_rule, - preserve_nan=preserve_nan) - with warnings.catch_warnings(record=True): - self._check_expanding_structures(func) + result = func(ser) + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) def test_rolling_max_gh6297(self): """Replicate result expected in GH #6297"""
- [x] xref #18601, #18668 and #11603 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR removes ``pd.rolling_*``,`` pd.expanding_*`` ``pd.ewm*`` and the related ``pd.stats`` subpackage from the code base. I have some doubts about some stuff: * there are modules ``doc.plots.stats.*`` that I'm not sure of if they should be changed or removed. Currently they're not touched. * In ``asv_bench.benchmarks.gil.py`` there are calls to ``pd.rolling_*`` etc. I've changed this to be method-based. * ``pandas.tests.test_windows.py`` I felt was a bit complex to clean up. I would appreciate inputs.
https://api.github.com/repos/pandas-dev/pandas/pulls/18723
2017-12-11T00:55:43Z
2018-02-01T09:12:01Z
2018-02-01T09:12:01Z
2018-02-01T11:33:09Z
STYLE: linting issue, xref #17628
diff --git a/ci/lint.sh b/ci/lint.sh index bec82602fa509..13caccdcb1fea 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -97,7 +97,7 @@ if [ "$LINT" ]; then # # Check the following functions: # any(), all(), sum(), max(), min(), list(), dict(), set(), frozenset(), tuple(), str.join() - grep -R --include="*.py*" -E "[^_](any|all|sum|max|min|list|dict|set|frozenset|tuple|join)\(\[.* for .* in .*\]\)" * + grep -R --include="*.py*" -E "[^_](any|all|sum|max|min|list|dict|set|frozenset|tuple|join)\(\[.* for .* in .*\]\)" pandas if [ $? = "0" ]; then RET=1 diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 46edc0b96b7c2..d5551c6c9f297 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -406,7 +406,7 @@ def melt_stub(df, stub, i, j, value_vars, sep): return newdf.set_index(i + [j]) - if any([col in stubnames for col in df.columns]): + if any(col in stubnames for col in df.columns): raise ValueError("stubname can't be identical to a column name") if not is_list_like(stubnames):
https://api.github.com/repos/pandas-dev/pandas/pulls/18722
2017-12-11T00:40:42Z
2017-12-11T00:41:00Z
2017-12-11T00:41:00Z
2017-12-11T00:41:00Z
TST/CLN: parametrize coercion tests
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 752d2deb53304..619a8ca3bf112 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +import itertools import pytest import numpy as np @@ -13,6 +14,27 @@ ############################################################### +@pytest.fixture(autouse=True, scope='class') +def check_comprehensiveness(request): + # Iterate over combination of dtype, method and klass + # and ensure that each are contained within a collected test + cls = request.cls + combos = itertools.product(cls.klasses, cls.dtypes, [cls.method]) + + def has_test(combo): + klass, dtype, method = combo + cls_funcs = request.node.session.items + return any(klass in x.name and dtype in x.name and + method in x.name for x in cls_funcs) + + for combo in combos: + if not has_test(combo): + msg = 'test method is not defined: {0}, {1}' + raise AssertionError(msg.format(type(cls), combo)) + + yield + + class CoercionBase(object): klasses = ['index', 'series'] @@ -34,15 +56,6 @@ def _assert(self, left, right, dtype): assert left.dtype == dtype assert right.dtype == dtype - def test_has_comprehensive_tests(self): - for klass in self.klasses: - for dtype in self.dtypes: - method_name = 'test_{0}_{1}_{2}'.format(self.method, - klass, dtype) - if not hasattr(self, method_name): - msg = 'test method is not defined: {0}, {1}' - raise AssertionError(msg.format(type(self), method_name)) - class TestSetitemCoercion(CoercionBase): @@ -62,169 +75,124 @@ def _assert_setitem_series_conversion(self, original_series, loc_value, # temp.loc[1] = loc_value # tm.assert_series_equal(temp, expected_series) - def test_setitem_series_object(self): + @pytest.mark.parametrize("val,exp_dtype", [ + (1, np.object), + (1.1, np.object), + (1 + 1j, np.object), + (True, np.object)]) + def test_setitem_series_object(self, val, exp_dtype): obj = pd.Series(list('abcd')) 
assert obj.dtype == np.object - # object + int -> object - exp = pd.Series(['a', 1, 'c', 'd']) - self._assert_setitem_series_conversion(obj, 1, exp, np.object) - - # object + float -> object - exp = pd.Series(['a', 1.1, 'c', 'd']) - self._assert_setitem_series_conversion(obj, 1.1, exp, np.object) - - # object + complex -> object - exp = pd.Series(['a', 1 + 1j, 'c', 'd']) - self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object) - - # object + bool -> object - exp = pd.Series(['a', True, 'c', 'd']) - self._assert_setitem_series_conversion(obj, True, exp, np.object) + exp = pd.Series(['a', val, 'c', 'd']) + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) - def test_setitem_series_int64(self): + @pytest.mark.parametrize("val,exp_dtype", [ + (1, np.int64), + (1.1, np.float64), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_setitem_series_int64(self, val, exp_dtype): obj = pd.Series([1, 2, 3, 4]) assert obj.dtype == np.int64 - # int + int -> int - exp = pd.Series([1, 1, 3, 4]) - self._assert_setitem_series_conversion(obj, 1, exp, np.int64) + if exp_dtype is np.float64: + exp = pd.Series([1, 1, 3, 4]) + self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64) + pytest.xfail("GH12747 The result must be float") - # int + float -> float - # TODO_GH12747 The result must be float - # tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4])) - # assert temp.dtype == np.float64 - exp = pd.Series([1, 1, 3, 4]) - self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64) + exp = pd.Series([1, val, 3, 4]) + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) - # int + complex -> complex - exp = pd.Series([1, 1 + 1j, 3, 4]) - self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128) - - # int + bool -> object - exp = pd.Series([1, True, 3, 4]) - self._assert_setitem_series_conversion(obj, True, exp, np.object) - - def test_setitem_series_int8(self): - # integer dtype coercion (no change) + 
@pytest.mark.parametrize("val,exp_dtype", [ + (np.int32(1), np.int8), + (np.int16(2**9), np.int16)]) + def test_setitem_series_int8(self, val, exp_dtype): obj = pd.Series([1, 2, 3, 4], dtype=np.int8) assert obj.dtype == np.int8 - exp = pd.Series([1, 1, 3, 4], dtype=np.int8) - self._assert_setitem_series_conversion(obj, np.int32(1), exp, np.int8) + if exp_dtype is np.int16: + exp = pd.Series([1, 0, 3, 4], dtype=np.int8) + self._assert_setitem_series_conversion(obj, val, exp, np.int8) + pytest.xfail("BUG: it must be Series([1, 1, 3, 4], dtype=np.int16") - # BUG: it must be Series([1, 1, 3, 4], dtype=np.int16) - exp = pd.Series([1, 0, 3, 4], dtype=np.int8) - self._assert_setitem_series_conversion(obj, np.int16(2**9), exp, - np.int8) + exp = pd.Series([1, val, 3, 4], dtype=np.int8) + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) - def test_setitem_series_float64(self): + @pytest.mark.parametrize("val,exp_dtype", [ + (1, np.float64), + (1.1, np.float64), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_setitem_series_float64(self, val, exp_dtype): obj = pd.Series([1.1, 2.2, 3.3, 4.4]) assert obj.dtype == np.float64 - # float + int -> float - exp = pd.Series([1.1, 1.0, 3.3, 4.4]) - self._assert_setitem_series_conversion(obj, 1, exp, np.float64) - - # float + float -> float - exp = pd.Series([1.1, 1.1, 3.3, 4.4]) - self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64) - - # float + complex -> complex - exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4]) - self._assert_setitem_series_conversion(obj, 1 + 1j, exp, - np.complex128) - - # float + bool -> object - exp = pd.Series([1.1, True, 3.3, 4.4]) - self._assert_setitem_series_conversion(obj, True, exp, np.object) + exp = pd.Series([1.1, val, 3.3, 4.4]) + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) - def test_setitem_series_complex128(self): + @pytest.mark.parametrize("val,exp_dtype", [ + (1, np.complex128), + (1.1, np.complex128), + (1 + 1j, np.complex128), + (True, 
np.object)]) + def test_setitem_series_complex128(self, val, exp_dtype): obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j]) assert obj.dtype == np.complex128 - # complex + int -> complex - exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]) - self._assert_setitem_series_conversion(obj, 1, exp, np.complex128) + exp = pd.Series([1 + 1j, val, 3 + 3j, 4 + 4j]) + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) - # complex + float -> complex - exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]) - self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128) - - # complex + complex -> complex - exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]) - self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128) - - # complex + bool -> object - exp = pd.Series([1 + 1j, True, 3 + 3j, 4 + 4j]) - self._assert_setitem_series_conversion(obj, True, exp, np.object) - - def test_setitem_series_bool(self): + @pytest.mark.parametrize("val,exp_dtype", [ + (1, np.int64), + (3, np.int64), + (1.1, np.float64), + (1 + 1j, np.complex128), + (True, np.bool)]) + def test_setitem_series_bool(self, val, exp_dtype): obj = pd.Series([True, False, True, False]) assert obj.dtype == np.bool - # bool + int -> int - # TODO_GH12747 The result must be int - # tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0])) - # assert temp.dtype == np.int64 - exp = pd.Series([True, True, True, False]) - self._assert_setitem_series_conversion(obj, 1, exp, np.bool) - - # TODO_GH12747 The result must be int - # assigning int greater than bool - # tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0])) - # assert temp.dtype == np.int64 - exp = pd.Series([True, True, True, False]) - self._assert_setitem_series_conversion(obj, 3, exp, np.bool) - - # bool + float -> float - # TODO_GH12747 The result must be float - # tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.])) - # assert temp.dtype == np.float64 - exp = pd.Series([True, True, True, False]) - self._assert_setitem_series_conversion(obj, 1.1, 
exp, np.bool) - - # bool + complex -> complex (buggy, results in bool) - # TODO_GH12747 The result must be complex - # tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0])) - # assert temp.dtype == np.complex128 - exp = pd.Series([True, True, True, False]) - self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool) - - # bool + bool -> bool - exp = pd.Series([True, True, True, False]) - self._assert_setitem_series_conversion(obj, True, exp, np.bool) - - def test_setitem_series_datetime64(self): + if exp_dtype is np.int64: + exp = pd.Series([True, True, True, False]) + self._assert_setitem_series_conversion(obj, val, exp, np.bool) + pytest.xfail("TODO_GH12747 The result must be int") + elif exp_dtype is np.float64: + exp = pd.Series([True, True, True, False]) + self._assert_setitem_series_conversion(obj, val, exp, np.bool) + pytest.xfail("TODO_GH12747 The result must be float") + elif exp_dtype is np.complex128: + exp = pd.Series([True, True, True, False]) + self._assert_setitem_series_conversion(obj, val, exp, np.bool) + pytest.xfail("TODO_GH12747 The result must be complex") + + exp = pd.Series([True, val, True, False]) + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) + + @pytest.mark.parametrize("val,exp_dtype", [ + (pd.Timestamp('2012-01-01'), 'datetime64[ns]'), + (1, np.object), + ('x', np.object)]) + def test_setitem_series_datetime64(self, val, exp_dtype): obj = pd.Series([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-03'), pd.Timestamp('2011-01-04')]) assert obj.dtype == 'datetime64[ns]' - # datetime64 + datetime64 -> datetime64 - exp = pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-01'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'), - exp, 'datetime64[ns]') - - # datetime64 + int -> object - exp = pd.Series([pd.Timestamp('2011-01-01'), - 1, - pd.Timestamp('2011-01-03'), - 
pd.Timestamp('2011-01-04')]) - self._assert_setitem_series_conversion(obj, 1, exp, 'object') - - # datetime64 + object -> object exp = pd.Series([pd.Timestamp('2011-01-01'), - 'x', + val, pd.Timestamp('2011-01-03'), pd.Timestamp('2011-01-04')]) - self._assert_setitem_series_conversion(obj, 'x', exp, np.object) - - def test_setitem_series_datetime64tz(self): + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) + + @pytest.mark.parametrize("val,exp_dtype", [ + (pd.Timestamp('2012-01-01', tz='US/Eastern'), + 'datetime64[ns, US/Eastern]'), + (pd.Timestamp('2012-01-01', tz='US/Pacific'), np.object), + (pd.Timestamp('2012-01-01'), np.object), + (1, np.object)]) + def test_setitem_series_datetime64tz(self, val, exp_dtype): tz = 'US/Eastern' obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz), pd.Timestamp('2011-01-02', tz=tz), @@ -232,71 +200,28 @@ def test_setitem_series_datetime64tz(self): pd.Timestamp('2011-01-04', tz=tz)]) assert obj.dtype == 'datetime64[ns, US/Eastern]' - # datetime64tz + datetime64tz -> datetime64tz - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01', tz=tz), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01', tz=tz) - self._assert_setitem_series_conversion(obj, value, exp, - 'datetime64[ns, US/Eastern]') - - # datetime64tz + datetime64tz (different tz) -> object - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01', tz='US/Pacific'), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01', tz='US/Pacific') - self._assert_setitem_series_conversion(obj, value, exp, np.object) - - # datetime64tz + datetime64 -> object - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01'), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01') - self._assert_setitem_series_conversion(obj, value, exp, 
np.object) - - # datetime64 + int -> object exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - 1, + val, pd.Timestamp('2011-01-03', tz=tz), pd.Timestamp('2011-01-04', tz=tz)]) - self._assert_setitem_series_conversion(obj, 1, exp, np.object) - - # ToDo: add more tests once the above issue has been fixed + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) - def test_setitem_series_timedelta64(self): + @pytest.mark.parametrize("val,exp_dtype", [ + (pd.Timedelta('12 day'), 'timedelta64[ns]'), + (1, np.object), + ('x', np.object)]) + def test_setitem_series_timedelta64(self, val, exp_dtype): obj = pd.Series([pd.Timedelta('1 day'), pd.Timedelta('2 day'), pd.Timedelta('3 day'), pd.Timedelta('4 day')]) assert obj.dtype == 'timedelta64[ns]' - # timedelta64 + timedelta64 -> timedelta64 - exp = pd.Series([pd.Timedelta('1 day'), - pd.Timedelta('12 day'), - pd.Timedelta('3 day'), - pd.Timedelta('4 day')]) - self._assert_setitem_series_conversion(obj, pd.Timedelta('12 day'), - exp, 'timedelta64[ns]') - - # timedelta64 + int -> object - exp = pd.Series([pd.Timedelta('1 day'), - 1, - pd.Timedelta('3 day'), - pd.Timedelta('4 day')]) - self._assert_setitem_series_conversion(obj, 1, exp, np.object) - - # timedelta64 + object -> object exp = pd.Series([pd.Timedelta('1 day'), - 'x', + val, pd.Timedelta('3 day'), pd.Timedelta('4 day')]) - self._assert_setitem_series_conversion(obj, 'x', exp, np.object) - - def test_setitem_series_period(self): - pass + self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) def _assert_setitem_index_conversion(self, original_series, loc_key, expected_index, expected_dtype): @@ -315,58 +240,54 @@ def _assert_setitem_index_conversion(self, original_series, loc_key, # check dtype explicitly for sure assert temp.index.dtype == expected_dtype - def test_setitem_index_object(self): + @pytest.mark.parametrize("val,exp_dtype", [ + ('x', np.object), + (5, IndexError), + (1.1, np.object)]) + def test_setitem_index_object(self, val, 
exp_dtype): obj = pd.Series([1, 2, 3, 4], index=list('abcd')) assert obj.index.dtype == np.object - # object + object -> object - exp_index = pd.Index(list('abcdx')) - self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object) - - # object + int -> IndexError, regarded as location - temp = obj.copy() - with pytest.raises(IndexError): - temp[5] = 5 - - # object + float -> object - exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1]) - self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object) - - def test_setitem_index_int64(self): - # tests setitem with non-existing numeric key + if exp_dtype is IndexError: + temp = obj.copy() + with pytest.raises(exp_dtype): + temp[5] = 5 + else: + exp_index = pd.Index(list('abcd') + [val]) + self._assert_setitem_index_conversion(obj, val, exp_index, + exp_dtype) + + @pytest.mark.parametrize("val,exp_dtype", [ + (5, np.int64), + (1.1, np.float64), + ('x', np.object)]) + def test_setitem_index_int64(self, val, exp_dtype): obj = pd.Series([1, 2, 3, 4]) assert obj.index.dtype == np.int64 - # int + int -> int - exp_index = pd.Index([0, 1, 2, 3, 5]) - self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64) - - # int + float -> float - exp_index = pd.Index([0, 1, 2, 3, 1.1]) - self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64) - - # int + object -> object - exp_index = pd.Index([0, 1, 2, 3, 'x']) - self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object) + exp_index = pd.Index([0, 1, 2, 3, val]) + self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype) - def test_setitem_index_float64(self): - # tests setitem with non-existing numeric key + @pytest.mark.parametrize("val,exp_dtype", [ + (5, IndexError), + (5.1, np.float64), + ('x', np.object)]) + def test_setitem_index_float64(self, val, exp_dtype): obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1]) assert obj.index.dtype == np.float64 - # float + int -> int - temp = obj.copy() - # TODO_GH12747 The result 
must be float - with pytest.raises(IndexError): - temp[5] = 5 + if exp_dtype is IndexError: + # float + int -> int + temp = obj.copy() + with pytest.raises(exp_dtype): + temp[5] = 5 + pytest.xfail("TODO_GH12747 The result must be float") - # float + float -> float - exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1]) - self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64) + exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val]) + self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype) - # float + object -> object - exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x']) - self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object) + def test_setitem_series_period(self): + pass def test_setitem_index_complex128(self): pass @@ -400,121 +321,70 @@ def _assert_insert_conversion(self, original, value, tm.assert_index_equal(res, expected) assert res.dtype == expected_dtype - def test_insert_index_object(self): + @pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [ + (1, 1, np.object), + (1.1, 1.1, np.object), + (False, False, np.object), + ('x', 'x', np.object)]) + def test_insert_index_object(self, insert, coerced_val, coerced_dtype): obj = pd.Index(list('abcd')) assert obj.dtype == np.object - # object + int -> object - exp = pd.Index(['a', 1, 'b', 'c', 'd']) - self._assert_insert_conversion(obj, 1, exp, np.object) - - # object + float -> object - exp = pd.Index(['a', 1.1, 'b', 'c', 'd']) - self._assert_insert_conversion(obj, 1.1, exp, np.object) - - # object + bool -> object - res = obj.insert(1, False) - tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd'])) - assert res.dtype == np.object - - # object + object -> object - exp = pd.Index(['a', 'x', 'b', 'c', 'd']) - self._assert_insert_conversion(obj, 'x', exp, np.object) + exp = pd.Index(['a', coerced_val, 'b', 'c', 'd']) + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) - def test_insert_index_int64(self): + @pytest.mark.parametrize("insert, coerced_val, 
coerced_dtype", [ + (1, 1, np.int64), + (1.1, 1.1, np.float64), + (False, 0, np.int64), + ('x', 'x', np.object)]) + def test_insert_index_int64(self, insert, coerced_val, coerced_dtype): obj = pd.Int64Index([1, 2, 3, 4]) assert obj.dtype == np.int64 - # int + int -> int - exp = pd.Index([1, 1, 2, 3, 4]) - self._assert_insert_conversion(obj, 1, exp, np.int64) + exp = pd.Index([1, coerced_val, 2, 3, 4]) + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) - # int + float -> float - exp = pd.Index([1, 1.1, 2, 3, 4]) - self._assert_insert_conversion(obj, 1.1, exp, np.float64) - - # int + bool -> int - exp = pd.Index([1, 0, 2, 3, 4]) - self._assert_insert_conversion(obj, False, exp, np.int64) - - # int + object -> object - exp = pd.Index([1, 'x', 2, 3, 4]) - self._assert_insert_conversion(obj, 'x', exp, np.object) - - def test_insert_index_float64(self): + @pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [ + (1, 1., np.float64), + (1.1, 1.1, np.float64), + (False, 0., np.float64), + ('x', 'x', np.object)]) + def test_insert_index_float64(self, insert, coerced_val, coerced_dtype): obj = pd.Float64Index([1., 2., 3., 4.]) assert obj.dtype == np.float64 - # float + int -> int - exp = pd.Index([1., 1., 2., 3., 4.]) - self._assert_insert_conversion(obj, 1, exp, np.float64) - - # float + float -> float - exp = pd.Index([1., 1.1, 2., 3., 4.]) - self._assert_insert_conversion(obj, 1.1, exp, np.float64) - - # float + bool -> float - exp = pd.Index([1., 0., 2., 3., 4.]) - self._assert_insert_conversion(obj, False, exp, np.float64) + exp = pd.Index([1., coerced_val, 2., 3., 4.]) + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) - # float + object -> object - exp = pd.Index([1., 'x', 2., 3., 4.]) - self._assert_insert_conversion(obj, 'x', exp, np.object) - - def test_insert_index_complex128(self): - pass - - def test_insert_index_bool(self): - pass - - def test_insert_index_datetime64(self): + @pytest.mark.parametrize('fill_val,exp_dtype', 
[ + (pd.Timestamp('2012-01-01'), 'datetime64[ns]'), + (pd.Timestamp('2012-01-01', tz='US/Eastern'), + 'datetime64[ns, US/Eastern]')], + ids=['datetime64', 'datetime64tz']) + def test_insert_index_datetimes(self, fill_val, exp_dtype): obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', - '2011-01-04']) - assert obj.dtype == 'datetime64[ns]' + '2011-01-04'], tz=fill_val.tz) + assert obj.dtype == exp_dtype - # datetime64 + datetime64 => datetime64 - exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02', - '2011-01-03', '2011-01-04']) - self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'), - exp, 'datetime64[ns]') + exp = pd.DatetimeIndex(['2011-01-01', fill_val.date(), '2011-01-02', + '2011-01-03', '2011-01-04'], tz=fill_val.tz) + self._assert_insert_conversion(obj, fill_val, exp, exp_dtype) - # ToDo: must coerce to object msg = "Passed item and index have different timezone" - with tm.assert_raises_regex(ValueError, msg): - obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern')) - - # ToDo: must coerce to object - msg = "cannot insert DatetimeIndex with incompatible label" - with tm.assert_raises_regex(TypeError, msg): - obj.insert(1, 1) - - def test_insert_index_datetime64tz(self): - obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', - '2011-01-04'], tz='US/Eastern') - assert obj.dtype == 'datetime64[ns, US/Eastern]' - - # datetime64tz + datetime64tz => datetime64 - exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02', - '2011-01-03', '2011-01-04'], tz='US/Eastern') - val = pd.Timestamp('2012-01-01', tz='US/Eastern') - self._assert_insert_conversion(obj, val, exp, - 'datetime64[ns, US/Eastern]') + if fill_val.tz: + with tm.assert_raises_regex(ValueError, msg): + obj.insert(1, pd.Timestamp('2012-01-01')) - # ToDo: must coerce to object - msg = "Passed item and index have different timezone" - with tm.assert_raises_regex(ValueError, msg): - obj.insert(1, pd.Timestamp('2012-01-01')) - - # ToDo: must 
coerce to object - msg = "Passed item and index have different timezone" with tm.assert_raises_regex(ValueError, msg): obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo')) - # ToDo: must coerce to object msg = "cannot insert DatetimeIndex with incompatible label" with tm.assert_raises_regex(TypeError, msg): obj.insert(1, 1) + pytest.xfail("ToDo: must coerce to object") + def test_insert_index_timedelta64(self): obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day']) assert obj.dtype == 'timedelta64[ns]' @@ -534,41 +404,33 @@ def test_insert_index_timedelta64(self): with tm.assert_raises_regex(TypeError, msg): obj.insert(1, 1) - def test_insert_index_period(self): + @pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [ + (pd.Period('2012-01', freq='M'), '2012-01', 'period[M]'), + (pd.Timestamp('2012-01-01'), pd.Timestamp('2012-01-01'), np.object), + (1, 1, np.object), + ('x', 'x', np.object)]) + def test_insert_index_period(self, insert, coerced_val, coerced_dtype): obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'], freq='M') assert obj.dtype == 'period[M]' - # period + period => period - exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02', - '2011-03', '2011-04'], freq='M') - self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'), - exp, 'period[M]') - - # period + datetime64 => object - exp = pd.Index([pd.Period('2011-01', freq='M'), - pd.Timestamp('2012-01-01'), - pd.Period('2011-02', freq='M'), - pd.Period('2011-03', freq='M'), - pd.Period('2011-04', freq='M')], freq='M') - self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'), - exp, np.object) - - # period + int => object - exp = pd.Index([pd.Period('2011-01', freq='M'), - 1, - pd.Period('2011-02', freq='M'), - pd.Period('2011-03', freq='M'), - pd.Period('2011-04', freq='M')], freq='M') - self._assert_insert_conversion(obj, 1, exp, np.object) - - # period + object => object - exp = pd.Index([pd.Period('2011-01', freq='M'), - 'x', - 
pd.Period('2011-02', freq='M'), - pd.Period('2011-03', freq='M'), - pd.Period('2011-04', freq='M')], freq='M') - self._assert_insert_conversion(obj, 'x', exp, np.object) + if isinstance(insert, pd.Period): + index_type = pd.PeriodIndex + else: + index_type = pd.Index + + exp = index_type([pd.Period('2011-01', freq='M'), + coerced_val, + pd.Period('2011-02', freq='M'), + pd.Period('2011-03', freq='M'), + pd.Period('2011-04', freq='M')], freq='M') + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) + + def test_insert_index_complex128(self): + pass + + def test_insert_index_bool(self): + pass class TestWhereCoercion(CoercionBase): @@ -582,233 +444,128 @@ def _assert_where_conversion(self, original, cond, values, res = target.where(cond, values) self._assert(res, expected, expected_dtype) - def _where_object_common(self, klass): + @pytest.mark.parametrize("klass", [pd.Series, pd.Index], + ids=['series', 'index']) + @pytest.mark.parametrize("fill_val,exp_dtype", [ + (1, np.object), + (1.1, np.object), + (1 + 1j, np.object), + (True, np.object)]) + def test_where_object(self, klass, fill_val, exp_dtype): obj = klass(list('abcd')) assert obj.dtype == np.object cond = klass([True, False, True, False]) - # object + int -> object - exp = klass(['a', 1, 'c', 1]) - self._assert_where_conversion(obj, cond, 1, exp, np.object) - - values = klass([5, 6, 7, 8]) - exp = klass(['a', 6, 'c', 8]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - # object + float -> object - exp = klass(['a', 1.1, 'c', 1.1]) - self._assert_where_conversion(obj, cond, 1.1, exp, np.object) - - values = klass([5.5, 6.6, 7.7, 8.8]) - exp = klass(['a', 6.6, 'c', 8.8]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - # object + complex -> object - exp = klass(['a', 1 + 1j, 'c', 1 + 1j]) - self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object) - - values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]) - exp = klass(['a', 6 + 6j, 'c', 8 + 8j]) - 
self._assert_where_conversion(obj, cond, values, exp, np.object) + if fill_val is True and klass is pd.Series: + ret_val = 1 + else: + ret_val = fill_val - if klass is pd.Series: - exp = klass(['a', 1, 'c', 1]) - self._assert_where_conversion(obj, cond, True, exp, np.object) + exp = klass(['a', ret_val, 'c', ret_val]) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) + if fill_val is True: values = klass([True, False, True, True]) - exp = klass(['a', 0, 'c', 1]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - elif klass is pd.Index: - # object + bool -> object - exp = klass(['a', True, 'c', True]) - self._assert_where_conversion(obj, cond, True, exp, np.object) - - values = klass([True, False, True, True]) - exp = klass(['a', False, 'c', True]) - self._assert_where_conversion(obj, cond, values, exp, np.object) else: - NotImplementedError - - def test_where_series_object(self): - self._where_object_common(pd.Series) - - def test_where_index_object(self): - self._where_object_common(pd.Index) - - def _where_int64_common(self, klass): + values = klass(fill_val * x for x in [5, 6, 7, 8]) + + exp = klass(['a', values[1], 'c', values[3]]) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + + @pytest.mark.parametrize("klass", [pd.Series, pd.Index], + ids=['series', 'index']) + @pytest.mark.parametrize("fill_val,exp_dtype", [ + (1, np.int64), + (1.1, np.float64), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_where_int64(self, klass, fill_val, exp_dtype): + if klass is pd.Index and exp_dtype is np.complex128: + pytest.skip("Complex Index not supported") obj = klass([1, 2, 3, 4]) assert obj.dtype == np.int64 cond = klass([True, False, True, False]) - # int + int -> int - exp = klass([1, 1, 3, 1]) - self._assert_where_conversion(obj, cond, 1, exp, np.int64) + exp = klass([1, fill_val, 3, fill_val]) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) - values = klass([5, 6, 7, 8]) - exp = 
klass([1, 6, 3, 8]) - self._assert_where_conversion(obj, cond, values, exp, np.int64) - - # int + float -> float - exp = klass([1, 1.1, 3, 1.1]) - self._assert_where_conversion(obj, cond, 1.1, exp, np.float64) - - values = klass([5.5, 6.6, 7.7, 8.8]) - exp = klass([1, 6.6, 3, 8.8]) - self._assert_where_conversion(obj, cond, values, exp, np.float64) - - # int + complex -> complex - if klass is pd.Series: - exp = klass([1, 1 + 1j, 3, 1 + 1j]) - self._assert_where_conversion(obj, cond, 1 + 1j, exp, - np.complex128) - - values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]) - exp = klass([1, 6 + 6j, 3, 8 + 8j]) - self._assert_where_conversion(obj, cond, values, exp, - np.complex128) - - # int + bool -> object - exp = klass([1, True, 3, True]) - self._assert_where_conversion(obj, cond, True, exp, np.object) - - values = klass([True, False, True, True]) - exp = klass([1, False, 3, True]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - def test_where_series_int64(self): - self._where_int64_common(pd.Series) - - def test_where_index_int64(self): - self._where_int64_common(pd.Index) - - def _where_float64_common(self, klass): + if fill_val is True: + values = klass([True, False, True, True]) + else: + values = klass(x * fill_val for x in [5, 6, 7, 8]) + exp = klass([1, values[1], 3, values[3]]) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + + @pytest.mark.parametrize("klass", [pd.Series, pd.Index], + ids=['series', 'index']) + @pytest.mark.parametrize("fill_val, exp_dtype", [ + (1, np.float64), + (1.1, np.float64), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_where_float64(self, klass, fill_val, exp_dtype): + if klass is pd.Index and exp_dtype is np.complex128: + pytest.skip("Complex Index not supported") obj = klass([1.1, 2.2, 3.3, 4.4]) assert obj.dtype == np.float64 cond = klass([True, False, True, False]) - # float + int -> float - exp = klass([1.1, 1.0, 3.3, 1.0]) - self._assert_where_conversion(obj, cond, 1, exp, 
np.float64) - - values = klass([5, 6, 7, 8]) - exp = klass([1.1, 6.0, 3.3, 8.0]) - self._assert_where_conversion(obj, cond, values, exp, np.float64) - - # float + float -> float - exp = klass([1.1, 1.1, 3.3, 1.1]) - self._assert_where_conversion(obj, cond, 1.1, exp, np.float64) - - values = klass([5.5, 6.6, 7.7, 8.8]) - exp = klass([1.1, 6.6, 3.3, 8.8]) - self._assert_where_conversion(obj, cond, values, exp, np.float64) - - # float + complex -> complex - if klass is pd.Series: - exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j]) - self._assert_where_conversion(obj, cond, 1 + 1j, exp, - np.complex128) - - values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]) - exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j]) - self._assert_where_conversion(obj, cond, values, exp, - np.complex128) - - # float + bool -> object - exp = klass([1.1, True, 3.3, True]) - self._assert_where_conversion(obj, cond, True, exp, np.object) + exp = klass([1.1, fill_val, 3.3, fill_val]) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) - values = klass([True, False, True, True]) - exp = klass([1.1, False, 3.3, True]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - def test_where_series_float64(self): - self._where_float64_common(pd.Series) - - def test_where_index_float64(self): - self._where_float64_common(pd.Index) - - def test_where_series_complex128(self): + if fill_val is True: + values = klass([True, False, True, True]) + else: + values = klass(x * fill_val for x in [5, 6, 7, 8]) + exp = klass([1.1, values[1], 3.3, values[3]]) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + + @pytest.mark.parametrize("fill_val,exp_dtype", [ + (1, np.complex128), + (1.1, np.complex128), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_where_series_complex128(self, fill_val, exp_dtype): obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j]) assert obj.dtype == np.complex128 cond = pd.Series([True, False, True, False]) - # complex + int -> complex - exp = pd.Series([1 
+ 1j, 1, 3 + 3j, 1]) - self._assert_where_conversion(obj, cond, 1, exp, np.complex128) - - values = pd.Series([5, 6, 7, 8]) - exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0]) - self._assert_where_conversion(obj, cond, values, exp, np.complex128) - - # complex + float -> complex - exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1]) - self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128) - - values = pd.Series([5.5, 6.6, 7.7, 8.8]) - exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8]) - self._assert_where_conversion(obj, cond, values, exp, np.complex128) - - # complex + complex -> complex - exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j]) - self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128) + exp = pd.Series([1 + 1j, fill_val, 3 + 3j, fill_val]) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) - values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]) - exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j]) - self._assert_where_conversion(obj, cond, values, exp, np.complex128) - - # complex + bool -> object - exp = pd.Series([1 + 1j, True, 3 + 3j, True]) - self._assert_where_conversion(obj, cond, True, exp, np.object) - - values = pd.Series([True, False, True, True]) - exp = pd.Series([1 + 1j, False, 3 + 3j, True]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - def test_where_index_complex128(self): - pass + if fill_val is True: + values = pd.Series([True, False, True, True]) + else: + values = pd.Series(x * fill_val for x in [5, 6, 7, 8]) + exp = pd.Series([1 + 1j, values[1], 3 + 3j, values[3]]) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) - def test_where_series_bool(self): + @pytest.mark.parametrize("fill_val,exp_dtype", [ + (1, np.object), + (1.1, np.object), + (1 + 1j, np.object), + (True, np.bool)]) + def test_where_series_bool(self, fill_val, exp_dtype): obj = pd.Series([True, False, True, False]) assert obj.dtype == np.bool cond = pd.Series([True, False, True, False]) - # bool + int -> object 
- exp = pd.Series([True, 1, True, 1]) - self._assert_where_conversion(obj, cond, 1, exp, np.object) - - values = pd.Series([5, 6, 7, 8]) - exp = pd.Series([True, 6, True, 8]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - # bool + float -> object - exp = pd.Series([True, 1.1, True, 1.1]) - self._assert_where_conversion(obj, cond, 1.1, exp, np.object) - - values = pd.Series([5.5, 6.6, 7.7, 8.8]) - exp = pd.Series([True, 6.6, True, 8.8]) - self._assert_where_conversion(obj, cond, values, exp, np.object) + exp = pd.Series([True, fill_val, True, fill_val]) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) - # bool + complex -> object - exp = pd.Series([True, 1 + 1j, True, 1 + 1j]) - self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object) - - values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j]) - exp = pd.Series([True, 6 + 6j, True, 8 + 8j]) - self._assert_where_conversion(obj, cond, values, exp, np.object) - - # bool + bool -> bool - exp = pd.Series([True, True, True, True]) - self._assert_where_conversion(obj, cond, True, exp, np.bool) - - values = pd.Series([True, False, True, True]) - exp = pd.Series([True, False, True, True]) - self._assert_where_conversion(obj, cond, values, exp, np.bool) - - def test_where_index_bool(self): - pass - - def test_where_series_datetime64(self): + if fill_val is True: + values = pd.Series([True, False, True, True]) + else: + values = pd.Series(x * fill_val for x in [5, 6, 7, 8]) + exp = pd.Series([True, values[1], True, values[3]]) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + + @pytest.mark.parametrize("fill_val,exp_dtype", [ + (pd.Timestamp('2012-01-01'), 'datetime64[ns]'), + (pd.Timestamp('2012-01-01', tz='US/Eastern'), np.object)], + ids=['datetime64', 'datetime64tz']) + def test_where_series_datetime64(self, fill_val, exp_dtype): obj = pd.Series([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-03'), @@ -816,46 +573,29 @@ def 
test_where_series_datetime64(self): assert obj.dtype == 'datetime64[ns]' cond = pd.Series([True, False, True, False]) - # datetime64 + datetime64 -> datetime64 - exp = pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-01'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2012-01-01')]) - self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'), - exp, 'datetime64[ns]') - - values = pd.Series([pd.Timestamp('2012-01-01'), - pd.Timestamp('2012-01-02'), - pd.Timestamp('2012-01-03'), - pd.Timestamp('2012-01-04')]) - exp = pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-02'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2012-01-04')]) - self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]') + exp = pd.Series([pd.Timestamp('2011-01-01'), fill_val, + pd.Timestamp('2011-01-03'), fill_val]) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) - # datetime64 + datetime64tz -> object - exp = pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-01', tz='US/Eastern'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2012-01-01', tz='US/Eastern')]) - self._assert_where_conversion( - obj, cond, - pd.Timestamp('2012-01-01', tz='US/Eastern'), - exp, np.object) - - # ToDo: do not coerce to UTC, must be object - values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'), - pd.Timestamp('2012-01-02', tz='US/Eastern'), - pd.Timestamp('2012-01-03', tz='US/Eastern'), - pd.Timestamp('2012-01-04', tz='US/Eastern')]) - exp = pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-02 05:00'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2012-01-04 05:00')]) - self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]') - - def test_where_index_datetime64(self): + values = pd.Series(pd.date_range(fill_val, periods=4)) + if fill_val.tz: + exp = pd.Series([pd.Timestamp('2011-01-01'), + pd.Timestamp('2012-01-02 05:00'), + pd.Timestamp('2011-01-03'), + pd.Timestamp('2012-01-04 05:00')]) + 
self._assert_where_conversion(obj, cond, values, exp, + 'datetime64[ns]') + pytest.xfail("ToDo: do not coerce to UTC, must be object") + + exp = pd.Series([pd.Timestamp('2011-01-01'), values[1], + pd.Timestamp('2011-01-03'), values[3]]) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + + @pytest.mark.parametrize("fill_val,exp_dtype", [ + (pd.Timestamp('2012-01-01'), 'datetime64[ns]'), + (pd.Timestamp('2012-01-01', tz='US/Eastern'), np.object)], + ids=['datetime64', 'datetime64tz']) + def test_where_index_datetime(self, fill_val, exp_dtype): obj = pd.Index([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-03'), @@ -863,38 +603,30 @@ def test_where_index_datetime64(self): assert obj.dtype == 'datetime64[ns]' cond = pd.Index([True, False, True, False]) - # datetime64 + datetime64 -> datetime64 - # must support scalar - msg = "cannot coerce a Timestamp with a tz on a naive Block" - with pytest.raises(TypeError): - obj.where(cond, pd.Timestamp('2012-01-01')) - - values = pd.Index([pd.Timestamp('2012-01-01'), - pd.Timestamp('2012-01-02'), - pd.Timestamp('2012-01-03'), - pd.Timestamp('2012-01-04')]) - exp = pd.Index([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-02'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2012-01-04')]) - self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]') - - # ToDo: coerce to object msg = ("Index\\(\\.\\.\\.\\) must be called with a collection " "of some kind") with tm.assert_raises_regex(TypeError, msg): - obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern')) + obj.where(cond, fill_val) - # ToDo: do not ignore timezone, must be object - values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'), - pd.Timestamp('2012-01-02', tz='US/Eastern'), - pd.Timestamp('2012-01-03', tz='US/Eastern'), - pd.Timestamp('2012-01-04', tz='US/Eastern')]) + values = pd.Index(pd.date_range(fill_val, periods=4)) exp = pd.Index([pd.Timestamp('2011-01-01'), pd.Timestamp('2012-01-02'), 
pd.Timestamp('2011-01-03'), pd.Timestamp('2012-01-04')]) - self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]') + + if fill_val.tz: + self._assert_where_conversion(obj, cond, values, exp, + 'datetime64[ns]') + pytest.xfail("ToDo: do not ignore timezone, must be object") + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + pytest.xfail("datetime64 + datetime64 -> datetime64 must support" + " scalar") + + def test_where_index_complex128(self): + pass + + def test_where_index_bool(self): + pass def test_where_series_datetime64tz(self): pass @@ -921,6 +653,9 @@ class TestFillnaSeriesCoercion(CoercionBase): method = 'fillna' + def test_has_comprehensive_tests(self): + pass + def _assert_fillna_conversion(self, original, value, expected, expected_dtype): """ test coercion triggered by fillna """ @@ -928,181 +663,105 @@ def _assert_fillna_conversion(self, original, value, res = target.fillna(value) self._assert(res, expected, expected_dtype) - def _fillna_object_common(self, klass): + @pytest.mark.parametrize("klass", [pd.Series, pd.Index], + ids=['series', 'index']) + @pytest.mark.parametrize("fill_val, fill_dtype", [ + (1, np.object), + (1.1, np.object), + (1 + 1j, np.object), + (True, np.object)]) + def test_fillna_object(self, klass, fill_val, fill_dtype): obj = klass(['a', np.nan, 'c', 'd']) assert obj.dtype == np.object - # object + int -> object - exp = klass(['a', 1, 'c', 'd']) - self._assert_fillna_conversion(obj, 1, exp, np.object) - - # object + float -> object - exp = klass(['a', 1.1, 'c', 'd']) - self._assert_fillna_conversion(obj, 1.1, exp, np.object) - - # object + complex -> object - exp = klass(['a', 1 + 1j, 'c', 'd']) - self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object) - - # object + bool -> object - exp = klass(['a', True, 'c', 'd']) - self._assert_fillna_conversion(obj, True, exp, np.object) - - def test_fillna_series_object(self): - self._fillna_object_common(pd.Series) - - def 
test_fillna_index_object(self): - self._fillna_object_common(pd.Index) - - def test_fillna_series_int64(self): - # int can't hold NaN - pass - - def test_fillna_index_int64(self): - pass - - def _fillna_float64_common(self, klass, complex): + exp = klass(['a', fill_val, 'c', 'd']) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize("klass", [pd.Series, pd.Index], + ids=['series', 'index']) + @pytest.mark.parametrize("fill_val,fill_dtype", [ + (1, np.float64), + (1.1, np.float64), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_fillna_float64(self, klass, fill_val, fill_dtype): obj = klass([1.1, np.nan, 3.3, 4.4]) assert obj.dtype == np.float64 - # float + int -> float - exp = klass([1.1, 1.0, 3.3, 4.4]) - self._assert_fillna_conversion(obj, 1, exp, np.float64) - - # float + float -> float - exp = klass([1.1, 1.1, 3.3, 4.4]) - self._assert_fillna_conversion(obj, 1.1, exp, np.float64) - + exp = klass([1.1, fill_val, 3.3, 4.4]) # float + complex -> we don't support a complex Index # complex for Series, # object for Index - exp = klass([1.1, 1 + 1j, 3.3, 4.4]) - self._assert_fillna_conversion(obj, 1 + 1j, exp, complex) - - # float + bool -> object - exp = klass([1.1, True, 3.3, 4.4]) - self._assert_fillna_conversion(obj, True, exp, np.object) - - def test_fillna_series_float64(self): - self._fillna_float64_common(pd.Series, complex=np.complex128) - - def test_fillna_index_float64(self): - self._fillna_float64_common(pd.Index, complex=np.object) - - def test_fillna_series_complex128(self): + if fill_dtype == np.complex128 and klass == pd.Index: + fill_dtype = np.object + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize("fill_val,fill_dtype", [ + (1, np.complex128), + (1.1, np.complex128), + (1 + 1j, np.complex128), + (True, np.object)]) + def test_fillna_series_complex128(self, fill_val, fill_dtype): obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j]) assert obj.dtype == 
np.complex128 - # complex + int -> complex - exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j]) - self._assert_fillna_conversion(obj, 1, exp, np.complex128) - - # complex + float -> complex - exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j]) - self._assert_fillna_conversion(obj, 1.1, exp, np.complex128) - - # complex + complex -> complex - exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j]) - self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128) - - # complex + bool -> object - exp = pd.Series([1 + 1j, True, 3 + 3j, 4 + 4j]) - self._assert_fillna_conversion(obj, True, exp, np.object) - - def test_fillna_index_complex128(self): - self._fillna_float64_common(pd.Index, complex=np.object) - - def test_fillna_series_bool(self): - # bool can't hold NaN - pass - - def test_fillna_index_bool(self): - pass - - def test_fillna_series_datetime64(self): - obj = pd.Series([pd.Timestamp('2011-01-01'), - pd.NaT, - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) + exp = pd.Series([1 + 1j, fill_val, 3 + 3j, 4 + 4j]) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize("klass", [pd.Series, pd.Index], + ids=['series', 'index']) + @pytest.mark.parametrize("fill_val,fill_dtype", [ + (pd.Timestamp('2012-01-01'), 'datetime64[ns]'), + (pd.Timestamp('2012-01-01', tz='US/Eastern'), np.object), + (1, np.object), ('x', np.object)], + ids=['datetime64', 'datetime64tz', 'object', 'object']) + def test_fillna_datetime(self, klass, fill_val, fill_dtype): + obj = klass([pd.Timestamp('2011-01-01'), + pd.NaT, + pd.Timestamp('2011-01-03'), + pd.Timestamp('2011-01-04')]) assert obj.dtype == 'datetime64[ns]' - # datetime64 + datetime64 => datetime64 - exp = pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-01'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'), - exp, 'datetime64[ns]') - - # datetime64 + datetime64tz => object - exp = 
pd.Series([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-01', tz='US/Eastern'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - value = pd.Timestamp('2012-01-01', tz='US/Eastern') - self._assert_fillna_conversion(obj, value, exp, np.object) - - # datetime64 + int => object - exp = pd.Series([pd.Timestamp('2011-01-01'), - 1, - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - self._assert_fillna_conversion(obj, 1, exp, 'object') - - # datetime64 + object => object - exp = pd.Series([pd.Timestamp('2011-01-01'), - 'x', - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - self._assert_fillna_conversion(obj, 'x', exp, np.object) - - def test_fillna_series_datetime64tz(self): + exp = klass([pd.Timestamp('2011-01-01'), + fill_val, + pd.Timestamp('2011-01-03'), + pd.Timestamp('2011-01-04')]) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize("klass", [pd.Series, pd.Index]) + @pytest.mark.parametrize("fill_val,fill_dtype", [ + (pd.Timestamp('2012-01-01', tz='US/Eastern'), + 'datetime64[ns, US/Eastern]'), + (pd.Timestamp('2012-01-01'), np.object), + (pd.Timestamp('2012-01-01', tz='Asia/Tokyo'), np.object), + (1, np.object), + ('x', np.object)]) + def test_fillna_datetime64tz(self, klass, fill_val, fill_dtype): tz = 'US/Eastern' - obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.NaT, - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) + obj = klass([pd.Timestamp('2011-01-01', tz=tz), + pd.NaT, + pd.Timestamp('2011-01-03', tz=tz), + pd.Timestamp('2011-01-04', tz=tz)]) assert obj.dtype == 'datetime64[ns, US/Eastern]' - # datetime64tz + datetime64tz => datetime64tz - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01', tz=tz), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01', tz=tz) - self._assert_fillna_conversion(obj, value, exp, - 'datetime64[ns, US/Eastern]') + exp = 
klass([pd.Timestamp('2011-01-01', tz=tz), + fill_val, + pd.Timestamp('2011-01-03', tz=tz), + pd.Timestamp('2011-01-04', tz=tz)]) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) - # datetime64tz + datetime64 => object - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01'), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01') - self._assert_fillna_conversion(obj, value, exp, np.object) + def test_fillna_series_int64(self): + pass - # datetime64tz + datetime64tz(different tz) => object - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01', tz='Asia/Tokyo'), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo') - self._assert_fillna_conversion(obj, value, exp, np.object) + def test_fillna_index_int64(self): + pass - # datetime64tz + int => object - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - 1, - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - self._assert_fillna_conversion(obj, 1, exp, np.object) + def test_fillna_series_bool(self): + pass - # datetime64tz + object => object - exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - 'x', - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - self._assert_fillna_conversion(obj, 'x', exp, np.object) + def test_fillna_index_bool(self): + pass def test_fillna_series_timedelta64(self): pass @@ -1110,83 +769,6 @@ def test_fillna_series_timedelta64(self): def test_fillna_series_period(self): pass - def test_fillna_index_datetime64(self): - obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03', - '2011-01-04']) - assert obj.dtype == 'datetime64[ns]' - - # datetime64 + datetime64 => datetime64 - exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', - '2011-01-03', '2011-01-04']) - self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'), - exp, 
'datetime64[ns]') - - # datetime64 + datetime64tz => object - exp = pd.Index([pd.Timestamp('2011-01-01'), - pd.Timestamp('2012-01-01', tz='US/Eastern'), - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - value = pd.Timestamp('2012-01-01', tz='US/Eastern') - self._assert_fillna_conversion(obj, value, exp, np.object) - - # datetime64 + int => object - exp = pd.Index([pd.Timestamp('2011-01-01'), - 1, - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - self._assert_fillna_conversion(obj, 1, exp, np.object) - - # datetime64 + object => object - exp = pd.Index([pd.Timestamp('2011-01-01'), - 'x', - pd.Timestamp('2011-01-03'), - pd.Timestamp('2011-01-04')]) - self._assert_fillna_conversion(obj, 'x', exp, np.object) - - def test_fillna_index_datetime64tz(self): - tz = 'US/Eastern' - - obj = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03', - '2011-01-04'], tz=tz) - assert obj.dtype == 'datetime64[ns, US/Eastern]' - - # datetime64tz + datetime64tz => datetime64tz - exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', - '2011-01-03', '2011-01-04'], tz=tz) - value = pd.Timestamp('2012-01-01', tz=tz) - self._assert_fillna_conversion(obj, value, exp, - 'datetime64[ns, US/Eastern]') - - # datetime64tz + datetime64 => object - exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01'), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01') - self._assert_fillna_conversion(obj, value, exp, np.object) - - # datetime64tz + datetime64tz(different tz) => object - exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2012-01-01', tz='Asia/Tokyo'), - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo') - self._assert_fillna_conversion(obj, value, exp, np.object) - - # datetime64tz + int => object - exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz), - 1, - pd.Timestamp('2011-01-03', tz=tz), - 
pd.Timestamp('2011-01-04', tz=tz)]) - self._assert_fillna_conversion(obj, 1, exp, np.object) - - # datetime64tz + object => object - exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz), - 'x', - pd.Timestamp('2011-01-03', tz=tz), - pd.Timestamp('2011-01-04', tz=tz)]) - self._assert_fillna_conversion(obj, 'x', exp, np.object) - def test_fillna_index_timedelta64(self): pass @@ -1196,38 +778,49 @@ def test_fillna_index_period(self): class TestReplaceSeriesCoercion(CoercionBase): - # not indexing, but place here for consisntency - klasses = ['series'] method = 'replace' - def setup_method(self, method): - self.rep = {} - self.rep['object'] = ['a', 'b'] - self.rep['int64'] = [4, 5] - self.rep['float64'] = [1.1, 2.2] - self.rep['complex128'] = [1 + 1j, 2 + 2j] - self.rep['bool'] = [True, False] - self.rep['datetime64[ns]'] = [pd.Timestamp('2011-01-01'), - pd.Timestamp('2011-01-03')] - - for tz in ['UTC', 'US/Eastern']: - # to test tz => different tz replacement - key = 'datetime64[ns, {0}]'.format(tz) - self.rep[key] = [pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp('2011-01-03', tz=tz)] - - self.rep['timedelta64[ns]'] = [pd.Timedelta('1 day'), - pd.Timedelta('2 day')] - - def _assert_replace_conversion(self, from_key, to_key, how): + rep = {} + rep['object'] = ['a', 'b'] + rep['int64'] = [4, 5] + rep['float64'] = [1.1, 2.2] + rep['complex128'] = [1 + 1j, 2 + 2j] + rep['bool'] = [True, False] + rep['datetime64[ns]'] = [pd.Timestamp('2011-01-01'), + pd.Timestamp('2011-01-03')] + + for tz in ['UTC', 'US/Eastern']: + # to test tz => different tz replacement + key = 'datetime64[ns, {0}]'.format(tz) + rep[key] = [pd.Timestamp('2011-01-01', tz=tz), + pd.Timestamp('2011-01-03', tz=tz)] + + rep['timedelta64[ns]'] = [pd.Timedelta('1 day'), + pd.Timedelta('2 day')] + + @pytest.mark.parametrize('how', ['dict', 'series']) + @pytest.mark.parametrize('to_key', [ + 'object', 'int64', 'float64', 'complex128', 'bool', 'datetime64[ns]', + 'datetime64[ns, UTC]', 'datetime64[ns, 
US/Eastern]', 'timedelta64[ns]' + ], ids=['object', 'int64', 'float64', 'complex128', 'bool', + 'datetime64', 'datetime64tz', 'datetime64tz', 'timedelta64']) + @pytest.mark.parametrize('from_key', [ + 'object', 'int64', 'float64', 'complex128', 'bool', 'datetime64[ns]', + 'datetime64[ns, UTC]', 'datetime64[ns, US/Eastern]', 'timedelta64[ns]'] + ) + def test_replace_series(self, how, to_key, from_key): + if from_key == 'bool' and how == 'series' and compat.PY3: + # doesn't work in PY3, though ...dict_from_bool works fine + pytest.skip("doesn't work as in PY3") + index = pd.Index([3, 4], name='xxx') obj = pd.Series(self.rep[from_key], index=index, name='yyy') assert obj.dtype == from_key if (from_key.startswith('datetime') and to_key.startswith('datetime')): - # different tz, currently mask_missing raises SystemError - return + pytest.xfail("different tz, currently mask_missing " + "raises SystemError") if how == 'dict': replacer = dict(zip(self.rep[from_key], self.rep[to_key])) @@ -1242,7 +835,6 @@ def _assert_replace_conversion(self, from_key, to_key, how): (from_key == 'complex128' and to_key in ('int64', 'float64'))): - # buggy on 32-bit / window if compat.is_platform_32bit() or compat.is_platform_windows(): pytest.skip("32-bit platform buggy: {0} -> {1}".format (from_key, to_key)) @@ -1257,77 +849,5 @@ def _assert_replace_conversion(self, from_key, to_key, how): tm.assert_series_equal(result, exp) - def test_replace_series_object(self): - from_key = 'object' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_int64(self): - from_key = 'int64' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_float64(self): - from_key = 'float64' - for to_key in 
self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_complex128(self): - from_key = 'complex128' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_bool(self): - from_key = 'bool' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - for to_key in self.rep: - - if compat.PY3: - # doesn't work in PY3, though ...dict_from_bool works fine - pytest.skip("doesn't work as in PY3") - - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_datetime64(self): - from_key = 'datetime64[ns]' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - from_key = 'datetime64[ns]' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_datetime64tz(self): - from_key = 'datetime64[ns, US/Eastern]' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - from_key = 'datetime64[ns, US/Eastern]' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - - def test_replace_series_timedelta64(self): - from_key = 'timedelta64[ns]' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='dict') - - from_key = 'timedelta64[ns]' - for to_key in self.rep: - self._assert_replace_conversion(from_key, to_key, how='series') - def test_replace_series_period(self): pass
- [ ] progress towards #18706 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is not yet finished but wanted to share progress in case of feedback. The main thing I'm questioning is the need to use the `` test_has_comprehensive_tests`` method in ``CoercionBase``. If we want to keep I would need to refactor, but I'm curious if others even find it necessary given that there are often just blank tests being created in subclasses to make that test pass
https://api.github.com/repos/pandas-dev/pandas/pulls/18721
2017-12-11T00:22:59Z
2017-12-13T02:02:45Z
2017-12-13T02:02:45Z
2018-01-11T20:50:22Z
TST: make it possible to run tests without moto installed
diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 828d5d0ccd3c6..57e72da2fd3f4 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,6 +1,5 @@ import os -import moto import pytest from pandas.io.parsers import read_table @@ -42,6 +41,7 @@ def s3_resource(tips_file, jsonl_file): is yielded by the fixture. """ pytest.importorskip('s3fs') + moto = pytest.importorskip('moto') moto.mock_s3().start() test_s3_files = [ diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index e9976da6f6774..077752039a558 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -1,5 +1,4 @@ import pytest -import moto import pandas as pd from pandas import compat @@ -73,6 +72,8 @@ def test_read_zipped_json(): def test_with_s3_url(compression): boto3 = pytest.importorskip('boto3') pytest.importorskip('s3fs') + moto = pytest.importorskip('moto') + if compression == 'xz': tm._skip_if_no_lzma() diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 3fd55bcad677a..4efeeecf8ee4a 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -10,7 +10,6 @@ import numpy as np import pytest from numpy import nan -import moto import pandas as pd import pandas.util.testing as tm @@ -616,6 +615,7 @@ def test_read_from_http_url(self): def test_read_from_s3_url(self): boto3 = pytest.importorskip('boto3') pytest.importorskip('s3fs') + moto = pytest.importorskip('moto') with moto.mock_s3(): conn = boto3.resource("s3", region_name="us-east-1")
https://api.github.com/repos/pandas-dev/pandas/pulls/18719
2017-12-10T23:11:04Z
2017-12-11T10:57:01Z
2017-12-11T10:57:01Z
2017-12-11T10:57:04Z
DOC: improved pivot_table(..) aggfunc parameter explanation
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9ce6b6148be56..5f323d0f040bc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4413,10 +4413,12 @@ def pivot(self, index=None, columns=None, values=None): list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. - aggfunc : function or list of functions, default numpy.mean + aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) + If dict is passed, the key is column to aggregate and value + is function or list of functions fill_value : scalar, default None Value to replace missing values with margins : boolean, default False @@ -4452,7 +4454,6 @@ def pivot(self, index=None, columns=None, values=None): >>> table = pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table - ... # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5.0 @@ -4460,6 +4461,28 @@ def pivot(self, index=None, columns=None, values=None): foo one 4.0 1.0 two NaN 6.0 + >>> table = pivot_table(df, values='D', index=['A', 'B'], + ... columns=['C'], aggfunc=np.sum) + >>> table + C large small + A B + bar one 4.0 5.0 + two 7.0 6.0 + foo one 4.0 1.0 + two NaN 6.0 + + >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': np.mean, + ... 'E': [min, max, np.mean]}) + >>> table + D E + mean max median min + A C + bar large 5.500000 16 14.5 13 + small 5.500000 15 14.5 14 + foo large 2.000000 10 9.5 9 + small 2.333333 12 11.0 8 + Returns ------- table : DataFrame
- [ ] closes #18712 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18718
2017-12-10T17:06:50Z
2017-12-12T22:03:19Z
2017-12-12T22:03:19Z
2017-12-12T22:03:31Z
Parquet: Add error message for no engine found
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 7827c3ae04d4d..3ebdd760aa0a6 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -25,6 +25,11 @@ def get_engine(engine): except ImportError: pass + raise ImportError("Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "pyarrow or fastparquet is required for parquet " + "support") + if engine not in ['pyarrow', 'fastparquet']: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
Give a better error message for engine="auto" case when none of the engines were found installed. closes #18156 Cherry-picking the doc part of PR https://github.com/pandas-dev/pandas/pull/18568
https://api.github.com/repos/pandas-dev/pandas/pulls/18717
2017-12-10T16:42:17Z
2017-12-10T22:50:24Z
2017-12-10T22:50:24Z
2017-12-12T02:38:31Z
BLD/TST: cross compat for pa/fp failing on osx & windows
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index dac3625cba4ba..b9d1c9354eeb4 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -155,12 +155,11 @@ echo "[removing installed pandas]" conda remove pandas -y --force pip uninstall -y pandas -if [ "$BUILD_TEST" ]; then +# remove any installation +conda list pandas +pip list --format columns | grep pandas - # remove any installation - pip uninstall -y pandas - conda list pandas - pip list --format columns |grep pandas +if [ "$BUILD_TEST" ]; then # build & install testing echo ["building release"]
xref #18662 closes #18714
https://api.github.com/repos/pandas-dev/pandas/pulls/18715
2017-12-10T15:28:36Z
2017-12-18T12:24:08Z
null
2017-12-18T12:24:08Z
BUG: Fix Series.astype and Categorical.astype to update existing Categorical data
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index c2da0c420f643..f02c389252f47 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -263,6 +263,7 @@ Conversion - Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`) - Fixed a bug where ``FY5253`` date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) - Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) +- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) Indexing diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index e34755e665f8d..356e76df366b4 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -436,9 +436,12 @@ def astype(self, dtype, copy=True): """ if is_categorical_dtype(dtype): - if copy is True: - return self.copy() - return self + # GH 10696/18593 + dtype = self.dtype._update_dtype(dtype) + self = self.copy() if copy else self + if dtype == self.dtype: + return self + return self._set_dtype(dtype) return np.array(self, dtype=dtype, copy=copy) @cache_readonly diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 4169a001655cb..3a64a0ef84e3d 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -54,7 +54,7 @@ import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex -from pandas.core.common import is_null_slice +from pandas.core.common import is_null_slice, _any_not_none import pandas.core.algorithms as algos from pandas.core.index import Index, MultiIndex, _ensure_index @@ -573,7 +573,6 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, raise TypeError(msg) # may need 
to convert to categorical - # this is only called for non-categoricals if self.is_categorical_astype(dtype): # deprecated 17636 @@ -589,13 +588,16 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, "CategoricalDtype instead", FutureWarning, stacklevel=7) - kwargs = kwargs.copy() - categories = getattr(dtype, 'categories', None) - ordered = getattr(dtype, 'ordered', False) + categories = kwargs.get('categories', None) + ordered = kwargs.get('ordered', None) + if _any_not_none(categories, ordered): + dtype = CategoricalDtype(categories, ordered) - kwargs.setdefault('categories', categories) - kwargs.setdefault('ordered', ordered) - return self.make_block(Categorical(self.values, **kwargs)) + if is_categorical_dtype(self.values): + # GH 10696/18593: update an existing categorical efficiently + return self.make_block(self.values.astype(dtype, copy=copy)) + + return self.make_block(Categorical(self.values, dtype=dtype)) # astype processing dtype = np.dtype(dtype) @@ -2427,23 +2429,6 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): return self.make_block_same_class(new_values, new_mgr_locs) - def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, mgr=None): - """ - Coerce to the new type (if copy=True, return a new copy) - raise on an except if raise == True - """ - - if self.is_categorical_astype(dtype): - values = self.values - else: - values = np.asarray(self.values).astype(dtype, copy=False) - - if copy: - values = values.copy() - - return self.make_block(values) - def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ diff --git a/pandas/tests/categorical/test_dtypes.py b/pandas/tests/categorical/test_dtypes.py index 0a41b628bc057..bad2c27026b31 100644 --- a/pandas/tests/categorical/test_dtypes.py +++ b/pandas/tests/categorical/test_dtypes.py @@ -99,10 +99,54 @@ def test_codes_dtypes(self): result = 
result.remove_categories(['foo%05d' % i for i in range(300)]) assert result.codes.dtype == 'int8' - def test_astype_categorical(self): + @pytest.mark.parametrize('ordered', [True, False]) + def test_astype(self, ordered): + # string + cat = Categorical(list('abbaaccc'), ordered=ordered) + result = cat.astype(object) + expected = np.array(cat) + tm.assert_numpy_array_equal(result, expected) + + msg = 'could not convert string to float' + with tm.assert_raises_regex(ValueError, msg): + cat.astype(float) + + # numeric + cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered) + result = cat.astype(object) + expected = np.array(cat, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = cat.astype(int) + expected = np.array(cat, dtype=np.int) + tm.assert_numpy_array_equal(result, expected) + + result = cat.astype(float) + expected = np.array(cat, dtype=np.float) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize('dtype_ordered', [True, False]) + @pytest.mark.parametrize('cat_ordered', [True, False]) + def test_astype_category(self, dtype_ordered, cat_ordered): + # GH 10696/18593 + data = list('abcaacbab') + cat = Categorical(data, categories=list('bac'), ordered=cat_ordered) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = cat.astype(dtype) + expected = Categorical( + data, categories=cat.categories, ordered=dtype_ordered) + tm.assert_categorical_equal(result, expected) - cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) - tm.assert_categorical_equal(cat, cat.astype('category')) - tm.assert_almost_equal(np.array(cat), cat.astype('object')) + # non-standard categories + dtype = CategoricalDtype(list('adc'), dtype_ordered) + result = cat.astype(dtype) + expected = Categorical(data, dtype=dtype) + tm.assert_categorical_equal(result, expected) - pytest.raises(ValueError, lambda: cat.astype(float)) + if dtype_ordered is False: + # dtype='category' can't specify ordered, so 
only test once + result = cat.astype('category') + expected = cat + tm.assert_categorical_equal(result, expected) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index ae9e011d76597..543f59013ff12 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -411,11 +411,10 @@ def test_astype(self): result = IntervalIndex.from_intervals(result.values) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('copy', [True, False]) @pytest.mark.parametrize('name', [None, 'foo']) @pytest.mark.parametrize('dtype_ordered', [True, False]) @pytest.mark.parametrize('index_ordered', [True, False]) - def test_astype_category(self, copy, name, dtype_ordered, index_ordered): + def test_astype_category(self, name, dtype_ordered, index_ordered): # GH 18630 index = self.create_index(ordered=index_ordered) if name: @@ -423,7 +422,7 @@ def test_astype_category(self, copy, name, dtype_ordered, index_ordered): # standard categories dtype = CategoricalDtype(ordered=dtype_ordered) - result = index.astype(dtype, copy=copy) + result = index.astype(dtype) expected = CategoricalIndex(index.tolist(), name=name, categories=index.categories, @@ -432,13 +431,13 @@ def test_astype_category(self, copy, name, dtype_ordered, index_ordered): # non-standard categories dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered) - result = index.astype(dtype, copy=copy) + result = index.astype(dtype) expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype) tm.assert_index_equal(result, expected) if dtype_ordered is False: # dtype='category' can't specify ordered, so only test once - result = index.astype('category', copy=copy) + result = index.astype('category') expected = index tm.assert_index_equal(result, expected) diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 12d0267005f19..441e811706487 100644 --- a/pandas/tests/series/test_dtypes.py +++ 
b/pandas/tests/series/test_dtypes.py @@ -322,6 +322,44 @@ def cmp(a, b): lambda x: x.astype('object').astype(Categorical)]: pytest.raises(TypeError, lambda: invalid(s)) + @pytest.mark.parametrize('name', [None, 'foo']) + @pytest.mark.parametrize('dtype_ordered', [True, False]) + @pytest.mark.parametrize('series_ordered', [True, False]) + def test_astype_categorical_to_categorical(self, name, dtype_ordered, + series_ordered): + # GH 10696/18593 + s_data = list('abcaacbab') + s_dtype = CategoricalDtype(list('bac'), ordered=series_ordered) + s = Series(s_data, dtype=s_dtype, name=name) + + # unspecified categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = s.astype(dtype) + exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered) + expected = Series(s_data, name=name, dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = s.astype('category', ordered=dtype_ordered) + tm.assert_series_equal(result, expected) + + # different categories + dtype = CategoricalDtype(list('adc'), dtype_ordered) + result = s.astype(dtype) + expected = Series(s_data, name=name, dtype=dtype) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = s.astype( + 'category', categories=list('adc'), ordered=dtype_ordered) + tm.assert_series_equal(result, expected) + + if dtype_ordered is False: + # not specifying ordered, so only test once + expected = s + result = s.astype('category') + tm.assert_series_equal(result, expected) + def test_astype_categoricaldtype(self): s = Series(['a', 'b', 'a']) result = s.astype(CategoricalDtype(['a', 'b'], ordered=True))
- [X] closes #10696 - [X] closes #18593 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Couldn't find an issue about it, but the same problem described with `Series.astype` in the linked issues was occurring with `Categorical.astype`. Put in a fix for that too with some code very similar to what was done in #18677 for `CategoricalIndex.astype`. Could probably consolidate the two into a single helper function, potentially as part of #18704.
https://api.github.com/repos/pandas-dev/pandas/pulls/18710
2017-12-10T05:07:27Z
2017-12-13T14:35:18Z
2017-12-13T14:35:18Z
2017-12-13T19:34:10Z
TST: lock down timeseries now tests, xref #18666
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 7b0504388be22..c7035df8ac15c 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -220,7 +220,7 @@ def _test_parse_iso8601(object ts): if ts == 'now': return Timestamp.utcnow() elif ts == 'today': - return Timestamp.utcnow().normalize() + return Timestamp.now().normalize() _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) obj.value = dtstruct_to_dt64(&obj.dts) @@ -734,7 +734,7 @@ cdef inline bint _parse_today_now(str val, int64_t* iresult): return True elif val == 'today': # Note: this is *not* the same as Timestamp('today') - iresult[0] = Timestamp.utcnow().normalize().value + iresult[0] = Timestamp.now().normalize().value return True return False diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index bdee67a4ff674..a94865d8e9657 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -21,6 +21,7 @@ from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.util import testing as tm +import pandas.util._test_decorators as td from pandas.util.testing import assert_series_equal, _skip_if_has_locale from pandas import (isna, to_datetime, Timestamp, Series, DataFrame, Index, DatetimeIndex, NaT, date_range, compat) @@ -187,6 +188,56 @@ def test_to_datetime_format_weeks(self, cache): class TestToDatetime(object): + @td.skip_if_windows # `tm.set_timezone` does not work in windows + def test_to_datetime_now(self): + # See GH#18666 + with tm.set_timezone('US/Eastern'): + npnow = np.datetime64('now').astype('datetime64[ns]') + pdnow = pd.to_datetime('now') + pdnow2 = pd.to_datetime(['now'])[0] + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10 + assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10 
+ + assert pdnow.tzinfo is None + assert pdnow2.tzinfo is None + + @td.skip_if_windows # `tm.set_timezone` does not work in windows + def test_to_datetime_today(self): + # See GH#18666 + # Test with one timezone far ahead of UTC and another far behind, so + # one of these will _almost_ alawys be in a different day from UTC. + # Unfortunately this test between 12 and 1 AM Samoa time + # this both of these timezones _and_ UTC will all be in the same day, + # so this test will not detect the regression introduced in #18666. + with tm.set_timezone('Pacific/Auckland'): # 12-13 hours ahead of UTC + nptoday = np.datetime64('today').astype('datetime64[ns]') + pdtoday = pd.to_datetime('today') + pdtoday2 = pd.to_datetime(['today'])[0] + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdtoday.value - nptoday.astype(np.int64)) < 1e10 + assert abs(pdtoday2.value - nptoday.astype(np.int64)) < 1e10 + + assert pdtoday.tzinfo is None + assert pdtoday2.tzinfo is None + + with tm.set_timezone('US/Samoa'): # 11 hours behind UTC + nptoday = np.datetime64('today').astype('datetime64[ns]') + pdtoday = pd.to_datetime('today') + pdtoday2 = pd.to_datetime(['today'])[0] + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdtoday.value - nptoday.astype(np.int64)) < 1e10 + assert abs(pdtoday2.value - nptoday.astype(np.int64)) < 1e10 + + assert pdtoday.tzinfo is None + assert pdtoday2.tzinfo is None + @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_dt64s(self, cache): in_bound_dts = [
See the comment attached to `test_to_datetime_today`. This test is not deterministic, will fail to detect the change introduced by #18666 1 hour out of each day. - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/18709
2017-12-10T03:11:12Z
2017-12-12T01:20:58Z
2017-12-12T01:20:58Z
2018-02-11T22:00:38Z
BUG: Fix resample with np.timedelta64 loffset has no effect
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 9f5439b68558b..3b6ba84a416e6 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1051,6 +1051,8 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', if isinstance(loffset, compat.string_types): loffset = to_offset(loffset) + elif isinstance(loffset, np.timedelta64): + loffset = loffset.tolist() self.loffset = loffset self.how = how diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 1fd6befd64f57..7a022cd6bf6db 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -1251,24 +1251,31 @@ def test_resample_loffset(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min') s = Series(np.random.randn(14), index=rng) - result = s.resample('5min', closed='right', label='right', - loffset=timedelta(minutes=1)).mean() + # GH7687 + result1 = s.resample('5min', closed='right', label='right', + loffset=np.timedelta64(1, 'm')).mean() + result2 = s.resample('5min', closed='right', label='right', + loffset=timedelta(minutes=1)).mean() idx = date_range('1/1/2000', periods=4, freq='5min') expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()], index=idx + timedelta(minutes=1)) - assert_series_equal(result, expected) + assert_series_equal(result1, expected) + assert_series_equal(result2, expected) expected = s.resample( '5min', closed='right', label='right', loffset='1min').mean() - assert_series_equal(result, expected) + assert_series_equal(result1, expected) + assert_series_equal(result2, expected) expected = s.resample( '5min', closed='right', label='right', loffset=Minute(1)).mean() - assert_series_equal(result, expected) + assert_series_equal(result1, expected) + assert_series_equal(result2, expected) - assert result.index.freq == Minute(5) + assert result1.index.freq == Minute(5) + assert result2.index.freq == Minute(5) # from daily dti = DatetimeIndex(start=datetime(2005, 1, 1),
- [x] closes #7687 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18708
2017-12-10T02:49:40Z
2018-01-21T18:24:29Z
null
2018-01-21T18:24:29Z
BUG: Pass kwargs from Index.to_series to pd.Series
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 841eec69d41ba..220e55a1f8ee9 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -189,7 +189,7 @@ Other API Changes - :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`) - The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`) - Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`) -- Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:pr:`16672`) +- Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) - :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`) .. _whatsnew_0220.deprecations: @@ -272,6 +272,8 @@ Indexing - Bug in :func:`IntervalIndex.symmetric_difference` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`) - Bug in indexing a datetimelike ``Index`` that raised ``ValueError`` instead of ``IndexError`` (:issue:`18386`). 
- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`) +- :func:`Index.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) +- :func:`DatetimeIndex.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) I/O ^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 938fd7130faa5..d17767d662bb4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -983,20 +983,32 @@ def _format_attrs(self): attrs.append(('length', len(self))) return attrs - def to_series(self, **kwargs): + def to_series(self, index=None, name=None): """ Create a Series with both index and values equal to the index keys useful with map for returning an indexer based on an index + Parameters + ---------- + index : Index, optional + index of resulting Series. If None, defaults to original index + name : string, optional + name of resulting Series. If None, defaults to name of original + index + Returns ------- Series : dtype will be based on the type of the Index values. 
""" from pandas import Series - return Series(self._to_embed(), - index=self._shallow_copy(), - name=self.name) + + if index is None: + index = self._shallow_copy() + if name is None: + name = self.name + + return Series(self._to_embed(), index=index, name=name) def to_frame(self, index=True): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 290c77dd7f040..b7c1b60f77650 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -927,7 +927,7 @@ def _get_time_micros(self): values = self._local_timestamps() return fields.get_time_micros(values) - def to_series(self, keep_tz=False): + def to_series(self, keep_tz=False, index=None, name=None): """ Create a Series with both index and values equal to the index keys useful with map for returning an indexer based on an index @@ -949,15 +949,24 @@ def to_series(self, keep_tz=False): Series will have a datetime64[ns] dtype. TZ aware objects will have the tz removed. + index : Index, optional + index of resulting Series. If None, defaults to original index + name : string, optional + name of resulting Series. 
If None, defaults to name of original + index Returns ------- Series """ from pandas import Series - return Series(self._to_embed(keep_tz), - index=self._shallow_copy(), - name=self.name) + + if index is None: + index = self._shallow_copy() + if name is None: + name = self.name + + return Series(self._to_embed(keep_tz), index=index, name=name) def _to_embed(self, keep_tz=False, dtype=None): """ diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index c1ee18526cc01..a4b72f29aa65f 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -50,6 +50,25 @@ def test_to_series(self): assert s.index is not idx assert s.name == idx.name + def test_to_series_with_arguments(self): + # GH18699 + + # index kwarg + idx = self.create_index() + s = idx.to_series(index=idx) + + assert s.values is not idx.values + assert s.index is idx + assert s.name == idx.name + + # name kwarg + idx = self.create_index() + s = idx.to_series(name='__test') + + assert s.values is not idx.values + assert s.index is not idx + assert s.name != idx.name + def test_to_frame(self): # see gh-15230 idx = self.create_index()
- [x] closes #18699 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Please let me know if you have any questions
https://api.github.com/repos/pandas-dev/pandas/pulls/18707
2017-12-10T00:27:16Z
2017-12-13T14:36:55Z
2017-12-13T14:36:55Z
2017-12-13T15:02:56Z
BUG: Fix df.loc slice support
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 81d892fba0fe2..f4a5dfa3fd29a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2097,7 +2097,9 @@ def _maybe_to_slice(loc): mask[loc] = True return mask - if not isinstance(key, tuple): + if isinstance(key, slice): + return key + elif not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py index d89c64fc5b9f8..014573cdf621d 100644 --- a/pandas/tests/indexing/test_multiindex.py +++ b/pandas/tests/indexing/test_multiindex.py @@ -239,6 +239,15 @@ def test_iloc_getitem_multiindex(self): xp = mi_labels.ix['j'].ix[:, 'j'].ix[0, 0] assert rs == xp + # GH8856 + s = pd.Series(np.arange(10), + pd.MultiIndex.from_product(([0, 1], list('abcde')))) + result = s.iloc[::4] + expected = pd.Series(np.array([0, 4, 8]), + MultiIndex(levels=[[0, 1], list('ade')], + labels=[[0, 0, 1], [0, 2, 1]])) + tm.assert_series_equal(result, expected) + def test_loc_multiindex(self): mi_labels = DataFrame(np.random.randn(3, 3), @@ -278,6 +287,15 @@ def test_loc_multiindex(self): xp = mi_int.ix[4] tm.assert_frame_equal(rs, xp) + # GH8856 + s = pd.Series(np.arange(10), + pd.MultiIndex.from_product(([0, 1], list('abcde')))) + result = s.loc[::4] + expected = pd.Series(np.array([0, 4, 8]), + MultiIndex(levels=[[0, 1], ['a', 'd', 'e']], + labels=[[0, 0, 1], [0, 2, 1]])) + tm.assert_series_equal(result, expected) + def test_getitem_partial_int(self): # GH 12416 # with single item
- [x] closes #8856 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18703
2017-12-09T12:02:42Z
2018-02-10T18:43:23Z
null
2018-02-10T18:43:23Z
add benchmarks for timestamp parsing
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index 2d1ff3a24f787..f99f95678a0b7 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -1,5 +1,7 @@ -from .pandas_vb_common import * -from pandas import to_timedelta, Timestamp +import numpy as np +import pandas as pd + +from pandas import to_timedelta, Timestamp, Timedelta class ToTimedelta(object): @@ -67,8 +69,8 @@ class DatetimeAccessor(object): def setup(self): self.N = 100000 self.series = pd.Series( - pd.timedelta_range('1 days', periods=self.N, freq='h') - ) + pd.timedelta_range('1 days', periods=self.N, freq='h')) + def time_dt_accessor(self): self.series.dt diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py index c8a93b09d94c0..62abaca17d22f 100644 --- a/asv_bench/benchmarks/timestamp.py +++ b/asv_bench/benchmarks/timestamp.py @@ -1,8 +1,27 @@ -from pandas import to_timedelta, Timestamp +from pandas import Timestamp import pytz import datetime +class TimestampConstruction(object): + # TODO: classmethod constructors: fromordinal, fromtimestamp... + + def time_parse_iso8601_no_tz(self): + Timestamp('2017-08-25 08:16:14') + + def time_parse_iso8601_tz(self): + Timestamp('2017-08-25 08:16:14-0500') + + def time_parse_dateutil(self): + Timestamp('2017/08/25 08:16:14 AM') + + def time_parse_today(self): + Timestamp('today') + + def time_parse_now(self): + Timestamp('now') + + class TimestampProperties(object): goal_time = 0.2 diff --git a/ci/lint.sh b/ci/lint.sh index 832e1227d9752..bec82602fa509 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -24,7 +24,7 @@ if [ "$LINT" ]; then echo "Linting setup.py DONE" echo "Linting asv_bench/benchmarks/" - flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ghijoprst]*.py --ignore=F811 + flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ghijoprs]*.py --ignore=F811 if [ $? -ne "0" ]; then RET=1 fi
There's a reasonable case to be made that these and timeseries.ToDatetime benchmarks belong in the same place. LMK if there's a preference. Small flake8 fixups got timedelta.py passing, enabled linting for modules starting with "t".
https://api.github.com/repos/pandas-dev/pandas/pulls/18698
2017-12-08T19:32:30Z
2017-12-09T14:56:26Z
2017-12-09T14:56:26Z
2018-01-23T04:40:57Z
0.21.1 backports (round 1)
diff --git a/.travis.yml b/.travis.yml index fe1a2950dbf08..42b4ef0396fc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,8 +102,6 @@ before_install: - uname -a - git --version - git tag - - ci/before_install_travis.sh - - export DISPLAY=":99.0" install: - echo "install start" @@ -114,6 +112,8 @@ install: before_script: - ci/install_db_travis.sh + - export DISPLAY=":99.0" + - ci/before_script_travis.sh script: - echo "script start" diff --git a/appveyor.yml b/appveyor.yml index a1f8886f6d068..44af73b498aa8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -22,7 +22,7 @@ environment: PYTHON_VERSION: "3.6" PYTHON_ARCH: "64" CONDA_PY: "36" - CONDA_NPY: "112" + CONDA_NPY: "113" - CONDA_ROOT: "C:\\Miniconda3_64" PYTHON_VERSION: "2.7" diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index dda684b35e301..16889b2f19e89 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -10,15 +10,37 @@ def date_range(start=None, end=None, periods=None, freq=None): from pandas.tools.plotting import andrews_curves +class Plotting(object): + goal_time = 0.2 + + def setup(self): + import matplotlib + matplotlib.use('Agg') + self.s = Series(np.random.randn(1000000)) + self.df = DataFrame({'col': self.s}) + + def time_series_plot(self): + self.s.plot() + + def time_frame_plot(self): + self.df.plot() + + class TimeseriesPlotting(object): goal_time = 0.2 def setup(self): import matplotlib matplotlib.use('Agg') - self.N = 2000 - self.M = 5 - self.df = DataFrame(np.random.randn(self.N, self.M), index=date_range('1/1/1975', periods=self.N)) + N = 2000 + M = 5 + idx = date_range('1/1/1975', periods=N) + self.df = DataFrame(np.random.randn(N, M), index=idx) + + idx_irregular = pd.DatetimeIndex(np.concatenate((idx.values[0:10], + idx.values[12:]))) + self.df2 = DataFrame(np.random.randn(len(idx_irregular), M), + index=idx_irregular) def time_plot_regular(self): self.df.plot() @@ -26,6 +48,9 @@ def time_plot_regular(self): def 
time_plot_regular_compat(self): self.df.plot(x_compat=True) + def time_plot_irregular(self): + self.df2.plot() + class Misc(object): goal_time = 0.6 diff --git a/ci/before_install_travis.sh b/ci/before_script_travis.sh similarity index 93% rename from ci/before_install_travis.sh rename to ci/before_script_travis.sh index 2d0b4da6120dc..0b3939b1906a2 100755 --- a/ci/before_install_travis.sh +++ b/ci/before_script_travis.sh @@ -4,6 +4,7 @@ echo "inside $0" if [ "${TRAVIS_OS_NAME}" == "linux" ]; then sh -e /etc/init.d/xvfb start + sleep 3 fi # Never fail because bad things happened here. diff --git a/ci/check_imports.py b/ci/check_imports.py index a83436e7d258c..d6f24ebcc4d3e 100644 --- a/ci/check_imports.py +++ b/ci/check_imports.py @@ -9,7 +9,6 @@ 'ipython', 'jinja2' 'lxml', - 'matplotlib', 'numexpr', 'openpyxl', 'py', diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml new file mode 100644 index 0000000000000..c3d3d59f895c6 --- /dev/null +++ b/ci/environment-dev.yaml @@ -0,0 +1,14 @@ +name: pandas-dev +channels: + - defaults + - conda-forge +dependencies: + - Cython + - NumPy + - moto + - pytest + - python-dateutil + - python=3 + - pytz + - setuptools + - sphinx diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..dac3625cba4ba 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -34,9 +34,9 @@ fi # install miniconda if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -q -O miniconda.sh || exit 1 else - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 fi time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 @@ -107,7 +107,7 @@ time conda install -n pandas pytest>=3.1.0 time pip 
install pytest-xdist moto if [ "$LINT" ]; then - conda install flake8 + conda install flake8=3.4.1 pip install cpplint fi diff --git a/ci/requirements-2.7_BUILD_TEST.pip b/ci/requirements-2.7_BUILD_TEST.pip index a0fc77c40bc00..f4617133cad5b 100644 --- a/ci/requirements-2.7_BUILD_TEST.pip +++ b/ci/requirements-2.7_BUILD_TEST.pip @@ -1,7 +1,6 @@ xarray geopandas seaborn -pandas_gbq pandas_datareader statsmodels scikit-learn diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5.pip index 6e4f7b65f9728..c9565f2173070 100644 --- a/ci/requirements-3.5.pip +++ b/ci/requirements-3.5.pip @@ -1,2 +1,2 @@ xarray==0.9.1 -pandas-gbq +pandas_gbq diff --git a/ci/requirements-3.6.sh b/ci/requirements-3.6.sh new file mode 100644 index 0000000000000..f5c3dbf59a29d --- /dev/null +++ b/ci/requirements-3.6.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +source activate pandas + +echo "[install 3.6 downstream deps]" + +conda install -n pandas -c conda-forge pandas-datareader xarray geopandas seaborn statsmodels scikit-learn dask diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index af7a90b126f22..db2d429a2a4ff 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -1,6 +1,6 @@ python-dateutil pytz -numpy=1.12* +numpy=1.13* bottleneck openpyxl xlsxwriter diff --git a/ci/requirements_all.txt b/ci/requirements-optional-conda.txt similarity index 68% rename from ci/requirements_all.txt rename to ci/requirements-optional-conda.txt index e13afd619f105..6edb8d17337e4 100644 --- a/ci/requirements_all.txt +++ b/ci/requirements-optional-conda.txt @@ -1,28 +1,27 @@ -pytest>=3.1.0 -pytest-cov -pytest-xdist -flake8 -sphinx=1.5* -nbsphinx -ipython -python-dateutil -pytz -openpyxl -xlsxwriter -xlrd -xlwt -html5lib -patsy beautifulsoup4 -numpy -cython -scipy +blosc +bottleneck +fastparquet +feather-format +html5lib +ipython +ipykernel +jinja2 +lxml +matplotlib +nbsphinx numexpr +openpyxl +pyarrow +pymysql pytables -matplotlib +pytest-cov +pytest-xdist +s3fs +scipy 
seaborn -lxml sqlalchemy -bottleneck -pymysql -Jinja2 +xarray +xlrd +xlsxwriter +xlwt diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt new file mode 100644 index 0000000000000..06b22bd8f2c63 --- /dev/null +++ b/ci/requirements-optional-pip.txt @@ -0,0 +1,27 @@ +# This file was autogenerated by scripts/convert_deps.py +# Do not modify directlybeautifulsoup4 +blosc +bottleneck +fastparquet +feather-format +html5lib +ipython +jinja2 +lxml +matplotlib +nbsphinx +numexpr +openpyxl +pyarrow +pymysql +tables +pytest-cov +pytest-xdist +s3fs +scipy +seaborn +sqlalchemy +xarray +xlrd +xlsxwriter +xlwt \ No newline at end of file diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index dbc4f6cbd6509..2fb36b7cd70d8 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -1,8 +1,10 @@ +# This file was autogenerated by scripts/convert_deps.py +# Do not modify directly +Cython +NumPy +moto +pytest python-dateutil pytz -numpy -cython -pytest>=3.1.0 -pytest-cov -flake8 -moto +setuptools +sphinx \ No newline at end of file diff --git a/ci/script_multi.sh b/ci/script_multi.sh index ee9fbcaad5ef5..ae8f030b92d66 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e echo "[script multi]" diff --git a/doc/source/api.rst b/doc/source/api.rst index 80f8d42be8ed6..a9766b5c04496 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1794,6 +1794,7 @@ Methods Timestamp.strftime Timestamp.strptime Timestamp.time + Timestamp.timestamp Timestamp.timetuple Timestamp.timetz Timestamp.to_datetime64 @@ -2173,6 +2174,17 @@ Style Export and Import Styler.export Styler.use +Plotting +~~~~~~~~ + +.. currentmodule:: pandas + +.. autosummary:: + :toctree: generated/ + + plotting.register_matplotlib_converters + plotting.deregister_matplotlib_converters + .. 
currentmodule:: pandas General utility functions diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 466ac3c9cbf51..0cdfec63fd696 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -346,7 +346,9 @@ The following methods are available: :meth:`~Window.sum`, Sum of values :meth:`~Window.mean`, Mean of values -The weights used in the window are specified by the ``win_type`` keyword. The list of recognized types are: +The weights used in the window are specified by the ``win_type`` keyword. +The list of recognized types are the `scipy.signal window functions + <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: - ``boxcar`` - ``triang`` diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index d8d57a8bfffdd..4426d3fb0165e 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -11,32 +11,32 @@ Where to start? =============== All contributions, bug reports, bug fixes, documentation improvements, -enhancements and ideas are welcome. +enhancements, and ideas are welcome. -If you are simply looking to start working with the *pandas* codebase, navigate to the -`GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ and start looking through -interesting issues. There are a number of issues listed under `Docs +If you are brand new to pandas or open-source development, we recommend going +through the `GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ +to find issues that interest you. There are a number of issues listed under `Docs <https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_ and `Difficulty Novice <https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_ -where you could start out. 
- -Or maybe through using *pandas* you have an idea of your own or are looking for something -in the documentation and thinking 'this can be improved'...you can do something -about it! +where you could start out. Once you've found an interesting issue, you can +return here to get your development environment setup. Feel free to ask questions on the `mailing list -<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter -<https://gitter.im/pydata/pandas>`_. +<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter`_. + +.. _contributing.bug_reports: Bug reports and enhancement requests ==================================== -Bug reports are an important part of making *pandas* more stable. Having a complete bug report -will allow others to reproduce the bug and provide insight into fixing. Because many versions of -*pandas* are supported, knowing version information will also identify improvements made since -previous versions. Trying the bug-producing code out on the *master* branch is often a worthwhile exercise -to confirm the bug still exists. It is also worth searching existing bug reports and pull requests +Bug reports are an important part of making *pandas* more stable. Having a complete bug report +will allow others to reproduce the bug and provide insight into fixing. See +`this stackoverflow article <https://stackoverflow.com/help/mcve>`_ for tips on +writing a good bug report. + +Trying the bug-producing code out on the *master* branch is often a worthwhile exercise +to confirm the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed. Bug reports must: @@ -60,12 +60,16 @@ Bug reports must: The issue will then show up to the *pandas* community and be open to comments/ideas from others. +.. 
_contributing.github + Working with the code ===================== Now that you have an issue you want to fix, enhancement to add, or documentation to improve, you need to learn how to work with GitHub and the *pandas* code base. +.. _contributing.version_control: + Version control, Git, and GitHub -------------------------------- @@ -103,167 +107,164 @@ want to clone your fork to your machine:: git clone https://github.com/your-user-name/pandas.git pandas-yourname cd pandas-yourname - git remote add upstream git://github.com/pandas-dev/pandas.git + git remote add upstream https://github.com/pandas-dev/pandas.git This creates the directory `pandas-yourname` and connects your repository to the upstream (main project) *pandas* repository. -Creating a branch ------------------ +.. _contributing.dev_env: -You want your master branch to reflect only production-ready code, so create a -feature branch for making your changes. For example:: +Creating a development environment +---------------------------------- - git branch shiny-new-feature - git checkout shiny-new-feature +To test out code changes, you'll need to build pandas from source, which +requires a C compiler and python environment. If you're making documentation +changes, you can skip to :ref:`contributing.documentation` but you won't be able +to build the documentation locally before pushing your changes. -The above can be simplified to:: +.. _contributiong.dev_c: - git checkout -b shiny-new-feature +Installing a C Complier +~~~~~~~~~~~~~~~~~~~~~~~ -This changes your working directory to the shiny-new-feature branch. Keep any -changes in this branch specific to one bug or feature so it is clear -what the branch brings to *pandas*. You can have many shiny-new-features -and switch in between them using the git checkout command. +Pandas uses C extensions (mostly written using Cython) to speed up certain +operations. 
To install pandas from source, you need to compile these C +extensions, which means you need a C complier. This process depends on which +platform you're using. Follow the `CPython contributing guidelines +<https://docs.python.org/devguide/setup.html#build-dependencies>`_ for getting a +complier installed. You don't need to do any of the ``./configure`` or ``make`` +steps; you only need to install the complier. -To update this branch, you need to retrieve the changes from the master branch:: +For Windows developers, the following links may be helpful. - git fetch upstream - git rebase upstream/master +- https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ +- https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit +- https://cowboyprogrammer.org/building-python-wheels-for-windows/ +- https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ +- https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy -This will replay your commits on top of the latest pandas git master. If this -leads to merge conflicts, you must resolve these before submitting your pull -request. If you have uncommitted changes, you will need to ``stash`` them prior -to updating. This will effectively store your changes and they can be reapplied -after updating. +Let us know if you have any difficulties by opening an issue or reaching out on +`Gitter`_. -.. _contributing.dev_env: +.. _contributiong.dev_python: -Creating a development environment ----------------------------------- +Creating a Python Environment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -An easy way to create a *pandas* development environment is as follows. 
+Now that you have a C complier, create an isolated pandas development +environment: -- Install either :ref:`Anaconda <install.anaconda>` or :ref:`miniconda <install.miniconda>` +- Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda + <https://conda.io/miniconda.html>`_ +- Make sure your conda is up to date (``conda update conda``) - Make sure that you have :ref:`cloned the repository <contributing.forking>` - ``cd`` to the *pandas* source directory -Tell conda to create a new environment, named ``pandas_dev``, or any other name you would like -for this environment, by running:: - - conda create -n pandas_dev --file ci/requirements_dev.txt - - -For a python 3 environment:: - - conda create -n pandas_dev python=3 --file ci/requirements_dev.txt - -.. warning:: - - If you are on Windows, see :ref:`here for a fully compliant Windows environment <contributing.windows>`. - -This will create the new environment, and not touch any of your existing environments, -nor any existing python installation. It will install all of the basic dependencies of -*pandas*, as well as the development and testing tools. If you would like to install -other dependencies, you can install them as follows:: +We'll now kick off a three-step process: - conda install -n pandas_dev -c pandas pytables scipy +1. Install the build dependencies +2. Build and install pandas +3. Install the optional dependencies -To install *all* pandas dependencies you can do the following:: +.. code-block:: none - conda install -n pandas_dev -c conda-forge --file ci/requirements_all.txt + # Create and activate the build environment + conda env create -f ci/environment-dev.yaml + conda activate pandas-dev -To work in this environment, Windows users should ``activate`` it as follows:: + # Build and install pandas + python setup.py build_ext --inplace -j 4 + python -m pip install -e . 
- activate pandas_dev + # Install the rest of the optional dependencies + conda install -c defaults -c conda-forge --file=ci/requirements-optional-conda.txt -Mac OSX / Linux users should use:: +At this point you should be able to import pandas from your locally built version:: - source activate pandas_dev + $ python # start an interpreter + >>> import pandas + >>> print(pandas.__version__) + 0.22.0.dev0+29.g4ad6d4d74 -You will then see a confirmation message to indicate you are in the new development environment. +This will create the new environment, and not touch any of your existing environments, +nor any existing python installation. To view your environments:: conda info -e -To return to your home root environment in Windows:: - - deactivate +To return to your root environment:: -To return to your home root environment in OSX / Linux:: - - source deactivate + conda deactivate See the full conda docs `here <http://conda.pydata.org/docs>`__. -At this point you can easily do an *in-place* install, as detailed in the next section. - -.. _contributing.windows: - -Creating a Windows development environment ------------------------------------------- +.. _contributing.pip: -To build on Windows, you need to have compilers installed to build the extensions. You will need to install the appropriate Visual Studio compilers, VS 2008 for Python 2.7, VS 2010 for 3.4, and VS 2015 for Python 3.5 and 3.6. +Creating a Python Environment (pip) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For Python 2.7, you can install the ``mingw`` compiler which will work equivalently to VS 2008:: +If you aren't using conda for you development environment, follow these instructions. +You'll need to have at least python3.5 installed on your system. - conda install -n pandas_dev libpython +.. code-block:: none -or use the `Microsoft Visual Studio VC++ compiler for Python <https://www.microsoft.com/en-us/download/details.aspx?id=44266>`__. 
Note that you have to check the ``x64`` box to install the ``x64`` extension building capability as this is not installed by default. + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/virtualenvs/pandas-dev + # Any parent directories should already exist + python3 -m venv ~/virtualenvs/pandas-dev + # Activate the virtulaenv + . ~/virtualenvs/pandas-dev/bin/activate -For Python 3.4, you can download and install the `Windows 7.1 SDK <https://www.microsoft.com/en-us/download/details.aspx?id=8279>`__. Read the references below as there may be various gotchas during the installation. - -For Python 3.5 and 3.6, you can download and install the `Visual Studio 2015 Community Edition <https://www.visualstudio.com/en-us/downloads/visual-studio-2015-downloads-vs.aspx>`__. - -Here are some references and blogs: - -- https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ -- https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit -- https://cowboyprogrammer.org/building-python-wheels-for-windows/ -- https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ -- https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy + # Install the build dependencies + python -m pip install -r ci/requirements_dev.txt + # Build and install pandas + python setup.py build_ext --inplace -j 4 + python -m pip install -e . -.. _contributing.getting_source: + # Install additional dependencies + python -m pip install -r ci/requirements-optional-pip.txt -Making changes --------------- +Creating a branch +----------------- -Before making your code changes, it is often necessary to build the code that was -just checked out. There are two primary methods of doing this. +You want your master branch to reflect only production-ready code, so create a +feature branch for making your changes. For example:: -#. 
The best way to develop *pandas* is to build the C extensions in-place by - running:: + git branch shiny-new-feature + git checkout shiny-new-feature - python setup.py build_ext --inplace +The above can be simplified to:: - If you startup the Python interpreter in the *pandas* source directory you - will call the built C extensions + git checkout -b shiny-new-feature -#. Another very common option is to do a ``develop`` install of *pandas*:: +This changes your working directory to the shiny-new-feature branch. Keep any +changes in this branch specific to one bug or feature so it is clear +what the branch brings to *pandas*. You can have many shiny-new-features +and switch in between them using the git checkout command. - python setup.py develop +To update this branch, you need to retrieve the changes from the master branch:: - This makes a symbolic link that tells the Python interpreter to import *pandas* - from your development directory. Thus, you can always be using the development - version on your system without being inside the clone directory. + git fetch upstream + git rebase upstream/master +This will replay your commits on top of the latest pandas git master. If this +leads to merge conflicts, you must resolve these before submitting your pull +request. If you have uncommitted changes, you will need to ``stash`` them prior +to updating. This will effectively store your changes and they can be reapplied +after updating. .. _contributing.documentation: Contributing to the documentation ================================= -If you're not the developer type, contributing to the documentation is still -of huge value. You don't even have to be an expert on -*pandas* to do so! Something as simple as rewriting small passages for clarity -as you reference the docs is a simple but effective way to contribute. The -next person to read that passage will be in your debt! - -In fact, there are sections of the docs that are worse off after being written -by experts. 
If something in the docs doesn't make sense to you, updating the -relevant section after you figure it out is a simple way to ensure it will -help the next person. +If you're not the developer type, contributing to the documentation is still of +huge value. You don't even have to be an expert on *pandas* to do so! In fact, +there are sections of the docs that are worse off after being written by +experts. If something in the docs doesn't make sense to you, updating the +relevant section after you figure it out is a great way to ensure it will help +the next person. .. contents:: Documentation: :local: @@ -330,7 +331,7 @@ The utility script ``scripts/api_rst_coverage.py`` can be used to compare the list of methods documented in ``doc/source/api.rst`` (which is used to generate the `API Reference <http://pandas.pydata.org/pandas-docs/stable/api.html>`_ page) and the actual public methods. -This will identify methods documented in in ``doc/source/api.rst`` that are not actually +This will identify methods documented in ``doc/source/api.rst`` that are not actually class methods, and existing methods that are not documented in ``doc/source/api.rst``. @@ -342,30 +343,6 @@ Requirements First, you need to have a development environment to be able to build pandas (see the docs on :ref:`creating a development environment above <contributing.dev_env>`). -Further, to build the docs, there are some extra requirements: you will need to -have ``sphinx`` and ``ipython`` installed. `numpydoc -<https://github.com/numpy/numpydoc>`_ is used to parse the docstrings that -follow the Numpy Docstring Standard (see above), but you don't need to install -this because a local copy of numpydoc is included in the *pandas* source -code. `nbsphinx <https://nbsphinx.readthedocs.io/>`_ is required to build -the Jupyter notebooks included in the documentation. 
- -If you have a conda environment named ``pandas_dev``, you can install the extra -requirements with:: - - conda install -n pandas_dev sphinx ipython nbconvert nbformat - conda install -n pandas_dev -c conda-forge nbsphinx - -Furthermore, it is recommended to have all :ref:`optional dependencies <install.optional_dependencies>`. -installed. This is not strictly necessary, but be aware that you will see some error -messages when building the docs. This happens because all the code in the documentation -is executed during the doc build, and so code examples using optional dependencies -will generate errors. Run ``pd.show_versions()`` to get an overview of the installed -version of all dependencies. - -.. warning:: - - You need to have ``sphinx`` version >= 1.3.2. Building the documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -386,10 +363,10 @@ If you want to do a full clean build, do:: python make.py clean python make.py html -Starting with *pandas* 0.13.1 you can tell ``make.py`` to compile only a single section -of the docs, greatly reducing the turn-around time for checking your changes. -You will be prompted to delete ``.rst`` files that aren't required. This is okay because -the prior versions of these files can be checked out from git. However, you must make sure +You can tell ``make.py`` to compile only a single section of the docs, greatly +reducing the turn-around time for checking your changes. You will be prompted to +delete ``.rst`` files that aren't required. This is okay because the prior +versions of these files can be checked out from git. However, you must make sure not to commit the file deletions to your Git repository! :: @@ -422,6 +399,8 @@ the documentation are also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__, see also the :ref:`Continuous Integration <contributing.ci>` section. +.. 
_contributing.code: + Contributing to the code base ============================= @@ -480,7 +459,7 @@ Once configured, you can run the tool as follows:: clang-format modified-c-file This will output what your file will look like if the changes are made, and to apply -them, just run the following command:: +them, run the following command:: clang-format -i modified-c-file @@ -1033,7 +1012,7 @@ delete your branch:: git checkout master git merge upstream/master -Then you can just do:: +Then you can do:: git branch -d shiny-new-feature @@ -1043,3 +1022,6 @@ branch has not actually been merged. The branch will still exist on GitHub, so to delete it there do:: git push origin --delete shiny-new-feature + + +.. _Gitter: https://gitter.im/pydata/pandas diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 2348a3d10c54f..69913b2c1fbd8 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -53,6 +53,18 @@ the latest web technologies. Its goal is to provide elegant, concise constructio graphics in the style of Protovis/D3, while delivering high-performance interactivity over large data to thin clients. +`seaborn <https://seaborn.pydata.org>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Seaborn is a Python visualization library based on `matplotlib +<http://matplotlib.org>`__. It provides a high-level, dataset-oriented +interface for creating attractive statistical graphics. The plotting functions +in seaborn understand pandas objects and leverage pandas grouping operations +internally to support concise specification of complex visualizations. Seaborn +also goes beyond matplotlib and pandas with the option to perform statistical +estimation while plotting, aggregating across observations and visualizing the +fit of statistical models to emphasize patterns in a dataset. 
+ `yhat/ggplot <https://github.com/yhat/ggplot>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -64,15 +76,6 @@ but a faithful implementation for python users has long been missing. Although s (as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>`__ project has been progressing quickly in that direction. -`Seaborn <https://github.com/mwaskom/seaborn>`__ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although pandas has quite a bit of "just plot it" functionality built-in, visualization and -in particular statistical graphics is a vast field with a long tradition and lots of ground -to cover. The `Seaborn <https://github.com/mwaskom/seaborn>`__ project builds on top of pandas -and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to -more advanced types of plots then those offered by pandas. - `Vincent <https://github.com/wrobstory/vincent>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -222,7 +225,13 @@ Out-of-core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dask is a flexible parallel computing library for analytics. Dask -allow a familiar ``DataFrame`` interface to out-of-core, parallel and distributed computing. +provides a familiar ``DataFrame`` interface for out-of-core, parallel and distributed computing. + +`Dask-ML <https://dask-ml.readthedocs.io/en/latest/>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Dask-ML enables parallel and distributed machine learning using Dask alongside existing machine learning libraries like Scikit-Learn, XGBoost, and TensorFlow. + `Blaze <http://blaze.pydata.org/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index fdb002a642d62..b329fac969343 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1835,15 +1835,27 @@ that you've done this: Yikes! +.. 
_indexing.evaluation_order: + Evaluation order matters ~~~~~~~~~~~~~~~~~~~~~~~~ -Furthermore, in chained expressions, the order may determine whether a copy is returned or not. -If an expression will set values on a copy of a slice, then a ``SettingWithCopy`` -warning will be issued. +When you use chained indexing, the order and type of the indexing operation +partially determine whether the result is a slice into the original object, or +a copy of the slice. + +Pandas has the ``SettingWithCopyWarning`` because assigning to a copy of a +slice is frequently not intentional, but a mistake caused by chained indexing +returning a copy where a slice was expected. + +If you would like pandas to be more or less trusting about assignment to a +chained indexing expression, you can set the :ref:`option <options>` +``mode.chained_assignment`` to one of these values: -You can control the action of a chained assignment via the option ``mode.chained_assignment``, -which can take the values ``['raise','warn',None]``, where showing a warning is the default. +* ``'warn'``, the default, means a ``SettingWithCopyWarning`` is printed. +* ``'raise'`` means pandas will raise a ``SettingWithCopyException`` + you have to deal with. +* ``None`` will suppress the warnings entirely. .. ipython:: python :okwarning: diff --git a/doc/source/install.rst b/doc/source/install.rst index c805f84d0faaa..27dde005e5a87 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -141,28 +141,24 @@ and can take a few minutes to complete. Installing using your Linux distribution's package manager. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The commands in this table will install pandas for Python 2 from your distribution. -To install pandas for Python 3 you may need to use the package ``python3-pandas``. +The commands in this table will install pandas for Python 3 from your distribution. +To install pandas for Python 2 you may need to use the package ``python-pandas``. .. 
csv-table:: :header: "Distribution", "Status", "Download / Repository Link", "Install method" :widths: 10, 10, 20, 50 - Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python-pandas`` - Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python-pandas`` - Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python-pandas`` - Ubuntu, unstable (daily builds), `PythonXY PPA <https://code.launchpad.net/~pythonxy/+archive/pythonxy-devel>`__; activate by: ``sudo add-apt-repository ppa:pythonxy/pythonxy-devel && sudo apt-get update``, ``sudo apt-get install python-pandas`` - OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python-pandas`` - Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python-pandas`` - Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python-pandas`` - - - - - - + Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` + Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python3-pandas`` + Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` + OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper 
in python3-pandas`` + Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python3-pandas`` + Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python3-pandas`` +**However**, the packages in the linux package managers are often a few versions behind, so +to get the newest version of pandas, it's recommended to install using the ``pip`` or ``conda`` +methods described above. Installing from source @@ -258,7 +254,8 @@ Optional Dependencies <http://www.vergenet.net/~conrad/software/xsel/>`__, or `xclip <https://github.com/astrand/xclip/>`__: necessary to use :func:`~pandas.read_clipboard`. Most package managers on Linux distributions will have ``xclip`` and/or ``xsel`` immediately available for installation. -* For Google BigQuery I/O - see `here <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__ +* `pandas-gbq <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__: for Google BigQuery I/O. + * `Backports.lzma <https://pypi.python.org/pypi/backports.lzma/>`__: Only for Python 2, for writing to and/or reading from an xz compressed DataFrame in CSV; Python 3 support is built into the standard library. * One of the following combinations of libraries is needed to use the diff --git a/doc/source/io.rst b/doc/source/io.rst index 82cb83c168b22..4024414610a82 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -103,15 +103,20 @@ Column and Index Locations and Names ++++++++++++++++++++++++++++++++++++ header : int or list of ints, default ``'infer'`` - Row number(s) to use as the column names, and the start of the data. Default - behavior is as if ``header=0`` if no ``names`` passed, otherwise as if - ``header=None``. Explicitly pass ``header=0`` to be able to replace existing - names. 
The header can be a list of ints that specify row locations for a - multi-index on the columns e.g. ``[0,1,3]``. Intervening rows that are not - specified will be skipped (e.g. 2 in this example is skipped). Note that - this parameter ignores commented lines and empty lines if - ``skip_blank_lines=True``, so header=0 denotes the first line of data - rather than the first line of the file. + Row number(s) to use as the column names, and the start of the + data. Default behavior is to infer the column names: if no names are + passed the behavior is identical to ``header=0`` and column names + are inferred from the first line of the file, if column names are + passed explicitly then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to replace + existing names. + + The header can be a list of ints that specify row locations + for a multi-index on the columns e.g. ``[0,1,3]``. Intervening rows + that are not specified will be skipped (e.g. 2 in this example is + skipped). Note that this parameter ignores commented lines and empty + lines if ``skip_blank_lines=True``, so header=0 denotes the first + line of data rather than the first line of the file. names : array-like, default ``None`` List of column names to use. If file contains no header row, then you should explicitly pass ``header=None``. Duplicates in this list will cause @@ -553,6 +558,14 @@ If the header is in a row other than the first, pass the row number to data = 'skip this skip it\na,b,c\n1,2,3\n4,5,6\n7,8,9' pd.read_csv(StringIO(data), header=1) +.. note:: + + Default behavior is to infer the column names: if no names are + passed the behavior is identical to ``header=0`` and column names + are inferred from the first nonblank line of the file, if column + names are passed explicitly then the behavior is identical to + ``header=None``. + .. _io.dupe_names: Duplicate names parsing @@ -4469,8 +4482,10 @@ Several caveats. 
- This is a newer library, and the format, though stable, is not guaranteed to be backward compatible to the earlier versions. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an - error if a non-default one is provided. You can simply ``.reset_index()`` in order to store the index. +- The format will NOT write an ``Index``, or ``MultiIndex`` for the + ``DataFrame`` and will raise an error if a non-default one is provided. You + can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to + ignore it. - Duplicate column names and non-string columns names are not supported - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. @@ -4533,8 +4548,10 @@ dtypes, including extension dtypes such as datetime with tz. Several caveats. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an - error if a non-default one is provided. You can simply ``.reset_index(drop=True)`` in order to store the index. +- The format will NOT write an ``Index``, or ``MultiIndex`` for the + ``DataFrame`` and will raise an error if a non-default one is provided. You + can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to + ignore it. - Duplicate column names and non-string columns names are not supported - Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message @@ -4580,6 +4597,15 @@ Read from a parquet file. result.dtypes +Read only certain columns of a parquet file. + +.. ipython:: python + + result = pd.read_parquet('example_fp.parquet', engine='fastparquet', columns=['a', 'b']) + + result.dtypes + + .. 
ipython:: python :suppress: diff --git a/doc/source/options.rst b/doc/source/options.rst index 2da55a5a658a4..db3380bd4a3e7 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -273,164 +273,167 @@ Options are 'right', and 'left'. Available Options ----------------- -=================================== ============ ================================== -Option Default Function -=================================== ============ ================================== -display.chop_threshold None If set to a float value, all float - values smaller then the given - threshold will be displayed as - exactly 0 by repr and friends. -display.colheader_justify right Controls the justification of - column headers. used by DataFrameFormatter. -display.column_space 12 No description available. -display.date_dayfirst False When True, prints and parses dates - with the day first, eg 20/01/2005 -display.date_yearfirst False When True, prints and parses dates - with the year first, eg 2005/01/20 -display.encoding UTF-8 Defaults to the detected encoding - of the console. Specifies the encoding - to be used for strings returned by - to_string, these are generally strings - meant to be displayed on the console. -display.expand_frame_repr True Whether to print out the full DataFrame - repr for wide DataFrames across - multiple lines, `max_columns` is - still respected, but the output will - wrap-around across multiple "pages" - if its width exceeds `display.width`. -display.float_format None The callable should accept a floating - point number and return a string with - the desired format of the number. - This is used in some places like - SeriesFormatter. - See core.format.EngFormatter for an example. -display.large_repr truncate For DataFrames exceeding max_rows/max_cols, - the repr (and HTML repr) can show - a truncated table (the default), - or switch to the view from df.info() - (the behaviour in earlier versions of pandas). 
- allowable settings, ['truncate', 'info'] -display.latex.repr False Whether to produce a latex DataFrame - representation for jupyter frontends - that support it. -display.latex.escape True Escapes special characters in DataFrames, when - using the to_latex method. -display.latex.longtable False Specifies if the to_latex method of a DataFrame - uses the longtable format. -display.latex.multicolumn True Combines columns when using a MultiIndex -display.latex.multicolumn_format 'l' Alignment of multicolumn labels -display.latex.multirow False Combines rows when using a MultiIndex. - Centered instead of top-aligned, - separated by clines. -display.max_columns 20 max_rows and max_columns are used - in __repr__() methods to decide if - to_string() or info() is used to - render an object to a string. In - case python/IPython is running in - a terminal this can be set to 0 and - pandas will correctly auto-detect - the width the terminal and swap to - a smaller format in case all columns - would not fit vertically. The IPython - notebook, IPython qtconsole, or IDLE - do not run in a terminal and hence - it is not possible to do correct - auto-detection. 'None' value means - unlimited. -display.max_colwidth 50 The maximum width in characters of - a column in the repr of a pandas - data structure. When the column overflows, - a "..." placeholder is embedded in - the output. -display.max_info_columns 100 max_info_columns is used in DataFrame.info - method to decide if per column information - will be printed. -display.max_info_rows 1690785 df.info() will usually show null-counts - for each column. For large frames - this can be quite slow. max_info_rows - and max_info_cols limit this null - check only to frames with smaller - dimensions then specified. -display.max_rows 60 This sets the maximum number of rows - pandas should output when printing - out various output. 
For example, - this value determines whether the - repr() for a dataframe prints out - fully or just a summary repr. - 'None' value means unlimited. -display.max_seq_items 100 when pretty-printing a long sequence, - no more then `max_seq_items` will - be printed. If items are omitted, - they will be denoted by the addition - of "..." to the resulting string. - If set to None, the number of items - to be printed is unlimited. -display.memory_usage True This specifies if the memory usage of - a DataFrame should be displayed when the - df.info() method is invoked. -display.multi_sparse True "Sparsify" MultiIndex display (don't - display repeated elements in outer - levels within groups) -display.notebook_repr_html True When True, IPython notebook will - use html representation for - pandas objects (if it is available). -display.pprint_nest_depth 3 Controls the number of nested levels - to process when pretty-printing -display.precision 6 Floating point output precision in - terms of number of places after the - decimal, for regular formatting as well - as scientific notation. Similar to - numpy's ``precision`` print option -display.show_dimensions truncate Whether to print out dimensions - at the end of DataFrame repr. - If 'truncate' is specified, only - print out the dimensions if the - frame is truncated (e.g. not display - all rows and/or columns) -display.width 80 Width of the display in characters. - In case python/IPython is running in - a terminal this can be set to None - and pandas will correctly auto-detect - the width. Note that the IPython notebook, - IPython qtconsole, or IDLE do not run in a - terminal and hence it is not possible - to correctly detect the width. -display.html.table_schema False Whether to publish a Table Schema - representation for frontends that - support it. -display.html.border 1 A ``border=value`` attribute is - inserted in the ``<table>`` tag - for the DataFrame HTML repr. 
-io.excel.xls.writer xlwt The default Excel writer engine for - 'xls' files. -io.excel.xlsm.writer openpyxl The default Excel writer engine for - 'xlsm' files. Available options: - 'openpyxl' (the default). -io.excel.xlsx.writer openpyxl The default Excel writer engine for - 'xlsx' files. -io.hdf.default_format None default format writing format, if - None, then put will default to - 'fixed' and append will default to - 'table' -io.hdf.dropna_table True drop ALL nan rows when appending - to a table -io.parquet.engine None The engine to use as a default for - parquet reading and writing. If None - then try 'pyarrow' and 'fastparquet' -mode.chained_assignment warn Raise an exception, warn, or no - action if trying to use chained - assignment, The default is warn -mode.sim_interactive False Whether to simulate interactive mode - for purposes of testing. -mode.use_inf_as_na False True means treat None, NaN, -INF, - INF as NA (old way), False means - None and NaN are null, but INF, -INF - are not NA (new way). -compute.use_bottleneck True Use the bottleneck library to accelerate - computation if it is installed. -compute.use_numexpr True Use the numexpr library to accelerate - computation if it is installed. -=================================== ============ ================================== +======================================= ============ ================================== +Option Default Function +======================================= ============ ================================== +display.chop_threshold None If set to a float value, all float + values smaller then the given + threshold will be displayed as + exactly 0 by repr and friends. +display.colheader_justify right Controls the justification of + column headers. used by DataFrameFormatter. +display.column_space 12 No description available. 
+display.date_dayfirst False When True, prints and parses dates + with the day first, eg 20/01/2005 +display.date_yearfirst False When True, prints and parses dates + with the year first, eg 2005/01/20 +display.encoding UTF-8 Defaults to the detected encoding + of the console. Specifies the encoding + to be used for strings returned by + to_string, these are generally strings + meant to be displayed on the console. +display.expand_frame_repr True Whether to print out the full DataFrame + repr for wide DataFrames across + multiple lines, `max_columns` is + still respected, but the output will + wrap-around across multiple "pages" + if its width exceeds `display.width`. +display.float_format None The callable should accept a floating + point number and return a string with + the desired format of the number. + This is used in some places like + SeriesFormatter. + See core.format.EngFormatter for an example. +display.large_repr truncate For DataFrames exceeding max_rows/max_cols, + the repr (and HTML repr) can show + a truncated table (the default), + or switch to the view from df.info() + (the behaviour in earlier versions of pandas). + allowable settings, ['truncate', 'info'] +display.latex.repr False Whether to produce a latex DataFrame + representation for jupyter frontends + that support it. +display.latex.escape True Escapes special characters in DataFrames, when + using the to_latex method. +display.latex.longtable False Specifies if the to_latex method of a DataFrame + uses the longtable format. +display.latex.multicolumn True Combines columns when using a MultiIndex +display.latex.multicolumn_format 'l' Alignment of multicolumn labels +display.latex.multirow False Combines rows when using a MultiIndex. + Centered instead of top-aligned, + separated by clines. +display.max_columns 20 max_rows and max_columns are used + in __repr__() methods to decide if + to_string() or info() is used to + render an object to a string. 
In + case python/IPython is running in + a terminal this can be set to 0 and + pandas will correctly auto-detect + the width the terminal and swap to + a smaller format in case all columns + would not fit vertically. The IPython + notebook, IPython qtconsole, or IDLE + do not run in a terminal and hence + it is not possible to do correct + auto-detection. 'None' value means + unlimited. +display.max_colwidth 50 The maximum width in characters of + a column in the repr of a pandas + data structure. When the column overflows, + a "..." placeholder is embedded in + the output. +display.max_info_columns 100 max_info_columns is used in DataFrame.info + method to decide if per column information + will be printed. +display.max_info_rows 1690785 df.info() will usually show null-counts + for each column. For large frames + this can be quite slow. max_info_rows + and max_info_cols limit this null + check only to frames with smaller + dimensions then specified. +display.max_rows 60 This sets the maximum number of rows + pandas should output when printing + out various output. For example, + this value determines whether the + repr() for a dataframe prints out + fully or just a summary repr. + 'None' value means unlimited. +display.max_seq_items 100 when pretty-printing a long sequence, + no more then `max_seq_items` will + be printed. If items are omitted, + they will be denoted by the addition + of "..." to the resulting string. + If set to None, the number of items + to be printed is unlimited. +display.memory_usage True This specifies if the memory usage of + a DataFrame should be displayed when the + df.info() method is invoked. +display.multi_sparse True "Sparsify" MultiIndex display (don't + display repeated elements in outer + levels within groups) +display.notebook_repr_html True When True, IPython notebook will + use html representation for + pandas objects (if it is available). 
+display.pprint_nest_depth 3 Controls the number of nested levels + to process when pretty-printing +display.precision 6 Floating point output precision in + terms of number of places after the + decimal, for regular formatting as well + as scientific notation. Similar to + numpy's ``precision`` print option +display.show_dimensions truncate Whether to print out dimensions + at the end of DataFrame repr. + If 'truncate' is specified, only + print out the dimensions if the + frame is truncated (e.g. not display + all rows and/or columns) +display.width 80 Width of the display in characters. + In case python/IPython is running in + a terminal this can be set to None + and pandas will correctly auto-detect + the width. Note that the IPython notebook, + IPython qtconsole, or IDLE do not run in a + terminal and hence it is not possible + to correctly detect the width. +display.html.table_schema False Whether to publish a Table Schema + representation for frontends that + support it. +display.html.border 1 A ``border=value`` attribute is + inserted in the ``<table>`` tag + for the DataFrame HTML repr. +io.excel.xls.writer xlwt The default Excel writer engine for + 'xls' files. +io.excel.xlsm.writer openpyxl The default Excel writer engine for + 'xlsm' files. Available options: + 'openpyxl' (the default). +io.excel.xlsx.writer openpyxl The default Excel writer engine for + 'xlsx' files. +io.hdf.default_format None default format writing format, if + None, then put will default to + 'fixed' and append will default to + 'table' +io.hdf.dropna_table True drop ALL nan rows when appending + to a table +io.parquet.engine None The engine to use as a default for + parquet reading and writing. If None + then try 'pyarrow' and 'fastparquet' +mode.chained_assignment warn Controls ``SettingWithCopyWarning``: + 'raise', 'warn', or None. Raise an + exception, warn, or no action if + trying to use :ref:`chained assignment <indexing.evaluation_order>`. 
+mode.sim_interactive False Whether to simulate interactive mode + for purposes of testing. +mode.use_inf_as_na False True means treat None, NaN, -INF, + INF as NA (old way), False means + None and NaN are null, but INF, -INF + are not NA (new way). +compute.use_bottleneck True Use the bottleneck library to accelerate + computation if it is installed. +compute.use_numexpr True Use the numexpr library to accelerate + computation if it is installed. +plotting.matplotlib.register_converters True Register custom converters with + matplotlib. Set to False to de-register. +======================================= ============ ======================================== .. _basics.console_output: diff --git a/doc/source/release.rst b/doc/source/release.rst index 6c3e7f847b485..a3289b1144863 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -52,7 +52,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. -- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. +- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. 
- Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 4c460eeb85b82..89e2d3006696c 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -12,7 +12,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. -- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. +- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). 
@@ -369,11 +369,11 @@ Additionally, support has been dropped for Python 3.4 (:issue:`15251`). .. _whatsnew_0210.api_breaking.bottleneck: -Sum/Prod of all-NaN Series/DataFrames is now consistently NaN -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Sum/Prod of all-NaN or empty Series/DataFrames is now consistently NaN +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames no longer depends on -whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed. (:issue:`9422`, :issue:`15507`). +whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and return value of ``sum`` and ``prod`` on an empty Series has changed (:issue:`9422`, :issue:`15507`). Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, will result in ``NaN``. See the :ref:`docs <missing_data.numeric_sum>`. @@ -381,35 +381,35 @@ Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of s = Series([np.nan]) -Previously NO ``bottleneck`` +Previously WITHOUT ``bottleneck`` installed: .. code-block:: ipython In [2]: s.sum() Out[2]: np.nan -Previously WITH ``bottleneck`` +Previously WITH ``bottleneck``: .. code-block:: ipython In [2]: s.sum() Out[2]: 0.0 -New Behavior, without regard to the bottleneck installation. +New Behavior, without regard to the bottleneck installation: .. ipython:: python s.sum() -Note that this also changes the sum of an empty ``Series`` - -Previously regardless of ``bottlenck`` +Note that this also changes the sum of an empty ``Series``. Previously this always returned 0 regardless of a ``bottlenck`` installation: .. code-block:: ipython In [1]: pd.Series([]).sum() Out[1]: 0 +but for consistency with the all-NaN case, this was changed to return NaN as well: + .. ipython:: python pd.Series([]).sum() @@ -877,6 +877,28 @@ New Behavior: pd.interval_range(start=0, end=4) +.. 
_whatsnew_0210.api.mpl_converters: + +No Automatic Matplotlib Converters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas no longer registers our ``date``, ``time``, ``datetime``, +``datetime64``, and ``Period`` converters with matplotlib when pandas is +imported. Matplotlib plot methods (``plt.plot``, ``ax.plot``, ...), will not +nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You +must explicitly register these methods: + +.. ipython:: python + + from pandas.tseries import converter + converter.register() + + fig, ax = plt.subplots() + plt.plot(pd.date_range('2017', periods=6), range(6)) + +Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these +converters on first-use (:issue:17710). + .. _whatsnew_0210.api: Other API Changes @@ -900,8 +922,6 @@ Other API Changes - Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`) - Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`) - Restricted DateOffset keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`). -- Pandas no longer registers matplotlib converters on import. The converters - will be registered and used when the first plot is draw (:issue:`17710`) .. _whatsnew_0210.deprecations: diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 56412651f13f0..00726a4606cf7 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -7,6 +7,36 @@ This is a minor release from 0.21.1 and includes a number of deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. 
+.. _whatsnew_0211.special: + +Restore Matplotlib datetime Converter Registration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pandas implements some matplotlib converters for nicely formatting the axis +labels on plots with ``datetime`` or ``Period`` values. Prior to pandas 0.21.0, +these were implicitly registered with matplotlib, as a side effect of ``import +pandas``. + +In pandas 0.21.0, we required users to explicitly register the +converter. This caused problems for some users who relied on those converters +being present for regular ``matplotlib.pyplot`` plotting methods, so we're +temporarily reverting that change; pandas will again register the converters on +import. + +We've added a new option to control the converters: +``pd.options.plotting.matplotlib.register_converters``. By default, they are +registered. Toggling this to ``False`` removes pandas' formatters and restore +any converters we overwrote when registering them (:issue:`18301`). + +We're working with the matplotlib developers to make this easier. We're trying +to balance user convenience (automatically registering the converters) with +import performance and best practices (importing pandas shouldn't have the side +effect of overwriting any custom converters you've already set). In the future +we hope to have most of the datetime formatting functionality in matplotlib, +with just the pandas-specific converters in pandas. We'll then gracefully +deprecate the automatic registration of converters in favor of users explicitly +registering them when they want them. + .. _whatsnew_0211.enhancements: New features @@ -21,8 +51,8 @@ New features Other Enhancements ^^^^^^^^^^^^^^^^^^ -- -- +- :meth:`Timestamp.timestamp` is now available in Python 2.7. (:issue:`17329`) +- :class:`Grouper` and :class:`TimeGrouper` now have a friendly repr output (:issue:`18203`). - .. 
_whatsnew_0211.deprecations: @@ -30,16 +60,15 @@ Other Enhancements Deprecations ~~~~~~~~~~~~ -- -- -- +- ``pandas.tseries.register`` has been renamed to + :func:`pandas.plotting.register_matplotlib_converters`` (:issue:`18301`) .. _whatsnew_0211.performance: Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- +- Improved performance of plotting large series/dataframes (:issue:`18236`). - - @@ -60,15 +89,21 @@ Bug Fixes Conversion ^^^^^^^^^^ -- -- +- Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) +- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) +- Bug in :meth:`IntervalIndex.copy` when copying and ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`) +- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising``TypeError` (:issue:`18372`) +- Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an err if ``end.tzinfo`` is None (:issue:`18431`) - Indexing ^^^^^^^^ -- -- +- Bug in a boolean comparison of a ``datetime.datetime`` and a ``datetime64[ns]`` dtype Series (:issue:`17965`) +- Bug where a ``MultiIndex`` with more than a million records was not raising ``AttributeError`` when trying to access a missing attribute (:issue:`18165`) +- Bug in :class:`IntervalIndex` constructor when a list of intervals is passed with non-default ``closed`` (:issue:`18334`) +- Bug in ``Index.putmask`` when an invalid mask passed (:issue:`18368`) +- Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`) - I/O @@ -76,18 +111,32 @@ I/O - Bug in class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). 
Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects. - Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) +- Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) +- Bug in :func:`read_csv` when reading numeric category fields with high cardinality (:issue:`18186`) +- Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) +- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) +- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) +- Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). +- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) +- Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) +- Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) +- Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) + Plotting ^^^^^^^^ -- +- Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`) - - Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- +- Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`) +- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequecy is 12h or higher (:issue:`15549`) +- Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) +- Bug in ``rolling.var`` where calculation 
is inaccurate with a zero-valued array (:issue:`18430`) - - @@ -101,13 +150,15 @@ Sparse Reshaping ^^^^^^^^^ -- -- -- +- Error message in ``pd.merge_asof()`` for key datatype mismatch now includes datatype of left and right key (:issue:`18068`) +- Bug in ``pd.concat`` when empty and non-empty DataFrames or Series are concatenated (:issue:`18178` :issue:`18187`) +- Bug in ``DataFrame.filter(...)`` when :class:`unicode` is passed as a condition in Python 2 (:issue:`13101`) +- Bug when merging empty DataFrames when ``np.seterr(divide='raise')`` is set (:issue:`17776`) Numeric ^^^^^^^ +- Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all equal values has floating issue (:issue:`18044`) - - - @@ -115,13 +166,20 @@ Numeric Categorical ^^^^^^^^^^^ -- -- -- +- Bug in :meth:`DataFrame.astype` where casting to 'category' on an empty ``DataFrame`` causes a segmentation fault (:issue:`18004`) +- Error messages in the testing module have been improved when items have + different ``CategoricalDtype`` (:issue:`18069`) +- ``CategoricalIndex`` can now correctly take a ``pd.api.types.CategoricalDtype`` as its dtype (:issue:`18116`) +- Bug in ``Categorical.unique()`` returning read-only ``codes`` array when all categories were ``NaN`` (:issue:`18051`) +- Bug in ``DataFrame.groupby(axis=1)`` with a ``CategoricalIndex`` (:issue:`18432`) + +String +^^^^^^ + +- :meth:`Series.str.split()` will now propogate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) Other ^^^^^ - - -- diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index d159761c3f5e6..a44a7288bda45 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -87,7 +87,7 @@ class NegInfinity(object): @cython.boundscheck(False) def is_lexsorted(list list_of_arrays): cdef: - int i + Py_ssize_t i Py_ssize_t n, nlevels int64_t k, cur, pre ndarray arr @@ -99,11 +99,12 @@ def is_lexsorted(list list_of_arrays): cdef int64_t **vecs = <int64_t**> malloc(nlevels * 
sizeof(int64_t*)) for i in range(nlevels): arr = list_of_arrays[i] + assert arr.dtype.name == 'int64' vecs[i] = <int64_t*> arr.data # Assume uniqueness?? with nogil: - for i in range(n): + for i in range(1, n): for k in range(nlevels): cur = vecs[k][i] pre = vecs[k][i -1] diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index c96251a0293d6..65e99f5f46fc2 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -19,7 +19,7 @@ from hashtable cimport HashTable from pandas._libs import algos, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta -from datetime import datetime, timedelta +from datetime import datetime, timedelta, date from cpython cimport PyTuple_Check, PyList_Check @@ -500,7 +500,7 @@ cpdef convert_scalar(ndarray arr, object value): if arr.descr.type_num == NPY_DATETIME: if isinstance(value, np.ndarray): pass - elif isinstance(value, datetime): + elif isinstance(value, (datetime, np.datetime64, date)): return Timestamp(value).value elif value is None or value != value: return iNaT diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 44fad899ff099..a90039d789972 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -2221,9 +2221,10 @@ def _concatenate_chunks(list chunks): for name in names: arrs = [chunk.pop(name) for chunk in chunks] # Check each arr for consistent types. 
- dtypes = set([a.dtype for a in arrs]) - if len(dtypes) > 1: - common_type = np.find_common_type(dtypes, []) + dtypes = {a.dtype for a in arrs} + numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)} + if len(numpy_dtypes) > 1: + common_type = np.find_common_type(numpy_dtypes, []) if common_type == np.object: warning_columns.append(str(name)) diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index b0a64e1ccc225..c340e870e9722 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -349,13 +349,13 @@ def infer_dtype(object value, bint skipna=False): if values.dtype != np.object_: values = values.astype('O') + # make contiguous + values = values.ravel() + n = len(values) if n == 0: return 'empty' - # make contiguous - values = values.ravel() - # try to use a valid value for i in range(n): val = util.get_value_1d(values, i) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index a0aae6a5de707..20b974ce5a659 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -961,8 +961,7 @@ class NaTType(_NaT): combine = _make_error_func('combine', None) utcnow = _make_error_func('utcnow', None) - if PY3: - timestamp = _make_error_func('timestamp', datetime) + timestamp = _make_error_func('timestamp', Timestamp) # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or # return NaT create functions that raise, for binding to NaTType @@ -1409,6 +1408,11 @@ cdef class _Timestamp(datetime): def __get__(self): return np.datetime64(self.value, 'ns') + def timestamp(self): + """Return POSIX timestamp as float.""" + # py27 compat, see GH#17329 + return round(self.value / 1e9, 6) + cdef PyTypeObject* ts_type = <PyTypeObject*> Timestamp @@ -3366,7 +3370,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): """ Convert the val (in i8) from timezone1 to timezone2 - This is a single timezone versoin of tz_convert + This is a single timezone version of tz_convert 
Parameters ---------- diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 7f778dde86e23..ba7031bc382b1 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -283,10 +283,9 @@ cdef object get_dst_info(object tz): def infer_tzinfo(start, end): if start is not None and end is not None: tz = start.tzinfo - if end.tzinfo: - if not (get_timezone(tz) == get_timezone(end.tzinfo)): - msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' - raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) + if not (get_timezone(tz) == get_timezone(end.tzinfo)): + msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' + raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) elif start is not None: tz = start.tzinfo elif end is not None: diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index b6bd6f92f6199..a1c4ddbc8d0b0 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -661,9 +661,11 @@ cdef inline void add_var(double val, double *nobs, double *mean_x, if val == val: nobs[0] = nobs[0] + 1 - delta = (val - mean_x[0]) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] mean_x[0] = mean_x[0] + delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] + delta * (val - mean_x[0]) + ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0] cdef inline void remove_var(double val, double *nobs, double *mean_x, @@ -675,9 +677,11 @@ cdef inline void remove_var(double val, double *nobs, double *mean_x, if val == val: nobs[0] = nobs[0] - 1 if nobs[0]: - delta = (val - mean_x[0]) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] mean_x[0] = mean_x[0] - delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] - delta * (val - mean_x[0]) + ssqdm_x[0] = ssqdm_x[0] - ((nobs[0] + 1) 
* delta ** 2) / nobs[0] else: mean_x[0] = 0 ssqdm_x[0] = 0 @@ -689,7 +693,7 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, Numerically stable implementation using Welford's method. """ cdef: - double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta + double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta, mean_x_old int64_t s, e bint is_variable Py_ssize_t i, j, N @@ -749,6 +753,9 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, add_var(input[i], &nobs, &mean_x, &ssqdm_x) output[i] = calc_var(minp, ddof, nobs, ssqdm_x) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + # After the first window, observations can both be added and # removed for i from win <= i < N: @@ -760,10 +767,12 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, # Adding one observation and removing another one delta = val - prev - prev -= mean_x + mean_x_old = mean_x + mean_x += delta / nobs - val -= mean_x - ssqdm_x += (val + prev) * delta + ssqdm_x += ((nobs - 1) * val + + (nobs + 1) * prev + - 2 * nobs * mean_x_old) * delta / nobs else: add_var(val, &nobs, &mean_x, &ssqdm_x) @@ -788,7 +797,17 @@ cdef inline double calc_skew(int64_t minp, int64_t nobs, double x, double xx, A = x / dnobs B = xx / dnobs - A * A C = xxx / dnobs - A * A * A - 3 * A * B - if B <= 0 or nobs < 3: + + # #18044: with uniform distribution, floating issue will + # cause B != 0. and cause the result is a very + # large number. + # + # in core/nanops.py nanskew/nankurt call the function + # _zero_out_fperr(m2) to fix floating error. 
+ # if the variance is less than 1e-14, it could be + # treat as zero, here we follow the original + # skew/kurt behaviour to check B <= 1e-14 + if B <= 1e-14 or nobs < 3: result = NaN else: R = sqrt(B) @@ -915,7 +934,16 @@ cdef inline double calc_kurt(int64_t minp, int64_t nobs, double x, double xx, R = R * A D = xxxx / dnobs - R - 6 * B * A * A - 4 * C * A - if B == 0 or nobs < 4: + # #18044: with uniform distribution, floating issue will + # cause B != 0. and cause the result is a very + # large number. + # + # in core/nanops.py nanskew/nankurt call the function + # _zero_out_fperr(m2) to fix floating error. + # if the variance is less than 1e-14, it could be + # treat as zero, here we follow the original + # skew/kurt behaviour to check B <= 1e-14 + if B <= 1e-14 or nobs < 4: result = NaN else: K = (dnobs * dnobs - 1.) * D / (B * B) - 3 * ((dnobs - 1.) ** 2) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 3853ac017044c..288d9d7742daf 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -257,6 +257,16 @@ def u(s): def u_safe(s): return s + def to_str(s): + """ + Convert bytes and non-string into Python 3 str + """ + if isinstance(s, binary_type): + s = bytes_to_str(s) + elif not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): # encoding is for compat with PY2 return len(data) @@ -302,6 +312,14 @@ def u_safe(s): except: return s + def to_str(s): + """ + Convert unicode and non-string into Python 2 str + """ + if not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): try: data = data.decode(encoding) @@ -381,17 +399,20 @@ def raise_with_traceback(exc, traceback=Ellipsis): # http://stackoverflow.com/questions/4126348 # Thanks to @martineau at SO -from dateutil import parser as _date_parser import dateutil + +if PY2 and LooseVersion(dateutil.__version__) == '2.0': + # dateutil brokenness + raise Exception('dateutil 2.0 incompatible with Python 
2.x, you must ' + 'install version 1.5 or 2.1+!') + +from dateutil import parser as _date_parser if LooseVersion(dateutil.__version__) < '2.0': + @functools.wraps(_date_parser.parse) def parse_date(timestr, *args, **kwargs): timestr = bytes(timestr) return _date_parser.parse(timestr, *args, **kwargs) -elif PY2 and LooseVersion(dateutil.__version__) == '2.0': - # dateutil brokenness - raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' - 'install version 1.5 or 2.1+!') else: parse_date = _date_parser.parse diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index e709c771b7d18..c574e6d56916b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2268,7 +2268,7 @@ def _recode_for_categories(codes, old_categories, new_categories): if len(old_categories) == 0: # All null anyway, so just retain the nulls - return codes + return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=-1) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 33531e80449d8..94208a61a4377 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -479,3 +479,29 @@ def use_inf_as_na_cb(key): cf.register_option( 'engine', 'auto', parquet_engine_doc, validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet'])) + +# -------- +# Plotting +# --------- + +register_converter_doc = """ +: bool + Whether to register converters with matplotlib's units registry for + dates, times, datetimes, and Periods. Toggling to False will remove + the converters, restoring any converters that pandas overwrote. 
+""" + + +def register_converter_cb(key): + from pandas.plotting import register_matplotlib_converters + from pandas.plotting import deregister_matplotlib_converters + + if cf.get_option(key): + register_matplotlib_converters() + else: + deregister_matplotlib_converters() + + +with cf.config_prefix("plotting.matplotlib"): + cf.register_option("register_converters", True, register_converter_doc, + validator=bool, cb=register_converter_cb) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f3b11e52cdd7a..eae283e9bc00d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -136,7 +136,7 @@ def trans(x): # noqa try: if np.allclose(new_result, result, rtol=0): return new_result - except: + except Exception: # comparison of an object dtype with a number type could # hit here @@ -151,14 +151,14 @@ def trans(x): # noqa elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']: try: result = result.astype(dtype) - except: + except Exception: if dtype.tz: # convert to datetime and change timezone from pandas import to_datetime result = to_datetime(result).tz_localize('utc') result = result.tz_convert(dtype.tz) - except: + except Exception: pass return result @@ -210,7 +210,7 @@ def changeit(): new_result[mask] = om_at result[:] = new_result return result, False - except: + except Exception: pass # we are forced to change the dtype of the result as the input @@ -243,7 +243,7 @@ def changeit(): try: np.place(result, mask, other) - except: + except Exception: return changeit() return result, False @@ -274,14 +274,14 @@ def maybe_promote(dtype, fill_value=np.nan): if issubclass(dtype.type, np.datetime64): try: fill_value = tslib.Timestamp(fill_value).value - except: + except Exception: # the proper thing to do here would probably be to upcast # to object (but numpy 1.6.1 doesn't do this properly) fill_value = iNaT elif issubclass(dtype.type, np.timedelta64): try: fill_value = lib.Timedelta(fill_value).value - except: + except 
Exception: # as for datetimes, cannot upcast to object fill_value = iNaT else: @@ -592,12 +592,12 @@ def maybe_convert_scalar(values): def coerce_indexer_dtype(indexer, categories): """ coerce the indexer input array to the smallest dtype possible """ - l = len(categories) - if l < _int8_max: + length = len(categories) + if length < _int8_max: return _ensure_int8(indexer) - elif l < _int16_max: + elif length < _int16_max: return _ensure_int16(indexer) - elif l < _int32_max: + elif length < _int32_max: return _ensure_int32(indexer) return _ensure_int64(indexer) @@ -629,7 +629,7 @@ def conv(r, dtype): r = float(r) elif dtype.kind == 'i': r = int(r) - except: + except Exception: pass return r @@ -756,7 +756,7 @@ def maybe_convert_objects(values, convert_dates=True, convert_numeric=True, if not isna(new_values).all(): values = new_values - except: + except Exception: pass else: # soft-conversion @@ -817,7 +817,7 @@ def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True, # If all NaNs, then do not-alter values = converted if not isna(converted).all() else values values = values.copy() if copy else values - except: + except Exception: pass return values @@ -888,10 +888,10 @@ def try_datetime(v): try: from pandas import to_datetime return to_datetime(v) - except: + except Exception: pass - except: + except Exception: pass return v.reshape(shape) @@ -903,7 +903,7 @@ def try_timedelta(v): from pandas import to_timedelta try: return to_timedelta(v)._values.reshape(shape) - except: + except Exception: return v.reshape(shape) inferred_type = lib.infer_datetimelike_array(_ensure_object(v)) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 93993fd0a0cab..bca5847f3a6cc 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -569,9 +569,10 @@ def _concat_rangeindex_same_dtype(indexes): start = step = next = None - for obj in indexes: - if not len(obj): - continue + # Filter the empty indexes + 
non_empty_indexes = [obj for obj in indexes if len(obj)] + + for obj in non_empty_indexes: if start is None: # This is set by the first non-empty index @@ -595,8 +596,16 @@ def _concat_rangeindex_same_dtype(indexes): if step is not None: next = obj[-1] + step - if start is None: + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1]._stop if next is None else next + else: + # Here all "indexes" had 0 length, i.e. were empty. + # Simply take start, stop, and step from the last empty index. + obj = indexes[-1] start = obj._start step = obj._step - stop = obj._stop if next is None else next + stop = obj._stop + return indexes[0].__class__(start, stop, step) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a1af806e5cb9e..ad79001e45b86 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -997,7 +997,7 @@ def to_dict(self, orient='dict', into=dict): for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): return [into_c((k, _maybe_box_datetimelike(v)) - for k, v in zip(self.columns, row)) + for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): return into_c((k, v.to_dict(into)) for k, v in self.iterrows()) @@ -3751,7 +3751,7 @@ def nlargest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. @@ -3788,7 +3788,7 @@ def nsmallest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. 
- ``last`` : take the last occurrence. @@ -4035,6 +4035,8 @@ def combine(self, other, func, fill_value=None, overwrite=True): ---------- other : DataFrame func : function + Function that takes two series as inputs and return a Series or a + scalar fill_value : scalar value overwrite : boolean, default True If True then overwrite values for common keys in the calling frame @@ -4042,8 +4044,21 @@ def combine(self, other, func, fill_value=None, overwrite=True): Returns ------- result : DataFrame - """ + Examples + -------- + >>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2) + A B + 0 0 3 + 1 0 3 + + See Also + -------- + DataFrame.combine_first : Combine two DataFrame objects and default to + non-null values in frame calling the method + """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) @@ -4131,16 +4146,24 @@ def combine_first(self, other): ---------- other : DataFrame + Returns + ------- + combined : DataFrame + Examples -------- - a's values prioritized, use values from b to fill holes: - - >>> a.combine_first(b) + df1's values prioritized, use values from df2 to fill holes: + >>> df1 = pd.DataFrame([[1, np.nan]]) + >>> df2 = pd.DataFrame([[3, 4]]) + >>> df1.combine_first(df2) + 0 1 + 0 1 4.0 - Returns - ------- - combined : DataFrame + See Also + -------- + DataFrame.combine : Perform series-wise operation on two DataFrames + using a given function """ import pandas.core.computation.expressions as expressions @@ -4283,7 +4306,7 @@ def first_valid_index(self): return valid_indices[0] if len(valid_indices) else None @Appender(_shared_docs['valid_index'] % { - 'position': 'first', 'klass': 'DataFrame'}) + 'position': 'last', 'klass': 'DataFrame'}) def last_valid_index(self): if len(self) == 0: return None @@ -5113,7 +5136,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): >>> 
df = pd.DataFrame(columns=['A']) >>> for i in range(5): - ... df = df.append({'A'}: i}, ignore_index=True) + ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 @@ -5790,7 +5813,12 @@ def idxmin(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be NA + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- @@ -5821,7 +5849,12 @@ def idxmax(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be first index. + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 118e7d5cd437b..58d86251a4a62 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -49,7 +49,7 @@ from pandas.tseries.frequencies import to_offset from pandas import compat from pandas.compat.numpy import function as nv -from pandas.compat import (map, zip, lzip, lrange, string_types, +from pandas.compat import (map, zip, lzip, lrange, string_types, to_str, isidentifier, set_function_name, cPickle as pkl) from pandas.core.ops import _align_method_FRAME import pandas.core.nanops as nanops @@ -3235,14 +3235,14 @@ def filter(self, items=None, like=None, regex=None, axis=None): **{name: [r for r in items if r in labels]}) elif like: def f(x): - if not isinstance(x, string_types): - x = str(x) - return like in x + return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: + def f(x): + return matcher.search(to_str(x)) is not None matcher = re.compile(regex) - values = labels.map(lambda x: matcher.search(str(x)) is not None) + values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or 
`regex`') diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 5c07033f5a68f..5931f6e009dab 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -77,6 +77,119 @@ pandas.Panel.%(name)s """ +_apply_docs = dict( + template=""" + Apply function ``func`` group-wise and combine the results together. + + The function passed to ``apply`` must take a {input} as its first + argument and return a dataframe, a series or a scalar. ``apply`` will + then take care of combining the results back together into a single + dataframe or series. ``apply`` is therefore a highly flexible + grouping method. + + While ``apply`` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods. + Pandas offers a wide range of method that will be much faster + than using ``apply`` for their specific purposes, so try to use them + before reaching for ``apply``. + + Parameters + ---------- + func : function + A callable that takes a {input} as its first argument, and + returns a dataframe, a series or a scalar. In addition the + callable may take positional and keyword arguments + args, kwargs : tuple and dict + Optional positional and keyword arguments to pass to ``func`` + + Returns + ------- + applied : Series or DataFrame + + Notes + ----- + In the current implementation ``apply`` calls func twice on the + first group to decide whether it can take a fast or slow code + path. This can lead to unexpected behavior if func has + side-effects, as they will take effect twice for the first + group. + + Examples + -------- + {examples} + + See also + -------- + pipe : Apply function to the full GroupBy object instead of to each + group. + aggregate, transform + """, + dataframe_examples=""" + >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]}) + >>> g = df.groupby('A') + + From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``. 
+ Calling ``apply`` in various ways, we can get different grouping results: + + Example 1: below the function passed to ``apply`` takes a dataframe as + its argument and returns a dataframe. ``apply`` combines the result for + each group together into a new dataframe: + + >>> g.apply(lambda x: x / x.sum()) + B C + 0 0.333333 0.4 + 1 0.666667 0.6 + 2 1.000000 1.0 + + Example 2: The function passed to ``apply`` takes a dataframe as + its argument and returns a series. ``apply`` combines the result for + each group together into a new dataframe: + + >>> g.apply(lambda x: x.max() - x.min()) + B C + A + a 1 2 + b 0 0 + + Example 3: The function passed to ``apply`` takes a dataframe as + its argument and returns a scalar. ``apply`` combines the result for + each group together into a series, including setting the index as + appropriate: + + >>> g.apply(lambda x: x.C.max() - x.B.min()) + A + a 5 + b 2 + dtype: int64 + """, + series_examples=""" + >>> ser = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g = ser.groupby(ser.index) + + From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``. + Calling ``apply`` in various ways, we can get different grouping results: + + Example 1: The function passed to ``apply`` takes a series as + its argument and returns a series. ``apply`` combines the result for + each group together into a new series: + + >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2) + 0 0.0 + 1 0.5 + 2 4.0 + dtype: float64 + + Example 2: The function passed to ``apply`` takes a series as + its argument and returns a scalar. 
``apply`` combines the result for + each group together into a series, including setting the index as + appropriate: + + >>> g.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64 + """) + _transform_template = """ Call function producing a like-indexed %(klass)s on each group and return a %(klass)s having the same indexes as the original object @@ -144,6 +257,7 @@ """ + # special case to prevent duplicate plots when catching exceptions when # forwarding methods from NDFrames _plotting_methods = frozenset(['plot', 'boxplot', 'hist']) @@ -206,12 +320,13 @@ class Grouper(object): sort : boolean, default to False whether to sort the resulting labels - additional kwargs to control time-like groupers (when freq is passed) + additional kwargs to control time-like groupers (when ``freq`` is passed) - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If grouper is PeriodIndex + base, loffset Returns ------- @@ -233,6 +348,7 @@ class Grouper(object): >>> df.groupby(Grouper(level='date', freq='60s', axis=1)) """ + _attributes = ('key', 'level', 'freq', 'axis', 'sort') def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: @@ -333,6 +449,14 @@ def _set_grouper(self, obj, sort=False): def groups(self): return self.grouper.groups + def __repr__(self): + attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name)) + for attr_name in self._attributes + if getattr(self, attr_name) is not None] + attrs = ", ".join(attrs_list) + cls_name = self.__class__.__name__ + return "{}({})".format(cls_name, attrs) + class GroupByPlot(PandasObject): """ @@ -653,50 +777,10 @@ def __iter__(self): """ return self.grouper.get_iterator(self.obj, axis=self.axis) - @Substitution(name='groupby') + @Appender(_apply_docs['template'] + 
.format(input="dataframe", + examples=_apply_docs['dataframe_examples'])) def apply(self, func, *args, **kwargs): - """ - Apply function and combine results together in an intelligent way. - - The split-apply-combine combination rules attempt to be as common - sense based as possible. For example: - - case 1: - group DataFrame - apply aggregation function (f(chunk) -> Series) - yield DataFrame, with group axis having group labels - - case 2: - group DataFrame - apply transform function ((f(chunk) -> DataFrame with same indexes) - yield DataFrame with resulting chunks glued together - - case 3: - group Series - apply function with f(chunk) -> DataFrame - yield DataFrame with result of chunks glued together - - Parameters - ---------- - func : function - - Notes - ----- - See online documentation for full exposition on how to use apply. - - In the current implementation apply calls func twice on the - first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if func has - side-effects, as they will take effect twice for the first - group. - - - See also - -------- - pipe : Apply function to the full GroupBy object instead of to each - group. 
- aggregate, transform - """ func = self._is_builtin_func(func) @@ -2847,9 +2931,11 @@ def is_in_obj(gpr): else: in_axis, name = False, None - if is_categorical_dtype(gpr) and len(gpr) != len(obj): - raise ValueError("Categorical dtype grouper must " - "have len(grouper) == len(data)") + if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: + raise ValueError( + ("Length of grouper ({len_gpr}) and axis ({len_axis})" + " must be same length" + .format(len_gpr=len(gpr), len_axis=obj.shape[axis]))) # create the Grouping # allow us to passing the actual Grouping as the gpr @@ -3011,6 +3097,12 @@ def _selection_name(self): """) + @Appender(_apply_docs['template'] + .format(input='series', + examples=_apply_docs['series_examples'])) + def apply(self, func, *args, **kwargs): + return super(SeriesGroupBy, self).apply(func, *args, **kwargs) + @Appender(_agg_doc) @Appender(_shared_docs['aggregate'] % dict( klass='Series', @@ -4363,7 +4455,8 @@ def count(self): ids, _, ngroups = self.grouper.group_info mask = ids != -1 - val = ((mask & ~isna(blk.get_values())) for blk in data.blocks) + val = ((mask & ~isna(np.atleast_2d(blk.get_values()))) + for blk in data.blocks) loc = (blk.mgr_locs for blk in data.blocks) counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a995fc10a6674..83c78f084a9da 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1934,7 +1934,10 @@ def putmask(self, mask, value): try: np.putmask(values, mask, self._convert_for_op(value)) return self._shallow_copy(values) - except (ValueError, TypeError): + except (ValueError, TypeError) as err: + if is_object_dtype(self): + raise err + # coerces to object return self.astype(object).putmask(mask, value) @@ -2032,7 +2035,7 @@ def equals(self, other): try: return array_equivalent(_values_from_object(self), _values_from_object(other)) - except: + except Exception: return False def 
identical(self, other): @@ -2315,7 +2318,7 @@ def intersection(self, other): try: indexer = Index(other._values).get_indexer(self._values) indexer = indexer.take((indexer != -1).nonzero()[0]) - except: + except Exception: # duplicates indexer = algos.unique1d( Index(other._values).get_indexer_non_unique(self._values)[0]) @@ -3024,13 +3027,13 @@ def _reindex_non_unique(self, target): new_indexer = None if len(missing): - l = np.arange(len(indexer)) + length = np.arange(len(indexer)) missing = _ensure_platform_int(missing) missing_labels = target.take(missing) - missing_indexer = _ensure_int64(l[~check]) + missing_indexer = _ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values - cur_indexer = _ensure_int64(l[check]) + cur_indexer = _ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 8b680127723c3..70b531ffb0ec4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -79,7 +79,8 @@ def __new__(cls, data=None, categories=None, ordered=None, dtype=None, if data is not None or categories is None: cls._scalar_data_error(data) data = [] - data = cls._create_categorical(cls, data, categories, ordered) + data = cls._create_categorical(cls, data, categories, ordered, + dtype) if copy: data = data.copy() diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 18be6c61abdf7..50085889ad88f 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2,9 +2,11 @@ from __future__ import division import operator import warnings -from datetime import time, datetime -from datetime import timedelta +from datetime import time, datetime, timedelta + import numpy as np +from pytz import utc + from pandas.core.base import _shared_docs from pandas.core.dtypes.common import ( @@ -55,10 +57,6 @@ from pandas._libs.tslibs import 
timezones -def _utc(): - import pytz - return pytz.utc - # -------- some conversion wrapper functions @@ -66,7 +64,6 @@ def _field_accessor(name, field, docstring=None): def f(self): values = self.asi8 if self.tz is not None: - utc = _utc() if self.tz is not utc: values = self._local_timestamps() @@ -451,7 +448,7 @@ def _generate(cls, start, end, periods, name, offset, try: inferred_tz = timezones.infer_tzinfo(start, end) - except: + except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') @@ -562,8 +559,6 @@ def _convert_for_op(self, value): raise ValueError('Passed item and index have different timezone') def _local_timestamps(self): - utc = _utc() - if self.is_monotonic: return libts.tz_convert(self.asi8, utc, self.tz) else: @@ -823,7 +818,6 @@ def _add_delta(self, delta): tz = 'UTC' if self.tz is not None else None result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer') - utc = _utc() if self.tz is not None and self.tz is not utc: result = result.tz_convert(self.tz) return result @@ -877,7 +871,6 @@ def astype(self, dtype, copy=True): raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype) def _get_time_micros(self): - utc = _utc() values = self.asi8 if self.tz is not None and self.tz is not utc: values = self._local_timestamps() @@ -1183,12 +1176,12 @@ def __iter__(self): # convert in chunks of 10k for efficiency data = self.asi8 - l = len(self) + length = len(self) chunksize = 10000 - chunks = int(l / chunksize) + 1 + chunks = int(length / chunksize) + 1 for i in range(chunks): start_i = i * chunksize - end_i = min((i + 1) * chunksize, l) + end_i = min((i + 1) * chunksize, length) converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box=True) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7bf7cfce515a1..9619f5403b761 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -179,7 +179,7 @@ def 
__new__(cls, data, closed='right', if isinstance(data, IntervalIndex): left = data.left right = data.right - + closed = data.closed else: # don't allow scalars @@ -187,7 +187,7 @@ def __new__(cls, data, closed='right', cls._scalar_data_error(data) data = IntervalIndex.from_intervals(data, name=name) - left, right = data.left, data.right + left, right, closed = data.left, data.right, data.closed return cls._simple_new(left, right, closed, name, copy=copy, verify_integrity=verify_integrity) @@ -569,7 +569,8 @@ def copy(self, deep=False, name=None): left = self.left.copy(deep=True) if deep else self.left right = self.right.copy(deep=True) if deep else self.right name = name if name is not None else self.name - return type(self).from_arrays(left, right, name=name) + closed = self.closed + return type(self).from_arrays(left, right, closed=closed, name=name) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4cc59f5297058..f4acb6862addb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -446,6 +446,17 @@ def _shallow_copy_with_infer(self, values=None, **kwargs): **kwargs) return self._shallow_copy(values, **kwargs) + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) + def __contains__(self, key): + hash(key) + try: + self.get_loc(key) + return True + except (LookupError, TypeError): + return False + + contains = __contains__ + @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: @@ -809,9 +820,10 @@ def duplicated(self, keep='first'): return duplicated_int64(ids, keep) - @Appender(ibase._index_shared_docs['fillna']) def fillna(self, value=None, downcast=None): - # isna is not implemented for MultiIndex + """ + fillna is not implemented for MultiIndex + """ raise NotImplementedError('isna is not defined for MultiIndex') @Appender(_index_shared_docs['dropna']) @@ 
-1370,17 +1382,6 @@ def nlevels(self): def levshape(self): return tuple(len(x) for x in self.levels) - @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) - def __contains__(self, key): - hash(key) - try: - self.get_loc(key) - return True - except LookupError: - return False - - contains = __contains__ - def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6e08c32f30dcd..445adb6bd3b18 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -36,6 +36,26 @@ join as libjoin, Timedelta, NaT, iNaT) +def _field_accessor(name, alias, docstring=None): + def f(self): + if self.hasnans: + result = np.empty(len(self), dtype='float64') + mask = self._isnan + imask = ~mask + result.flat[imask] = np.array([getattr(Timedelta(val), alias) + for val in self.asi8[imask]]) + result[mask] = np.nan + else: + result = np.array([getattr(Timedelta(val), alias) + for val in self.asi8], dtype='int64') + + return Index(result, name=self.name) + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + def _td_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert timedelta-like to timedelta64 @@ -380,46 +400,17 @@ def _format_native_types(self, na_rep=u('NaT'), nat_rep=na_rep, justify='all').get_result() - def _get_field(self, m): - - values = self.asi8 - hasnans = self.hasnans - if hasnans: - result = np.empty(len(self), dtype='float64') - mask = self._isnan - imask = ~mask - result.flat[imask] = np.array( - [getattr(Timedelta(val), m) for val in values[imask]]) - result[mask] = np.nan - else: - result = np.array([getattr(Timedelta(val), m) - for val in values], dtype='int64') - return Index(result, name=self.name) - - @property - def days(self): - """ Number of days for each element. 
""" - return self._get_field('days') - - @property - def seconds(self): - """ Number of seconds (>= 0 and less than 1 day) for each element. """ - return self._get_field('seconds') - - @property - def microseconds(self): - """ - Number of microseconds (>= 0 and less than 1 second) for each - element. """ - return self._get_field('microseconds') - - @property - def nanoseconds(self): - """ - Number of nanoseconds (>= 0 and less than 1 microsecond) for each - element. - """ - return self._get_field('nanoseconds') + days = _field_accessor("days", "days", + " Number of days for each element. ") + seconds = _field_accessor("seconds", "seconds", + " Number of seconds (>= 0 and less than 1 day) " + "for each element. ") + microseconds = _field_accessor("microseconds", "microseconds", + "\nNumber of microseconds (>= 0 and less " + "than 1 second) for each\nelement. ") + nanoseconds = _field_accessor("nanoseconds", "nanoseconds", + "\nNumber of nanoseconds (>= 0 and less " + "than 1 microsecond) for each\nelement.\n") @property def components(self): @@ -850,7 +841,7 @@ def insert(self, loc, item): if _is_convertible_to_td(item): try: item = Timedelta(item) - except: + except Exception: pass freq = None diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 045580d393b26..b929dfd5a9d0b 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1946,7 +1946,8 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, np.timedelta64) - return isinstance(element, (timedelta, np.timedelta64)) + return is_integer(element) or isinstance( + element, (timedelta, np.timedelta64)) def fillna(self, value, **kwargs): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index baeb869239c1e..e1c09947ac0b4 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -548,6 +548,9 @@ def nanskew(values, axis=None, skipna=True): m3 = adjusted3.sum(axis, dtype=np.float64) # floating 
point error + # + # #18044 in _libs/windows.pyx calc_skew follow this behavior + # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) @@ -609,6 +612,9 @@ def nankurt(values, axis=None, skipna=True): result = numer / denom - adj # floating point error + # + # #18044 in _libs/windows.pyx calc_kurt follow this behavior + # to fix the fperr to treat denom <1e-14 as zero numer = _zero_out_fperr(numer) denom = _zero_out_fperr(denom) @@ -699,6 +705,7 @@ def _maybe_null_out(result, axis, mask): def _zero_out_fperr(arg): + # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): with np.errstate(invalid='ignore'): return np.where(np.abs(arg) < 1e-14, 0, arg) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 6edbb99641542..1adb3a078cca3 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -395,7 +395,11 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis) try: - result = grouped.aggregate(how, *args, **kwargs) + if isinstance(obj, ABCDataFrame) and compat.callable(how): + # Check if the function is reducing or not. 
+ result = grouped._aggregate_item_by_item(how, *args, **kwargs) + else: + result = grouped.aggregate(how, *args, **kwargs) except Exception: # we have a non-reducing function @@ -1010,22 +1014,18 @@ class TimeGrouper(Grouper): Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right - nperiods : optional, integer + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex - - Notes - ----- - Use begin, end, nperiods to generate intervals that cannot be derived - directly from the associated object """ + _attributes = Grouper._attributes + ('closed', 'label', 'how', + 'loffset', 'kind', 'convention', + 'base') def __init__(self, freq='Min', closed=None, label=None, how='mean', - nperiods=None, axis=0, - fill_method=None, limit=None, loffset=None, kind=None, - convention=None, base=0, **kwargs): + axis=0, fill_method=None, limit=None, loffset=None, + kind=None, convention=None, base=0, **kwargs): freq = to_offset(freq) end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) @@ -1044,7 +1044,6 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', self.closed = closed self.label = label - self.nperiods = nperiods self.kind = kind self.convention = convention or 'E' @@ -1137,6 +1136,16 @@ def _get_time_bins(self, ax): tz=tz, name=ax.name) + # GH 15549 + # In edge case of tz-aware resapmling binner last index can be + # less than the last variable in data object, this happens because of + # DST time change + if len(binner) > 1 and binner[-1] < last: + extra_date_range = pd.date_range(binner[-1], last + self.freq, + freq=self.freq, tz=tz, + name=ax.name) + binner = labels = binner.append(extra_date_range[1:]) + # a little hack trimmed = False if (len(binner) > 2 and binner[-2] == last and 
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e409090e76944..bdb7ec00a29fd 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -126,7 +126,7 @@ def _groupby_and_merge(by, on, left, right, _merge_pieces, try: if k in merged: merged[k] = key - except: + except KeyError: pass pieces.append(merged) @@ -1253,10 +1253,12 @@ def _get_merge_keys(self): join_names) = super(_AsOfMerge, self)._get_merge_keys() # validate index types are the same - for lk, rk in zip(left_join_keys, right_join_keys): + for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): if not is_dtype_equal(lk.dtype, rk.dtype): - raise MergeError("incompatible merge keys, " - "must be the same type") + raise MergeError("incompatible merge keys [{i}] {lkdtype} and " + "{rkdtype}, must be the same type" + .format(i=i, lkdtype=lk.dtype, + rkdtype=rk.dtype)) # validate tolerance; must be a Timedelta if we have a DTI if self.tolerance is not None: @@ -1266,8 +1268,10 @@ def _get_merge_keys(self): else: lt = left_join_keys[-1] - msg = "incompatible tolerance, must be compat " \ - "with type {lt}".format(lt=type(lt)) + msg = ("incompatible tolerance {tolerance}, must be compat " + "with type {lkdtype}".format( + tolerance=type(self.tolerance), + lkdtype=lt.dtype)) if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): if not isinstance(self.tolerance, Timedelta): @@ -1503,12 +1507,12 @@ def _sort_labels(uniques, left, right): # tuplesafe uniques = Index(uniques).values - l = len(left) + llength = len(left) labels = np.concatenate([left, right]) _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) new_labels = _ensure_int64(new_labels) - new_left, new_right = new_labels[:l], new_labels[l:] + new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right @@ -1525,7 +1529,8 @@ def _get_join_keys(llab, rlab, shape, sort): rkey = stride * rlab[0].astype('i8', subok=False, copy=False) for i in range(1, 
nlev): - stride //= shape[i] + with np.errstate(divide='ignore'): + stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index fda339aa30461..2adf17a227a59 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -148,7 +148,7 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): Parameters ---------- - x : ndarray or Series + x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles diff --git a/pandas/core/series.py b/pandas/core/series.py index 1c92c4b8850ee..2b4f9c4c6f7e3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -597,7 +597,7 @@ def _ixs(self, i, axis=0): return values[i] except IndexError: raise - except: + except Exception: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) @@ -675,7 +675,7 @@ def _get_with(self, key): if isinstance(key, tuple): try: return self._get_values_tuple(key) - except: + except Exception: if len(key) == 1: key = key[0] if isinstance(key, slice): @@ -818,7 +818,7 @@ def _set_with(self, key, value): if not isinstance(key, (list, Series, np.ndarray, Series)): try: key = list(key) - except: + except Exception: key = [key] if isinstance(key, Index): @@ -1306,7 +1306,13 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1336,7 +1342,13 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. 
If the entire Series is NA, the result + will be NA. + + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1731,11 +1743,26 @@ def combine(self, other, func, fill_value=np.nan): ---------- other : Series or scalar value func : function + Function that takes two scalars as inputs and return a scalar fill_value : scalar value Returns ------- result : Series + + Examples + -------- + >>> s1 = Series([1, 2]) + >>> s2 = Series([0, 3]) + >>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2) + 0 0 + 1 2 + dtype: int64 + + See Also + -------- + Series.combine_first : Combine Series values, choosing the calling + Series's values first """ if isinstance(other, Series): new_index = self.index.union(other.index) @@ -1764,7 +1791,21 @@ def combine_first(self, other): Returns ------- - y : Series + combined : Series + + Examples + -------- + >>> s1 = pd.Series([1, np.nan]) + >>> s2 = pd.Series([3, 4]) + >>> s1.combine_first(s2) + 0 1.0 + 1 4.0 + dtype: float64 + + See Also + -------- + Series.combine : Perform elementwise operation on two Series + using a given function """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) @@ -1982,7 +2023,7 @@ def nlargest(self, n=5, keep='first'): ---------- n : int Return this many descending sorted values - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. @@ -2029,7 +2070,7 @@ def nsmallest(self, n=5, keep='first'): ---------- n : int Return this many ascending sorted values - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. 
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index abef6f6086dbd..9614641aa1abf 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1423,6 +1423,10 @@ def cons_row(x): return [x] result = [cons_row(x) for x in result] + if result: + # propogate nan values to match longest sequence (GH 18450) + max_len = max(len(x) for x in result) + result = [x * max_len if x[0] is np.nan else x for x in result] if not isinstance(expand, bool): raise ValueError("expand must be True or False") diff --git a/pandas/core/window.py b/pandas/core/window.py index 5143dddc5e866..345f9b035a36b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -503,6 +503,9 @@ class Window(_Window): * ``general_gaussian`` (needs power, width) * ``slepian`` (needs width). + If ``win_type=None`` all points are evenly weighted. To learn more about + different window types see `scipy.signal window functions + <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__. 
""" def validate(self): diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index c5d4a0ecf44ab..24eeb1dd94c18 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -45,7 +45,6 @@ import pandas as pd import numpy as np -import itertools import csv from functools import partial @@ -891,6 +890,7 @@ def get_col_type(dtype): name = any(self.frame.index.names) cname = any(self.frame.columns.names) lastcol = self.frame.index.nlevels - 1 + previous_lev3 = None for i, lev in enumerate(self.frame.index.levels): lev2 = lev.format() blank = ' ' * len(lev2[0]) @@ -901,11 +901,19 @@ def get_col_type(dtype): lev3 = [blank] * clevels if name: lev3.append(lev.name) - for level_idx, group in itertools.groupby( - self.frame.index.labels[i]): - count = len(list(group)) - lev3.extend([lev2[level_idx]] + [blank] * (count - 1)) + current_idx_val = None + for level_idx in self.frame.index.labels[i]: + if ((previous_lev3 is None or + previous_lev3[len(lev3)].isspace()) and + lev2[level_idx] == current_idx_val): + # same index as above row and left index was the same + lev3.append(blank) + else: + # different value than above or left index different + lev3.append(lev2[level_idx]) + current_idx_val = lev2[level_idx] strcols.insert(i, lev3) + previous_lev3 = lev3 column_format = self.column_format if column_format is None: @@ -1695,7 +1703,7 @@ def _save_header(self): else: encoded_labels = [] - if not has_mi_columns: + if not has_mi_columns or has_aliases: encoded_labels += list(write_cols) writer.writerow(encoded_labels) else: diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index b4dc9173f11ba..caa67d1ce6bce 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -29,9 +29,8 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, The main method a user calls to execute a Query in Google BigQuery and read results into a pandas DataFrame. - Google BigQuery API Client Library v2 for Python is used. 
- Documentation is available `here - <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__ + This function requires the `pandas-gbq package + <https://pandas-gbq.readthedocs.io>`__. Authentication to the Google BigQuery service is via OAuth 2.0. @@ -70,7 +69,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. - 'standard' : Use BigQuery's standard SQL (beta), which is + 'standard' : Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery SQL Reference <https://cloud.google.com/bigquery/sql-reference/>`__ diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index be39f4baba0fb..203b1d62fcbf3 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -5,7 +5,7 @@ import pandas._libs.json as json from pandas._libs.tslib import iNaT -from pandas.compat import StringIO, long, u +from pandas.compat import StringIO, long, u, to_str from pandas import compat, isna from pandas import Series, DataFrame, to_datetime, MultiIndex from pandas.io.common import (get_filepath_or_buffer, _get_handle, @@ -458,8 +458,10 @@ def read(self): if self.lines and self.chunksize: obj = concat(self) elif self.lines: + + data = to_str(self.data) obj = self._get_object_parser( - self._combine_lines(self.data.split('\n')) + self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) @@ -612,7 +614,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: dtype = np.dtype(dtype) return data.astype(dtype), True - except: + except (TypeError, ValueError): return data, False if convert_dates: @@ -628,7 +630,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass if data.dtype.kind == 'f': @@ -639,7 +641,7 @@ def _try_convert_data(self, name, data, 
use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass # do't coerce 0-len data @@ -651,7 +653,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, if (new_data == data).all(): data = new_data result = True - except: + except (TypeError, ValueError): pass # coerce ints to 64 @@ -661,7 +663,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('int64') result = True - except: + except (TypeError, ValueError): pass return data, result @@ -680,7 +682,7 @@ def _try_convert_to_date(self, data): if new_data.dtype == 'object': try: new_data = data.astype('int64') - except: + except (TypeError, ValueError): pass # ignore numbers that are out of range @@ -697,7 +699,7 @@ def _try_convert_to_date(self, data): unit=date_unit) except ValueError: continue - except: + except Exception: break return new_data, True return data, False diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index e811dd1eab142..23d2f730d070c 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -181,7 +181,7 @@ def _pull_field(js, spec): return result - if isinstance(data, list) and len(data) is 0: + if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob @@ -207,9 +207,7 @@ def _pull_field(js, spec): elif not isinstance(meta, list): meta = [meta] - for i, x in enumerate(meta): - if not isinstance(x, list): - meta[i] = [x] + meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx index fd3f4612fb432..f175a6743f44b 100644 --- a/pandas/io/msgpack/_packer.pyx +++ b/pandas/io/msgpack/_packer.pyx @@ -8,6 +8,7 @@ from libc.limits cimport * from pandas.io.msgpack.exceptions import PackValueError from pandas.io.msgpack import ExtType +import numpy as np cdef extern from "../../src/msgpack/pack.h": @@ -133,7 +134,7 @@ cdef 
class Packer(object): while True: if o is None: ret = msgpack_pack_nil(&self.pk) - elif isinstance(o, bool): + elif isinstance(o, (bool, np.bool_)): if o: ret = msgpack_pack_true(&self.pk) else: diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 92270b39f56ef..abd258034af99 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -350,8 +350,11 @@ def unconvert(values, dtype, compress=None): ) # fall through to copying `np.fromstring` - # Copy the string into a numpy array. - return np.fromstring(values, dtype=dtype) + # Copy the bytes into a numpy array. + buf = np.frombuffer(values, dtype=dtype) + buf = buf.copy() # required to not mutate the original data + buf.flags.writeable = True + return buf def encode(obj): diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4b507b7f5df6f..4a13d2c9db944 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -76,9 +76,10 @@ def write(self, df, path, compression='snappy', table, path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs) - def read(self, path): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.parquet.read_table(path).to_pandas() + return self.api.parquet.read_table(path, columns=columns, + **kwargs).to_pandas() class FastParquetImpl(object): @@ -115,9 +116,9 @@ def write(self, df, path, compression='snappy', **kwargs): self.api.write(path, df, compression=compression, **kwargs) - def read(self, path): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.ParquetFile(path).to_pandas() + return self.api.ParquetFile(path).to_pandas(columns=columns, **kwargs) def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): @@ -175,10 +176,10 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): if df.columns.inferred_type not in valid_types: raise ValueError("parquet must have string column names") - return 
impl.write(df, path, compression=compression) + return impl.write(df, path, compression=compression, **kwargs) -def read_parquet(path, engine='auto', **kwargs): +def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. @@ -188,6 +189,10 @@ def read_parquet(path, engine='auto', **kwargs): ---------- path : string File path + columns: list, default=None + If not None, only these columns will be read from the file. + + .. versionadded 0.21.1 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first @@ -201,4 +206,4 @@ def read_parquet(path, engine='auto', **kwargs): """ impl = get_engine(engine) - return impl.read(path) + return impl.read(path, columns=columns, **kwargs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 7f3f5630e49f9..df8b1b5cca1d3 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -74,15 +74,19 @@ .. versionadded:: 0.18.1 support for the Python parser. header : int or list of ints, default 'infer' - Row number(s) to use as the column names, and the start of the data. - Default behavior is as if set to 0 if no ``names`` passed, otherwise - ``None``. Explicitly pass ``header=0`` to be able to replace existing - names. The header can be a list of integers that specify row locations for - a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not - specified will be skipped (e.g. 2 in this example is skipped). Note that - this parameter ignores commented lines and empty lines if - ``skip_blank_lines=True``, so header=0 denotes the first line of data - rather than the first line of the file. + Row number(s) to use as the column names, and the start of the + data. 
Default behavior is to infer the column names: if no names + are passed the behavior is identical to ``header=0`` and column + names are inferred from the first line of the file, if column + names are passed explicitly then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to + replace existing names. The header can be a list of integers that + specify row locations for a multi-index on the columns + e.g. [0,1,3]. Intervening rows that are not specified will be + skipped (e.g. 2 in this example is skipped). Note that this + parameter ignores commented lines and empty lines if + ``skip_blank_lines=True``, so header=0 denotes the first line of + data rather than the first line of the file. names : array-like, default None List of column names to use. If file contains no header row, then you should explicitly pass header=None. Duplicates in this list will cause @@ -1231,6 +1235,8 @@ def __init__(self, kwds): self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') + self.na_filter = kwds.get('na_filter', False) + self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') self.as_recarray = kwds.get('as_recarray', False) @@ -1404,7 +1410,6 @@ def _make_index(self, data, alldata, columns, indexnamerow=False): elif not self._has_complex_date_col: index = self._get_simple_index(alldata, columns) index = self._agg_index(index) - elif self._has_complex_date_col: if not self._name_processed: (self.index_names, _, @@ -1487,8 +1492,12 @@ def _agg_index(self, index, try_parse_dates=True): if (try_parse_dates and self._should_parse_dates(i)): arr = self._date_conv(arr) - col_na_values = self.na_values - col_na_fvalues = self.na_fvalues + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() if isinstance(self.na_values, dict): col_name = self.index_names[i] @@ -1671,8 +1680,8 @@ def __init__(self, src, 
**kwds): ParserBase.__init__(self, kwds) - if (kwds.get('compression') is None - and 'utf-16' in (kwds.get('encoding') or '')): + if (kwds.get('compression') is None and + 'utf-16' in (kwds.get('encoding') or '')): # if source is utf-16 plain text, convert source to utf-8 if isinstance(src, compat.string_types): src = open(src, 'rb') @@ -2043,8 +2052,6 @@ def __init__(self, f, **kwds): self.names_passed = kwds['names'] or None - self.na_filter = kwds['na_filter'] - self.has_index_names = False if 'has_index_names' in kwds: self.has_index_names = kwds['has_index_names'] diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c42c19e1357bc..a9b4f504dd624 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -103,12 +103,12 @@ def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors='ignore', **format) else: - if format in ['D', 's', 'ms', 'us', 'ns']: - return to_datetime(col, errors='coerce', unit=format, utc=utc) - elif (issubclass(col.dtype.type, np.floating) or - issubclass(col.dtype.type, np.integer)): - # parse dates as timestamp - format = 's' if format is None else format + # Allow passing of formatting string for integers + # GH17855 + if format is None and (issubclass(col.dtype.type, np.floating) or + issubclass(col.dtype.type, np.integer)): + format = 's' + if format in ['D', 'd', 'h', 'm', 's', 'ms', 'us', 'ns']: return to_datetime(col, errors='coerce', unit=format, utc=utc) elif is_datetime64tz_dtype(col): # coerce to UTC timezone diff --git a/pandas/io/stata.py b/pandas/io/stata.py index afc1631a947c8..aafe5f2ce76bd 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -306,11 +306,11 @@ def convert_delta_safe(base, deltas, unit): data_col[bad_locs] = 1.0 # Replace with NaT dates = dates.astype(np.int64) - if fmt in ["%tc", "tc"]: # Delta ms relative to base + if fmt.startswith(("%tc", "tc")): # Delta ms relative to base base = stata_epoch ms = dates conv_dates = convert_delta_safe(base, ms, 
'ms') - elif fmt in ["%tC", "tC"]: + elif fmt.startswith(("%tC", "tC")): from warnings import warn warn("Encountered %tC format. Leaving in Stata Internal Format.") @@ -318,27 +318,30 @@ def convert_delta_safe(base, deltas, unit): if has_bad_values: conv_dates[bad_locs] = pd.NaT return conv_dates - elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base + # Delta days relative to base + elif fmt.startswith(("%td", "td", "%d", "d")): base = stata_epoch days = dates conv_dates = convert_delta_safe(base, days, 'd') - elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week + # does not count leap days - 7 days is a week. + # 52nd week may have more than 7 days + elif fmt.startswith(("%tw", "tw")): year = stata_epoch.year + dates // 52 days = (dates % 52) * 7 conv_dates = convert_year_days_safe(year, days) - elif fmt in ["%tm", "tm"]: # Delta months relative to base + elif fmt.startswith(("%tm", "tm")): # Delta months relative to base year = stata_epoch.year + dates // 12 month = (dates % 12) + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%tq", "tq"]: # Delta quarters relative to base + elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base year = stata_epoch.year + dates // 4 month = (dates % 4) * 3 + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%th", "th"]: # Delta half-years relative to base + elif fmt.startswith(("%th", "th")): # Delta half-years relative to base year = stata_epoch.year + dates // 2 month = (dates % 2) * 6 + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%ty", "ty"]: # Years -- not delta + elif fmt.startswith(("%ty", "ty")): # Years -- not delta year = dates month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, month) @@ -1029,10 +1032,6 @@ def _read_header(self): # calculate size of a data record self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist) - # remove format details from %td - self.fmtlist = ["%td" if 
x.startswith("%td") else x - for x in self.fmtlist] - def _read_new_header(self, first_char): # The first part of the header is common to 117 and 118. self.path_or_buf.read(27) # stata_dta><header><release> @@ -1578,7 +1577,8 @@ def read(self, nrows=None, convert_dates=None, self._do_convert_missing(data, convert_missing) if convert_dates: - cols = np.where(lmap(lambda x: x in _date_formats, + cols = np.where(lmap(lambda x: any(x.startswith(fmt) + for fmt in _date_formats), self.fmtlist))[0] for i in cols: col = data.columns[i] diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py index 8f98e297e3e66..385d4d7f047c7 100644 --- a/pandas/plotting/__init__.py +++ b/pandas/plotting/__init__.py @@ -11,3 +11,10 @@ from pandas.plotting._core import boxplot from pandas.plotting._style import plot_params from pandas.plotting._tools import table +try: + from pandas.plotting._converter import \ + register as register_matplotlib_converters + from pandas.plotting._converter import \ + deregister as deregister_matplotlib_converters +except ImportError: + pass diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 47d15195315ba..357e84d1f17ea 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -1,3 +1,4 @@ +import warnings from datetime import datetime, timedelta import datetime as pydt import numpy as np @@ -44,14 +45,96 @@ MUSEC_PER_DAY = 1e6 * SEC_PER_DAY +_WARN = True # Global for whether pandas has registered the units explicitly +_mpl_units = {} # Cache for units overwritten by us -def register(): - units.registry[lib.Timestamp] = DatetimeConverter() - units.registry[Period] = PeriodConverter() - units.registry[pydt.datetime] = DatetimeConverter() - units.registry[pydt.date] = DatetimeConverter() - units.registry[pydt.time] = TimeConverter() - units.registry[np.datetime64] = DatetimeConverter() + +def get_pairs(): + pairs = [ + (lib.Timestamp, DatetimeConverter), + (Period, PeriodConverter), + 
(pydt.datetime, DatetimeConverter), + (pydt.date, DatetimeConverter), + (pydt.time, TimeConverter), + (np.datetime64, DatetimeConverter), + ] + return pairs + + +def register(explicit=True): + """Register Pandas Formatters and Converters with matplotlib + + This function modifies the global ``matplotlib.units.registry`` + dictionary. Pandas adds custom converters for + + * pd.Timestamp + * pd.Period + * np.datetime64 + * datetime.datetime + * datetime.date + * datetime.time + + See Also + -------- + deregister_matplotlib_converter + """ + # Renamed in pandas.plotting.__init__ + global _WARN + + if explicit: + _WARN = False + + pairs = get_pairs() + for type_, cls in pairs: + converter = cls() + if type_ in units.registry: + previous = units.registry[type_] + _mpl_units[type_] = previous + units.registry[type_] = converter + + +def deregister(): + """Remove pandas' formatters and converters + + Removes the custom converters added by :func:`register`. This + attempts to set the state of the registry back to the state before + pandas registered its own units. Converters for pandas' own types like + Timestamp and Period are removed completely. Converters for types + pandas overwrites, like ``datetime.datetime``, are restored to their + original value. + + See Also + -------- + deregister_matplotlib_converters + """ + # Renamed in pandas.plotting.__init__ + for type_, cls in get_pairs(): + # We use type to catch our classes directly, no inheritance + if type(units.registry.get(type_)) is cls: + units.registry.pop(type_) + + # restore the old keys + for unit, formatter in _mpl_units.items(): + if type(formatter) not in {DatetimeConverter, PeriodConverter, + TimeConverter}: + # make it idempotent by excluding ours. + units.registry[unit] = formatter + + +def _check_implicitly_registered(): + global _WARN + + if _WARN: + msg = ("Using an implicitly registered datetime converter for a " + "matplotlib plotting method. The converter was registered " + "by pandas on import. 
Future versions of pandas will require " + "you to explicitly register matplotlib converters.\n\n" + "To register the converters:\n\t" + ">>> from pandas.plotting import register_matplotlib_converters" + "\n\t" + ">>> register_matplotlib_converters()") + warnings.warn(msg, FutureWarning) + _WARN = False def _to_ordinalf(tm): @@ -189,6 +272,7 @@ class DatetimeConverter(dates.DateConverter): @staticmethod def convert(values, unit, axis): # values might be a 1-d array, or a list-like of arrays. + _check_implicitly_registered() if is_nested_list_like(values): values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] @@ -273,6 +357,7 @@ class PandasAutoDateLocator(dates.AutoDateLocator): def get_locator(self, dmin, dmax): 'Pick the best locator based on a distance.' + _check_implicitly_registered() delta = relativedelta(dmax, dmin) num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days @@ -314,6 +399,7 @@ def get_unit_generic(freq): def __call__(self): # if no data have been set, this will tank with a ValueError + _check_implicitly_registered() try: dmin, dmax = self.viewlim_to_dt() except ValueError: @@ -914,6 +1000,8 @@ def _get_default_locs(self, vmin, vmax): def __call__(self): 'Return the locations of the ticks.' # axis calls Locator.set_axis inside set_m<xxxx>_formatter + _check_implicitly_registered() + vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None @@ -998,6 +1086,8 @@ def set_locs(self, locs): 'Sets the locations of the ticks' # don't actually use the locs. This is just needed to work with # matplotlib. 
Force to use vmin, vmax + _check_implicitly_registered() + self.locs = locs (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) @@ -1009,6 +1099,8 @@ def set_locs(self, locs): self._set_default_format(vmin, vmax) def __call__(self, x, pos=0): + _check_implicitly_registered() + if self.formatdict is None: return '' else: @@ -1039,6 +1131,7 @@ def format_timedelta_ticks(x, pos, n_decimals): return s def __call__(self, x, pos=0): + _check_implicitly_registered() (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) if n_decimals > 9: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 43f33cf30dea1..e1380953e4519 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -11,6 +11,7 @@ from pandas.util._decorators import cache_readonly from pandas.core.base import PandasObject +from pandas.core.config import get_option from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike from pandas.core.dtypes.common import ( is_list_like, @@ -40,16 +41,13 @@ _get_xlim, _set_ticks_props, format_date_labels) -_registered = False - - -def _setup(): - # delay the import of matplotlib until nescessary - global _registered - if not _registered: - from pandas.plotting import _converter - _converter.register() - _registered = True +try: + from pandas.plotting import _converter +except ImportError: + pass +else: + if get_option('plotting.matplotlib.register_converters'): + _converter.register(explicit=True) def _get_standard_kind(kind): @@ -99,7 +97,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, secondary_y=False, colormap=None, table=False, layout=None, **kwds): - _setup() + _converter._WARN = False self.data = data self.by = by @@ -383,12 +381,16 @@ def _add_table(self): def _post_plot_logic_common(self, ax, data): """Common post process for each axes""" - labels = [pprint_thing(key) for key in data.index] - labels = 
dict(zip(range(len(data.index)), labels)) + + def get_label(i): + try: + return pprint_thing(data.index[i]) + except Exception: + return '' if self.orientation == 'vertical' or self.orientation is None: if self._need_to_set_index: - xticklabels = [labels.get(x, '') for x in ax.get_xticks()] + xticklabels = [get_label(x) for x in ax.get_xticks()] ax.set_xticklabels(xticklabels) self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize) @@ -400,7 +402,7 @@ def _post_plot_logic_common(self, ax, data): elif self.orientation == 'horizontal': if self._need_to_set_index: - yticklabels = [labels.get(y, '') for y in ax.get_yticks()] + yticklabels = [get_label(y) for y in ax.get_yticks()] ax.set_yticklabels(yticklabels) self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize) @@ -2059,7 +2061,7 @@ def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): import matplotlib.pyplot as plt - _setup() + _converter._WARN = False ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) @@ -2155,7 +2157,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, kwds : other plotting keyword arguments To be passed to hist function """ - _setup() + _converter._WARN = False if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, @@ -2289,6 +2291,8 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, ------- axes: collection of Matplotlib Axes """ + _converter._WARN = False + def plot_group(group, ax): ax.hist(group.dropna().values, bins=bins, **kwargs) @@ -2352,7 +2356,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, 
subplots=False) """ - _setup() + _converter._WARN = False if subplots is True: naxes = len(grouped) fig, axes = _subplots(naxes=naxes, squeeze=False, diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 3d04973ed0009..56b5311326e98 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -1,5 +1,7 @@ # TODO: Use the fact that axis can have units to simplify the process +import functools + import numpy as np from matplotlib import pylab @@ -293,6 +295,10 @@ def format_timedelta_ticks(x, pos, n_decimals): return s +def _format_coord(freq, t, y): + return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y) + + def format_dateaxis(subplot, freq, index): """ Pretty-formats the date axis (x-axis). @@ -327,8 +333,7 @@ def format_dateaxis(subplot, freq, index): subplot.xaxis.set_minor_formatter(minformatter) # x and y coord info - subplot.format_coord = lambda t, y: ( - "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)) + subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, TimedeltaIndex): subplot.xaxis.set_major_formatter( diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index d9fb458c83529..82a35fa711e8c 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -38,17 +38,17 @@ def test_downcast_conv(self): arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]) result = maybe_downcast_to_dtype(arr, 'infer') - assert (np.array_equal(result, arr)) + tm.assert_numpy_array_equal(result, arr) arr = np.array([8., 8., 8., 8., 8.9999999999995]) result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 8, 9]) - assert (np.array_equal(result, expected)) + expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) arr = np.array([8., 8., 8., 8., 9.0000000000005]) result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 
8, 9]) - assert (np.array_equal(result, expected)) + expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) # GH16875 coercing of bools ser = Series([True, True, False]) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 70273f9e999cf..7195cb43a70dc 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -416,6 +416,12 @@ def test_length_zero(self): result = lib.infer_dtype([]) assert result == 'empty' + # GH 18004 + arr = np.array([np.array([], dtype=object), + np.array([], dtype=object)]) + result = lib.infer_dtype(arr) + assert result == 'empty' + def test_integers(self): arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O') result = lib.infer_dtype(arr) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 1e2f630401c89..343e235fb741c 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -884,6 +884,27 @@ def test_filter_regex_search(self): exp = df[[x for x in df.columns if 'BB' in x]] assert_frame_equal(result, exp) + @pytest.mark.parametrize('name,expected', [ + ('a', DataFrame({u'a': [1, 2]})), + (u'a', DataFrame({u'a': [1, 2]})), + (u'あ', DataFrame({u'あ': [3, 4]})) + ]) + def test_filter_unicode(self, name, expected): + # GH13101 + df = DataFrame({u'a': [1, 2], u'あ': [3, 4]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + + @pytest.mark.parametrize('name', ['a', u'a']) + def test_filter_bytestring(self, name): + # GH13101 + df = DataFrame({b'a': [1, 2], b'b': [3, 4]}) + expected = DataFrame({b'a': [1, 2]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + def test_filter_corner(self): empty = DataFrame() diff --git a/pandas/tests/frame/test_constructors.py 
b/pandas/tests/frame/test_constructors.py index c55c79ef18602..8291e9d452348 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1913,10 +1913,11 @@ def test_from_records_len0_with_columns(self): # #2633 result = DataFrame.from_records([], index='foo', columns=['foo', 'bar']) + expected = Index(['bar']) - assert np.array_equal(result.columns, ['bar']) assert len(result) == 0 assert result.index.name == 'foo' + tm.assert_index_equal(result.columns, expected) def test_to_frame_with_falsey_names(self): # GH 16114 diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 5bdb76494f4c8..7d2d18db8d41c 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -1,6 +1,9 @@ # -*- coding: utf-8 -*- +from datetime import datetime + import pytest +import pytz import collections import numpy as np @@ -249,3 +252,18 @@ def test_to_dict_box_scalars(self): result = DataFrame(d).to_dict(orient='records') assert isinstance(result[0]['a'], (int, long)) + + def test_frame_to_dict_tz(self): + # GH18372 When converting to dict with orient='records' columns of + # datetime that are tz-aware were not converted to required arrays + data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),), + (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)] + df = DataFrame(list(data), columns=["d", ]) + + result = df.to_dict(orient='records') + expected = [ + {'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)}, + {'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)}, + ] + tm.assert_dict_equal(result[0], expected[0]) + tm.assert_dict_equal(result[1], expected[1]) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index abb528f0d2179..5adcd3b6855ce 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -10,6 +10,8 @@ from pandas import (DataFrame, Series, 
date_range, Timedelta, Timestamp, compat, concat, option_context) from pandas.compat import u +from pandas import _np_version_under1p14 + from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.tests.frame.common import TestData from pandas.util.testing import (assert_series_equal, @@ -531,7 +533,12 @@ def test_astype_str(self): assert_frame_equal(result, expected) result = DataFrame([1.12345678901234567890]).astype(tt) - expected = DataFrame(['1.12345678901']) + if _np_version_under1p14: + # < 1.14 truncates + expected = DataFrame(['1.12345678901']) + else: + # >= 1.14 preserves the full repr + expected = DataFrame(['1.1234567890123457']) assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype_class", [dict, Series]) diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 4f77ba0ae1f5a..5b903c5a1eaf6 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -448,7 +448,7 @@ def test_as_matrix_duplicates(self): expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']], dtype=object) - assert np.array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected) def test_set_value_by_index(self): # See gh-12344 diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 4162a586f8063..ca8a0d8bda3ab 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1203,3 +1203,16 @@ def test_period_index_date_overflow(self): expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n' assert result == expected + + def test_multi_index_header(self): + # see gh-5539 + columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), + ("b", 1), ("b", 2)]) + df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + df.columns = columns + + header = ["a", "b", "c", "d"] + result = df.to_csv(header=header) + + expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n" + assert result == expected diff --git 
a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 485241d593d4f..787d99086873e 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -2,9 +2,11 @@ from __future__ import print_function import numpy as np +import pytest -from pandas import (DataFrame, Series, MultiIndex) -from pandas.util.testing import assert_series_equal +from pandas import (DataFrame, Series, MultiIndex, Timestamp, Timedelta, + Period) +from pandas.util.testing import (assert_series_equal, assert_frame_equal) from pandas.compat import (range, product as cart_product) @@ -195,3 +197,18 @@ def test_ngroup_respects_groupby_order(self): g.ngroup()) assert_series_equal(Series(df['group_index'].values), g.cumcount()) + + @pytest.mark.parametrize('datetimelike', [ + [Timestamp('2016-05-%02d 20:09:25+00:00' % i) for i in range(1, 4)], + [Timestamp('2016-05-%02d 20:09:25' % i) for i in range(1, 4)], + [Timedelta(x, unit="h") for x in range(1, 4)], + [Period(freq="2W", year=2017, month=x) for x in range(1, 4)]]) + def test_count_with_datetimelike(self, datetimelike): + # test for #13393, where DataframeGroupBy.count() fails + # when counting a datetimelike column. 
+ + df = DataFrame({'x': ['a', 'a', 'b'], 'y': datetimelike}) + res = df.groupby('x').count() + expected = DataFrame({'y': [2, 1]}, index=['a', 'b']) + expected.index.name = "x" + assert_frame_equal(expected, res) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 9d25117fbd954..675f8d6413b2a 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -10,7 +10,7 @@ from pandas import (date_range, bdate_range, Timestamp, Index, MultiIndex, DataFrame, Series, - concat, Panel, DatetimeIndex) + concat, Panel, DatetimeIndex, CategoricalIndex) from pandas.errors import UnsupportedFunctionCall, PerformanceWarning from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, assert_almost_equal, @@ -28,6 +28,15 @@ from .common import MixIn +class TestGrouper(object): + + def test_repr(self): + # GH18203 + result = repr(pd.Grouper(key='A', level='B')) + expected = "Grouper(key='A', level='B', axis=0, sort=False)" + assert result == expected + + class TestGroupBy(MixIn): def test_basic(self): @@ -253,6 +262,29 @@ def test_grouper_column_and_index(self): expected = df_single.reset_index().groupby(['inner', 'B']).mean() assert_frame_equal(result, expected) + def test_groupby_categorical_index_and_columns(self): + # GH18432 + columns = ['A', 'B', 'A', 'B'] + categories = ['B', 'A'] + data = np.ones((5, 4), int) + cat_columns = CategoricalIndex(columns, + categories=categories, + ordered=True) + df = DataFrame(data=data, columns=cat_columns) + result = df.groupby(axis=1, level=0).sum() + expected_data = 2 * np.ones((5, 2), int) + expected_columns = CategoricalIndex(categories, + categories=categories, + ordered=True) + expected = DataFrame(data=expected_data, columns=expected_columns) + assert_frame_equal(result, expected) + + # test transposed version + df = DataFrame(data.T, index=cat_columns) + result = df.groupby(axis=0, level=0).sum() + expected = 
DataFrame(data=expected_data.T, index=expected_columns) + assert_frame_equal(result, expected) + def test_grouper_getting_correct_binner(self): # GH 10063 diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 456e5a9bd6439..3a57337efea6f 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -996,3 +996,16 @@ def test_searchsorted_monotonic(self, indices): # non-monotonic should raise. with pytest.raises(ValueError): indices._searchsorted_monotonic(value, side='left') + + def test_putmask_with_wrong_mask(self): + # GH18368 + index = self.create_index() + + with pytest.raises(ValueError): + index.putmask(np.ones(len(index) + 1, np.bool), 1) + + with pytest.raises(ValueError): + index.putmask(np.ones(len(index) - 1, np.bool), 1) + + with pytest.raises(ValueError): + index.putmask('foo', 1) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3b40ef092f364..1349f2f761a2f 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -20,11 +20,6 @@ START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) -def eq_gen_range(kwargs, expected): - rng = generate_range(**kwargs) - assert (np.array_equal(list(rng), expected)) - - class TestDateRanges(TestData): def test_date_range_gen_error(self): @@ -201,20 +196,23 @@ def test_generate_cday(self): assert rng1 == rng2 def test_1(self): - eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2), - [datetime(2009, 3, 25), datetime(2009, 3, 26)]) + rng = list(generate_range(start=datetime(2009, 3, 25), periods=2)) + expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)] + assert rng == expected def test_2(self): - eq_gen_range(dict(start=datetime(2008, 1, 1), - end=datetime(2008, 1, 3)), - [datetime(2008, 1, 1), - datetime(2008, 1, 2), - datetime(2008, 1, 3)]) + rng = list(generate_range(start=datetime(2008, 1, 1), + end=datetime(2008, 1, 3))) 
+ expected = [datetime(2008, 1, 1), + datetime(2008, 1, 2), + datetime(2008, 1, 3)] + assert rng == expected def test_3(self): - eq_gen_range(dict(start=datetime(2008, 1, 5), - end=datetime(2008, 1, 6)), - []) + rng = list(generate_range(start=datetime(2008, 1, 5), + end=datetime(2008, 1, 6))) + expected = [] + assert rng == expected def test_precision_finer_than_offset(self): # GH 9907 @@ -236,6 +234,22 @@ def test_precision_finer_than_offset(self): tm.assert_index_equal(result1, expected1) tm.assert_index_equal(result2, expected2) + dt1, dt2 = '2017-01-01', '2017-01-01' + tz1, tz2 = 'US/Eastern', 'Europe/London' + + @pytest.mark.parametrize("start,end", [ + (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2)), + (pd.Timestamp(dt1), pd.Timestamp(dt2, tz=tz2)), + (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2, tz=tz2)), + (pd.Timestamp(dt1, tz=tz2), pd.Timestamp(dt2, tz=tz1)) + ]) + def test_mismatching_tz_raises_err(self, start, end): + # issue 18488 + with pytest.raises(TypeError): + pd.date_range(start, end) + with pytest.raises(TypeError): + pd.DatetimeIndex(start, end, freq=BDay()) + class TestBusinessDateRange(object): diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index e7d03aa193cbd..04c180350fb72 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -2,9 +2,10 @@ import pytest -from datetime import datetime +from datetime import datetime, date import numpy as np import pandas as pd +import operator as op from pandas import (DatetimeIndex, Series, DataFrame, date_range, Index, Timedelta, Timestamp) @@ -268,3 +269,21 @@ def test_loc_datetime_length_one(self): result = df.loc['2016-10-01T00:00:00':] tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize('datetimelike', [ + Timestamp('20130101'), datetime(2013, 1, 1), + date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')]) + 
@pytest.mark.parametrize('op,expected', [ + (op.lt, [True, False, False, False]), + (op.le, [True, True, False, False]), + (op.eq, [False, True, False, False]), + (op.gt, [False, False, False, True])]) + def test_selection_by_datetimelike(self, datetimelike, op, expected): + # GH issue #17965, test for ability to compare datetime64[ns] columns + # to datetimelike + df = DataFrame({'A': [pd.Timestamp('20120101'), + pd.Timestamp('20130101'), + np.nan, pd.Timestamp('20130103')]}) + result = op(df.A, datetimelike) + expected = Series(expected, name='A') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 330ec9f357655..c7944c078d8c4 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -960,6 +960,7 @@ def test_guess_datetime_format_nopadding(self): for dt_string, dt_format in dt_string_to_format: assert tools._guess_datetime_format(dt_string) == dt_format + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") def test_guess_datetime_format_for_array(self): tm._skip_if_not_us_locale() expected_format = '%Y-%m-%d %H:%M:%S.%f' diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index d8ec23b9c7e0e..5e40e06d57413 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -4,6 +4,7 @@ import pandas.util.testing as tm from pandas.core.indexes.api import Index, CategoricalIndex +from pandas.core.dtypes.dtypes import CategoricalDtype from .common import Base from pandas.compat import range, PY3 @@ -95,6 +96,11 @@ def test_construction(self): 1, -1, 0], dtype='int8')) assert result.ordered + result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True) + expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True, + dtype='category') + tm.assert_index_equal(result, expected, exact=True) + # turn me to an Index 
result = Index(np.array(ci)) assert isinstance(result, Index) @@ -125,6 +131,25 @@ def test_construction_with_dtype(self): result = CategoricalIndex(idx, categories=idx, ordered=True) tm.assert_index_equal(result, expected, exact=True) + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH18109 + data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = pd.CategoricalIndex(data, dtype=dtype) + expected = pd.CategoricalIndex(data, categories=cats, + ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # error to combine categories or ordered and dtype keywords args + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, categories=cats, dtype=dtype) + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, ordered=ordered, dtype=dtype) + def test_create_categorical(self): # https://github.com/pandas-dev/pandas/pull/17513 # The public CI constructor doesn't hit this code path with diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index b55bab3a210cc..399d88309072e 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -6,6 +6,7 @@ from pandas import (Interval, IntervalIndex, Index, isna, interval_range, Timestamp, Timedelta, compat, date_range, timedelta_range, DateOffset) +from pandas.compat import zip from pandas.tseries.offsets import Day from pandas._libs.interval import IntervalTree from pandas.tests.indexes.common import Base @@ -13,6 +14,11 @@ import pandas as pd +@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) +def closed(request): + return request.param + + class TestIntervalIndex(Base): _holder = IntervalIndex @@ -22,34 +28,63 @@ def 
setup_method(self, method): [(0, 1), np.nan, (1, 2)]) self.indices = dict(intervalIndex=tm.makeIntervalIndex(10)) - def create_index(self): - return IntervalIndex.from_breaks(np.arange(10)) + def create_index(self, closed='right'): + return IntervalIndex.from_breaks(np.arange(3), closed=closed) - def test_constructors(self): - expected = self.index - actual = IntervalIndex.from_breaks(np.arange(3), closed='right') - assert expected.equals(actual) + def create_index_with_nan(self, closed='right'): + return IntervalIndex.from_tuples( + [(0, 1), np.nan, (1, 2)], closed=closed) - alternate = IntervalIndex.from_breaks(np.arange(3), closed='left') - assert not expected.equals(alternate) + @pytest.mark.parametrize('name', [None, 'foo']) + def test_constructors(self, closed, name): + left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4]) + ivs = [Interval(l, r, closed=closed) for l, r in zip(left, right)] + expected = IntervalIndex._simple_new( + left=left, right=right, closed=closed, name=name) - actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)]) - assert expected.equals(actual) + result = IntervalIndex(ivs, name=name) + tm.assert_index_equal(result, expected) - actual = IntervalIndex([Interval(0, 1), Interval(1, 2)]) - assert expected.equals(actual) + result = IntervalIndex.from_intervals(ivs, name=name) + tm.assert_index_equal(result, expected) - actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1, - closed='right') - assert expected.equals(actual) + result = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name=name) + tm.assert_index_equal(result, expected) - actual = Index([Interval(0, 1), Interval(1, 2)]) - assert isinstance(actual, IntervalIndex) - assert expected.equals(actual) + result = IntervalIndex.from_arrays( + left.values, right.values, closed=closed, name=name) + tm.assert_index_equal(result, expected) - actual = Index(expected) - assert isinstance(actual, IntervalIndex) - assert expected.equals(actual) + result 
= IntervalIndex.from_tuples( + zip(left, right), closed=closed, name=name) + tm.assert_index_equal(result, expected) + + result = Index(ivs, name=name) + assert isinstance(result, IntervalIndex) + tm.assert_index_equal(result, expected) + + # idempotent + tm.assert_index_equal(Index(expected), expected) + tm.assert_index_equal(IntervalIndex(expected), expected) + + result = IntervalIndex.from_intervals( + expected.values, name=expected.name) + tm.assert_index_equal(result, expected) + + left, right = expected.left, expected.right + result = IntervalIndex.from_arrays( + left, right, closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) + + result = IntervalIndex.from_tuples( + expected.to_tuples(), closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) + + breaks = expected.left.tolist() + [expected.right[-1]] + result = IntervalIndex.from_breaks( + breaks, closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) def test_constructors_other(self): @@ -66,43 +101,57 @@ def test_constructors_other(self): def test_constructors_errors(self): # scalar - with pytest.raises(TypeError): + msg = ('IntervalIndex(...) 
must be called with a collection of ' + 'some kind, 5 was passed') + with pytest.raises(TypeError, message=msg): IntervalIndex(5) # not an interval - with pytest.raises(TypeError): + msg = "type <class 'numpy.int32'> with value 0 is not an interval" + with pytest.raises(TypeError, message=msg): IntervalIndex([0, 1]) - with pytest.raises(TypeError): + with pytest.raises(TypeError, message=msg): IntervalIndex.from_intervals([0, 1]) # invalid closed - with pytest.raises(ValueError): + msg = "invalid options for 'closed': invalid" + with pytest.raises(ValueError, message=msg): IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid') # mismatched closed - with pytest.raises(ValueError): + msg = 'intervals must all be closed on the same side' + with pytest.raises(ValueError, message=msg): IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2, closed='left')]) - with pytest.raises(ValueError): + with pytest.raises(ValueError, message=msg): IntervalIndex.from_arrays([0, 10], [3, 5]) - with pytest.raises(ValueError): + with pytest.raises(ValueError, message=msg): Index([Interval(0, 1), Interval(2, 3, closed='left')]) # no point in nesting periods in an IntervalIndex - with pytest.raises(ValueError): + msg = 'Period dtypes are not supported, use a PeriodIndex instead' + with pytest.raises(ValueError, message=msg): IntervalIndex.from_breaks( pd.period_range('2000-01-01', periods=3)) - def test_constructors_datetimelike(self): + # decreasing breaks/arrays + msg = 'left side of interval must be <= right side' + with pytest.raises(ValueError, message=msg): + IntervalIndex.from_breaks(range(10, -1, -1)) + + with pytest.raises(ValueError, message=msg): + IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1)) + + def test_constructors_datetimelike(self, closed): # DTI / TDI for idx in [pd.date_range('20130101', periods=5), pd.timedelta_range('1 day', periods=5)]: - result = IntervalIndex.from_breaks(idx) - expected = IntervalIndex.from_breaks(idx.values) + result = 
IntervalIndex.from_breaks(idx, closed=closed) + expected = IntervalIndex.from_breaks(idx.values, closed=closed) tm.assert_index_equal(result, expected) expected_scalar_type = type(idx[0]) @@ -117,8 +166,8 @@ def f(): IntervalIndex.from_intervals([0.997, 4.0]) pytest.raises(TypeError, f) - def test_properties(self): - index = self.index + def test_properties(self, closed): + index = self.create_index(closed=closed) assert len(index) == 2 assert index.size == 2 assert index.shape == (2, ) @@ -127,14 +176,15 @@ def test_properties(self): tm.assert_index_equal(index.right, Index([1, 2])) tm.assert_index_equal(index.mid, Index([0.5, 1.5])) - assert index.closed == 'right' + assert index.closed == closed - expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object) + expected = np.array([Interval(0, 1, closed=closed), + Interval(1, 2, closed=closed)], dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) tm.assert_numpy_array_equal(index.values, expected) # with nans - index = self.index_with_nan + index = self.create_index_with_nan(closed=closed) assert len(index) == 3 assert index.size == 3 assert index.shape == (3, ) @@ -143,41 +193,43 @@ def test_properties(self): tm.assert_index_equal(index.right, Index([1, np.nan, 2])) tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5])) - assert index.closed == 'right' + assert index.closed == closed - expected = np.array([Interval(0, 1), np.nan, - Interval(1, 2)], dtype=object) + expected = np.array([Interval(0, 1, closed=closed), np.nan, + Interval(1, 2, closed=closed)], dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) tm.assert_numpy_array_equal(index.values, expected) - def test_with_nans(self): - index = self.index + def test_with_nans(self, closed): + index = self.create_index(closed=closed) assert not index.hasnans tm.assert_numpy_array_equal(index.isna(), np.array([False, False])) tm.assert_numpy_array_equal(index.notna(), np.array([True, True])) - index = 
self.index_with_nan + index = self.create_index_with_nan(closed=closed) assert index.hasnans tm.assert_numpy_array_equal(index.notna(), np.array([True, False, True])) tm.assert_numpy_array_equal(index.isna(), np.array([False, True, False])) - def test_copy(self): - actual = self.index.copy() - assert actual.equals(self.index) + def test_copy(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + + result = expected.copy() + assert result.equals(expected) - actual = self.index.copy(deep=True) - assert actual.equals(self.index) - assert actual.left is not self.index.left + result = expected.copy(deep=True) + assert result.equals(expected) + assert result.left is not expected.left - def test_ensure_copied_data(self): + def test_ensure_copied_data(self, closed): # exercise the copy flag in the constructor # not copying - index = self.index + index = self.create_index(closed=closed) result = IntervalIndex(index, copy=False) tm.assert_numpy_array_equal(index.left.values, result.left.values, check_same='same') @@ -191,23 +243,34 @@ def test_ensure_copied_data(self): tm.assert_numpy_array_equal(index.right.values, result.right.values, check_same='copy') - def test_equals(self): + def test_equals(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + assert expected.equals(expected) + assert expected.equals(expected.copy()) - idx = self.index - assert idx.equals(idx) - assert idx.equals(idx.copy()) + assert not expected.equals(expected.astype(object)) + assert not expected.equals(np.array(expected)) + assert not expected.equals(list(expected)) - assert not idx.equals(idx.astype(object)) - assert not idx.equals(np.array(idx)) - assert not idx.equals(list(idx)) + assert not expected.equals([1, 2]) + assert not expected.equals(np.array([1, 2])) + assert not expected.equals(pd.date_range('20130101', periods=2)) - assert not idx.equals([1, 2]) - assert not idx.equals(np.array([1, 2])) - assert not 
idx.equals(pd.date_range('20130101', periods=2)) + expected_name1 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name='foo') + expected_name2 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name='bar') + assert expected.equals(expected_name1) + assert expected_name1.equals(expected_name2) - def test_astype(self): + for other_closed in {'left', 'right', 'both', 'neither'} - {closed}: + expected_other_closed = IntervalIndex.from_breaks( + np.arange(5), closed=other_closed) + assert not expected.equals(expected_other_closed) - idx = self.index + def test_astype(self, closed): + + idx = self.create_index(closed=closed) for dtype in [np.int64, np.float64, 'datetime64[ns]', 'datetime64[ns, US/Eastern]', 'timedelta64', @@ -227,24 +290,24 @@ def test_astype(self): expected = pd.Categorical(idx, ordered=True) tm.assert_categorical_equal(result, expected) - def test_where(self): - expected = self.index - result = self.index.where(self.index.notna()) + def test_where(self, closed): + expected = self.create_index(closed=closed) + result = expected.where(expected.notna()) tm.assert_index_equal(result, expected) - idx = IntervalIndex.from_breaks([1, 2]) + idx = IntervalIndex.from_breaks([1, 2], closed=closed) result = idx.where([True, False]) expected = IntervalIndex.from_intervals( - [Interval(1.0, 2.0, closed='right'), np.nan]) + [Interval(1.0, 2.0, closed=closed), np.nan]) tm.assert_index_equal(result, expected) def test_where_array_like(self): pass - def test_delete(self): - expected = IntervalIndex.from_breaks([1, 2]) - actual = self.index.delete(0) - assert expected.equals(actual) + def test_delete(self, closed): + expected = IntervalIndex.from_breaks([1, 2], closed=closed) + result = self.create_index(closed=closed).delete(0) + tm.assert_index_equal(result, expected) def test_insert(self): expected = IntervalIndex.from_breaks(range(4)) @@ -255,113 +318,128 @@ def test_insert(self): pytest.raises(ValueError, self.index.insert, 0, Interval(2, 3, 
closed='left')) - def test_take(self): - actual = self.index.take([0, 1]) - assert self.index.equals(actual) + def test_take(self, closed): + index = self.create_index(closed=closed) - expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2]) - actual = self.index.take([0, 0, 1]) - assert expected.equals(actual) + actual = index.take([0, 1]) + tm.assert_index_equal(actual, index) + + expected = IntervalIndex.from_arrays( + [0, 0, 1], [1, 1, 2], closed=closed) + actual = index.take([0, 0, 1]) + tm.assert_index_equal(actual, expected) - def test_unique(self): + def test_unique(self, closed): # unique non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_unique # unique overlapping - distinct endpoints - idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)]) + idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) assert idx.is_unique # unique overlapping - shared endpoints - idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)]) + idx = pd.IntervalIndex.from_tuples( + [(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_unique # unique nested - idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)]) + idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) assert idx.is_unique # duplicate - idx = IntervalIndex.from_tuples([(0, 1), (0, 1), (2, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (0, 1), (2, 3)], closed=closed) assert not idx.is_unique # unique mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')]) + idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed) assert idx.is_unique # duplicate mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b'), (0, 1)]) + idx = IntervalIndex.from_tuples( + [(0, 1), ('a', 'b'), (0, 1)], closed=closed) assert not idx.is_unique # empty - idx = IntervalIndex([]) + idx = IntervalIndex([], closed=closed) assert idx.is_unique - def test_monotonic(self): + def 
test_monotonic(self, closed): # increasing non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing non-overlapping - idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)]) + idx = IntervalIndex.from_tuples( + [(4, 5), (2, 3), (1, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # unordered non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (4, 5), (2, 3)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # increasing overlapping - idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 2), (0.5, 2.5), (1, 3)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing overlapping - idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)]) + idx = IntervalIndex.from_tuples( + [(1, 3), (0.5, 2.5), (0, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # unordered overlapping - idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)]) + idx = IntervalIndex.from_tuples( + [(0.5, 2.5), (0, 2), (1, 3)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing 
# increasing overlapping shared endpoints - idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)]) + idx = pd.IntervalIndex.from_tuples( + [(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing overlapping shared endpoints - idx = pd.IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)]) + idx = pd.IntervalIndex.from_tuples( + [(2, 3), (1, 3), (1, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # stationary - idx = IntervalIndex.from_tuples([(0, 1), (0, 1)]) + idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed) assert idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # empty - idx = IntervalIndex([]) + idx = IntervalIndex([], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing @@ -395,24 +473,24 @@ def test_repr_max_seq_item_setting(self): def test_repr_roundtrip(self): super(TestIntervalIndex, self).test_repr_roundtrip() - def test_get_item(self): + def test_get_item(self, closed): i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), - closed='right') - assert i[0] == Interval(0.0, 1.0) - assert i[1] == Interval(1.0, 2.0) + closed=closed) + assert i[0] == Interval(0.0, 1.0, closed=closed) + assert i[1] == Interval(1.0, 2.0, closed=closed) assert isna(i[2]) result = i[0:1] - expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right') + expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed) tm.assert_index_equal(result, expected) result = i[0:2] - expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right') + expected = IntervalIndex.from_arrays((0., 1), (1., 2.), 
closed=closed) tm.assert_index_equal(result, expected) result = i[1:3] expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan), - closed='right') + closed=closed) tm.assert_index_equal(result, expected) def test_get_loc_value(self): @@ -581,20 +659,22 @@ def testcontains(self): assert not i.contains(20) assert not i.contains(-20) - def test_dropna(self): + def test_dropna(self, closed): - expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)]) + expected = IntervalIndex.from_tuples( + [(0.0, 1.0), (1.0, 2.0)], closed=closed) - ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan]) + ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan]) + ii = IntervalIndex.from_arrays( + [0, 1, np.nan], [1, 2, np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - def test_non_contiguous(self): - index = IntervalIndex.from_tuples([(0, 1), (2, 3)]) + def test_non_contiguous(self, closed): + index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) target = [0.5, 1.5, 2.5] actual = index.get_indexer(target) expected = np.array([0, -1, 1], dtype='intp') @@ -602,31 +682,32 @@ def test_non_contiguous(self): assert 1.5 not in index - def test_union(self): - other = IntervalIndex.from_arrays([2], [3]) - expected = IntervalIndex.from_arrays(range(3), range(1, 4)) - actual = self.index.union(other) + def test_union(self, closed): + idx = self.create_index(closed=closed) + other = IntervalIndex.from_arrays([2], [3], closed=closed) + expected = IntervalIndex.from_arrays( + range(3), range(1, 4), closed=closed) + actual = idx.union(other) assert expected.equals(actual) - actual = other.union(self.index) + actual = other.union(idx) assert expected.equals(actual) - tm.assert_index_equal(self.index.union(self.index), self.index) - tm.assert_index_equal(self.index.union(self.index[:1]), - 
self.index) + tm.assert_index_equal(idx.union(idx), idx) + tm.assert_index_equal(idx.union(idx[:1]), idx) - def test_intersection(self): - other = IntervalIndex.from_breaks([1, 2, 3]) - expected = IntervalIndex.from_breaks([1, 2]) - actual = self.index.intersection(other) + def test_intersection(self, closed): + idx = self.create_index(closed=closed) + other = IntervalIndex.from_breaks([1, 2, 3], closed=closed) + expected = IntervalIndex.from_breaks([1, 2], closed=closed) + actual = idx.intersection(other) assert expected.equals(actual) - tm.assert_index_equal(self.index.intersection(self.index), - self.index) + tm.assert_index_equal(idx.intersection(idx), idx) - def test_difference(self): - tm.assert_index_equal(self.index.difference(self.index[:1]), - self.index[1:]) + def test_difference(self, closed): + idx = self.create_index(closed=closed) + tm.assert_index_equal(idx.difference(idx[:1]), idx[1:]) def test_symmetric_difference(self): result = self.index[:1].symmetric_difference(self.index[1:]) @@ -639,11 +720,12 @@ def test_set_operation_errors(self): other = IntervalIndex.from_breaks([0, 1, 2], closed='neither') pytest.raises(ValueError, self.index.union, other) - def test_isin(self): - actual = self.index.isin(self.index) + def test_isin(self, closed): + idx = self.create_index(closed=closed) + actual = idx.isin(idx) tm.assert_numpy_array_equal(np.array([True, True]), actual) - actual = self.index.isin(self.index[:1]) + actual = idx.isin(idx[:1]) tm.assert_numpy_array_equal(np.array([True, False]), actual) def test_comparison(self): @@ -702,25 +784,28 @@ def test_comparison(self): with pytest.raises(ValueError): self.index > np.arange(3) - def test_missing_values(self): - idx = pd.Index([np.nan, pd.Interval(0, 1), pd.Interval(1, 2)]) - idx2 = pd.IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2]) + def test_missing_values(self, closed): + idx = Index([np.nan, Interval(0, 1, closed=closed), + Interval(1, 2, closed=closed)]) + idx2 = 
IntervalIndex.from_arrays( + [np.nan, 0, 1], [np.nan, 1, 2], closed=closed) assert idx.equals(idx2) with pytest.raises(ValueError): - IntervalIndex.from_arrays([np.nan, 0, 1], np.array([0, 1, 2])) + IntervalIndex.from_arrays( + [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed) tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) - def test_sort_values(self): - expected = IntervalIndex.from_breaks([1, 2, 3, 4]) - actual = IntervalIndex.from_tuples([(3, 4), (1, 2), - (2, 3)]).sort_values() + def test_sort_values(self, closed): + expected = IntervalIndex.from_breaks([1, 2, 3, 4], closed=closed) + actual = IntervalIndex.from_tuples( + [(3, 4), (1, 2), (2, 3)], closed=closed).sort_values() tm.assert_index_equal(expected, actual) # nan - idx = self.index_with_nan + idx = self.create_index_with_nan(closed=closed) mask = idx.isna() tm.assert_numpy_array_equal(mask, np.array([False, True, False])) @@ -733,84 +818,83 @@ def test_sort_values(self): tm.assert_numpy_array_equal(mask, np.array([True, False, False])) def test_datetime(self): - dates = pd.date_range('2000', periods=3) + dates = date_range('2000', periods=3) idx = IntervalIndex.from_breaks(dates) tm.assert_index_equal(idx.left, dates[:2]) tm.assert_index_equal(idx.right, dates[-2:]) - expected = pd.date_range('2000-01-01T12:00', periods=2) + expected = date_range('2000-01-01T12:00', periods=2) tm.assert_index_equal(idx.mid, expected) - assert pd.Timestamp('2000-01-01T12') not in idx - assert pd.Timestamp('2000-01-01T12') not in idx + assert Timestamp('2000-01-01T12') not in idx + assert Timestamp('2000-01-01T12') not in idx - target = pd.date_range('1999-12-31T12:00', periods=7, freq='12H') + target = date_range('1999-12-31T12:00', periods=7, freq='12H') actual = idx.get_indexer(target) expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp') tm.assert_numpy_array_equal(actual, expected) - def test_append(self): + def test_append(self, closed): - index1 = IntervalIndex.from_arrays([0, 1], [1, 
2]) - index2 = IntervalIndex.from_arrays([1, 2], [2, 3]) + index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) result = index1.append(index2) - expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3]) + expected = IntervalIndex.from_arrays( + [0, 1, 1, 2], [1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) result = index1.append([index1, index2]) - expected = IntervalIndex.from_arrays([0, 1, 0, 1, 1, 2], - [1, 2, 1, 2, 2, 3]) + expected = IntervalIndex.from_arrays( + [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) - def f(): - index1.append(IntervalIndex.from_arrays([0, 1], [1, 2], - closed='both')) - - pytest.raises(ValueError, f) + msg = ('can only append two IntervalIndex objects that are closed ' + 'on the same side') + for other_closed in {'left', 'right', 'both', 'neither'} - {closed}: + index_other_closed = IntervalIndex.from_arrays( + [0, 1], [1, 2], closed=other_closed) + with tm.assert_raises_regex(ValueError, msg): + index1.append(index_other_closed) - def test_is_non_overlapping_monotonic(self): + def test_is_non_overlapping_monotonic(self, closed): # Should be True in all cases tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is True + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is True - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is True + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is True # Should be False in all cases (overlapping) tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert 
idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is False # Should be False in all cases (non-monotonic) tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False - - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False - # Should be False for closed='both', overwise True (GH16560) - idx = IntervalIndex.from_breaks(range(4), closed='both') + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) assert idx.is_non_overlapping_monotonic is False - for closed in ('left', 'right', 'neither'): + # Should be False for closed='both', overwise True (GH16560) + if closed == 'both': + idx = IntervalIndex.from_breaks(range(4), closed=closed) + assert idx.is_non_overlapping_monotonic is False + else: idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is True class TestIntervalRange(object): - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_numeric(self, closed): # combinations of start/end/periods without freq expected = IntervalIndex.from_breaks( @@ -848,7 +932,6 @@ def test_construction_from_numeric(self, closed): closed=closed) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_timestamp(self, closed): # combinations of start/end/periods without freq start, end 
= Timestamp('2017-01-01'), Timestamp('2017-01-06') @@ -915,7 +998,6 @@ def test_construction_from_timestamp(self, closed): closed=closed) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_timedelta(self, closed): # combinations of start/end/periods without freq start, end = Timedelta('1 day'), Timedelta('6 days') diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 18bfc3d0efbee..c9c4029786c64 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -2980,3 +2980,13 @@ def test_nan_stays_float(self): assert pd.isna(df0.index.get_level_values(1)).all() # the following failed in 0.14.1 assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + def test_million_record_attribute_error(self): + # GH 18165 + r = list(range(1000000)) + df = pd.DataFrame({'a': r, 'b': r}, + index=pd.MultiIndex.from_tuples([(x, x) for x in r])) + + with tm.assert_raises_regex(AttributeError, + "'Series' object has no attribute 'foo'"): + df['a'].foo() diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py index 32609362e49af..3ad3b771b2ab2 100644 --- a/pandas/tests/indexing/test_timedelta.py +++ b/pandas/tests/indexing/test_timedelta.py @@ -2,6 +2,7 @@ import pandas as pd from pandas.util import testing as tm +import numpy as np class TestTimedeltaIndexing(object): @@ -47,3 +48,23 @@ def test_string_indexing(self): expected = df.iloc[0] sliced = df.loc['0 days'] tm.assert_series_equal(sliced, expected) + + @pytest.mark.parametrize( + "value", + [None, pd.NaT, np.nan]) + def test_masked_setitem(self, value): + # issue (#18586) + series = pd.Series([0, 1, 2], dtype='timedelta64[ns]') + series[series == series[0]] = value + expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]') + tm.assert_series_equal(series, expected) + + @pytest.mark.parametrize( + "value", + [None, pd.NaT, 
np.nan]) + def test_listlike_setitem(self, value): + # issue (#18586) + series = pd.Series([0, 1, 2], dtype='timedelta64[ns]') + series.iloc[0] = value + expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]') + tm.assert_series_equal(series, expected) diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py new file mode 100644 index 0000000000000..828d5d0ccd3c6 --- /dev/null +++ b/pandas/tests/io/conftest.py @@ -0,0 +1,74 @@ +import os + +import moto +import pytest +from pandas.io.parsers import read_table + +HERE = os.path.dirname(__file__) + + +@pytest.fixture(scope='module') +def tips_file(): + """Path to the tips dataset""" + return os.path.join(HERE, 'parser', 'data', 'tips.csv') + + +@pytest.fixture(scope='module') +def jsonl_file(): + """Path a JSONL dataset""" + return os.path.join(HERE, 'parser', 'data', 'items.jsonl') + + +@pytest.fixture(scope='module') +def salaries_table(): + """DataFrame with the salaries dataset""" + path = os.path.join(HERE, 'parser', 'data', 'salaries.csv') + return read_table(path) + + +@pytest.fixture(scope='module') +def s3_resource(tips_file, jsonl_file): + """Fixture for mocking S3 interaction. + + The primary bucket name is "pandas-test". The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + + A private bucket "cant_get_it" is also created. The boto3 s3 resource + is yielded by the fixture. 
+ """ + pytest.importorskip('s3fs') + moto.mock_s3().start() + + test_s3_files = [ + ('tips.csv', tips_file), + ('tips.csv.gz', tips_file + '.gz'), + ('tips.csv.bz2', tips_file + '.bz2'), + ('items.jsonl', jsonl_file), + ] + + def add_tips_files(bucket_name): + for s3_key, file_name in test_s3_files: + with open(file_name, 'rb') as f: + conn.Bucket(bucket_name).put_object( + Key=s3_key, + Body=f) + + boto3 = pytest.importorskip('boto3') + # see gh-16135 + bucket = 'pandas-test' + + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + add_tips_files(bucket) + + conn.create_bucket(Bucket='cant_get_it', ACL='private') + add_tips_files('cant_get_it') + + yield conn + + moto.mock_s3().stop() diff --git a/pandas/tests/io/data/stata13_dates.dta b/pandas/tests/io/data/stata13_dates.dta new file mode 100644 index 0000000000000..87b857559e501 Binary files /dev/null and b/pandas/tests/io/data/stata13_dates.dta differ diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index aa86d1d9231fb..c0b7d4cee384a 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -221,6 +221,28 @@ def test_to_latex_multiindex(self): assert result == expected + def test_to_latex_multiindex_dupe_level(self): + # see gh-14484 + # + # If an index is repeated in subsequent rows, it should be + # replaced with a blank in the created table. This should + # ONLY happen if all higher order indices (to the left) are + # equal too. In this test, 'c' has to be printed both times + # because the higher order index 'A' != 'B'. 
+ df = pd.DataFrame(index=pd.MultiIndex.from_tuples( + [('A', 'c'), ('B', 'c')]), columns=['col']) + result = df.to_latex() + expected = r"""\begin{tabular}{lll} +\toprule + & & col \\ +\midrule +A & c & NaN \\ +B & c & NaN \\ +\bottomrule +\end{tabular} +""" + assert result == expected + def test_to_latex_multicolumnrow(self): df = pd.DataFrame({ ('c1', 0): dict((x, x) for x in range(5)), diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 49b765b18d623..1cceae32cd748 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -173,6 +173,21 @@ def test_meta_name_conflict(self): for val in ['metafoo', 'metabar', 'foo', 'bar']: assert val in result + def test_meta_parameter_not_modified(self): + # GH 18610 + data = [{'foo': 'hello', + 'bar': 'there', + 'data': [{'foo': 'something', 'bar': 'else'}, + {'foo': 'something2', 'bar': 'else2'}]}] + + COLUMNS = ['foo', 'bar'] + result = json_normalize(data, 'data', meta=COLUMNS, + meta_prefix='meta') + + assert COLUMNS == ['foo', 'bar'] + for val in ['metafoo', 'metabar', 'foo', 'bar']: + assert val in result + def test_record_prefix(self, state_data): result = json_normalize(state_data[0], 'counties') expected = DataFrame(state_data[0]['counties']) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 6625446bea469..78e33f8966d1f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -4,7 +4,6 @@ from pandas.compat import (range, lrange, StringIO, OrderedDict, is_platform_32bit) import os - import numpy as np from pandas import (Series, DataFrame, DatetimeIndex, Timestamp, read_json, compat) @@ -1030,6 +1029,70 @@ def test_tz_range_is_utc(self): df = DataFrame({'DT': dti}) assert dumps(df, iso_dates=True) == dfexp + def test_read_inline_jsonl(self): + # GH9180 + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) + expected = DataFrame([[1, 
2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_s3_jsonl(self, s3_resource): + pytest.importorskip('s3fs') + # GH17200 + + result = read_json('s3n://pandas-test/items.jsonl', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_local_jsonl(self): + # GH17200 + with ensure_clean('tmp_items.json') as path: + with open(path, 'w') as infile: + infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') + result = read_json(path, lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_jsonl_unicode_chars(self): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + # simulate string + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_to_jsonl(self): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], + columns=["a\\", 'b']) + result = df.to_json(orient="records", lines=True) + 
expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n' + '{"a\\\\":"foo\\"","b":"bar"}') + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + def test_latin_encoding(self): if compat.PY2: tm.assert_raises_regex( diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index c68b2bf064d97..6d476e326213e 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -290,11 +290,11 @@ def test_empty_header_read(count): test_empty_header_read(count) def test_parse_trim_buffers(self): - # This test is part of a bugfix for issue #13703. It attmepts to + # This test is part of a bugfix for issue #13703. It attempts to # to stress the system memory allocator, to cause it to move the # stream buffer and either let the OS reclaim the region, or let # other memory requests of parser otherwise modify the contents - # of memory space, where it was formely located. + # of memory space, where it was formally located. # This test is designed to cause a `segfault` with unpatched # `tokenizer.c`. Sometimes the test fails on `segfault`, other # times it fails due to memory corruption, which causes the @@ -346,7 +346,7 @@ def test_parse_trim_buffers(self): # Generate the expected output: manually create the dataframe # by splitting by comma and repeating the `n_lines` times. - row = tuple(val_ if val_ else float("nan") + row = tuple(val_ if val_ else np.nan for val_ in record_.split(",")) expected = pd.DataFrame([row for _ in range(n_lines)], dtype=object, columns=None, index=None) @@ -359,6 +359,15 @@ def test_parse_trim_buffers(self): # Check for data corruption if there was no segfault tm.assert_frame_equal(result, expected) + # This extra test was added to replicate the fault in gh-5291. + # Force 'utf-8' encoding, so that `_string_convert` would take + # a different execution branch. 
+ chunks_ = self.read_csv(StringIO(csv_data), header=None, + dtype=object, chunksize=chunksize, + encoding='utf_8') + result = pd.concat(chunks_, axis=0, ignore_index=True) + tm.assert_frame_equal(result, expected) + def test_internal_null_byte(self): # see gh-14012 # diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index e85d3ad294655..6a996213b28bb 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -823,7 +823,7 @@ def test_parse_integers_above_fp_precision(self): 17007000002000192, 17007000002000194]}) - assert np.array_equal(result['Numbers'], expected['Numbers']) + tm.assert_series_equal(result['Numbers'], expected['Numbers']) def test_chunks_have_consistent_numerical_type(self): integers = [str(i) for i in range(499999)] diff --git a/pandas/tests/io/parser/data/items.jsonl b/pandas/tests/io/parser/data/items.jsonl new file mode 100644 index 0000000000000..f784d37befa82 --- /dev/null +++ b/pandas/tests/io/parser/data/items.jsonl @@ -0,0 +1,2 @@ +{"a": 1, "b": 2} +{"b":2, "a" :1} diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py index 7d3df6201a390..b91ce04673e29 100644 --- a/pandas/tests/io/parser/dtypes.py +++ b/pandas/tests/io/parser/dtypes.py @@ -114,6 +114,17 @@ def test_categorical_dtype(self): actual = self.read_csv(StringIO(data), dtype='category') tm.assert_frame_equal(actual, expected) + @pytest.mark.slow + def test_categorical_dtype_high_cardinality_numeric(self): + # GH 18186 + data = np.sort([str(i) for i in range(524289)]) + expected = DataFrame({'a': Categorical(data, ordered=True)}) + actual = self.read_csv(StringIO('a\n' + '\n'.join(data)), + dtype='category') + actual["a"] = actual["a"].cat.reorder_categories( + np.sort(actual.a.cat.categories), ordered=True) + tm.assert_frame_equal(actual, expected) + def test_categorical_dtype_encoding(self): # GH 10153 pth = tm.get_data_path('unicode_series.csv') diff --git 
a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py index 7fbf174e19eee..8dc599b42ddc7 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -312,3 +312,21 @@ def test_empty_na_values_no_default_with_index(self): out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0) tm.assert_frame_equal(out, expected) + + def test_no_na_filter_on_index(self): + # see gh-5239 + data = "a,b,c\n1,,3\n4,5,6" + + # Don't parse NA-values in index when na_filter=False. + out = self.read_csv(StringIO(data), index_col=[1], na_filter=False) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index(["", "5"], name="b")) + tm.assert_frame_equal(out, expected) + + # Parse NA-values in index when na_filter=True. + out = self.read_csv(StringIO(data), index_col=[1], na_filter=True) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index([np.nan, 5.0], name="b")) + tm.assert_frame_equal(out, expected) diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index 90103e7bf26b0..4c0f67fa6876a 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -656,3 +656,21 @@ def test_parse_date_column_with_empty_string(self): [621, ' ']] expected = DataFrame(expected_data, columns=['case', 'opdate']) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("data,expected", [ + ("a\n135217135789158401\n1352171357E+5", + DataFrame({"a": [135217135789158401, + 135217135700000]}, dtype="float64")), + ("a\n99999999999\n123456789012345\n1234E+0", + DataFrame({"a": [99999999999, + 123456789012345, + 1234]}, dtype="float64")) + ]) + @pytest.mark.parametrize("parse_dates", [True, False]) + def test_parse_date_float(self, data, expected, parse_dates): + # see gh-2697 + # + # Date parsing should fail, so we leave the data untouched + # (i.e. float precision should remain unchanged). 
+ result = self.read_csv(StringIO(data), parse_dates=parse_dates) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 27cc708889fa2..d00d3f31ce189 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -4,10 +4,7 @@ Tests parsers ability to read and parse non-local files and hence require a network connection to be read. """ -import os - import pytest -import moto import pandas.util.testing as tm from pandas import DataFrame @@ -15,51 +12,6 @@ from pandas.compat import BytesIO -@pytest.fixture(scope='module') -def tips_file(): - return os.path.join(tm.get_data_path(), 'tips.csv') - - -@pytest.fixture(scope='module') -def salaries_table(): - path = os.path.join(tm.get_data_path(), 'salaries.csv') - return read_table(path) - - -@pytest.fixture(scope='module') -def s3_resource(tips_file): - pytest.importorskip('s3fs') - moto.mock_s3().start() - - test_s3_files = [ - ('tips.csv', tips_file), - ('tips.csv.gz', tips_file + '.gz'), - ('tips.csv.bz2', tips_file + '.bz2'), - ] - - def add_tips_files(bucket_name): - for s3_key, file_name in test_s3_files: - with open(file_name, 'rb') as f: - conn.Bucket(bucket_name).put_object( - Key=s3_key, - Body=f) - - boto3 = pytest.importorskip('boto3') - # see gh-16135 - bucket = 'pandas-test' - - conn = boto3.resource("s3", region_name="us-east-1") - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) - - conn.create_bucket(Bucket='cant_get_it', ACL='private') - add_tips_files('cant_get_it') - - yield conn - - moto.mock_s3().stop() - - @pytest.mark.network @pytest.mark.parametrize( "compression,extension", diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index c9088d2ecc5e7..f66f9ccf065f7 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -161,9 +161,9 @@ def test_skip_bad_lines(self): 
error_bad_lines=False, warn_bad_lines=False) result = reader.read() - expected = {0: ['a', 'd', 'g', 'l'], - 1: ['b', 'e', 'h', 'm'], - 2: ['c', 'f', 'i', 'n']} + expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object), + 1: np.array(['b', 'e', 'h', 'm'], dtype=object), + 2: np.array(['c', 'f', 'i', 'n'], dtype=object)} assert_array_dicts_equal(result, expected) reader = TextReader(StringIO(data), delimiter=':', @@ -189,8 +189,10 @@ def test_header_not_enough_lines(self): assert header == expected recs = reader.read() - expected = {0: [1, 4], 1: [2, 5], 2: [3, 6]} - assert_array_dicts_equal(expected, recs) + expected = {0: np.array([1, 4], dtype=np.int64), + 1: np.array([2, 5], dtype=np.int64), + 2: np.array([3, 6], dtype=np.int64)} + assert_array_dicts_equal(recs, expected) # not enough rows pytest.raises(parser.ParserError, TextReader, StringIO(data), @@ -203,14 +205,16 @@ def test_header_not_enough_lines_as_recarray(self): '1,2,3\n' '4,5,6') - reader = TextReader(StringIO(data), delimiter=',', header=2, - as_recarray=True) + reader = TextReader(StringIO(data), delimiter=',', + header=2, as_recarray=True) header = reader.header expected = [['a', 'b', 'c']] assert header == expected recs = reader.read() - expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]} + expected = {'a': np.array([1, 4], dtype=np.int64), + 'b': np.array([2, 5], dtype=np.int64), + 'c': np.array([3, 6], dtype=np.int64)} assert_array_dicts_equal(expected, recs) # not enough rows @@ -225,7 +229,7 @@ def test_escapechar(self): reader = TextReader(StringIO(data), delimiter=',', header=None, escapechar='\\') result = reader.read() - expected = {0: ['"hello world"'] * 3} + expected = {0: np.array(['"hello world"'] * 3, dtype=object)} assert_array_dicts_equal(result, expected) def test_eof_has_eol(self): @@ -360,7 +364,7 @@ def test_empty_field_eof(self): result = TextReader(StringIO(data), delimiter=',').read() - expected = {0: np.array([1, 4]), + expected = {0: np.array([1, 4], dtype=np.int64), 1: 
np.array(['2', ''], dtype=object), 2: np.array(['3', ''], dtype=object)} assert_array_dicts_equal(result, expected) @@ -397,4 +401,5 @@ def test_empty_csv_input(self): def assert_array_dicts_equal(left, right): for k, v in compat.iteritems(left): - assert(np.array_equal(v, right[k])) + assert tm.assert_numpy_array_equal(np.asarray(v), + np.asarray(right[k])) diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 940a331a9de84..b5d1435c29cb7 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -18,7 +18,7 @@ try: DataFrame({'A': [1, 2]}).to_clipboard() _DEPS_INSTALLED = 1 -except PyperclipException: +except (PyperclipException, RuntimeError): _DEPS_INSTALLED = 0 diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index a28adcf1ee771..bc58ea1c7c228 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -180,6 +180,15 @@ def test_scalar_float(self): x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) + def test_scalar_bool(self): + x = np.bool_(1) + x_rec = self.encode_decode(x) + tm.assert_almost_equal(x, x_rec) + + x = np.bool_(0) + x_rec = self.encode_decode(x) + tm.assert_almost_equal(x, x_rec) + def test_scalar_complex(self): x = np.random.rand() + 1j * np.random.rand() x_rec = self.encode_decode(x) @@ -263,7 +272,7 @@ def test_numpy_array_complex(self): x.dtype == x_rec.dtype) def test_list_mixed(self): - x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')] + x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo'), np.bool_(1)] x_rec = self.encode_decode(x) # current msgpack cannot distinguish list/tuple tm.assert_almost_equal(tuple(x), x_rec) @@ -401,6 +410,7 @@ def setup_method(self, method): 'G': [Timestamp('20130102', tz='US/Eastern')] * 5, 'H': Categorical([1, 2, 3, 4, 5]), 'I': Categorical([1, 2, 3, 4, 5], ordered=True), + 'J': (np.bool_(1), 2, 3, 4, 5), } self.d['float'] = Series(data['A']) @@ -410,6 +420,7 @@ 
def setup_method(self, method): self.d['dt_tz'] = Series(data['G']) self.d['cat_ordered'] = Series(data['H']) self.d['cat_unordered'] = Series(data['I']) + self.d['numpy_bool_mixed'] = Series(data['J']) def test_basic(self): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index ecd4e8f719014..e7bcff22371b7 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -105,7 +105,7 @@ def test_options_py(df_compat, pa): with pd.option_context('io.parquet.engine', 'pyarrow'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -118,7 +118,7 @@ def test_options_fp(df_compat, fp): with pd.option_context('io.parquet.engine', 'fastparquet'): df.to_parquet(path, compression=None) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -130,7 +130,7 @@ def test_options_auto(df_compat, fp, pa): with pd.option_context('io.parquet.engine', 'auto'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -162,7 +162,7 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=pa, compression=None) - result = read_parquet(path, engine=fp, compression=None) + result = read_parquet(path, engine=fp) tm.assert_frame_equal(result, df) @@ -174,7 +174,7 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=fp, compression=None) - result = read_parquet(path, engine=pa, compression=None) + result = read_parquet(path, engine=pa) tm.assert_frame_equal(result, df) @@ -188,19 +188,23 @@ def check_error_on_write(self, df, engine, exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) - def check_round_trip(self, df, engine, expected=None, **kwargs): - + def 
check_round_trip(self, df, engine, expected=None, + write_kwargs=None, read_kwargs=None): + if write_kwargs is None: + write_kwargs = {} + if read_kwargs is None: + read_kwargs = {} with tm.ensure_clean() as path: - df.to_parquet(path, engine, **kwargs) - result = read_parquet(path, engine) + df.to_parquet(path, engine, **write_kwargs) + result = read_parquet(path, engine, **read_kwargs) if expected is None: expected = df tm.assert_frame_equal(result, expected) # repeat - to_parquet(df, path, engine, **kwargs) - result = pd.read_parquet(path, engine) + to_parquet(df, path, engine, **write_kwargs) + result = pd.read_parquet(path, engine, **read_kwargs) if expected is None: expected = df @@ -222,7 +226,7 @@ def test_columns_dtypes(self, engine): # unicode df.columns = [u'foo', u'bar'] - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) def test_columns_dtypes_invalid(self, engine): @@ -246,7 +250,7 @@ def test_columns_dtypes_invalid(self, engine): def test_write_with_index(self, engine): df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) # non-default index for index in [[2, 3, 4], @@ -280,7 +284,18 @@ def test_compression(self, engine, compression): pytest.importorskip('brotli') df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=compression) + self.check_round_trip(df, engine, + write_kwargs={'compression': compression}) + + def test_read_columns(self, engine): + # GH18154 + df = pd.DataFrame({'string': list('abc'), + 'int': list(range(1, 4))}) + + expected = pd.DataFrame({'string': list('abc')}) + self.check_round_trip(df, engine, expected=expected, + write_kwargs={'compression': None}, + read_kwargs={'columns': ['string']}) class TestParquetPyArrow(Base): @@ -368,7 +383,7 @@ def test_basic(self, fp): 'timedelta': pd.timedelta_range('1 day', 
periods=3), }) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) @pytest.mark.skip(reason="not supported") def test_duplicate_columns(self, fp): @@ -381,7 +396,8 @@ def test_duplicate_columns(self, fp): def test_bool_with_none(self, fp): df = pd.DataFrame({'a': [True, None, False]}) expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16') - self.check_round_trip(df, fp, expected=expected, compression=None) + self.check_round_trip(df, fp, expected=expected, + write_kwargs={'compression': None}) def test_unsupported(self, fp): @@ -397,7 +413,7 @@ def test_categorical(self, fp): if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"): pytest.skip("CategoricalDtype not supported for older fp") df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) def test_datetime_tz(self, fp): # doesn't preserve tz @@ -407,4 +423,13 @@ def test_datetime_tz(self, fp): # warns on the coercion with catch_warnings(record=True): self.check_round_trip(df, fp, df.astype('datetime64[ns]'), - compression=None) + write_kwargs={'compression': None}) + + def test_filter_row_groups(self, fp): + d = {'a': list(range(0, 3))} + df = pd.DataFrame(d) + with tm.ensure_clean() as path: + df.to_parquet(path, fp, compression=None, + row_group_offsets=1) + result = read_parquet(path, fp, filters=[('a', '==', 0)]) + assert len(result) == 1 diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 2df43158b5370..4528565eefa0c 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -88,6 +88,7 @@ "TextCol" TEXT, "DateCol" TEXT, "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, "FloatCol" REAL, "IntCol" INTEGER, "BoolCol" INTEGER, @@ -98,6 +99,7 @@ `TextCol` TEXT, `DateCol` DATETIME, `IntDateCol` INTEGER, + `IntDateOnlyCol` INTEGER, `FloatCol` DOUBLE, `IntCol` INTEGER, 
`BoolCol` BOOLEAN, @@ -109,6 +111,7 @@ "DateCol" TIMESTAMP, "DateColWithTz" TIMESTAMP WITH TIME ZONE, "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, "FloatCol" DOUBLE PRECISION, "IntCol" INTEGER, "BoolCol" BOOLEAN, @@ -120,31 +123,33 @@ 'sqlite': { 'query': """ INSERT INTO types_test_data - VALUES(?, ?, ?, ?, ?, ?, ?, ?) + VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) """, 'fields': ( - 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', - 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + 'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol', + 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', + 'BoolColWithNull' ) }, 'mysql': { 'query': """ INSERT INTO types_test_data - VALUES("%s", %s, %s, %s, %s, %s, %s, %s) + VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s) """, 'fields': ( - 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', - 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + 'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol', + 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', + 'BoolColWithNull' ) }, 'postgresql': { 'query': """ INSERT INTO types_test_data - VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) + VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, 'fields': ( 'TextCol', 'DateCol', 'DateColWithTz', - 'IntDateCol', 'FloatCol', + 'IntDateCol', 'IntDateOnlyCol', 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' ) }, @@ -313,13 +318,13 @@ def _load_raw_sql(self): self.drop_table('types_test_data') self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor]) ins = SQL_STRINGS['insert_test_types'][self.flavor] - data = [ { 'TextCol': 'first', 'DateCol': '2000-01-03 00:00:00', 'DateColWithTz': '2000-01-01 00:00:00-08:00', 'IntDateCol': 535852800, + 'IntDateOnlyCol': 20101010, 'FloatCol': 10.10, 'IntCol': 1, 'BoolCol': False, @@ -331,6 +336,7 @@ def _load_raw_sql(self): 'DateCol': '2000-01-04 00:00:00', 'DateColWithTz': '2000-06-01 00:00:00-07:00', 'IntDateCol': 1356998400, + 'IntDateOnlyCol': 20101212, 'FloatCol': 10.10, 
'IntCol': 1, 'BoolCol': False, @@ -610,20 +616,42 @@ def test_date_parsing(self): df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates=['DateCol']) assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + pd.Timestamp(2000, 1, 3, 0, 0, 0), + pd.Timestamp(2000, 1, 4, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + pd.Timestamp(2000, 1, 3, 0, 0, 0), + pd.Timestamp(2000, 1, 4, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates=['IntDateCol']) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + pd.Timestamp(1986, 12, 25, 0, 0, 0), + pd.Timestamp(2013, 1, 1, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates={'IntDateCol': 's'}) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + pd.Timestamp(1986, 12, 25, 0, 0, 0), + pd.Timestamp(2013, 1, 1, 0, 0, 0) + ] + + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + parse_dates={'IntDateOnlyCol': '%Y%m%d'}) + assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) + assert df.IntDateOnlyCol.tolist() == [ + pd.Timestamp('2010-10-10'), + pd.Timestamp('2010-12-12') + ] def test_date_and_index(self): # Test case where same column appears in parse_date and index_col diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 055a490bc6b5d..78b47960e1a04 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -96,6 +96,8 @@ def setup_method(self, method): self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta') + self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta') + def read_dta(self, file): # Legacy default reader configuration return 
read_stata(file, convert_dates=True) @@ -1327,3 +1329,22 @@ def test_set_index(self): df.to_stata(path) reread = pd.read_stata(path, index_col='index') tm.assert_frame_equal(df, reread) + + @pytest.mark.parametrize( + 'column', ['ms', 'day', 'week', 'month', 'qtr', 'half', 'yr']) + def test_date_parsing_ignores_format_details(self, column): + # GH 17797 + # + # Test that display formats are ignored when determining if a numeric + # column is a date value. + # + # All date types are stored as numbers and format associated with the + # column denotes both the type of the date and the display format. + # + # STATA supports 9 date types which each have distinct units. We test 7 + # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that + # accounts for leap seconds and %tb relies on STATAs business calendar. + df = read_stata(self.stata_dates) + unformatted = df.loc[0, column] + formatted = df.loc[0, column + "_fmt"] + assert unformatted == formatted diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index e1f64bed5598d..3818c04649366 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -1,20 +1,144 @@ +import subprocess import pytest from datetime import datetime, date import numpy as np -from pandas import Timestamp, Period, Index +from pandas import Timestamp, Period, Index, date_range, Series from pandas.compat import u +import pandas.core.config as cf import pandas.util.testing as tm from pandas.tseries.offsets import Second, Milli, Micro, Day from pandas.compat.numpy import np_datetime64_compat converter = pytest.importorskip('pandas.plotting._converter') +from pandas.plotting import (register_matplotlib_converters, + deregister_matplotlib_converters) def test_timtetonum_accepts_unicode(): assert (converter.time2num("00:01") == converter.time2num(u("00:01"))) +class TestRegistration(object): + + def test_register_by_default(self): + # Run in subprocess to 
ensure a clean state + code = ("'import matplotlib.units; " + "import pandas as pd; " + "units = dict(matplotlib.units.registry); " + "assert pd.Timestamp in units)'") + call = ['python', '-c', code] + assert subprocess.check_call(call) == 0 + + def test_warns(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + # Set to the "warning" state, in case this isn't the first test run + converter._WARN = True + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: + ax.plot(s.index, s.values) + plt.close() + + assert len(w) == 1 + assert "Using an implicitly registered datetime converter" in str(w[0]) + + def test_registering_no_warning(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + # Set to the "warn" state, in case this isn't the first test run + converter._WARN = True + register_matplotlib_converters() + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + def test_pandas_plots_register(self): + pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + # Set to the "warn" state, in case this isn't the first test run + converter._WARN = True + with tm.assert_produces_warning(None) as w: + s.plot() + + assert len(w) == 0 + + def test_matplotlib_formatters(self): + units = pytest.importorskip("matplotlib.units") + assert Timestamp in units.registry + + ctx = cf.option_context("plotting.matplotlib.register_converters", + False) + with ctx: + assert Timestamp not in units.registry + + assert Timestamp in units.registry + + def test_option_no_warning(self): + pytest.importorskip("matplotlib.pyplot") + ctx = cf.option_context("plotting.matplotlib.register_converters", + False) + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), 
index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + converter._WARN = True + # Test without registering first, no warning + with ctx: + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + # Now test with registering + converter._WARN = True + register_matplotlib_converters() + with ctx: + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + def test_registry_resets(self): + units = pytest.importorskip("matplotlib.units") + dates = pytest.importorskip("matplotlib.dates") + + # make a copy, to reset to + original = dict(units.registry) + + try: + # get to a known state + units.registry.clear() + date_converter = dates.DateConverter() + units.registry[datetime] = date_converter + units.registry[date] = date_converter + + register_matplotlib_converters() + assert units.registry[date] is not date_converter + deregister_matplotlib_converters() + assert units.registry[date] is date_converter + + finally: + # restore original stater + units.registry.clear() + for k, v in original.items(): + units.registry[k] = v + + def test_old_import_warns(self): + with tm.assert_produces_warning(FutureWarning) as w: + from pandas.tseries import converter + converter.register() + + assert len(w) + assert ('pandas.plotting.register_matplotlib_converters' in + str(w[0].message)) + + class TestDateTimeConverter(object): def setup_method(self, method): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index d66012e2a56a0..d6cedac747f25 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1,13 +1,14 @@ """ Test cases for time series specific (freq conversion, etc) """ from datetime import datetime, timedelta, date, time +import pickle import pytest from pandas.compat import lrange, zip import numpy as np from pandas import Index, Series, DataFrame, NaT -from pandas.compat import 
is_platform_mac +from pandas.compat import is_platform_mac, PY3 from pandas.core.indexes.datetimes import date_range, bdate_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.tseries.offsets import DateOffset @@ -1470,5 +1471,12 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): with ensure_clean(return_filelike=True) as path: plt.savefig(path) + + # GH18439 + # this is supported only in Python 3 pickle since + # pickle in Python2 doesn't support instancemethod pickling + if PY3: + with ensure_clean(return_filelike=True) as path: + pickle.dump(fig, path) finally: plt.close(fig) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 6f476553091d9..54a512d14fef4 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -201,6 +201,7 @@ def test_parallel_coordinates(self): with tm.assert_produces_warning(FutureWarning): parallel_coordinates(df, 'Name', colors=colors) + @pytest.mark.xfail(reason="unreliable test") def test_parallel_coordinates_with_sorted_labels(self): """ For #15908 """ from pandas.plotting import parallel_coordinates diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 84a15cab34cd0..11368e44943d8 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1594,7 +1594,9 @@ def test_concat_series_axis1_same_names_ignore_index(self): s2 = Series(randn(len(dates)), index=dates, name='value') result = concat([s1, s2], axis=1, ignore_index=True) - assert np.array_equal(result.columns, [0, 1]) + expected = Index([0, 1]) + + tm.assert_index_equal(result.columns, expected) def test_concat_iterables(self): from collections import deque, Iterable @@ -1981,3 +1983,21 @@ def test_concat_will_upcast(dt, pdt): pdt(np.array([5], dtype=dt, ndmin=dims))] x = pd.concat(dfs) assert x.values.dtype == 'float64' + + +def test_concat_empty_and_non_empty_frame_regression(): + # GH 18178 
regression test + df1 = pd.DataFrame({'foo': [1]}) + df2 = pd.DataFrame({'foo': []}) + expected = pd.DataFrame({'foo': [1.0]}) + result = pd.concat([df1, df2]) + assert_frame_equal(result, expected) + + +def test_concat_empty_and_non_empty_series_regression(): + # GH 18187 regression test + s1 = pd.Series([1]) + s2 = pd.Series([]) + expected = s1 + result = pd.concat([s1, s2]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py index 172667c9a0fb8..33d91af21c723 100644 --- a/pandas/tests/reshape/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -861,6 +861,12 @@ def test_validation(self): result = merge(left, right, on=['a', 'b'], validate='1:1') assert_frame_equal(result, expected_multi) + def test_merge_two_empty_df_no_division_error(self): + # GH17776, PR #17846 + a = pd.DataFrame({'a': [], 'b': [], 'c': []}) + with np.errstate(divide='raise'): + merge(a, a, on=('a', 'b')) + def _check_merge(x, y): for how in ['inner', 'left', 'outer']: diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py index 78bfa2ff8597c..4b2680b9be592 100644 --- a/pandas/tests/reshape/test_merge_asof.py +++ b/pandas/tests/reshape/test_merge_asof.py @@ -973,3 +973,15 @@ def test_on_float_by_int(self): columns=['symbol', 'exch', 'price', 'mpv']) assert_frame_equal(result, expected) + + def test_merge_datatype_error(self): + """ Tests merge datatype mismatch error """ + msg = 'merge keys \[0\] object and int64, must be the same type' + + left = pd.DataFrame({'left_val': [1, 5, 10], + 'a': ['a', 'b', 'c']}) + right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7], + 'a': [1, 2, 3, 6, 7]}) + + with tm.assert_raises_regex(MergeError, msg): + merge_asof(left, right, on='a') diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 135e4c544de41..0e69371511294 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -125,12 
+125,13 @@ def test_round_nat(klass): def test_NaT_methods(): # GH 9513 + # GH 17329 for `timestamp` raise_methods = ['astimezone', 'combine', 'ctime', 'dst', 'fromordinal', 'fromtimestamp', 'isocalendar', 'strftime', 'strptime', 'time', 'timestamp', 'timetuple', 'timetz', 'toordinal', 'tzname', 'utcfromtimestamp', 'utcnow', 'utcoffset', - 'utctimetuple'] + 'utctimetuple', 'timestamp'] nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today', 'tz_convert', 'tz_localize'] nan_methods = ['weekday', 'isoweekday'] diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index c1b9f858a08de..4053257fbd2c8 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -19,7 +19,7 @@ from pandas._libs import tslib, period from pandas._libs.tslibs.timezones import get_timezone -from pandas.compat import lrange, long +from pandas.compat import lrange, long, PY3 from pandas.util.testing import assert_series_equal from pandas.compat.numpy import np_datetime64_compat from pandas import (Timestamp, date_range, Period, Timedelta, compat, @@ -1079,6 +1079,28 @@ def test_is_leap_year(self): dt = Timestamp('2100-01-01 00:00:00', tz=tz) assert not dt.is_leap_year + def test_timestamp(self): + # GH#17329 + # tz-naive --> treat it as if it were UTC for purposes of timestamp() + ts = Timestamp.now() + uts = ts.replace(tzinfo=utc) + assert ts.timestamp() == uts.timestamp() + + tsc = Timestamp('2014-10-11 11:00:01.12345678', tz='US/Central') + utsc = tsc.tz_convert('UTC') + + # utsc is a different representation of the same time + assert tsc.timestamp() == utsc.timestamp() + + if PY3: + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + + # should agree with datetime.timestamp method + dt = ts.to_pydatetime() + assert dt.timestamp() == ts.timestamp() + class TestTimestampNsOperations(object): diff --git a/pandas/tests/series/test_analytics.py 
b/pandas/tests/series/test_analytics.py index 8cc40bb5146c5..2ee404ab5fe0d 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -848,6 +848,12 @@ def test_value_counts_nunique(self): result = series.nunique() assert result == 11 + # GH 18051 + s = pd.Series(pd.Categorical([])) + assert s.nunique() == 0 + s = pd.Series(pd.Categorical([np.nan])) + assert s.nunique() == 0 + def test_unique(self): # 714 also, dtype=float @@ -920,6 +926,14 @@ def test_drop_duplicates(self): sc.drop_duplicates(keep=False, inplace=True) assert_series_equal(sc, s[~expected]) + # GH 18051 + s = pd.Series(pd.Categorical([])) + tm.assert_categorical_equal(s.unique(), pd.Categorical([]), + check_dtype=False) + s = pd.Series(pd.Categorical([np.nan])) + tm.assert_categorical_equal(s.unique(), pd.Categorical([np.nan]), + check_dtype=False) + def test_clip(self): val = self.ts.median() diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index bd4e8b23f31b4..5ca4eba4da13b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -636,17 +636,21 @@ def test_valid(self): def test_isna(self): ser = Series([0, 5.4, 3, nan, -0.001]) - np.array_equal(ser.isna(), - Series([False, False, False, True, False]).values) + expected = Series([False, False, False, True, False]) + tm.assert_series_equal(ser.isna(), expected) + ser = Series(["hi", "", nan]) - np.array_equal(ser.isna(), Series([False, False, True]).values) + expected = Series([False, False, True]) + tm.assert_series_equal(ser.isna(), expected) def test_notna(self): ser = Series([0, 5.4, 3, nan, -0.001]) - np.array_equal(ser.notna(), - Series([True, True, True, False, True]).values) + expected = Series([True, True, True, False, True]) + tm.assert_series_equal(ser.notna(), expected) + ser = Series(["hi", "", nan]) - np.array_equal(ser.notna(), Series([True, True, False]).values) + expected = Series([True, True, False]) + 
tm.assert_series_equal(ser.notna(), expected) def test_pad_nan(self): x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'], diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 38625bfb29917..240a7ad4b22f9 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1132,19 +1132,19 @@ def test_pad_backfill_object_segfault(): result = libalgos.pad_object(old, new) expected = np.array([-1], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.pad_object(new, old) expected = np.array([], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill_object(old, new) expected = np.array([-1], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill_object(new, old) expected = np.array([], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) def test_arrmap(): @@ -1219,7 +1219,7 @@ def test_is_lexsorted(): 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'), np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, @@ -1231,19 +1231,10 @@ def test_is_lexsorted(): 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, - 4, 3, 2, 1, 0])] + 4, 3, 2, 1, 0], dtype='int64')] assert (not libalgos.is_lexsorted(failure)) -# def test_get_group_index(): -# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64) -# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype=np.int64) -# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64) - -# result = lib.get_group_index([a, b], (3, 4)) - -# 
assert(np.array_equal(result, expected)) - def test_groupsort_indexer(): a = np.random.randint(0, 1000, 100).astype(np.int64) @@ -1252,14 +1243,22 @@ def test_groupsort_indexer(): result = libalgos.groupsort_indexer(a, 1000)[0] # need to use a stable sort + # np.argsort returns int, groupsort_indexer + # always returns int64 expected = np.argsort(a, kind='mergesort') - assert (np.array_equal(result, expected)) + expected = expected.astype(np.int64) + + tm.assert_numpy_array_equal(result, expected) # compare with lexsort + # np.lexsort returns int, groupsort_indexer + # always returns int64 key = a * 1000 + b result = libalgos.groupsort_indexer(key, 1000000)[0] expected = np.lexsort((b, a)) - assert (np.array_equal(result, expected)) + expected = expected.astype(np.int64) + + tm.assert_numpy_array_equal(result, expected) def test_infinity_sort(): diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 272ba25bf8f8a..6366aae8ccdf6 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -2124,6 +2124,13 @@ def test_creation_astype(self): res = s.astype(CategoricalDtype(list('abcdef'), ordered=True)) tm.assert_series_equal(res, exp) + @pytest.mark.parametrize('columns', [['x'], ['x', 'y'], ['x', 'y', 'z']]) + def test_empty_astype(self, columns): + # GH 18004 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + DataFrame(columns=columns).astype('category') + def test_construction_series(self): l = [1, 2, 3, 1] diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 61f0c992225c6..b8e9191002640 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -52,7 +52,6 @@ def test_xarray(df): assert df.to_xarray() is not None -@tm.network def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index 
cde1cab37d09c..af946436b55c7 100644 --- a/pandas/tests/test_join.py +++ b/pandas/tests/test_join.py @@ -53,7 +53,7 @@ def test_left_join_indexer_unique(): result = _join.left_join_indexer_unique_int64(b, a) expected = np.array([1, 1, 2, 3, 3], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) def test_left_outer_join_bug(): @@ -69,13 +69,14 @@ def test_left_outer_join_bug(): lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False) - exp_lidx = np.arange(len(left)) - exp_ridx = -np.ones(len(left)) + exp_lidx = np.arange(len(left), dtype=np.int64) + exp_ridx = -np.ones(len(left), dtype=np.int64) + exp_ridx[left == 1] = 1 exp_ridx[left == 3] = 0 - assert (np.array_equal(lidx, exp_lidx)) - assert (np.array_equal(ridx, exp_ridx)) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) def test_inner_join_indexer(): diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 2662720bb436d..75aa9aa4e8198 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -198,7 +198,7 @@ def test_get_reverse_indexer(self): indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64) result = lib.get_reverse_indexer(indexer, 5) expected = np.array([4, 2, 3, 6, 7], dtype=np.int64) - assert np.array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected) class TestNAObj(object): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index ac8297a53de37..e64bf2217e717 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -2729,6 +2729,34 @@ def test_resample_weekly_bug_1726(self): # it works! 
df.resample('W-MON', closed='left', label='left').first() + def test_resample_with_dst_time_change(self): + # GH 15549 + index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000], + tz='UTC').tz_convert('America/Chicago') + df = pd.DataFrame([1, 2], index=index) + result = df.resample('12h', closed='right', + label='right').last().ffill() + + expected_index_values = ['2016-03-09 12:00:00-06:00', + '2016-03-10 00:00:00-06:00', + '2016-03-10 12:00:00-06:00', + '2016-03-11 00:00:00-06:00', + '2016-03-11 12:00:00-06:00', + '2016-03-12 00:00:00-06:00', + '2016-03-12 12:00:00-06:00', + '2016-03-13 00:00:00-06:00', + '2016-03-13 13:00:00-05:00', + '2016-03-14 01:00:00-05:00', + '2016-03-14 13:00:00-05:00', + '2016-03-15 01:00:00-05:00', + '2016-03-15 13:00:00-05:00'] + index = pd.DatetimeIndex(expected_index_values, + tz='UTC').tz_convert('America/Chicago') + expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 2.0], index=index) + assert_frame_equal(result, expected) + def test_resample_bms_2752(self): # GH2753 foo = pd.Series(index=pd.bdate_range('20000101', '20000201')) @@ -3103,6 +3131,26 @@ def f(x): result = g.apply(f) assert_frame_equal(result, expected) + def test_apply_with_mutated_index(self): + # GH 15169 + index = pd.date_range('1-1-2015', '12-31-15', freq='D') + df = pd.DataFrame(data={'col1': np.random.rand(len(index))}, + index=index) + + def f(x): + s = pd.Series([1, 2], index=['a', 'b']) + return s + + expected = df.groupby(pd.Grouper(freq='M')).apply(f) + + result = df.resample('M').apply(f) + assert_frame_equal(result, expected) + + # A case for series + expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f) + result = df['col1'].resample('M').apply(f) + assert_series_equal(result, expected) + def test_resample_groupby_with_label(self): # GH 13235 index = date_range('2000-01-01', freq='2D', periods=5) @@ -3380,3 +3428,11 @@ def test_aggregate_with_nat(self): # if NaT is included, 'var', 'std', 'mean', 
'first','last' # and 'nth' doesn't work yet + + def test_repr(self): + # GH18203 + result = repr(TimeGrouper(key='A', freq='H')) + expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, " + "closed='left', label='left', how='mean', " + "convention='e', base=0)") + assert result == expected diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index a5b12bbf9608a..06c1fa1c0905a 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -332,16 +332,17 @@ def testit(label_list, shape): label_list2 = decons_group_index(group_index, shape) for a, b in zip(label_list, label_list2): - assert (np.array_equal(a, b)) + tm.assert_numpy_array_equal(a, b) shape = (4, 5, 6) - label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile( - [0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile( - [5, 1, 0, 2, 3, 0, 5, 4], 100)] + label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)] testit(label_list, shape) shape = (10000, 10000) - label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)] + label_list = [np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5)] testit(label_list, shape) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index f1b97081b6d93..8aa69bcbfdf7f 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2086,6 +2086,18 @@ def test_rsplit_to_multiindex_expand(self): tm.assert_index_equal(result, exp) assert result.nlevels == 2 + def test_split_nan_expand(self): + # gh-18450 + s = Series(["foo,bar,baz", NA]) + result = s.str.split(",", expand=True) + exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]]) + tm.assert_frame_equal(result, exp) + + # check that these are actually np.nan and not None + # TODO see GH 18463 + # tm.assert_frame_equal does not differentiate + assert all(np.isnan(x) 
for x in result.iloc[1]) + def test_split_with_name(self): # GH 12617 diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index c567613acebd1..35ae4ad4d5db4 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -2491,6 +2491,14 @@ def test_rolling_corr_pairwise(self): self._check_pairwise_moment('rolling', 'corr', window=10, min_periods=5) + @pytest.mark.parametrize('window', range(7)) + def test_rolling_corr_with_zero_variance(self, window): + # GH 18430 + s = pd.Series(np.zeros(20)) + other = pd.Series(np.arange(20)) + + assert s.rolling(window=window).corr(other=other).isna().all() + def _check_pairwise_moment(self, dispatch, name, **kwargs): def get_result(obj, obj2=None): return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) @@ -2979,6 +2987,16 @@ def test_rolling_kurt_edge_cases(self): x = d.rolling(window=4).kurt() tm.assert_series_equal(expected, x) + def test_rolling_skew_eq_value_fperr(self): + # #18804 all rolling skew for all equal values should return Nan + a = pd.Series([1.1] * 15).rolling(window=10).skew() + assert np.isnan(a).all() + + def test_rolling_kurt_eq_value_fperr(self): + # #18804 all rolling kurt for all equal values should return Nan + a = pd.Series([1.1] * 15).rolling(window=10).kurt() + assert np.isnan(a).all() + def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True, has_time_rule=True, preserve_nan=True): result = func(self.arr) diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index aa8fe90ea6500..823e22c4f87d1 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas.tseries.offsets as offsets -from pandas.compat import lrange, zip +from pandas.compat import lrange, zip, PY3 from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import 
tslib @@ -70,7 +70,7 @@ def test_utc_to_local_no_modify(self): rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) # Values are unmodified - assert np.array_equal(rng.asi8, rng_eastern.asi8) + tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern')) @@ -108,7 +108,7 @@ def test_localize_utc_conversion_explicit(self): rng = date_range('3/10/2012', '3/11/2012', freq='30T') converted = rng.tz_localize(self.tz('US/Eastern')) expected_naive = rng + offsets.Hour(5) - assert np.array_equal(converted.asi8, expected_naive.asi8) + tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) # DST ambiguity, this should fail rng = date_range('3/11/2012', '3/12/2012', freq='30T') @@ -424,7 +424,7 @@ def test_with_tz(self): # datetimes with tzinfo set dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), - '1/1/2009', tz=pytz.utc) + datetime(2009, 1, 1, tzinfo=pytz.utc)) pytest.raises(Exception, bdate_range, datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009', @@ -1278,16 +1278,22 @@ def test_replace_tzinfo(self): result_dt = dt.replace(tzinfo=tzinfo) result_pd = Timestamp(dt).replace(tzinfo=tzinfo) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() diff 
--git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index df603c4d880d8..26d3f3cb85edc 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -1,6 +1,7 @@ # flake8: noqa +import warnings -from pandas.plotting._converter import (register, time2num, +from pandas.plotting._converter import (time2num, TimeConverter, TimeFormatter, PeriodConverter, get_datevalue, DatetimeConverter, @@ -9,3 +10,11 @@ MilliSecondLocator, get_finder, TimeSeries_DateLocator, TimeSeries_DateFormatter) + + +def register(): + from pandas.plotting._converter import register as register_ + msg = ("'pandas.tseries.converter.register' has been moved and renamed to " + "'pandas.plotting.register_matplotlib_converters'. ") + warnings.warn(msg, FutureWarning, stacklevel=2) + register_() diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 730d2782e85d2..dec67bbea854f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1074,8 +1074,12 @@ def assert_categorical_equal(left, right, check_dtype=True, def raise_assert_detail(obj, message, left, right, diff=None): if isinstance(left, np.ndarray): left = pprint_thing(left) + elif is_categorical_dtype(left): + left = repr(left) if isinstance(right, np.ndarray): right = pprint_thing(right) + elif is_categorical_dtype(right): + right = repr(right) msg = """{obj} are different diff --git a/scripts/convert_deps.py b/scripts/convert_deps.py new file mode 100644 index 0000000000000..aabeb24a0c3c8 --- /dev/null +++ b/scripts/convert_deps.py @@ -0,0 +1,29 @@ +""" +Convert the conda environment.yaml to a pip requirements.txt +""" +import yaml + +exclude = {'python=3'} +rename = {'pytables': 'tables'} + +with open("ci/environment-dev.yaml") as f: + dev = yaml.load(f) + +with open("ci/requirements-optional-conda.txt") as f: + optional = [x.strip() for x in f.readlines()] + +required = dev['dependencies'] +required = [rename.get(dep, dep) for dep in required if dep not in exclude] +optional = 
[rename.get(dep, dep) for dep in optional if dep not in exclude] + + +with open("ci/requirements_dev.txt", 'wt') as f: + f.write("# This file was autogenerated by scripts/convert_deps.py\n") + f.write("# Do not modify directly\n") + f.write('\n'.join(required)) + + +with open("ci/requirements-optional-pip.txt", 'wt') as f: + f.write("# This file was autogenerated by scripts/convert_deps.py\n") + f.write("# Do not modify directly\n") + f.write("\n".join(optional))
Reminder: don't squash before merging.
https://api.github.com/repos/pandas-dev/pandas/pulls/18697
2017-12-08T19:13:55Z
2017-12-11T17:24:51Z
2017-12-11T17:24:51Z
2017-12-11T20:25:37Z
API: Improper x/y arg given to df.plot
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index c3a0e3599a0f9..5a5d6e92c21e8 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -282,7 +282,7 @@ I/O Plotting ^^^^^^^^ -- +- :func: `DataFrame.plot` now raises a ``ValueError`` when the ``x`` or ``y`` argument is improperly formed (:issue:`18671`) - - diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index adaaa206edadd..9d74a308f79c8 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -19,7 +19,7 @@ is_number, is_hashable, is_iterator) -from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none from pandas.core.generic import _shared_docs, _shared_doc_kwargs @@ -1680,9 +1680,8 @@ def _plot(data, x=None, y=None, subplots=False, else: raise ValueError("%r is not a valid plot kind" % kind) - from pandas import DataFrame if kind in _dataframe_kinds: - if isinstance(data, DataFrame): + if isinstance(data, ABCDataFrame): plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax, kind=kind, **kwds) else: @@ -1690,7 +1689,7 @@ def _plot(data, x=None, y=None, subplots=False, % kind) elif kind in _series_kinds: - if isinstance(data, DataFrame): + if isinstance(data, ABCDataFrame): if y is None and subplots is False: msg = "{0} requires either y column or 'subplots=True'" raise ValueError(msg.format(kind)) @@ -1702,15 +1701,19 @@ def _plot(data, x=None, y=None, subplots=False, data.index.name = y plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) else: - if isinstance(data, DataFrame): + if isinstance(data, ABCDataFrame): if x is not None: if is_integer(x) and not data.columns.holds_integer(): x = data.columns[x] + elif not isinstance(data[x], ABCSeries): + raise ValueError("x must be a label or position") data = data.set_index(x) if y is not None: if is_integer(y) and not 
data.columns.holds_integer(): y = data.columns[y] + elif not isinstance(data[y], ABCSeries): + raise ValueError("y must be a label or position") label = kwds['label'] if 'label' in kwds else y series = data[y].copy() # Don't modify series.name = label diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 5c72d778a1220..3b3f6666340b8 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2170,6 +2170,26 @@ def test_invalid_kind(self): with pytest.raises(ValueError): df.plot(kind='aasdf') + @pytest.mark.parametrize("x,y", [ + (['B', 'C'], 'A'), + ('A', ['B', 'C']) + ]) + def test_invalid_xy_args(self, x, y): + # GH 18671 + df = DataFrame({"A": [1, 2], 'B': [3, 4], 'C': [5, 6]}) + with pytest.raises(ValueError): + df.plot(x=x, y=y) + + @pytest.mark.parametrize("x,y", [ + ('A', 'B'), + ('B', 'A') + ]) + def test_invalid_xy_args_dup_cols(self, x, y): + # GH 18671 + df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list('AAB')) + with pytest.raises(ValueError): + df.plot(x=x, y=y) + @pytest.mark.slow def test_hexbin_basic(self): df = self.hexbin_df
- [x] closes #18671 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I'm not incredibly familiar with your codebase so I did my best to follow conventions. Not sure what to do with whatsnew for this case? Description: - Validation of x or y arg to `df.plot` to match specifications in documentation
https://api.github.com/repos/pandas-dev/pandas/pulls/18695
2017-12-08T16:52:11Z
2017-12-10T15:39:57Z
2017-12-10T15:39:57Z
2018-03-20T18:30:28Z
Converted windows / 32bit skips into decorators
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index c2d1eb8ae1372..06e645563d51c 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -28,6 +28,7 @@ import pandas.core.computation.expr as expr import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.util.testing import (assert_frame_equal, randbool, assert_numpy_array_equal, assert_series_equal, assert_produces_warning) @@ -175,9 +176,8 @@ def test_floor_division(self): for lhs, rhs in product(self.lhses, self.rhses): self.check_floor_division(lhs, '//', rhs) + @td.skip_if_windows def test_pow(self): - tm._skip_if_windows() - # odd failure on win32 platform, so skip for lhs, rhs in product(self.lhses, self.rhses): self.check_pow(lhs, '**', rhs) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 826e20b8b0586..3ce51983c111d 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -11,6 +11,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas import compat from pandas import date_range, bdate_range, offsets, DatetimeIndex, Timestamp from pandas.tseries.offsets import (generate_range, CDay, BDay, DateOffset, @@ -49,8 +50,8 @@ def test_date_range_timestamp_equiv_explicit_pytz(self): ts = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D') assert ts == stamp + @td.skip_if_windows_python_3 def test_date_range_timestamp_equiv_explicit_dateutil(self): - tm._skip_if_windows_python_3() from pandas._libs.tslibs.timezones import dateutil_gettz as gettz rng = date_range('20090415', '20090519', tz=gettz('US/Eastern')) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index 5df75338d01d7..b74da4922429d 100644 --- 
a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -5,6 +5,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas import (DatetimeIndex, date_range, Series, bdate_range, DataFrame, Int64Index, Index, to_datetime) from pandas.tseries.offsets import Minute, BMonthEnd, MonthEnd @@ -358,9 +359,8 @@ def test_month_range_union_tz_pytz(self): early_dr.union(late_dr) + @td.skip_if_windows_python_3 def test_month_range_union_tz_dateutil(self): - tm._skip_if_windows_python_3() - from pandas._libs.tslibs.timezones import dateutil_gettz tz = dateutil_gettz('US/Eastern') diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 48378233dd638..599f6efd16f74 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -3,10 +3,11 @@ import numpy as np import pandas as pd +import pandas.util._test_decorators as td from pandas.util import testing as tm from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT, Index, Period, Int64Index, Series, DataFrame, date_range, - offsets, compat) + offsets) from ..datetimelike import DatetimeLike @@ -544,9 +545,8 @@ def test_shift_nat(self): tm.assert_index_equal(result, expected) assert result.name == expected.name + @td.skip_if_32bit def test_ndarray_compat_properties(self): - if compat.is_platform_32bit(): - pytest.skip("skipping on 32bit") super(TestPeriodIndex, self).test_ndarray_compat_properties() def test_shift_ndarray(self): diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index 6d476e326213e..7a1fca55dd51e 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -16,6 +16,7 @@ import pandas as pd import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas import DataFrame from pandas import compat 
from pandas.compat import StringIO, range, lrange @@ -129,9 +130,8 @@ def test_unsupported_dtype(self): dtype={'A': 'U8'}, index_col=0) + @td.skip_if_32bit def test_precise_conversion(self): - # see gh-8002 - tm._skip_if_32bit() from decimal import Decimal normal_errors = [] diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 85f24e794f12a..6df31b73da9b7 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -17,6 +17,7 @@ isna, compat, concat, Timestamp) import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.util.testing import (assert_panel4d_equal, assert_panel_equal, assert_frame_equal, @@ -24,7 +25,7 @@ set_timezone) from pandas.compat import (is_platform_windows, is_platform_little_endian, - PY3, PY35, PY36, BytesIO, text_type, + PY35, PY36, BytesIO, text_type, range, lrange, u) from pandas.io.formats.printing import pprint_thing from pandas.core.dtypes.common import is_categorical_dtype @@ -40,10 +41,6 @@ LooseVersion('2.2') else 'zlib') -# testing on windows/py3 seems to fault -# for using compression -skip_compression = PY3 and is_platform_windows() - # contextmanager to ensure the file cleanup @@ -719,12 +716,10 @@ def test_put_compression(self): pytest.raises(ValueError, store.put, 'b', df, format='fixed', complib='zlib') + @td.skip_if_windows_python_3 def test_put_compression_blosc(self): tm.skip_if_no_package('tables', min_version='2.2', app='blosc support') - if skip_compression: - pytest.skip("skipping on windows/PY3") - df = tm.makeTimeDataFrame() with ensure_clean_store(self.path) as store: @@ -2892,7 +2887,10 @@ def test_timeseries_preepoch(self): except OverflowError: pytest.skip('known failer on some windows platforms') - def test_frame(self): + @pytest.mark.parametrize("compression", [ + False, pytest.param(True, marks=td.skip_if_windows_python_3) + ]) + def test_frame(self, compression): df = tm.makeDataFrame() @@ -2900,21 +2898,14 @@ def 
test_frame(self): df.values[0, 0] = np.nan df.values[5, 3] = np.nan - self._check_roundtrip_table(df, tm.assert_frame_equal) - self._check_roundtrip(df, tm.assert_frame_equal) - - if not skip_compression: - self._check_roundtrip_table(df, tm.assert_frame_equal, - compression=True) - self._check_roundtrip(df, tm.assert_frame_equal, - compression=True) + self._check_roundtrip_table(df, tm.assert_frame_equal, + compression=compression) + self._check_roundtrip(df, tm.assert_frame_equal, + compression=compression) tdf = tm.makeTimeDataFrame() - self._check_roundtrip(tdf, tm.assert_frame_equal) - - if not skip_compression: - self._check_roundtrip(tdf, tm.assert_frame_equal, - compression=True) + self._check_roundtrip(tdf, tm.assert_frame_equal, + compression=compression) with ensure_clean_store(self.path) as store: # not consolidated @@ -3021,7 +3012,10 @@ def test_store_series_name(self): recons = store['series'] tm.assert_series_equal(recons, series) - def test_store_mixed(self): + @pytest.mark.parametrize("compression", [ + False, pytest.param(True, marks=td.skip_if_windows_python_3) + ]) + def test_store_mixed(self, compression): def _make_one(): df = tm.makeDataFrame() @@ -3046,19 +3040,12 @@ def _make_one(): tm.assert_frame_equal(store['obj'], df2) # check that can store Series of all of these types - self._check_roundtrip(df1['obj1'], tm.assert_series_equal) - self._check_roundtrip(df1['bool1'], tm.assert_series_equal) - self._check_roundtrip(df1['int1'], tm.assert_series_equal) - - if not skip_compression: - self._check_roundtrip(df1['obj1'], tm.assert_series_equal, - compression=True) - self._check_roundtrip(df1['bool1'], tm.assert_series_equal, - compression=True) - self._check_roundtrip(df1['int1'], tm.assert_series_equal, - compression=True) - self._check_roundtrip(df1, tm.assert_frame_equal, - compression=True) + self._check_roundtrip(df1['obj1'], tm.assert_series_equal, + compression=compression) + self._check_roundtrip(df1['bool1'], tm.assert_series_equal, 
+ compression=compression) + self._check_roundtrip(df1['int1'], tm.assert_series_equal, + compression=compression) def test_wide(self): @@ -5639,6 +5626,7 @@ def test_fixed_offset_tz(self): tm.assert_index_equal(recons.index, rng) assert rng.tz == recons.index.tz + @td.skip_if_windows def test_store_timezone(self): # GH2852 # issue storing datetime.date with a timezone as it resets when read diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 80421dcd462d4..9c649a42fb8ee 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -15,6 +15,7 @@ from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.tseries import offsets, frequencies from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz from pandas._libs.tslibs import conversion, period @@ -943,6 +944,7 @@ def test_is_leap_year(self, tz): dt = Timestamp('2100-01-01 00:00:00', tz=tz) assert not dt.is_leap_year + @td.skip_if_windows def test_timestamp(self): # GH#17329 # tz-naive --> treat it as if it were UTC for purposes of timestamp() @@ -1366,9 +1368,8 @@ def test_timestamp_to_datetime_explicit_pytz(self): assert stamp == dtval assert stamp.tzinfo == dtval.tzinfo + @td.skip_if_windows_python_3 def test_timestamp_to_datetime_explicit_dateutil(self): - tm._skip_if_windows_python_3() - stamp = Timestamp('20090415', tz=gettz('US/Eastern'), freq='D') dtval = stamp.to_pydatetime() assert stamp == dtval diff --git a/pandas/tests/sparse/test_libsparse.py b/pandas/tests/sparse/test_libsparse.py index 4842ebdd103c4..7719ea46503fd 100644 --- a/pandas/tests/sparse/test_libsparse.py +++ b/pandas/tests/sparse/test_libsparse.py @@ -4,8 +4,7 @@ import numpy as np import operator import pandas.util.testing as tm - -from pandas import compat +import pandas.util._test_decorators as td from pandas.core.sparse.array import 
IntIndex, BlockIndex, _make_index import pandas._libs.sparse as splib @@ -190,6 +189,7 @@ def test_intindex_make_union(self): class TestSparseIndexIntersect(object): + @td.skip_if_windows def test_intersect(self): def _check_correct(a, b, expected): result = a.intersect(b) @@ -212,8 +212,6 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen): _check_length_exc(xindex.to_int_index(), longer_index.to_int_index()) - if compat.is_platform_windows(): - pytest.skip("segfaults on win-64 when all tests are run") check_cases(_check_case) def test_intersect_empty(self): diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index af26ddb554752..9530cd5ac3f43 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -12,6 +12,7 @@ from datetime import datetime, timedelta, tzinfo, date import pandas.util.testing as tm +import pandas.util._test_decorators as td import pandas.tseries.offsets as offsets from pandas.compat import lrange, zip, PY3 from pandas.core.indexes.datetimes import bdate_range, date_range @@ -958,10 +959,8 @@ def cmptz(self, tz1, tz2): def localize(self, tz, x): return x.replace(tzinfo=tz) + @td.skip_if_windows def test_utc_with_system_utc(self): - # Skipped on win32 due to dateutil bug - tm._skip_if_windows() - from pandas._libs.tslibs.timezones import maybe_get_tz # from system utc to real utc @@ -1270,6 +1269,7 @@ def test_ambiguous_compat(self): assert (result_pytz.to_pydatetime().tzname() == result_dateutil.to_pydatetime().tzname()) + @td.skip_if_windows def test_replace_tzinfo(self): # GH 15683 dt = datetime(2016, 3, 27, 1) @@ -1663,6 +1663,7 @@ def test_normalize_tz(self): assert result.is_normalized assert not rng.is_normalized + @td.skip_if_windows def test_normalize_tz_local(self): # see gh-13459 timezones = ['US/Pacific', 'US/Eastern', 'UTC', 'Asia/Kolkata', diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index 
fe7c3b99987f5..31580bc9eab57 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -5,11 +5,11 @@ import sys from pandas import Series, DataFrame import pandas.util.testing as tm +import pandas.util._test_decorators as td from pandas.util.testing import (assert_almost_equal, raise_with_traceback, assert_index_equal, assert_series_equal, assert_frame_equal, assert_numpy_array_equal, RNGContext) -from pandas.compat import is_platform_windows class TestAssertAlmostEqual(object): @@ -159,12 +159,9 @@ def test_raise_with_traceback(self): class TestAssertNumpyArrayEqual(object): + @td.skip_if_windows def test_numpy_array_equal_message(self): - if is_platform_windows(): - pytest.skip("windows has incomparable line-endings " - "and uses L on the shape") - expected = """numpy array are different numpy array shapes are different @@ -287,12 +284,9 @@ def test_numpy_array_equal_message(self): assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]), obj='Index') + @td.skip_if_windows def test_numpy_array_equal_object_message(self): - if is_platform_windows(): - pytest.skip("windows has incomparable line-endings " - "and uses L on the shape") - a = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]) b = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]) diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index be4e60c6493c8..8da2b401fc848 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -16,7 +16,7 @@ validate_bool_kwarg) import pandas.util.testing as tm -from pandas.util._test_decorators import safe_import +import pandas.util._test_decorators as td class TestDecorators(object): @@ -406,6 +406,7 @@ def test_numpy_errstate_is_default(): assert np.geterr() == expected +@td.skip_if_windows class TestLocaleUtils(object): @classmethod @@ -416,8 +417,6 @@ def setup_class(cls): if not cls.locales: pytest.skip("No locales found") - tm._skip_if_windows() - 
@classmethod def teardown_class(cls): del cls.locales @@ -486,8 +485,8 @@ def test_make_signature(): def test_safe_import(monkeypatch): - assert not safe_import("foo") - assert not safe_import("pandas", min_version="99.99.99") + assert not td.safe_import("foo") + assert not td.safe_import("pandas", min_version="99.99.99") # Create dummy module to be imported import types @@ -496,7 +495,7 @@ def test_safe_import(monkeypatch): mod = types.ModuleType(mod_name) mod.__version__ = "1.5" - assert not safe_import(mod_name) + assert not td.safe_import(mod_name) monkeypatch.setitem(sys.modules, mod_name, mod) - assert not safe_import(mod_name, min_version="2.0") - assert safe_import(mod_name, min_version="1.0") + assert not td.safe_import(mod_name, min_version="2.0") + assert td.safe_import(mod_name, min_version="1.0") diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index ab37c71404ad9..6d15f360bcbe8 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -27,6 +27,8 @@ def test_foo(): import pytest from distutils.version import LooseVersion +from pandas.compat import is_platform_windows, is_platform_32bit, PY3 + def safe_import(mod_name, min_version=None): """ @@ -83,3 +85,10 @@ def _skip_if_mpl_1_5(): reason="Missing matplotlib dependency") skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), reason="matplotlib 1.5") +skip_if_32bit = pytest.mark.skipif(is_platform_32bit(), + reason="skipping for 32 bit") +skip_if_windows = pytest.mark.skipif(is_platform_windows(), + reason="Running on Windows") +skip_if_windows_python_3 = pytest.mark.skipif(is_platform_windows() and PY3, + reason=("not used on python3/" + "win32")) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 60b95b931b993..81f84ea646c86 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -38,9 +38,7 @@ import pandas.compat as compat from pandas.compat import ( filter, map, zip, range, unichr, lrange, lmap, lzip, u, 
callable, Counter, - raise_with_traceback, httplib, is_platform_windows, is_platform_32bit, - StringIO, PY3 -) + raise_with_traceback, httplib, StringIO, PY3) from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, @@ -319,12 +317,6 @@ def close(fignum=None): _close(fignum) -def _skip_if_32bit(): - if is_platform_32bit(): - import pytest - pytest.skip("skipping for 32 bit") - - def _skip_if_mpl_1_5(): import matplotlib as mpl @@ -367,18 +359,6 @@ def _skip_if_no_xarray(): pytest.skip("xarray version is too low: {version}".format(version=v)) -def _skip_if_windows_python_3(): - if PY3 and is_platform_windows(): - import pytest - pytest.skip("not used on python 3/win32") - - -def _skip_if_windows(): - if is_platform_windows(): - import pytest - pytest.skip("Running on Windows") - - def _skip_if_no_pathlib(): try: from pathlib import Path # noqa @@ -2825,9 +2805,6 @@ def set_timezone(tz): ... 'EDT' """ - if is_platform_windows(): - import pytest - pytest.skip("timezone setting not supported on windows") import os import time
- [X] xref #18190 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Don't have a 32bit or Windows platform at my disposal so plan to leverage AppVeyor and compare before / after of skips to ensure consistency. If you have other ideas on how to tackle let me know. FWIW the functions that were previously defined in ``pandas.util.testing`` were scarcely used, but I noticed that a lot of tests manually checked for 32 bit or windows and skipped directly. For consistency, I've updated those to use the decorators as well (will need to go back and do the same thing for matplotlib)
https://api.github.com/repos/pandas-dev/pandas/pulls/18693
2017-12-08T15:35:01Z
2017-12-11T01:17:30Z
2017-12-11T01:17:30Z
2017-12-11T01:22:30Z
DOC: clean-up whatsnew file for 0.21.1
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index daf060f50b060..31902c98d0b6c 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -3,9 +3,23 @@ v0.21.1 ------- -This is a minor release from 0.21.1 and includes a number of deprecations, new -features, enhancements, and performance improvements along with a large number -of bug fixes. We recommend that all users upgrade to this version. +This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes, +bug fixes and performance improvements. +We recommend that all users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who relied implicitly on pandas to plot datetimes + with matplotlib. See :ref:`here <whatsnew_0211.special>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + + +.. contents:: What's new in v0.21.1 + :local: + :backlinks: none + .. _whatsnew_0211.special: @@ -42,9 +56,13 @@ registering them when they want them. New features ~~~~~~~~~~~~ -- -- -- +.. _whatsnew_0211.enhancements.parquet: + +Improvements to the Parquet IO functionality +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) +- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) .. _whatsnew_0211.enhancements.other: @@ -53,7 +71,6 @@ Other Enhancements - :meth:`Timestamp.timestamp` is now available in Python 2.7. (:issue:`17329`) - :class:`Grouper` and :class:`TimeGrouper` now have a friendly repr output (:issue:`18203`). -- .. _whatsnew_0211.deprecations: @@ -69,17 +86,6 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of plotting large series/dataframes (:issue:`18236`). -- -- - -.. 
_whatsnew_0211.docs: - -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ - -- -- -- .. _whatsnew_0211.bug_fixes: @@ -97,7 +103,6 @@ Conversion - Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising``TypeError` (:issue:`18372`) - Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an err if ``end.tzinfo`` is None (:issue:`18431`) - Bug in :meth:`Series.fillna` which raised when passed a long integer on Python 2 (:issue:`18159`). -- Indexing ^^^^^^^^ @@ -107,7 +112,6 @@ Indexing - Bug in :class:`IntervalIndex` constructor when a list of intervals is passed with non-default ``closed`` (:issue:`18334`) - Bug in ``Index.putmask`` when an invalid mask passed (:issue:`18368`) - Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`) -- I/O ^^^ @@ -117,8 +121,6 @@ I/O - Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) - Bug in :func:`read_csv` when reading numeric category fields with high cardinality (:issue:`18186`) - Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) -- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) -- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) - Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). 
- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) - Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) @@ -129,8 +131,6 @@ Plotting ^^^^^^^^ - Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`) -- -- Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -139,15 +139,6 @@ Groupby/Resample/Rolling - Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequecy is 12h or higher (:issue:`15549`) - Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) - Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array (:issue:`18430`) -- -- - -Sparse -^^^^^^ - -- -- -- Reshaping ^^^^^^^^^ @@ -161,9 +152,6 @@ Numeric ^^^^^^^ - Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all equal values has floating issue (:issue:`18044`) -- -- -- Categorical ^^^^^^^^^^^ @@ -178,9 +166,3 @@ String ^^^^^^ - :meth:`Series.str.split()` will now propogate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) - -Other -^^^^^ - -- --
xref https://github.com/pandas-dev/pandas/issues/18244
https://api.github.com/repos/pandas-dev/pandas/pulls/18690
2017-12-08T13:26:30Z
2017-12-10T14:06:38Z
2017-12-10T14:06:38Z
2017-12-12T02:38:31Z
TST: xfail unreliable parallel coordinates sorted label test
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 8b0a981760c72..1753bc6387d33 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -202,6 +202,7 @@ def test_parallel_coordinates(self): with tm.assert_produces_warning(FutureWarning): parallel_coordinates(df, 'Name', colors=colors) + @pytest.mark.xfail(reason="unreliable test") def test_parallel_coordinates_with_sorted_labels(self): """ For #15908 """ from pandas.plotting import parallel_coordinates
https://api.github.com/repos/pandas-dev/pandas/pulls/18688
2017-12-08T11:09:40Z
2017-12-08T12:40:58Z
2017-12-08T12:40:58Z
2017-12-11T20:20:42Z
BUG: LatexFormatter.write_result multi-index
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index fcc9144bef9e3..0ad0cac0a4181 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -120,6 +120,7 @@ I/O - Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) - Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) - Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) +- Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) Plotting ^^^^^^^^ diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 8f25eb3af70cd..97abf60a40a5b 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -46,7 +46,6 @@ import pandas as pd import numpy as np -import itertools import csv from functools import partial @@ -903,6 +902,7 @@ def get_col_type(dtype): name = any(self.frame.index.names) cname = any(self.frame.columns.names) lastcol = self.frame.index.nlevels - 1 + previous_lev3 = None for i, lev in enumerate(self.frame.index.levels): lev2 = lev.format() blank = ' ' * len(lev2[0]) @@ -913,11 +913,19 @@ def get_col_type(dtype): lev3 = [blank] * clevels if name: lev3.append(lev.name) - for level_idx, group in itertools.groupby( - self.frame.index.labels[i]): - count = len(list(group)) - lev3.extend([lev2[level_idx]] + [blank] * (count - 1)) + current_idx_val = None + for level_idx in self.frame.index.labels[i]: + if ((previous_lev3 is None or + previous_lev3[len(lev3)].isspace()) and + lev2[level_idx] == current_idx_val): + # same index as above row and left index was the same + lev3.append(blank) + else: + # different value than above or left index different + lev3.append(lev2[level_idx]) + current_idx_val = lev2[level_idx] strcols.insert(i, lev3) + previous_lev3 = lev3 column_format = 
self.column_format if column_format is None: diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 35ef5a1cf5c72..7d42ff20ea31e 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -221,6 +221,28 @@ def test_to_latex_multiindex(self): assert result == expected + def test_to_latex_multiindex_dupe_level(self): + # see gh-14484 + # + # If an index is repeated in subsequent rows, it should be + # replaced with a blank in the created table. This should + # ONLY happen if all higher order indices (to the left) are + # equal too. In this test, 'c' has to be printed both times + # because the higher order index 'A' != 'B'. + df = pd.DataFrame(index=pd.MultiIndex.from_tuples( + [('A', 'c'), ('B', 'c')]), columns=['col']) + result = df.to_latex() + expected = r"""\begin{tabular}{lll} +\toprule + & & col \\ +\midrule +A & c & NaN \\ +B & c & NaN \\ +\bottomrule +\end{tabular} +""" + assert result == expected + def test_to_latex_multicolumnrow(self): df = pd.DataFrame({ ('c1', 0): {x: x for x in range(5)},
Supersedes #17499 Closes #14484 Had to create a new PR because #17499 doesn't seem like it allows edits from maintainers (got denied when trying to push the remote). cc @MxKy
https://api.github.com/repos/pandas-dev/pandas/pulls/18685
2017-12-08T05:19:09Z
2017-12-08T11:11:44Z
2017-12-08T11:11:44Z
2017-12-11T01:45:01Z
BENCH: fix timestamp asvs
diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py index 9d7d6d2998a8b..c8a93b09d94c0 100644 --- a/asv_bench/benchmarks/timestamp.py +++ b/asv_bench/benchmarks/timestamp.py @@ -6,10 +6,9 @@ class TimestampProperties(object): goal_time = 0.2 - params = [(None, None), - (pytz.timezone('Europe/Amsterdam'), None), - (None, 'B'), - (pytz.timezone('Europe/Amsterdam'), 'B')] + _tzs = [None, pytz.timezone('Europe/Amsterdam')] + _freqs = [None, 'B'] + params = [_tzs, _freqs] param_names = ['tz', 'freq'] def setup(self, tz, freq):
- [x] closes #18680
https://api.github.com/repos/pandas-dev/pandas/pulls/18684
2017-12-08T00:33:15Z
2017-12-08T10:51:03Z
2017-12-08T10:51:02Z
2017-12-08T19:40:41Z
Added mpl_15 decorator
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index db0c1e1cc563c..e87c67a682d46 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -519,6 +519,7 @@ def test_finder_hourly(self): xp = Period('1/1/1999', freq='H').ordinal assert rs == xp + @td.skip_if_mpl_1_5 @pytest.mark.slow def test_gaps(self): ts = tm.makeTimeSeries() @@ -526,7 +527,6 @@ def test_gaps(self): _, ax = self.plt.subplots() ts.plot(ax=ax) lines = ax.get_lines() - tm._skip_if_mpl_1_5() assert len(lines) == 1 l = lines[0] data = l.get_xydata() @@ -564,6 +564,7 @@ def test_gaps(self): mask = data.mask assert mask[2:5, 1].all() + @td.skip_if_mpl_1_5 @pytest.mark.slow def test_gap_upsample(self): low = tm.makeTimeSeries() @@ -580,8 +581,6 @@ def test_gap_upsample(self): l = lines[0] data = l.get_xydata() - tm._skip_if_mpl_1_5() - assert isinstance(data, np.ma.core.MaskedArray) mask = data.mask assert mask[5:25, 1].all() diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index b592a73e5d758..ab37c71404ad9 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -25,6 +25,7 @@ def test_foo(): """ import pytest +from distutils.version import LooseVersion def safe_import(mod_name, min_version=None): @@ -67,5 +68,18 @@ def _skip_if_no_mpl(): return True +def _skip_if_mpl_1_5(): + mod = safe_import("matplotlib") + + if mod: + v = mod.__version__ + if LooseVersion(v) > LooseVersion('1.4.3') or str(v)[0] == '0': + return True + else: + mod.use("Agg", warn=False) + + skip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(), reason="Missing matplotlib dependency") +skip_if_mpl_1_5 = pytest.mark.skipif(_skip_if_mpl_1_5(), + reason="matplotlib 1.5")
- [X] progress towards #18190 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry FWIW the functionality here could be slightly different, because the previous skip if items occurred in the middle of the function bodies instead of wrapping the function, so the first few lines of code within those functions would run in instances that are being skipped today. I've ignored that nuance because I find it strange that we would only want to run part of the test without any assertions for different versions of matplotlib, but if you feel it needs to be accounted for in some other fashion (say separate test cases) let me know
https://api.github.com/repos/pandas-dev/pandas/pulls/18682
2017-12-07T19:15:21Z
2017-12-08T02:10:30Z
2017-12-08T02:10:30Z
2017-12-08T02:11:06Z
Documentation fix for method last_valid_index
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 68cf5dd7161e3..9ce6b6148be56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4320,7 +4320,7 @@ def first_valid_index(self): return valid_indices[0] if len(valid_indices) else None @Appender(_shared_docs['valid_index'] % { - 'position': 'first', 'klass': 'DataFrame'}) + 'position': 'last', 'klass': 'DataFrame'}) def last_valid_index(self): if len(self) == 0: return None
Documentation was showing "Return index for first non-NA/null value" for both first_valid_index and last_valid_index methods. Fixed by switching "first" to "last" - [ X ] closes #18564 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18681
2017-12-07T18:24:15Z
2017-12-08T02:11:15Z
2017-12-08T02:11:15Z
2017-12-11T20:20:46Z
BUG: Ensure Index.astype('category') returns a CategoricalIndex
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index b319c8bb79bb3..29c6dcaef19ab 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -259,6 +259,7 @@ Conversion - Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`) - Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`) - Fixed a bug where ``FY5253`` date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) +- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) Indexing diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index f60c0d5ffdca0..5b1335c1a834e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1934,7 +1934,7 @@ def pandas_dtype(dtype): except TypeError: pass - elif dtype.startswith('interval[') or dtype.startswith('Interval['): + elif dtype.startswith('interval') or dtype.startswith('Interval'): try: return IntervalDtype.construct_from_string(dtype) except TypeError: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 040b735f8de2c..3a8edf9f066ee 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -340,6 +340,33 @@ def _validate_categories(categories, fastpath=False): return categories + def _update_dtype(self, dtype): + """ + Returns a CategoricalDtype with categories and ordered taken from dtype + if specified, otherwise falling back to self if unspecified + + Parameters + ---------- + dtype : CategoricalDtype + + Returns + ------- + new_dtype : CategoricalDtype + """ + if isinstance(dtype, compat.string_types) and dtype == 'category': + # dtype='category' should not change 
anything + return self + elif not self.is_dtype(dtype): + msg = ('a CategoricalDtype must be passed to perform an update, ' + 'got {dtype!r}').format(dtype=dtype) + raise ValueError(msg) + + # dtype is CDT: keep current categories if None (ordered can't be None) + new_categories = dtype.categories + if new_categories is None: + new_categories = self.categories + return CategoricalDtype(new_categories, dtype.ordered) + @property def categories(self): """ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 938fd7130faa5..9557261e61463 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1053,6 +1053,10 @@ def _to_embed(self, keep_tz=False, dtype=None): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): + if is_categorical_dtype(dtype): + from .category import CategoricalIndex + return CategoricalIndex(self.values, name=self.name, dtype=dtype, + copy=copy) return Index(self.values.astype(dtype, copy=copy), name=self.name, dtype=dtype) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 26ffb01b9577f..241907a54f393 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -4,6 +4,7 @@ from pandas import compat from pandas.compat.numpy import function as nv from pandas.core.dtypes.generic import ABCCategorical, ABCSeries +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( is_categorical_dtype, _ensure_platform_int, @@ -165,8 +166,6 @@ def _create_categorical(self, data, categories=None, ordered=None, data = Categorical(data, categories=categories, ordered=ordered, dtype=dtype) else: - from pandas.core.dtypes.dtypes import CategoricalDtype - if categories is not None: data = data.set_categories(categories, ordered=ordered) elif ordered is not None and ordered != data.ordered: @@ -344,6 +343,12 @@ def astype(self, dtype, copy=True): if is_interval_dtype(dtype): from pandas import IntervalIndex 
return IntervalIndex.from_intervals(np.array(self)) + elif is_categorical_dtype(dtype): + # GH 18630 + dtype = self.dtype._update_dtype(dtype) + if dtype == self.dtype: + return self.copy() if copy else self + return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy) @cache_readonly diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 290c77dd7f040..38e8c24de4bdf 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -20,6 +20,7 @@ is_period_dtype, is_bool_dtype, is_string_dtype, + is_categorical_dtype, is_string_like, is_list_like, is_scalar, @@ -35,6 +36,7 @@ from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, _index_shared_docs +from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.numeric import Int64Index, Float64Index import pandas.compat as compat from pandas.tseries.frequencies import ( @@ -915,6 +917,9 @@ def astype(self, dtype, copy=True): elif copy is True: return self.copy() return self + elif is_categorical_dtype(dtype): + return CategoricalIndex(self.values, name=self.name, dtype=dtype, + copy=copy) elif is_string_dtype(dtype): return Index(self.format(), name=self.name, dtype=object) elif is_period_dtype(dtype): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index a32e79920db41..292b0f638f821 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -29,6 +29,7 @@ Interval, IntervalMixin, IntervalTree, intervals_to_interval_bounds) +from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex @@ -632,8 +633,8 @@ def astype(self, dtype, copy=True): elif is_object_dtype(dtype): return Index(self.values, dtype=object) elif is_categorical_dtype(dtype): - from pandas import Categorical - 
return Categorical(self, ordered=True) + return CategoricalIndex(self.values, name=self.name, dtype=dtype, + copy=copy) raise ValueError('Cannot cast IntervalIndex to dtype {dtype}' .format(dtype=dtype)) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 0cbb87c65ccd7..c20c6e1f75a24 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -14,9 +14,11 @@ from pandas.core.dtypes.common import ( _ensure_int64, _ensure_platform_int, + is_categorical_dtype, is_object_dtype, is_iterator, is_list_like, + pandas_dtype, is_scalar) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.errors import PerformanceWarning, UnsortedIndexError @@ -2715,9 +2717,14 @@ def difference(self, other): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): - if not is_object_dtype(np.dtype(dtype)): - raise TypeError('Setting %s dtype to anything other than object ' - 'is not supported' % self.__class__) + dtype = pandas_dtype(dtype) + if is_categorical_dtype(dtype): + msg = '> 1 ndim Categorical are not supported at this time' + raise NotImplementedError(msg) + elif not is_object_dtype(dtype): + msg = ('Setting {cls} dtype to anything other than object ' + 'is not supported').format(cls=self.__class__) + raise TypeError(msg) elif copy is True: return self._shallow_copy() return self diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 72aeafbe7e1ab..5fc9cb47362d6 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -7,6 +7,7 @@ is_float_dtype, is_object_dtype, is_integer_dtype, + is_categorical_dtype, is_bool, is_bool_dtype, is_scalar) @@ -16,6 +17,7 @@ from pandas.core import algorithms from pandas.core.indexes.base import ( Index, InvalidIndexError, _index_shared_docs) +from pandas.core.indexes.category import CategoricalIndex from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat import 
pandas.core.indexes.base as ibase @@ -321,10 +323,13 @@ def astype(self, dtype, copy=True): values = self._values.astype(dtype, copy=copy) elif is_object_dtype(dtype): values = self._values.astype('object', copy=copy) + elif is_categorical_dtype(dtype): + return CategoricalIndex(self, name=self.name, dtype=dtype, + copy=copy) else: - raise TypeError('Setting %s dtype to anything other than ' - 'float64 or object is not supported' % - self.__class__) + raise TypeError('Setting {cls} dtype to anything other than ' + 'float64, object, or category is not supported' + .format(cls=self.__class__)) return Index(values, name=self.name, dtype=dtype) @Appender(_index_shared_docs['_convert_scalar_indexer']) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 8b541bdce39ed..64756906d8a63 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -16,6 +16,7 @@ is_timedelta64_dtype, is_period_dtype, is_bool_dtype, + is_categorical_dtype, pandas_dtype, _ensure_object) from pandas.core.dtypes.dtypes import PeriodDtype @@ -23,6 +24,7 @@ import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc +from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexes.datetimelike import DatelikeOps, DatetimeIndexOpsMixin @@ -517,6 +519,9 @@ def astype(self, dtype, copy=True, how='start'): return self.to_timestamp(how=how).tz_localize(dtype.tz) elif is_period_dtype(dtype): return self.asfreq(freq=dtype.freq) + elif is_categorical_dtype(dtype): + return CategoricalIndex(self.values, name=self.name, dtype=dtype, + copy=copy) raise TypeError('Cannot cast PeriodIndex to dtype %s' % dtype) @Substitution(klass='PeriodIndex') diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 77e05ccf4db22..25c764b138465 
100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -12,12 +12,15 @@ is_object_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, + is_categorical_dtype, + pandas_dtype, _ensure_int64) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries from pandas.core.common import _maybe_box, _values_from_object from pandas.core.indexes.base import Index +from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat from pandas.compat import u @@ -479,7 +482,7 @@ def to_pytimedelta(self): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): - dtype = np.dtype(dtype) + dtype = pandas_dtype(dtype) if is_object_dtype(dtype): return self._box_values_as_index() @@ -498,6 +501,9 @@ def astype(self, dtype, copy=True): elif is_integer_dtype(dtype): return Index(self.values.astype('i8', copy=copy), dtype='i8', name=self.name) + elif is_categorical_dtype(dtype): + return CategoricalIndex(self.values, name=self.name, dtype=dtype, + copy=copy) raise TypeError('Cannot cast TimedeltaIndex to dtype %s' % dtype) def union(self, other): diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 84e6f0d4f5a7a..d8e16482a414e 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -9,6 +9,7 @@ from pandas import ( Series, Categorical, CategoricalIndex, IntervalIndex, date_range) +from pandas.compat import string_types from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, IntervalDtype, CategoricalDtype) @@ -123,6 +124,41 @@ def test_tuple_categories(self): result = CategoricalDtype(categories) assert all(result.categories == categories) + @pytest.mark.parametrize('dtype', [ + CategoricalDtype(list('abc'), False), + CategoricalDtype(list('abc'), True)]) + @pytest.mark.parametrize('new_dtype', [ + 'category', + CategoricalDtype(None, 
False), + CategoricalDtype(None, True), + CategoricalDtype(list('abc'), False), + CategoricalDtype(list('abc'), True), + CategoricalDtype(list('cba'), False), + CategoricalDtype(list('cba'), True), + CategoricalDtype(list('wxyz'), False), + CategoricalDtype(list('wxyz'), True)]) + def test_update_dtype(self, dtype, new_dtype): + if isinstance(new_dtype, string_types) and new_dtype == 'category': + expected_categories = dtype.categories + expected_ordered = dtype.ordered + else: + expected_categories = new_dtype.categories + if expected_categories is None: + expected_categories = dtype.categories + expected_ordered = new_dtype.ordered + + result = dtype._update_dtype(new_dtype) + tm.assert_index_equal(result.categories, expected_categories) + assert result.ordered is expected_ordered + + @pytest.mark.parametrize('bad_dtype', [ + 'foo', object, np.int64, PeriodDtype('Q'), IntervalDtype(object)]) + def test_update_dtype_errors(self, bad_dtype): + dtype = CategoricalDtype(list('abc'), False) + msg = 'a CategoricalDtype must be passed to perform an update, ' + with tm.assert_raises_regex(ValueError, msg): + dtype._update_dtype(bad_dtype) + class TestDatetimeTZDtype(Base): diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index c1ee18526cc01..07e84ad60ef51 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -13,6 +13,7 @@ from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas.core.dtypes.common import needs_i8_conversion +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas._libs.tslib import iNaT import pandas.util.testing as tm @@ -1058,3 +1059,30 @@ def test_putmask_with_wrong_mask(self): with pytest.raises(ValueError): index.putmask('foo', 1) + + @pytest.mark.parametrize('copy', [True, False]) + @pytest.mark.parametrize('name', [None, 'foo']) + @pytest.mark.parametrize('ordered', [True, False]) + def 
test_astype_category(self, copy, name, ordered): + # GH 18630 + index = self.create_index() + if name: + index = index.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=ordered) + result = index.astype(dtype, copy=copy) + expected = CategoricalIndex(index.values, name=name, ordered=ordered) + tm.assert_index_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered) + result = index.astype(dtype, copy=copy) + expected = CategoricalIndex(index.values, name=name, dtype=dtype) + tm.assert_index_equal(result, expected) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + result = index.astype('category', copy=copy) + expected = CategoricalIndex(index.values, name=name) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index c2eee4e437347..ae9e011d76597 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -388,9 +388,6 @@ def test_delete(self): def test_astype(self): ci = self.create_index() - result = ci.astype('category') - tm.assert_index_equal(result, ci, exact=True) - result = ci.astype(object) tm.assert_index_equal(result, Index(np.array(ci))) @@ -414,6 +411,37 @@ def test_astype(self): result = IntervalIndex.from_intervals(result.values) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('copy', [True, False]) + @pytest.mark.parametrize('name', [None, 'foo']) + @pytest.mark.parametrize('dtype_ordered', [True, False]) + @pytest.mark.parametrize('index_ordered', [True, False]) + def test_astype_category(self, copy, name, dtype_ordered, index_ordered): + # GH 18630 + index = self.create_index(ordered=index_ordered) + if name: + index = index.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = index.astype(dtype, copy=copy) + expected = 
CategoricalIndex(index.tolist(), + name=name, + categories=index.categories, + ordered=dtype_ordered) + tm.assert_index_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered) + result = index.astype(dtype, copy=copy) + expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype) + tm.assert_index_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = index.astype('category', copy=copy) + expected = index + tm.assert_index_equal(result, expected) + def test_reindex_base(self): # Determined by cat ordering. idx = CategoricalIndex(list("cab"), categories=list("cab")) diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index 1850ff2795a24..abad930793d7f 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -376,10 +376,6 @@ def test_astype(self, closed): tm.assert_index_equal(result, idx) assert result.equals(idx) - result = idx.astype('category') - expected = pd.Categorical(idx, ordered=True) - tm.assert_categorical_equal(result, expected) - @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series]) def test_where(self, closed, klass): idx = self.create_index(closed=closed) diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index e86b786e0d717..510ca6ac83ec0 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -16,6 +16,7 @@ compat, date_range, period_range) from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY from pandas.errors import PerformanceWarning, UnsortedIndexError +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.indexes.base import InvalidIndexError from pandas._libs import lib from pandas._libs.lib import Timestamp @@ -554,6 +555,18 @@ def test_astype(self): with tm.assert_raises_regex(TypeError, 
"^Setting.*dtype.*object"): self.index.astype(np.dtype(int)) + @pytest.mark.parametrize('ordered', [True, False]) + def test_astype_category(self, ordered): + # GH 18630 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + self.index.astype(CategoricalDtype(ordered=ordered)) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + with tm.assert_raises_regex(NotImplementedError, msg): + self.index.astype('category') + def test_constructor_single_level(self): result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], labels=[[0, 1, 2, 3]], names=['first']) diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 4edce8af92f84..c27af7a5bf8e4 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -4,9 +4,8 @@ import numpy as np from pandas.compat import zip -from pandas import (Series, Index, isna, - to_datetime, DatetimeIndex, Timestamp, - Interval, IntervalIndex, Categorical, +from pandas import (Series, isna, to_datetime, DatetimeIndex, + Timestamp, Interval, IntervalIndex, Categorical, cut, qcut, date_range) import pandas.util.testing as tm from pandas.api.types import CategoricalDtype as CDT @@ -29,7 +28,8 @@ def test_bins(self): result, bins = cut(data, 3, retbins=True) intervals = IntervalIndex.from_breaks(bins.round(3)) - expected = intervals.take([0, 0, 0, 1, 2, 0]).astype('category') + intervals = intervals.take([0, 0, 0, 1, 2, 0]) + expected = Categorical(intervals, ordered=True) tm.assert_categorical_equal(result, expected) tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7])) @@ -38,7 +38,8 @@ def test_right(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=True, retbins=True) intervals = IntervalIndex.from_breaks(bins.round(3)) - expected = intervals.astype('category').take([0, 0, 0, 2, 3, 0, 0]) + expected = 
Categorical(intervals, ordered=True) + expected = expected.take([0, 0, 0, 2, 3, 0, 0]) tm.assert_categorical_equal(result, expected) tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7])) @@ -47,7 +48,8 @@ def test_noright(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=False, retbins=True) intervals = IntervalIndex.from_breaks(bins.round(3), closed='left') - expected = intervals.take([0, 0, 0, 2, 3, 0, 1]).astype('category') + intervals = intervals.take([0, 0, 0, 2, 3, 0, 1]) + expected = Categorical(intervals, ordered=True) tm.assert_categorical_equal(result, expected) tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095])) @@ -56,7 +58,8 @@ def test_arraylike(self): data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1] result, bins = cut(data, 3, retbins=True) intervals = IntervalIndex.from_breaks(bins.round(3)) - expected = intervals.take([0, 0, 0, 1, 2, 0]).astype('category') + intervals = intervals.take([0, 0, 0, 1, 2, 0]) + expected = Categorical(intervals, ordered=True) tm.assert_categorical_equal(result, expected) tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7])) @@ -249,8 +252,8 @@ def test_qcut_nas(self): def test_qcut_index(self): result = qcut([0, 2], 2) - expected = Index([Interval(-0.001, 1), Interval(1, 2)]).astype( - 'category') + intervals = [Interval(-0.001, 1), Interval(1, 2)] + expected = Categorical(intervals, ordered=True) tm.assert_categorical_equal(result, expected) def test_round_frac(self): diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 69c8f90a57e9c..7ef77e4c78e10 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -19,6 +19,7 @@ import pandas.core.algorithms as algos from pandas.core.common import _asarray_tuplesafe import pandas.util.testing as tm +from pandas.core.dtypes.dtypes import CategoricalDtype as CDT from pandas.compat.numpy import np_array_datetime64_compat from pandas.util.testing 
import assert_almost_equal @@ -565,8 +566,8 @@ def test_value_counts(self): # assert isinstance(factor, n) result = algos.value_counts(factor) breaks = [-1.194, -0.535, 0.121, 0.777, 1.433] - expected_index = IntervalIndex.from_breaks(breaks).astype('category') - expected = Series([1, 1, 1, 1], index=expected_index) + index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True)) + expected = Series([1, 1, 1, 1], index=index) tm.assert_series_equal(result.sort_index(), expected.sort_index()) def test_value_counts_bins(self):
- [X] closes #18630 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Notes: - `MultiIndex.astype('category')` raises per @TomAugspurger's comment in the issue. - `IntervalIndex.astype('category')` return a `Categorical` with `ordered=True` instead of `CategoricalIndex`, since it looks like someone previously intentionally implemented it this way. I don't immediately see a reason why, but left it as is. Would be straightforward to make this consistent and return a `CategoricalIndex`. - All other types of index should return a `CategoricalIndex`.
https://api.github.com/repos/pandas-dev/pandas/pulls/18677
2017-12-07T08:11:35Z
2017-12-11T11:06:20Z
2017-12-11T11:06:20Z
2017-12-11T21:25:20Z
CLN: ASV Gil benchmark
diff --git a/asv_bench/benchmarks/attrs_caching.py b/asv_bench/benchmarks/attrs_caching.py index 3c091be7a8424..48f0b7d71144c 100644 --- a/asv_bench/benchmarks/attrs_caching.py +++ b/asv_bench/benchmarks/attrs_caching.py @@ -1,18 +1,18 @@ import numpy as np from pandas import DataFrame - try: from pandas.util import cache_readonly except ImportError: from pandas.util.decorators import cache_readonly +from .pandas_vb_common import setup # noqa + class DataFrameAttributes(object): goal_time = 0.2 def setup(self): - np.random.seed(1234) self.df = DataFrame(np.random.randn(10, 6)) self.cur_index = self.df.index diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 78a94976e732d..654e5d3bfec0e 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -1,241 +1,135 @@ -from .pandas_vb_common import * - +import numpy as np +import pandas.util.testing as tm +from pandas import (DataFrame, Series, rolling_median, rolling_mean, + rolling_min, rolling_max, rolling_var, rolling_skew, + rolling_kurt, rolling_std, read_csv, factorize, date_range) from pandas.core.algorithms import take_1d - -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO - try: from pandas._libs import algos except ImportError: from pandas import algos - try: from pandas.util.testing import test_parallel - have_real_test_parallel = True except ImportError: have_real_test_parallel = False - def test_parallel(num_threads=1): - def wrapper(fname): return fname - return wrapper +from .pandas_vb_common import BaseIO, setup # noqa -class NoGilGroupby(object): - goal_time = 0.2 - def setup(self): - self.N = 1000000 - self.ngroups = 1000 - np.random.seed(1234) - self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) +class ParallelGroupbyMethods(object): - np.random.seed(1234) - self.size = 2 ** 22 - self.ngroups = 100 - self.data = Series(np.random.randint(0, self.ngroups, 
size=self.size)) + goal_time = 0.2 + params = ([2, 4, 8], ['count', 'last', 'max', 'mean', 'min', 'prod', + 'sum', 'var']) + param_names = ['threads', 'method'] - if (not have_real_test_parallel): + def setup(self, threads, method): + if not have_real_test_parallel: raise NotImplementedError + N = 10**6 + ngroups = 10**3 + df = DataFrame({'key': np.random.randint(0, ngroups, size=N), + 'data': np.random.randn(N)}) - @test_parallel(num_threads=2) - def _pg2_count(self): - self.df.groupby('key')['data'].count() - - def time_count_2(self): - self._pg2_count() - - @test_parallel(num_threads=2) - def _pg2_last(self): - self.df.groupby('key')['data'].last() - - def time_last_2(self): - self._pg2_last() - - @test_parallel(num_threads=2) - def _pg2_max(self): - self.df.groupby('key')['data'].max() - - def time_max_2(self): - self._pg2_max() - - @test_parallel(num_threads=2) - def _pg2_mean(self): - self.df.groupby('key')['data'].mean() - - def time_mean_2(self): - self._pg2_mean() - - @test_parallel(num_threads=2) - def _pg2_min(self): - self.df.groupby('key')['data'].min() - - def time_min_2(self): - self._pg2_min() - - @test_parallel(num_threads=2) - def _pg2_prod(self): - self.df.groupby('key')['data'].prod() - - def time_prod_2(self): - self._pg2_prod() - - @test_parallel(num_threads=2) - def _pg2_sum(self): - self.df.groupby('key')['data'].sum() - - def time_sum_2(self): - self._pg2_sum() - - @test_parallel(num_threads=4) - def _pg4_sum(self): - self.df.groupby('key')['data'].sum() - - def time_sum_4(self): - self._pg4_sum() - - def time_sum_4_notp(self): - for i in range(4): - self.df.groupby('key')['data'].sum() - - def _f_sum(self): - self.df.groupby('key')['data'].sum() - - @test_parallel(num_threads=8) - def _pg8_sum(self): - self._f_sum() - - def time_sum_8(self): - self._pg8_sum() - - def time_sum_8_notp(self): - for i in range(8): - self._f_sum() - - @test_parallel(num_threads=2) - def _pg2_var(self): - self.df.groupby('key')['data'].var() - - def 
time_var_2(self): - self._pg2_var() - - # get groups - - def _groups(self): - self.data.groupby(self.data).groups - - @test_parallel(num_threads=2) - def _pg2_groups(self): - self._groups() + @test_parallel(num_threads=threads) + def parallel(): + getattr(df.groupby('key')['data'], method)() + self.parallel = parallel - def time_groups_2(self): - self._pg2_groups() + def loop(): + getattr(df.groupby('key')['data'], method)() + self.loop = loop - @test_parallel(num_threads=4) - def _pg4_groups(self): - self._groups() + def time_parallel(self, threads, method): + self.parallel() - def time_groups_4(self): - self._pg4_groups() + def time_loop(self, threads, method): + for i in range(threads): + self.loop() - @test_parallel(num_threads=8) - def _pg8_groups(self): - self._groups() - def time_groups_8(self): - self._pg8_groups() +class ParallelGroups(object): - - -class nogil_take1d_float64(object): goal_time = 0.2 + params = [2, 4, 8] + param_names = ['threads'] - def setup(self): - self.N = 1000000 - self.ngroups = 1000 - np.random.seed(1234) - self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - if (not have_real_test_parallel): + def setup(self, threads): + if not have_real_test_parallel: raise NotImplementedError - self.N = 10000000.0 - self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), }) - self.indexer = np.arange(100, (len(self.df) - 100)) + size = 2**22 + ngroups = 10**3 + data = Series(np.random.randint(0, ngroups, size=size)) - def time_nogil_take1d_float64(self): - self.take_1d_pg2_int64() + @test_parallel(num_threads=threads) + def get_groups(): + data.groupby(data).groups + self.get_groups = get_groups - @test_parallel(num_threads=2) - def take_1d_pg2_int64(self): - take_1d(self.df.int64.values, self.indexer) + def time_get_groups(self, threads): + self.get_groups() - @test_parallel(num_threads=2) - def take_1d_pg2_float64(self): - 
take_1d(self.df.float64.values, self.indexer) +class ParallelTake1D(object): -class nogil_take1d_int64(object): goal_time = 0.2 + params = ['int64', 'float64'] + param_names = ['dtype'] - def setup(self): - self.N = 1000000 - self.ngroups = 1000 - np.random.seed(1234) - self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), }) - if (not have_real_test_parallel): + def setup(self, dtype): + if not have_real_test_parallel: raise NotImplementedError - self.N = 10000000.0 - self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), }) - self.indexer = np.arange(100, (len(self.df) - 100)) + N = 10**6 + df = DataFrame({'col': np.arange(N, dtype=dtype)}) + indexer = np.arange(100, len(df) - 100) - def time_nogil_take1d_int64(self): - self.take_1d_pg2_float64() + @test_parallel(num_threads=2) + def parallel_take1d(): + take_1d(df['col'].values, indexer) + self.parallel_take1d = parallel_take1d - @test_parallel(num_threads=2) - def take_1d_pg2_int64(self): - take_1d(self.df.int64.values, self.indexer) + def time_take1d(self, dtype): + self.parallel_take1d() - @test_parallel(num_threads=2) - def take_1d_pg2_float64(self): - take_1d(self.df.float64.values, self.indexer) +class ParallelKth(object): -class nogil_kth_smallest(object): number = 1 repeat = 5 def setup(self): - if (not have_real_test_parallel): + if not have_real_test_parallel: raise NotImplementedError - np.random.seed(1234) - self.N = 10000000 - self.k = 500000 - self.a = np.random.randn(self.N) - self.b = self.a.copy() - self.kwargs_list = [{'arr': self.a}, {'arr': self.b}] + N = 10**7 + k = 5 * 10**5 + kwargs_list = [{'arr': np.random.randn(N)}, + {'arr': np.random.randn(N)}] - def time_nogil_kth_smallest(self): - @test_parallel(num_threads=2, kwargs_list=self.kwargs_list) - def run(arr): - algos.kth_smallest(arr, self.k) - run() + @test_parallel(num_threads=2, kwargs_list=kwargs_list) + def 
parallel_kth_smallest(arr): + algos.kth_smallest(arr, k) + self.parallel_kth_smallest = parallel_kth_smallest + def time_kth_smallest(self): + self.parallel_kth_smallest() + + +class ParallelDatetimeFields(object): -class nogil_datetime_fields(object): goal_time = 0.2 def setup(self): - self.N = 100000000 - self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T') - self.period = self.dti.to_period('D') - if (not have_real_test_parallel): + if not have_real_test_parallel: raise NotImplementedError + N = 10**6 + self.dti = date_range('1900-01-01', periods=N, freq='T') + self.period = self.dti.to_period('D') def time_datetime_field_year(self): @test_parallel(num_threads=2) @@ -274,149 +168,95 @@ def run(period): run(self.period) -class nogil_rolling_algos_slow(object): - goal_time = 0.2 +class ParallelRolling(object): - def setup(self): - self.win = 100 - np.random.seed(1234) - self.arr = np.random.rand(100000) - if (not have_real_test_parallel): - raise NotImplementedError - - def time_nogil_rolling_median(self): - @test_parallel(num_threads=2) - def run(arr, win): - rolling_median(arr, win) - run(self.arr, self.win) - - -class nogil_rolling_algos_fast(object): goal_time = 0.2 + params = ['rolling_median', 'rolling_mean', 'rolling_min', 'rolling_max', + 'rolling_var', 'rolling_skew', 'rolling_kurt', 'rolling_std'] + param_names = ['method'] - def setup(self): - self.win = 100 - np.random.seed(1234) - self.arr = np.random.rand(1000000) - if (not have_real_test_parallel): + def setup(self, method): + if not have_real_test_parallel: raise NotImplementedError + win = 100 + arr = np.random.rand(100000) + rolling = {'rolling_median': rolling_median, + 'rolling_mean': rolling_mean, + 'rolling_min': rolling_min, + 'rolling_max': rolling_max, + 'rolling_var': rolling_var, + 'rolling_skew': rolling_skew, + 'rolling_kurt': rolling_kurt, + 'rolling_std': rolling_std} - def time_nogil_rolling_mean(self): - @test_parallel(num_threads=2) - def run(arr, win): - 
rolling_mean(arr, win) - run(self.arr, self.win) - - def time_nogil_rolling_min(self): @test_parallel(num_threads=2) - def run(arr, win): - rolling_min(arr, win) - run(self.arr, self.win) + def parallel_rolling(): + rolling[method](arr, win) + self.parallel_rolling = parallel_rolling - def time_nogil_rolling_max(self): - @test_parallel(num_threads=2) - def run(arr, win): - rolling_max(arr, win) - run(self.arr, self.win) - - def time_nogil_rolling_var(self): - @test_parallel(num_threads=2) - def run(arr, win): - rolling_var(arr, win) - run(self.arr, self.win) - - def time_nogil_rolling_skew(self): - @test_parallel(num_threads=2) - def run(arr, win): - rolling_skew(arr, win) - run(self.arr, self.win) + def time_rolling(self, method): + self.parallel_rolling() - def time_nogil_rolling_kurt(self): - @test_parallel(num_threads=2) - def run(arr, win): - rolling_kurt(arr, win) - run(self.arr, self.win) - - def time_nogil_rolling_std(self): - @test_parallel(num_threads=2) - def run(arr, win): - rolling_std(arr, win) - run(self.arr, self.win) +class ParallelReadCSV(BaseIO): -class nogil_read_csv(object): number = 1 repeat = 5 + params = ['float', 'object', 'datetime'] + param_names = ['dtype'] - def setup(self): - if (not have_real_test_parallel): + def setup(self, dtype): + if not have_real_test_parallel: raise NotImplementedError - # Using the values - self.df = DataFrame(np.random.randn(10000, 50)) - self.df.to_csv('__test__.csv') - - self.rng = date_range('1/1/2000', periods=10000) - self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng) - self.df_date_time.to_csv('__test_datetime__.csv') + rows = 10000 + cols = 50 + data = {'float': DataFrame(np.random.randn(rows, cols)), + 'datetime': DataFrame(np.random.randn(rows, cols), + index=date_range('1/1/2000', + periods=rows)), + 'object': DataFrame('foo', + index=range(rows), + columns=['object%03d'.format(i) + for i in range(5)])} + + self.fname = '__test_{}__.csv'.format(dtype) + df = data[dtype] + 
df.to_csv(self.fname) - self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object')) - self.df_object.to_csv('__test_object__.csv') - - def create_cols(self, name): - return [('%s%03d' % (name, i)) for i in range(5)] - - @test_parallel(num_threads=2) - def pg_read_csv(self): - read_csv('__test__.csv', sep=',', header=None, float_precision=None) - - def time_read_csv(self): - self.pg_read_csv() - - @test_parallel(num_threads=2) - def pg_read_csv_object(self): - read_csv('__test_object__.csv', sep=',') - - def time_read_csv_object(self): - self.pg_read_csv_object() + @test_parallel(num_threads=2) + def parallel_read_csv(): + read_csv(self.fname) + self.parallel_read_csv = parallel_read_csv - @test_parallel(num_threads=2) - def pg_read_csv_datetime(self): - read_csv('__test_datetime__.csv', sep=',', header=None) + def time_read_csv(self, dtype): + self.parallel_read_csv() - def time_read_csv_datetime(self): - self.pg_read_csv_datetime() +class ParallelFactorize(object): -class nogil_factorize(object): number = 1 repeat = 5 + params = [2, 4, 8] + param_names = ['threads'] - def setup(self): - if (not have_real_test_parallel): + def setup(self, threads): + if not have_real_test_parallel: raise NotImplementedError - np.random.seed(1234) - self.strings = tm.makeStringIndex(100000) - - def factorize_strings(self): - pd.factorize(self.strings) - - @test_parallel(num_threads=4) - def _pg_factorize_strings_4(self): - self.factorize_strings() + strings = tm.makeStringIndex(100000) - def time_factorize_strings_4(self): - for i in range(2): - self._pg_factorize_strings_4() + @test_parallel(num_threads=threads) + def parallel(): + factorize(strings) + self.parallel = parallel - @test_parallel(num_threads=2) - def _pg_factorize_strings_2(self): - self.factorize_strings() + def loop(): + factorize(strings) + self.loop = loop - def time_factorize_strings_2(self): - for i in range(4): - self._pg_factorize_strings_2() + def time_parallel(self, threads): + 
self.parallel() - def time_factorize_strings(self): - for i in range(8): - self.factorize_strings() + def time_loop(self, threads): + for i in range(threads): + self.loop() diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 3abf2338e1d94..1978d240abedd 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -16,32 +16,32 @@ class ApplyDictReturn(object): def setup(self): self.labels = np.arange(1000).repeat(10) self.data = Series(np.random.randn(len(self.labels))) - self.f = lambda x: {'first': x.values[0], 'last': x.values[(-1)]} def time_groupby_apply_dict_return(self): - self.data.groupby(self.labels).apply(self.f) + self.data.groupby(self.labels).apply(lambda x: {'first': x.values[0], + 'last': x.values[-1]}) class Apply(object): goal_time = 0.2 - def setup(self): + def setup_cache(self): N = 10**4 labels = np.random.randint(0, 2000, size=N) labels2 = np.random.randint(0, 3, size=N) - self.df = DataFrame({'key': labels, - 'key2': labels2, - 'value1': np.random.randn(N), - 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4), - }) - self.scalar_function = lambda x: 1 + df = DataFrame({'key': labels, + 'key2': labels2, + 'value1': np.random.randn(N), + 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4) + }) + return df - def time_scalar_function_multi_col(self): - self.df.groupby(['key', 'key2']).apply(self.scalar_function) + def time_scalar_function_multi_col(self, df): + df.groupby(['key', 'key2']).apply(lambda x: 1) - def time_scalar_function_single_col(self): - self.df.groupby('key').apply(self.scalar_function) + def time_scalar_function_single_col(self, df): + df.groupby('key').apply(lambda x: 1) @staticmethod def df_copy_function(g): @@ -49,11 +49,11 @@ def df_copy_function(g): g.name return g.copy() - def time_copy_function_multi_col(self): - self.df.groupby(['key', 'key2']).apply(self.df_copy_function) + def time_copy_function_multi_col(self, df): + df.groupby(['key', 
'key2']).apply(self.df_copy_function) - def time_copy_overhead_single_col(self): - self.df.groupby('key').apply(self.df_copy_function) + def time_copy_overhead_single_col(self, df): + df.groupby('key').apply(self.df_copy_function) class Groups(object): @@ -274,13 +274,16 @@ class GroupStrings(object): goal_time = 0.2 def setup(self): - n = (5 * 7 * 11) * (1 << 9) + n = 2 * 10**5 alpha = list(map(''.join, product((ascii_letters + digits), repeat=4))) - f = lambda k: np.repeat(np.random.choice(alpha, (n // k)), k) - self.df = DataFrame({'a': f(11), - 'b': f(7), - 'c': f(5), - 'd': f(1)}) + self.df = DataFrame({'a': np.repeat(np.random.choice(alpha, + (n // 11)), 11), + 'b': np.repeat(np.random.choice(alpha, + (n // 7)), 7), + 'c': np.repeat(np.random.choice(alpha, + (n // 5)), 5), + 'd': np.repeat(np.random.choice(alpha, + (n // 1)), 1)}) self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3) i = np.random.permutation(len(self.df)) self.df = self.df.iloc[i].reset_index(drop=True) @@ -293,29 +296,29 @@ class MultiColumn(object): goal_time = 0.2 - def setup(self): + def setup_cache(self): N = 10**5 key1 = np.tile(np.arange(100, dtype=object), 1000) key2 = key1.copy() np.random.shuffle(key1) np.random.shuffle(key2) - self.df = DataFrame({'key1': key1, - 'key2': key2, - 'data1': np.random.randn(N), - 'data2': np.random.randn(N)}) - self.f = lambda x: x.values.sum() + df = DataFrame({'key1': key1, + 'key2': key2, + 'data1': np.random.randn(N), + 'data2': np.random.randn(N)}) + return df - def time_lambda_sum(self): - self.df.groupby(['key1', 'key2']).agg(self.f) + def time_lambda_sum(self, df): + df.groupby(['key1', 'key2']).agg(lambda x: x.values.sum()) - def time_cython_sum(self): - self.df.groupby(['key1', 'key2']).sum() + def time_cython_sum(self, df): + df.groupby(['key1', 'key2']).sum() - def time_col_select_lambda_sum(self): - self.df.groupby(['key1', 'key2'])['data1'].agg(self.f) + def time_col_select_lambda_sum(self, df): + df.groupby(['key1', 
'key2'])['data1'].agg(lambda x: x.values.sum()) - def time_col_select_numpy_sum(self): - self.df.groupby(['key1', 'key2'])['data1'].agg(np.sum) + def time_col_select_numpy_sum(self, df): + df.groupby(['key1', 'key2'])['data1'].agg(np.sum) class Size(object): @@ -582,7 +585,6 @@ def setup(self): arr[2::10000, 2] = np.nan data = DataFrame(arr, index=index, columns=['col1', 'col20', 'col3']) self.df = data - self.f_max = lambda x: max(x) n = 20000 self.df1 = DataFrame(np.random.randint(1, n, (n, 3)), @@ -596,7 +598,7 @@ def setup(self): self.df4['jim'] = self.df4['joe'] def time_transform_lambda_max(self): - self.df.groupby(level='lev1').transform(self.f_max) + self.df.groupby(level='lev1').transform(lambda x: max(x)) def time_transform_ufunc_max(self): self.df.groupby(level='lev1').transform(np.max) diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index 74517f184ae6f..b7040bfdb9397 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -36,7 +36,7 @@ def remove(self, f): # causes an exception to be raised pass - def teardown(self): + def teardown(self, *args, **kwargs): self.remove(self.fname) # try em until it works! diff --git a/ci/lint.sh b/ci/lint.sh index bec82602fa509..1c157abbff060 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -24,7 +24,7 @@ if [ "$LINT" ]; then echo "Linting setup.py DONE" echo "Linting asv_bench/benchmarks/" - flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ghijoprs]*.py --ignore=F811 + flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ijoprs]*.py --ignore=F811 if [ $? -ne "0" ]; then RET=1 fi
- Adding missing `setup` import to `attrs_caching.py` - Now lint benchmark files that start with g and h - Utilized `params` for `gil.py` benchmarks, flake8 and remove start imports ``` asv dev -b ^gil · Discovering benchmarks · Running 15 total benchmarks (1 commits * 1 environments * 15 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 6.67%] ··· Running gil.ParallelDatetimeFields.time_datetime_field_day 211ms [ 13.33%] ··· Running gil.ParallelDatetimeFields.time_datetime_field_daysinmonth 224ms [ 20.00%] ··· Running gil.ParallelDatetimeFields.time_datetime_field_normalize 324ms [ 26.67%] ··· Running gil.ParallelDatetimeFields.time_datetime_field_year 201ms [ 33.33%] ··· Running gil.ParallelDatetimeFields.time_datetime_to_period 258ms [ 40.00%] ··· Running gil.ParallelDatetimeFields.time_period_to_datetime 325ms [ 46.67%] ··· Running gil.ParallelFactorize.time_loop ok [ 46.67%] ···· ========= ======== threads --------- -------- 2 76.9ms 4 156ms 8 314ms ========= ======== [ 53.33%] ··· Running gil.ParallelFactorize.time_parallel ok [ 53.33%] ···· ========= ======== threads --------- -------- 2 95.9ms 4 209ms 8 474ms ========= ======== [ 60.00%] ··· Running gil.ParallelGroupbyMethods.time_loop ok [ 60.00%] ···· ========= ======== ======== ======== ======== ======== ======== ======== ======= -- method --------- ---------------------------------------------------------------------- threads count last max mean min prod sum var ========= ======== ======== ======== ======== ======== ======== ======== ======= 2 92.6ms 85.2ms 79.0ms 82.7ms 82.3ms 82.0ms 82.2ms 106ms 4 187ms 163ms 158ms 164ms 156ms 165ms 160ms 207ms 8 452ms 406ms 396ms 410ms 398ms 405ms 401ms 493ms ========= ======== ======== ======== ======== ======== ======== ======== ======= [ 66.67%] ··· Running gil.ParallelGroupbyMethods.time_parallel ok [ 66.67%] ···· ========= ======= 
======= ======= ======== ======= ======= ======= ======= -- method --------- ---------------------------------------------------------------- threads count last max mean min prod sum var ========= ======= ======= ======= ======== ======= ======= ======= ======= 2 150ms 104ms 105ms 83.9ms 104ms 103ms 103ms 111ms 4 305ms 235ms 227ms 227ms 227ms 231ms 219ms 271ms 8 740ms 466ms 481ms 504ms 493ms 515ms 484ms 709ms ========= ======= ======= ======= ======== ======= ======= ======= ======= [ 73.33%] ··· Running gil.ParallelGroups.time_get_groups ok [ 73.33%] ···· ========= ======= threads --------- ------- 2 1.41s 4 2.94s 8 5.76s ========= ======= [ 80.00%] ··· Running gil.ParallelKth.time_kth_smallest 291ms [ 86.67%] ··· Running gil.ParallelReadCSV.time_read_csv ok [ 86.67%] ···· ========== ======== dtype ---------- -------- float 554ms object 24.4ms datetime 561ms ========== ======== [ 93.33%] ··· Running gil.ParallelRolling.time_rolling ok [ 93.33%] ···· ================ ======== method ---------------- -------- rolling_median 239ms rolling_mean 24.7ms rolling_min 27.3ms rolling_max 29.1ms rolling_var 23.7ms rolling_skew 31.3ms rolling_kurt 29.7ms rolling_std 30.0ms ================ ======== [ 93.33%] ····· For parameters: 'rolling_median' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_median is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_mean' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_mean is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_min' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_min is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_max' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: 
FutureWarning: pd.rolling_max is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_var' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_var is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_skew' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_skew is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_kurt' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_kurt is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) For parameters: 'rolling_std' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/gil.py:194: FutureWarning: pd.rolling_std is deprecated for ndarrays and will be removed in a future version rolling[method](arr, win) [100.00%] ··· Running gil.ParallelTake1D.time_take1d ok [100.00%] ···· ========= ======== dtype --------- -------- int64 24.3ms float64 8.17ms ========= ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18675
2017-12-07T07:05:41Z
2017-12-11T08:54:15Z
2017-12-11T08:54:15Z
2017-12-12T03:20:47Z
ERR: ValueError when merging on incompatible dtypes
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 32b548e5f32f1..0baa0a307c988 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -189,6 +189,7 @@ Other API Changes - The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`) - Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`) - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:pr:`16672`) +- :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`) .. _whatsnew_0220.deprecations: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index bad7088a126cf..455c6f42ac74a 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -27,6 +27,7 @@ is_dtype_equal, is_bool, is_list_like, + is_datetimelike, _ensure_int64, _ensure_float64, _ensure_object, @@ -962,6 +963,33 @@ def _maybe_coerce_merge_keys(self): elif lib.infer_dtype(lk) == lib.infer_dtype(rk): pass + # Check if we are trying to merge on obviously + # incompatible dtypes GH 9780 + elif is_numeric_dtype(lk) and not is_numeric_dtype(rk): + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. If you wish to proceed " + "you should use pd.concat".format(lk_dtype=lk.dtype, + rk_dtype=rk.dtype)) + raise ValueError(msg) + elif not is_numeric_dtype(lk) and is_numeric_dtype(rk): + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. If you wish to proceed " + "you should use pd.concat".format(lk_dtype=lk.dtype, + rk_dtype=rk.dtype)) + raise ValueError(msg) + elif is_datetimelike(lk) and not is_datetimelike(rk): + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. 
If you wish to proceed " + "you should use pd.concat".format(lk_dtype=lk.dtype, + rk_dtype=rk.dtype)) + raise ValueError(msg) + elif not is_datetimelike(lk) and is_datetimelike(rk): + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. If you wish to proceed " + "you should use pd.concat".format(lk_dtype=lk.dtype, + rk_dtype=rk.dtype)) + raise ValueError(msg) + # Houston, we have a problem! # let's coerce to object if the dtypes aren't # categorical, otherwise coerce to the category diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 6f2d2ce2a8583..70b84f7a6225b 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -6,6 +6,7 @@ from numpy import nan import numpy as np import random +import re import pandas as pd from pandas.compat import lrange, lzip @@ -1370,30 +1371,47 @@ def f(): pytest.raises(NotImplementedError, f) -@pytest.fixture -def df(): - return DataFrame( - {'A': ['foo', 'bar'], - 'B': Series(['foo', 'bar']).astype('category'), - 'C': [1, 2], - 'D': [1.0, 2.0], - 'E': Series([1, 2], dtype='uint64'), - 'F': Series([1, 2], dtype='int32')}) - - class TestMergeDtypes(object): - def test_different(self, df): - - # we expect differences by kind - # to be ok, while other differences should return object - - left = df - for col in df.columns: - right = DataFrame({'A': df[col]}) + @pytest.mark.parametrize('right_vals', [ + ['foo', 'bar'], + Series(['foo', 'bar']).astype('category'), + [1, 2], + [1.0, 2.0], + Series([1, 2], dtype='uint64'), + Series([1, 2], dtype='int32') + ] + ) + def test_different(self, right_vals): + + left = DataFrame({'A': ['foo', 'bar'], + 'B': Series(['foo', 'bar']).astype('category'), + 'C': [1, 2], + 'D': [1.0, 2.0], + 'E': Series([1, 2], dtype='uint64'), + 'F': Series([1, 2], dtype='int32')}) + right = DataFrame({'A': right_vals}) + + # GH 9780 + # We allow merging on object and categorical cols and cast + # 
categorical cols to object + if (is_categorical_dtype(right['A'].dtype) or + is_object_dtype(right['A'].dtype)): result = pd.merge(left, right, on='A') assert is_object_dtype(result.A.dtype) + # GH 9780 + # We raise for merging on object col and int/float col and + # merging on categorical col and int/float col + else: + msg = ("You are trying to merge on " + "{lk_dtype} and {rk_dtype} columns. " + "If you wish to proceed you should use " + "pd.concat".format(lk_dtype=left['A'].dtype, + rk_dtype=right['A'].dtype)) + with tm.assert_raises_regex(ValueError, msg): + pd.merge(left, right, on='A') + @pytest.mark.parametrize('d1', [np.int64, np.int32, np.int16, np.int8, np.uint8]) @pytest.mark.parametrize('d2', [np.int64, np.float64, @@ -1462,6 +1480,42 @@ def test_merge_on_ints_floats_warning(self): result = B.merge(A, left_on='Y', right_on='X') assert_frame_equal(result, expected[['Y', 'X']]) + @pytest.mark.parametrize('df1_vals, df2_vals', [ + ([0, 1, 2], ["0", "1", "2"]), + ([0.0, 1.0, 2.0], ["0", "1", "2"]), + ([0, 1, 2], [u"0", u"1", u"2"]), + (pd.date_range('1/1/2011', periods=2, freq='D'), ['2011-01-01', + '2011-01-02']), + (pd.date_range('1/1/2011', periods=2, freq='D'), [0, 1]), + (pd.date_range('1/1/2011', periods=2, freq='D'), [0.0, 1.0]), + ([0, 1, 2], Series(['a', 'b', 'a']).astype('category')), + ([0.0, 1.0, 2.0], Series(['a', 'b', 'a']).astype('category')), + ]) + def test_merge_incompat_dtypes(self, df1_vals, df2_vals): + # GH 9780 + # Raise a ValueError when a user tries to merge on + # dtypes that are incompatible (e.g., obj and int/float) + + df1 = DataFrame({'A': df1_vals}) + df2 = DataFrame({'A': df2_vals}) + + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. 
If you wish to proceed " + "you should use pd.concat".format(lk_dtype=df1['A'].dtype, + rk_dtype=df2['A'].dtype)) + msg = re.escape(msg) + with tm.assert_raises_regex(ValueError, msg): + pd.merge(df1, df2, on=['A']) + + # Check that error still raised when swapping order of dataframes + msg = ("You are trying to merge on {lk_dtype} and " + "{rk_dtype} columns. If you wish to proceed " + "you should use pd.concat".format(lk_dtype=df2['A'].dtype, + rk_dtype=df1['A'].dtype)) + msg = re.escape(msg) + with tm.assert_raises_regex(ValueError, msg): + pd.merge(df2, df1, on=['A']) + @pytest.fixture def left():
- [ ] closes #9780 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18674
2017-12-06T23:50:23Z
2017-12-10T15:41:37Z
2017-12-10T15:41:37Z
2017-12-12T09:17:03Z
BUG: do not escape placeholder in to_latex()
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index d34c1f3535509..ca7c43d1f2b31 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -302,7 +302,7 @@ I/O - Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) - Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) -- +- Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`) - Plotting diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 8f25eb3af70cd..4b7f4de12bb65 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -967,7 +967,7 @@ def get_col_type(dtype): .replace('#', '\\#').replace('{', '\\{') .replace('}', '\\}').replace('~', '\\textasciitilde') .replace('^', '\\textasciicircum').replace('&', '\\&') - if x else '{}') for x in row] + if (x and x != '{}') else '{}') for x in row] else: crow = [x if x else '{}' for x in row] if self.bold_rows and self.fmt.index: diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 35ef5a1cf5c72..549a1e1ce316a 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -536,3 +536,35 @@ def test_to_latex_no_bold_rows(self): \end{tabular} """ assert observed == expected + + @pytest.mark.parametrize('name0', [None, 'named']) + @pytest.mark.parametrize('name1', [None, 'named']) + @pytest.mark.parametrize('axes', [[0], [1], [0, 1]]) + def test_to_latex_multiindex_names(self, name0, name1, axes): + # GH 18667 + names = [name0, name1] + mi = pd.MultiIndex.from_product([[1, 2], [3, 4]]) + df = pd.DataFrame(-1, index=mi.copy(), columns=mi.copy()) + for idx 
in axes: + df.axes[idx].names = names + + idx_names = tuple(n or '{}' for n in names) + idx_names_row = ('%s & %s & & & & \\\\\n' % idx_names + if (0 in axes and any(names)) else '') + placeholder = '{}' if any(names) and 1 in axes else ' ' + col_names = [n if (bool(n) and 1 in axes) else placeholder + for n in names] + observed = df.to_latex() + expected = r"""\begin{tabular}{llrrrr} +\toprule + & %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\ + & %s & 3 & 4 & 3 & 4 \\ +%s\midrule +1 & 3 & -1 & -1 & -1 & -1 \\ + & 4 & -1 & -1 & -1 & -1 \\ +2 & 3 & -1 & -1 & -1 & -1 \\ + & 4 & -1 & -1 & -1 & -1 \\ +\bottomrule +\end{tabular} +""" % tuple(list(col_names) + [idx_names_row]) + assert observed == expected
- [x] closes #18667 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18670
2017-12-06T21:16:52Z
2017-12-21T15:25:08Z
2017-12-21T15:25:07Z
2017-12-21T16:42:24Z
DEPR/CLN: Remove how keyword from df.rolling() etc.
diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 66e16808f6af9..49ac516af6d37 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -253,12 +253,6 @@ accept the following arguments: result is NA) - ``center``: boolean, whether to set the labels at the center (default is False) -.. warning:: - - The ``freq`` and ``how`` arguments were in the API prior to 0.18.0 changes. These are deprecated in the new API. You can simply resample the input prior to creating a window function. - - For example, instead of ``s.rolling(window=5,freq='D').max()`` to get the max value on a rolling 5 Day window, one could use ``s.resample('D').max().rolling(window=5).max()``, which first resamples the data to daily data, then provides a rolling 5 day window. - We can then call methods on these ``rolling`` objects. These return like-indexed objects: .. ipython:: python diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index d34c1f3535509..5b23f20b5651d 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -237,8 +237,8 @@ Removal of prior version deprecations/changes - The ``SparseList`` class has been removed (:issue:`14007`) - The ``pandas.io.wb`` and ``pandas.io.data`` stub modules have been removed (:issue:`13735`) - ``Categorical.from_array`` has been removed (:issue:`13854`) -- The ``freq`` parameter has been removed from the ``rolling``/``expanding``/``ewm`` methods of DataFrame - and Series (deprecated since v0.18). Instead, resample before calling the methods. (:issue:18601) +- The ``freq`` and ``how`` parameters have been removed from the ``rolling``/``expanding``/``ewm`` methods of DataFrame + and Series (deprecated since v0.18). Instead, resample before calling the methods. 
(:issue:18601 & :issue:18668) - ``DatetimeIndex.to_datetime``, ``Timestamp.to_datetime``, ``PeriodIndex.to_datetime``, and ``Index.to_datetime`` have been removed (:issue:`8254`, :issue:`14096`, :issue:`14113`) .. _whatsnew_0220.performance: diff --git a/pandas/core/window.py b/pandas/core/window.py index 807f8bfa12674..5ad8d20cc03e2 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -106,17 +106,17 @@ def validate(self): raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'") - def _convert_freq(self, how=None): + def _convert_freq(self): """ resample according to the how, return a new object """ obj = self._selected_obj index = None return obj, index - def _create_blocks(self, how): + def _create_blocks(self): """ split data into blocks & return conformed data """ - obj, index = self._convert_freq(how) + obj, index = self._convert_freq() if index is not None: index = self._on @@ -196,7 +196,7 @@ def _get_index(self, index=None): return index, index.asi8 return index, index - def _prep_values(self, values=None, kill_inf=True, how=None): + def _prep_values(self, values=None, kill_inf=True): if values is None: values = getattr(self._selected_obj, 'values', self._selected_obj) @@ -320,22 +320,10 @@ def aggregate(self, arg, *args, **kwargs): agg = aggregate _shared_docs['sum'] = dedent(""" - %(name)s sum - - Parameters - ---------- - how : string, default None - .. deprecated:: 0.18.0 - Method for down- or re-sampling""") + %(name)s sum""") _shared_docs['mean'] = dedent(""" - %(name)s mean - - Parameters - ---------- - how : string, default None - .. deprecated:: 0.18.0 - Method for down- or re-sampling""") + %(name)s mean""") class Window(_Window): @@ -549,7 +537,7 @@ def _pop_args(win_type, arg_names, kwargs): # GH #15662. `False` makes symmetric window, rather than periodic. 
return sig.get_window(win_type, window, False).astype(float) - def _apply_window(self, mean=True, how=None, **kwargs): + def _apply_window(self, mean=True, **kwargs): """ Applies a moving window of type ``window_type`` on the data. @@ -557,9 +545,6 @@ def _apply_window(self, mean=True, how=None, **kwargs): ---------- mean : boolean, default True If True computes weighted mean, else weighted sum - how : string, default to None - .. deprecated:: 0.18.0 - how to resample Returns ------- @@ -569,7 +554,7 @@ def _apply_window(self, mean=True, how=None, **kwargs): window = self._prep_window(**kwargs) center = self.center - blocks, obj, index = self._create_blocks(how=how) + blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: @@ -686,7 +671,7 @@ def __init__(self, obj, *args, **kwargs): cov = GroupByMixin._dispatch('cov', other=None, pairwise=None) def _apply(self, func, name, window=None, center=None, - check_minp=None, how=None, **kwargs): + check_minp=None, **kwargs): """ dispatch to apply; we are stripping all of the _apply kwargs and performing the original function call on the grouped object @@ -710,7 +695,7 @@ def _constructor(self): return Rolling def _apply(self, func, name=None, window=None, center=None, - check_minp=None, how=None, **kwargs): + check_minp=None, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. @@ -723,9 +708,6 @@ def _apply(self, func, name=None, window=None, center=None, window : int/array, default to _get_window() center : boolean, default to self.center check_minp : function, default to _use_window - how : string, default to None - .. 
deprecated:: 0.18.0 - how to resample Returns ------- @@ -739,7 +721,7 @@ def _apply(self, func, name=None, window=None, center=None, if check_minp is None: check_minp = _use_window - blocks, obj, index = self._create_blocks(how=how) + blocks, obj, index = self._create_blocks() index, indexi = self._get_index(index=index) results = [] for b in blocks: @@ -803,7 +785,7 @@ class _Rolling_and_Expanding(_Rolling): def count(self): - blocks, obj, index = self._create_blocks(how=None) + blocks, obj, index = self._create_blocks() index, indexi = self._get_index(index=index) window = self._get_window() @@ -849,29 +831,19 @@ def sum(self, *args, **kwargs): _shared_docs['max'] = dedent(""" %(name)s maximum + """) - Parameters - ---------- - how : string, default 'max' - .. deprecated:: 0.18.0 - Method for down- or re-sampling""") - - def max(self, how=None, *args, **kwargs): + def max(self, *args, **kwargs): nv.validate_window_func('max', args, kwargs) - return self._apply('roll_max', 'max', how=how, **kwargs) + return self._apply('roll_max', 'max', **kwargs) _shared_docs['min'] = dedent(""" %(name)s minimum + """) - Parameters - ---------- - how : string, default 'min' - .. deprecated:: 0.18.0 - Method for down- or re-sampling""") - - def min(self, how=None, *args, **kwargs): + def min(self, *args, **kwargs): nv.validate_window_func('min', args, kwargs) - return self._apply('roll_min', 'min', how=how, **kwargs) + return self._apply('roll_min', 'min', **kwargs) def mean(self, *args, **kwargs): nv.validate_window_func('mean', args, kwargs) @@ -879,15 +851,10 @@ def mean(self, *args, **kwargs): _shared_docs['median'] = dedent(""" %(name)s median + """) - Parameters - ---------- - how : string, default 'median' - .. 
deprecated:: 0.18.0 - Method for down- or re-sampling""") - - def median(self, how=None, **kwargs): - return self._apply('roll_median_c', 'median', how=how, **kwargs) + def median(self, **kwargs): + return self._apply('roll_median_c', 'median', **kwargs) _shared_docs['std'] = dedent(""" %(name)s standard deviation @@ -1709,23 +1676,20 @@ def aggregate(self, arg, *args, **kwargs): agg = aggregate - def _apply(self, func, how=None, **kwargs): + def _apply(self, func, **kwargs): """Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : string/callable to apply - how : string, default to None - .. deprecated:: 0.18.0 - how to resample Returns ------- y : type of input argument """ - blocks, obj, index = self._create_blocks(how=how) + blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index db94cd08b0050..661c7dfcc50fc 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -3019,7 +3019,7 @@ def test_rolling_max_gh6297(self): x = series.resample('D').max().rolling(window=1).max() tm.assert_series_equal(expected, x) - def test_rolling_max_how_resample(self): + def test_rolling_max_resample(self): indices = [datetime(1975, 1, i) for i in range(1, 6)] # So that we can have 3 datapoints on last day (4, 10, and 20) @@ -3040,17 +3040,17 @@ def test_rolling_max_how_resample(self): # Now specify median (10.0) expected = Series([0.0, 1.0, 2.0, 3.0, 10.0], index=[datetime(1975, 1, i, 0) for i in range(1, 6)]) - x = series.resample('D').median().rolling(window=1).max(how='median') + x = series.resample('D').median().rolling(window=1).max() tm.assert_series_equal(expected, x) # Now specify mean (4+10+20)/3 v = (4.0 + 10.0 + 20.0) / 3.0 expected = Series([0.0, 1.0, 2.0, 3.0, v], index=[datetime(1975, 1, i, 0) for i in range(1, 6)]) - x = 
series.resample('D').mean().rolling(window=1).max(how='mean') + x = series.resample('D').mean().rolling(window=1).max() tm.assert_series_equal(expected, x) - def test_rolling_min_how_resample(self): + def test_rolling_min_resample(self): indices = [datetime(1975, 1, i) for i in range(1, 6)] # So that we can have 3 datapoints on last day (4, 10, and 20) @@ -3068,7 +3068,7 @@ def test_rolling_min_how_resample(self): r = series.resample('D').min().rolling(window=1) tm.assert_series_equal(expected, r.min()) - def test_rolling_median_how_resample(self): + def test_rolling_median_resample(self): indices = [datetime(1975, 1, i) for i in range(1, 6)] # So that we can have 3 datapoints on last day (4, 10, and 20)
- [x] xref #18601 and #11603 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The ``how`` parameter of df.rolling/expanding/ewm related methods was deprecated in 0.18 (#11603). This PR removes the parameter from the code base. This PR is a continuation of #18601 (removed the ``freq`` parameter). Next up will be to remove ``pd.stats.*``).
https://api.github.com/repos/pandas-dev/pandas/pulls/18668
2017-12-06T18:34:35Z
2017-12-08T00:26:57Z
2017-12-08T00:26:57Z
2017-12-11T08:08:41Z
Handle "today" and "now" in cython instead of C
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index edc9c0f8f903d..fd76f3328c05b 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -527,7 +527,7 @@ void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, void pandas_timedelta_to_timedeltastruct(npy_timedelta val, PANDAS_DATETIMEUNIT fr, pandas_timedeltastruct *result) { - convert_timedelta_to_timedeltastruct(fr, val, result); + convert_timedelta_to_timedeltastruct(fr, val, result); } diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c index a047650f4c88d..2ea69e2ac1636 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/src/datetime/np_datetime_strings.c @@ -33,55 +33,6 @@ This file implements string parsing and creation for NumPy datetime. #include "np_datetime_strings.h" -/* Platform-specific time_t typedef */ -typedef time_t NPY_TIME_T; - -/* - * Wraps `localtime` functionality for multiple platforms. This - * converts a time value to a time structure in the local timezone. - * - * Returns 0 on success, -1 on failure. 
- */ -static int get_localtime(NPY_TIME_T *ts, struct tm *tms) { - char *func_name = "<unknown>"; -#if defined(_WIN32) -#if defined(_MSC_VER) && (_MSC_VER >= 1400) - if (localtime_s(tms, ts) != 0) { - func_name = "localtime_s"; - goto fail; - } -#elif defined(__GNUC__) && defined(NPY_MINGW_USE_CUSTOM_MSVCR) - if (_localtime64_s(tms, ts) != 0) { - func_name = "_localtime64_s"; - goto fail; - } -#else - struct tm *tms_tmp; - localtime_r(ts, tms_tmp); - if (tms_tmp == NULL) { - func_name = "localtime"; - goto fail; - } - memcpy(tms, tms_tmp, sizeof(struct tm)); -#endif -#else - if (localtime_r(ts, tms) == NULL) { - func_name = "localtime_r"; - goto fail; - } -#endif - - return 0; - -fail: - PyErr_Format(PyExc_OSError, - "Failed to use '%s' to convert " - "to a local time", - func_name); - return -1; -} - - /* * Parses (almost) standard ISO 8601 date strings. The differences are: * @@ -138,59 +89,6 @@ int parse_iso_8601_datetime(char *str, int len, out->month = 1; out->day = 1; - /* - * The string "today" means take today's date in local time, and - * convert it to a date representation. This date representation, if - * forced into a time unit, will be at midnight UTC. - * This is perhaps a little weird, but done so that the - * 'datetime64[D]' type produces the date you expect, rather than - * switching to an adjacent day depending on the current time and your - * timezone. - */ - if (len == 5 && tolower(str[0]) == 't' && tolower(str[1]) == 'o' && - tolower(str[2]) == 'd' && tolower(str[3]) == 'a' && - tolower(str[4]) == 'y') { - NPY_TIME_T rawtime = 0; - struct tm tm_; - - time(&rawtime); - if (get_localtime(&rawtime, &tm_) < 0) { - return -1; - } - out->year = tm_.tm_year + 1900; - out->month = tm_.tm_mon + 1; - out->day = tm_.tm_mday; - - /* - * Indicate that this was a special value, and - * is a date (unit 'D'). 
- */ - if (out_local != NULL) { - *out_local = 0; - } - - return 0; - } - - /* The string "now" resolves to the current UTC time */ - if (len == 3 && tolower(str[0]) == 'n' && tolower(str[1]) == 'o' && - tolower(str[2]) == 'w') { - NPY_TIME_T rawtime = 0; - - time(&rawtime); - - /* - * Indicate that this was a special value, and - * use 's' because the time() function has resolution - * seconds. - */ - if (out_local != NULL) { - *out_local = 0; - } - - return convert_datetime_to_datetimestruct(PANDAS_FR_s, rawtime, out); - } - substr = str; sublen = len; diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 342e282f28d3b..293e10d1934fa 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -208,6 +208,11 @@ def _test_parse_iso8601(object ts): obj = _TSObject() + if ts == 'now': + return Timestamp.utcnow() + elif ts == 'today': + return Timestamp.utcnow().normalize() + _string_to_dts(ts, &obj.dts, &out_local, &out_tzoffset) obj.value = dtstruct_to_dt64(&obj.dts) check_dts_bounds(&obj.dts) @@ -581,12 +586,13 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', elif is_string_object(val): # string - try: - if len(val) == 0 or val in nat_strings: - iresult[i] = NPY_NAT - continue + if len(val) == 0 or val in nat_strings: + iresult[i] = NPY_NAT + continue + + seen_string = 1 - seen_string = 1 + try: _string_to_dts(val, &dts, &out_local, &out_tzoffset) value = dtstruct_to_dt64(&dts) if out_local == 1: @@ -597,6 +603,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', except ValueError: # if requiring iso8601 strings, skip trying other formats if require_iso8601: + if _parse_today_now(val, &iresult[i]): + continue if is_coerce: iresult[i] = NPY_NAT continue @@ -611,6 +619,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', py_dt = parse_datetime_string(val, dayfirst=dayfirst, yearfirst=yearfirst) except Exception: + if _parse_today_now(val, &iresult[i]): + continue if is_coerce: iresult[i] = NPY_NAT 
continue @@ -706,6 +716,19 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', return oresult +cdef inline bint _parse_today_now(str val, int64_t* iresult): + # We delay this check for as long as possible + # because it catches relatively rare cases + if val == 'now': + # Note: this is *not* the same as Timestamp('now') + iresult[0] = Timestamp.utcnow().value + return True + elif val == 'today': + # Note: this is *not* the same as Timestamp('today') + iresult[0] = Timestamp.utcnow().normalize().value + return True + return False + # ---------------------------------------------------------------------- # Some general helper functions
ATM the `Timestamp` constructor (specifically `tslibs.conversion.convert_str_to_tsobject`) catches "now" and "today" before passing strings to `_string_to_dts`, which eventually dispatches to `np_datetime_strings`. By contrast, `to_datetime` (specifically `tslib.array_to_datetime`) does not handle "now" or "today" internally. This PR makes `array_to_datetime` handle "now" and "today" internally, removing a big chunk of np_datetime_strings in the process. NB: This does _not_ change the fact that `to_datetime("today") != Timestamp("today")`, just makes this fact more obvious. My hope is that making this obvious will make it easier to change/fix in the future. asv results are as usual all over the place, but if anything look slightly faster.
https://api.github.com/repos/pandas-dev/pandas/pulls/18666
2017-12-06T17:34:48Z
2017-12-09T15:50:16Z
2017-12-09T15:50:16Z
2017-12-10T02:31:00Z
DOC: added a reference to DataFrame assign in concatenate section of merging
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index ebade853313ab..4d9746eed0f0b 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -323,6 +323,13 @@ the name of the ``Series``. labels=['df1', 's1'], vertical=False); plt.close('all'); +.. note:: + + Since we're concatenating a ``Series`` to a ``DataFrame``, we could have + achieved the same result with :meth:`DataFrame.assign`. To concatenate an + arbitrary number of pandas objects (``DataFrame`` or ``Series``), use + ``concat``. + If unnamed ``Series`` are passed they will be numbered consecutively. .. ipython:: python diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 879b245af49cd..8d6a3dc72163e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -677,7 +677,7 @@ Documentation Changes Rewrote some sentences for greater clarity, added more dynamic references to functions, methods and classes. (:issue:`18941`, :issue:`18948`, :issue:`18973`, :issue:`19017`) - +- Added a reference to :func:`DataFrame.assign` in the concatenate section of the merging documentation (:issue:`18665`) .. _whatsnew_0230.bug_fixes:
Because of various stack overflow answers we are directed to the merging page where a link to DataFrame.assign would be better. Adding the reference here will make people's code better. At least it would have for me. - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry For tests, we'll see with CI.
https://api.github.com/repos/pandas-dev/pandas/pulls/18665
2017-12-06T17:24:35Z
2018-02-20T23:54:15Z
2018-02-20T23:54:15Z
2018-02-20T23:54:23Z
CLN: cleanup libs cimports, remove is_timestamp
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index f1bd03a097cd0..8d4f2af19701a 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -1,24 +1,25 @@ # Copyright (c) 2012, Lambda Foundry, Inc. # See LICENSE for the license +import os +import sys +import time +import warnings + +from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE from libc.stdio cimport fopen, fclose from libc.stdlib cimport malloc, free from libc.string cimport strncpy, strlen, strcmp, strcasecmp -cimport libc.stdio as stdio -import warnings -from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE +cimport cython +from cython cimport Py_ssize_t + from cpython cimport (PyObject, PyBytes_FromString, PyBytes_AsString, PyBytes_Check, PyUnicode_Check, PyUnicode_AsUTF8String, PyErr_Occurred, PyErr_Fetch) from cpython.ref cimport Py_XDECREF -from pandas.errors import (ParserError, DtypeWarning, - EmptyDataError, ParserWarning) -# Import CParserError as alias of ParserError for backwards compatibility. -# Ultimately, we want to remove this import. See gh-12665 and gh-14479. 
-CParserError = ParserError cdef extern from "Python.h": object PyUnicode_FromString(char *v) @@ -29,15 +30,24 @@ cdef extern from "Python.h": cdef extern from "stdlib.h": void memcpy(void *dst, void *src, size_t n) -cimport cython -cimport numpy as cnp +import numpy as np +cimport numpy as cnp from numpy cimport ndarray, uint8_t, uint64_t, int64_t +cnp.import_array() -import numpy as np -cimport util +from util cimport UINT64_MAX, INT64_MAX, INT64_MIN +import lib + +from khash cimport ( + khiter_t, + kh_str_t, kh_init_str, kh_put_str, kh_exist_str, + kh_get_str, kh_destroy_str, + kh_float64_t, kh_get_float64, kh_destroy_float64, + kh_put_float64, kh_init_float64, + kh_strbox_t, kh_put_strbox, kh_get_strbox, kh_init_strbox, + kh_destroy_strbox) -import pandas._libs.lib as lib import pandas.compat as compat from pandas.core.dtypes.common import ( is_categorical_dtype, CategoricalDtype, @@ -47,55 +57,44 @@ from pandas.core.dtypes.common import ( pandas_dtype) from pandas.core.categorical import Categorical from pandas.core.dtypes.concat import union_categoricals - import pandas.io.common as com -import time -import os - -cnp.import_array() +from pandas.errors import (ParserError, DtypeWarning, + EmptyDataError, ParserWarning) -from khash cimport ( - khiter_t, - kh_str_t, kh_init_str, kh_put_str, kh_exist_str, - kh_get_str, kh_destroy_str, - kh_float64_t, kh_get_float64, kh_destroy_float64, - kh_put_float64, kh_init_float64, - kh_strbox_t, kh_put_strbox, kh_get_strbox, kh_init_strbox, - kh_destroy_strbox) +# Import CParserError as alias of ParserError for backwards compatibility. +# Ultimately, we want to remove this import. See gh-12665 and gh-14479. 
+CParserError = ParserError -import sys cdef bint PY3 = (sys.version_info[0] >= 3) cdef double INF = <double> np.inf cdef double NEGINF = -INF -cdef extern from "headers/stdint.h": - enum: UINT8_MAX - enum: UINT16_MAX - enum: UINT32_MAX - enum: UINT64_MAX - enum: INT8_MIN - enum: INT8_MAX - enum: INT16_MIN - enum: INT16_MAX - enum: INT32_MAX - enum: INT32_MIN - enum: INT64_MAX - enum: INT64_MIN - -cdef extern from "headers/portable.h": - pass cdef extern from "errno.h": int errno +cdef extern from "headers/portable.h": + # I *think* this is here so that strcasecmp is defined on Windows + # so we don't get + # `parsers.obj : error LNK2001: unresolved external symbol strcasecmp` + # in Appveyor. + # In a sane world, the `from libc.string cimport` above would fail + # loudly. + pass + try: basestring except NameError: basestring = str +cdef extern from "src/numpy_helper.h": + object sarr_from_data(cnp.dtype, int length, void* data) + void transfer_object_column(char *dst, char *src, size_t stride, + size_t length) + cdef extern from "parser/tokenizer.h": ctypedef enum ParserState: @@ -2360,7 +2359,7 @@ def _to_structured_array(dict columns, object names, object usecols): # We own the data. 
buf = <char*> malloc(length * stride) - recs = util.sarr_from_data(dt, length, buf) + recs = sarr_from_data(dt, length, buf) assert(recs.flags.owndata) for i in range(nfields): @@ -2385,7 +2384,7 @@ cdef _fill_structured_column(char *dst, char* src, int64_t elsize, int64_t i if incref: - util.transfer_object_column(dst, src, stride, length) + transfer_object_column(dst, src, stride, length) else: for i in range(length): memcpy(dst, src, elsize) diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index e5fe90aa81f7d..be6591a118dc5 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -18,9 +18,6 @@ cdef extern from "numpy_helper.h": object get_value_1d(ndarray, Py_ssize_t) char *get_c_string(object) except NULL object char_to_string(char*) - void transfer_object_column(char *dst, char *src, size_t stride, - size_t length) - object sarr_from_data(cnp.dtype, int length, void* data) object unbox_if_zerodim(object arr) ctypedef fused numeric: @@ -100,8 +97,6 @@ cdef inline set_value_at(ndarray arr, object loc, object value): set_value_at_unsafe(arr, loc, value) -cdef inline int is_contiguous(ndarray arr): - return cnp.PyArray_CHKFLAGS(arr, cnp.NPY_C_CONTIGUOUS) cdef inline is_array(object o): return cnp.PyArray_Check(o) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 5e08df7dfe27b..342e282f28d3b 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- # cython: profile=False -# cython: linetrace=False -# distutils: define_macros=CYTHON_TRACE=0 -# distutils: define_macros=CYTHON_TRACE_NOGIL=0 cimport numpy as np from numpy cimport int64_t, ndarray, float64_t @@ -10,15 +7,13 @@ import numpy as np np.import_array() -from cpython cimport PyTypeObject, PyFloat_Check - -cdef extern from "Python.h": - cdef PyTypeObject *Py_TYPE(object) +from cpython cimport PyFloat_Check from util cimport (is_integer_object, is_float_object, is_string_object, is_datetime64_object) from 
cpython.datetime cimport (PyDateTime_Check, PyDate_Check, + PyDateTime_CheckExact, PyDateTime_IMPORT, timedelta, datetime, date) # import datetime C API @@ -47,10 +42,8 @@ UTC = pytz.utc from tslibs.timedeltas cimport cast_from_unit from tslibs.timedeltas import Timedelta -from tslibs.timezones cimport ( - is_utc, is_tzlocal, is_fixed_offset, - treat_tz_as_pytz, - get_dst_info) +from tslibs.timezones cimport (is_utc, is_tzlocal, is_fixed_offset, + treat_tz_as_pytz, get_dst_info) from tslibs.conversion cimport (tz_convert_single, _TSObject, convert_datetime_to_tsobject, get_datetime64_nanos) @@ -204,13 +197,6 @@ def ints_to_pytimedelta(ndarray[int64_t] arr, box=False): return result -cdef PyTypeObject* ts_type = <PyTypeObject*> Timestamp - - -cdef inline bint is_timestamp(object o): - return Py_TYPE(o) == ts_type # isinstance(o, Timestamp) - - def _test_parse_iso8601(object ts): """ TESTING ONLY: Parse string into Timestamp using iso8601 parser. Used @@ -333,14 +319,6 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None, return result -# const for parsers - -_MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', - 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] -_MONTH_NUMBERS = {k: i for i, k in enumerate(_MONTHS)} -_MONTH_ALIASES = {(k + 1): v for k, v in enumerate(_MONTHS)} - - cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'): """ convert the ndarray according to the unit @@ -360,7 +338,7 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'): bint is_ignore = errors=='ignore' bint is_coerce = errors=='coerce' bint is_raise = errors=='raise' - bint need_to_iterate=True + bint need_to_iterate = True ndarray[int64_t] iresult ndarray[object] oresult @@ -383,7 +361,7 @@ cpdef array_with_unit_to_datetime(ndarray values, unit, errors='coerce'): mask = iresult == iNaT iresult[mask] = 0 fvalues = iresult.astype('f8') * m - need_to_iterate=False + need_to_iterate = False except: pass @@ -394,7 +372,7 @@ cpdef 
array_with_unit_to_datetime(ndarray values, unit, errors='coerce'): or (fvalues > _NS_UPPER_BOUND).any()): raise OutOfBoundsDatetime( "cannot convert input with unit '{0}'".format(unit)) - result = (iresult *m).astype('M8[ns]') + result = (iresult * m).astype('M8[ns]') iresult = result.view('i8') iresult[mask] = iNaT return result @@ -545,7 +523,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', 'utc=True') else: iresult[i] = pydatetime_to_dt64(val, &dts) - if is_timestamp(val): + if not PyDateTime_CheckExact(val): + # i.e. a Timestamp object iresult[i] += val.nanosecond try: check_dts_bounds(&dts) @@ -752,11 +731,15 @@ cpdef normalize_date(object dt): ------- normalized : datetime.datetime or Timestamp """ - if is_timestamp(dt): - return dt.replace(hour=0, minute=0, second=0, microsecond=0, - nanosecond=0) - elif PyDateTime_Check(dt): - return dt.replace(hour=0, minute=0, second=0, microsecond=0) + if PyDateTime_Check(dt): + if not PyDateTime_CheckExact(dt): + # i.e. a Timestamp object + return dt.replace(hour=0, minute=0, second=0, microsecond=0, + nanosecond=0) + else: + # regular datetime object + return dt.replace(hour=0, minute=0, second=0, microsecond=0) + # TODO: Make sure DST crossing is handled correctly here elif PyDate_Check(dt): return datetime(dt.year, dt.month, dt.day) else: diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 4f6b2f52d4d24..c12a15b71487b 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -32,10 +32,10 @@ from util cimport (is_string_object, is_integer_object, is_float_object) from timedeltas cimport cast_from_unit -from timezones cimport ( - is_utc, is_tzlocal, is_fixed_offset, - treat_tz_as_dateutil, treat_tz_as_pytz, - get_utcoffset, get_dst_info, get_timezone, maybe_get_tz) +from timezones cimport (is_utc, is_tzlocal, is_fixed_offset, + treat_tz_as_dateutil, treat_tz_as_pytz, + get_utcoffset, get_dst_info, + get_timezone, 
maybe_get_tz) from parsing import parse_datetime_string from nattype import nat_strings, NaT diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index b321ca1659682..950677b3b53db 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- # cython: profile=False -# cython: linetrace=False -# distutils: define_macros=CYTHON_TRACE=0 -# distutils: define_macros=CYTHON_TRACE_NOGIL=0 """ Functions for accessing attributes of Timestamp/datetime64/datetime-like objects and arrays diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 90882eefd9f67..8ce1d9cdf2158 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- # cython: profile=False -# cython: linetrace=False -# distutils: define_macros=CYTHON_TRACE=0 -# distutils: define_macros=CYTHON_TRACE_NOGIL=0 """ Parsing functions for datetime and datetime-like strings. 
""" diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 1c20dbe7f8fc9..d2b518c74a1e3 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -10,20 +10,18 @@ np.import_array() from util cimport is_string_object, get_nat -from pandas._libs.khash cimport ( - khiter_t, - kh_destroy_int64, kh_put_int64, - kh_init_int64, kh_int64_t, - kh_resize_int64, kh_get_int64) +from pandas._libs.khash cimport (khiter_t, + kh_destroy_int64, kh_put_int64, + kh_init_int64, kh_int64_t, + kh_resize_int64, kh_get_int64) from cpython.datetime cimport datetime from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct) from frequencies cimport get_freq_code -from timezones cimport ( - is_utc, is_tzlocal, - maybe_get_tz, get_dst_info, get_utcoffset) +from timezones cimport (is_utc, is_tzlocal, + maybe_get_tz, get_dst_info, get_utcoffset) from fields import build_field_sarray from conversion import tz_convert diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index d326f2cb68f24..de9f75344b2bf 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- # cython: profile=False -# cython: linetrace=False -# distutils: define_macros=CYTHON_TRACE=0 -# distutils: define_macros=CYTHON_TRACE_NOGIL=0 cimport cython from cython cimport Py_ssize_t diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 67ed725436581..9cbcfa4f46008 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -5,7 +5,8 @@ import pandas.util.testing as tm import pandas.core.indexes.period as period from pandas.compat import lrange -from pandas.tseries.frequencies import get_freq, MONTHS +from pandas.tseries.frequencies import get_freq +from pandas._libs.tslibs.resolution import _MONTHS as MONTHS from pandas._libs.tslibs.period import 
period_ordinal, period_asfreq from pandas import (PeriodIndex, Period, DatetimeIndex, Timestamp, Series, date_range, to_datetime, period_range) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 10c3c0ea507c1..1fd6befd64f57 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -22,8 +22,7 @@ from pandas.core.base import SpecificationError, AbstractMethodError from pandas.errors import UnsupportedFunctionCall from pandas.core.groupby import DataError -from pandas._libs.tslibs.resolution import DAYS -from pandas.tseries.frequencies import MONTHS +from pandas._libs.tslibs.resolution import DAYS, _MONTHS as MONTHS from pandas.tseries.frequencies import to_offset from pandas.core.indexes.datetimes import date_range from pandas.tseries.offsets import Minute, BDay diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index fef88587a7282..460ad3f5591fc 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -17,7 +17,6 @@ from pandas.util._decorators import deprecate_kwarg import pandas.tseries.offsets as offsets -from pandas._libs import tslib from pandas._libs.tslib import Timedelta from pandas._libs.tslibs.frequencies import ( # noqa get_freq_code, _base_and_stride, _period_str_to_code, @@ -26,7 +25,7 @@ from pandas._libs.tslibs.resolution import (Resolution, _FrequencyInferer, _TimedeltaFrequencyInferer) -from pandas._libs.tslibs.parsing import _get_rule_month +from pandas._libs.tslibs.parsing import _get_rule_month, _MONTH_NUMBERS from pytz import AmbiguousTimeError @@ -497,8 +496,8 @@ def _is_annual(rule): def _quarter_months_conform(source, target): - snum = _month_numbers[source] - tnum = _month_numbers[target] + snum = _MONTH_NUMBERS[source] + tnum = _MONTH_NUMBERS[target] return snum % 3 == tnum % 3 @@ -515,7 +514,3 @@ def _is_monthly(rule): def _is_weekly(rule): rule = rule.upper() return rule == 'W' or rule.startswith('W-') - - -MONTHS = tslib._MONTHS 
-_month_numbers = tslib._MONTH_NUMBERS
- cleanup imports in `_libs.parsers`, including some unused - a couple of functions from util are used exactly once and only in parsers. Get those directly from numpy_helper instead of from util - remove unused `util.is_contiguous` - remove a bunch of distutils shebang-like declarations that are unnecessary and are a pain for when we _do_ want to run tests with coverage enabled. - remove `tslib.is_timestamp` (will be helpful for eventual goal of "fixing" Timestamp classmethods) - remove some leftover constants from tslib, update imports appropriately
https://api.github.com/repos/pandas-dev/pandas/pulls/18663
2017-12-06T16:49:07Z
2017-12-08T01:31:49Z
2017-12-08T01:31:49Z
2017-12-08T19:38:10Z
TST/DOC: test pyarrow tz data + doc / enable cross compat tests for pyarrow/fastparquet
diff --git a/doc/source/io.rst b/doc/source/io.rst index f96e33dbf9882..49e264c8562d0 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4522,6 +4522,7 @@ See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and .. note:: These engines are very similar and should read/write nearly identical parquet format files. + Currently ``pyarrow`` does not support timedelta data, and ``fastparquet`` does not support timezone aware datetimes (they are coerced to UTC). These libraries differ by having different underlying dependencies (``fastparquet`` by using ``numba``, while ``pyarrow`` uses a c-library). .. ipython:: python @@ -4548,8 +4549,8 @@ Read from a parquet file. .. ipython:: python - result = pd.read_parquet('example_pa.parquet', engine='pyarrow') result = pd.read_parquet('example_fp.parquet', engine='fastparquet') + result = pd.read_parquet('example_pa.parquet', engine='pyarrow') result.dtypes diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index e7bcff22371b7..c743c5d9fecd5 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -7,7 +7,7 @@ import numpy as np import pandas as pd -from pandas.compat import PY3 +from pandas.compat import PY3, is_platform_windows from pandas.io.parquet import (to_parquet, read_parquet, get_engine, PyArrowImpl, FastParquetImpl) from pandas.util import testing as tm @@ -80,16 +80,36 @@ def df_compat(): def df_cross_compat(): df = pd.DataFrame({'a': list('abc'), 'b': list(range(1, 4)), - 'c': np.arange(3, 6).astype('u1'), + # 'c': np.arange(3, 6).astype('u1'), 'd': np.arange(4.0, 7.0, dtype='float64'), 'e': [True, False, True], 'f': pd.date_range('20130101', periods=3), - 'g': pd.date_range('20130101', periods=3, - tz='US/Eastern'), - 'h': pd.date_range('20130101', periods=3, freq='ns')}) + # 'g': pd.date_range('20130101', periods=3, + # tz='US/Eastern'), + # 'h': pd.date_range('20130101', periods=3, freq='ns') + }) return df +@pytest.fixture 
+def df_full(): + return pd.DataFrame( + {'string': list('abc'), + 'string_with_nan': ['a', np.nan, 'c'], + 'string_with_none': ['a', None, 'c'], + 'bytes': [b'foo', b'bar', b'baz'], + 'unicode': [u'foo', u'bar', u'baz'], + 'int': list(range(1, 4)), + 'uint': np.arange(3, 6).astype('u1'), + 'float': np.arange(4.0, 7.0, dtype='float64'), + 'float_with_nan': [2., np.nan, 3.], + 'bool': [True, False, True], + 'datetime': pd.date_range('20130101', periods=3), + 'datetime_with_nat': [pd.Timestamp('20130101'), + pd.NaT, + pd.Timestamp('20130103')]}) + + def test_invalid_engine(df_compat): with pytest.raises(ValueError): @@ -154,7 +174,8 @@ def test_options_get_engine(fp, pa): assert isinstance(get_engine('fastparquet'), FastParquetImpl) -@pytest.mark.xfail(reason="fp does not ignore pa index __index_level_0__") +@pytest.mark.xfail(is_platform_windows(), + reason="reading pa metadata failing on Windows") def test_cross_engine_pa_fp(df_cross_compat, pa, fp): # cross-compat with differing reading/writing engines @@ -165,8 +186,10 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp): result = read_parquet(path, engine=fp) tm.assert_frame_equal(result, df) + result = read_parquet(path, engine=fp, columns=['a', 'd']) + tm.assert_frame_equal(result, df[['a', 'd']]) + -@pytest.mark.xfail(reason="pyarrow reading fp in some cases") def test_cross_engine_fp_pa(df_cross_compat, pa, fp): # cross-compat with differing reading/writing engines @@ -177,6 +200,9 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): result = read_parquet(path, engine=pa) tm.assert_frame_equal(result, df) + result = read_parquet(path, engine=pa, columns=['a', 'd']) + tm.assert_frame_equal(result, df[['a', 'd']]) + class Base(object): @@ -300,27 +326,31 @@ def test_read_columns(self, engine): class TestParquetPyArrow(Base): - def test_basic(self, pa): + def test_basic(self, pa, df_full): - df = pd.DataFrame({'string': list('abc'), - 'string_with_nan': ['a', np.nan, 'c'], - 'string_with_none': ['a', 
None, 'c'], - 'bytes': [b'foo', b'bar', b'baz'], - 'unicode': [u'foo', u'bar', u'baz'], - 'int': list(range(1, 4)), - 'uint': np.arange(3, 6).astype('u1'), - 'float': np.arange(4.0, 7.0, dtype='float64'), - 'float_with_nan': [2., np.nan, 3.], - 'bool': [True, False, True], - 'bool_with_none': [True, None, True], - 'datetime_ns': pd.date_range('20130101', periods=3), - 'datetime_with_nat': [pd.Timestamp('20130101'), - pd.NaT, - pd.Timestamp('20130103')] - }) + df = df_full + + # additional supported types for pyarrow + import pyarrow + if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'): + df['datetime_tz'] = pd.date_range('20130101', periods=3, + tz='Europe/Brussels') + df['bool_with_none'] = [True, None, True] self.check_round_trip(df, pa) + @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)") + def test_basic_subset_columns(self, pa, df_full): + # GH18628 + + df = df_full + # additional supported types for pyarrow + df['datetime_tz'] = pd.date_range('20130101', periods=3, + tz='Europe/Brussels') + + self.check_round_trip(df, pa, expected=df[['string', 'int']], + read_kwargs={'columns': ['string', 'int']}) + def test_duplicate_columns(self, pa): # not currently able to handle duplicate columns @@ -363,25 +393,12 @@ def test_categorical_unsupported(self, pa_lt_070): class TestParquetFastParquet(Base): - def test_basic(self, fp): - - df = pd.DataFrame( - {'string': list('abc'), - 'string_with_nan': ['a', np.nan, 'c'], - 'string_with_none': ['a', None, 'c'], - 'bytes': [b'foo', b'bar', b'baz'], - 'unicode': [u'foo', u'bar', u'baz'], - 'int': list(range(1, 4)), - 'uint': np.arange(3, 6).astype('u1'), - 'float': np.arange(4.0, 7.0, dtype='float64'), - 'float_with_nan': [2., np.nan, 3.], - 'bool': [True, False, True], - 'datetime': pd.date_range('20130101', periods=3), - 'datetime_with_nat': [pd.Timestamp('20130101'), - pd.NaT, - pd.Timestamp('20130103')], - 'timedelta': pd.timedelta_range('1 day', periods=3), - }) + def test_basic(self, fp, 
df_full): + + df = df_full + + # additional supported types for fastparquet + df['timedelta'] = pd.timedelta_range('1 day', periods=3) self.check_round_trip(df, fp, write_kwargs={'compression': None})
Commits: TST: add parquet test with tz datetime data for pyarrow + clean-up basic data types tests: make common dataframe with types supported by both pyarrow and fastparquet DOC: document differences between pyarrow and fastparquet in supported data types TST: enable pyarrow/fastparquet cross compatibility tests on smaller subset of dataframe Closes https://github.com/pandas-dev/pandas/issues/17448 Also adds a test for https://github.com/pandas-dev/pandas/issues/18628
https://api.github.com/repos/pandas-dev/pandas/pulls/18662
2017-12-06T15:04:58Z
2017-12-10T14:41:15Z
2017-12-10T14:41:14Z
2017-12-10T15:28:16Z
DOC: temporary remove pyarrow example of reading subset columns
diff --git a/doc/source/io.rst b/doc/source/io.rst index f96e33dbf9882..65205c57a1ab6 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4557,7 +4557,6 @@ Read only certain columns of a parquet file. .. ipython:: python - result = pd.read_parquet('example_pa.parquet', engine='pyarrow', columns=['a', 'b']) result = pd.read_parquet('example_fp.parquet', engine='fastparquet', columns=['a', 'b']) result.dtypes
xref #18628. Until pyarrow 0.8 is released, I propose to just show the example with fastparquet. Once pyarrow 0.8 is released, we can revert this change.
https://api.github.com/repos/pandas-dev/pandas/pulls/18661
2017-12-06T13:52:54Z
2017-12-07T01:20:48Z
2017-12-07T01:20:48Z
2017-12-11T20:08:47Z
added option keep=False to nlargests/nsmallest
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ab7f18bce47d3..53d8aa5946845 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -138,6 +138,8 @@ Other Enhancements - :func:`Series` / :func:`DataFrame` tab completion also returns identifiers in the first level of a :func:`MultiIndex`. (:issue:`16326`) - :func:`read_excel()` has gained the ``nrows`` parameter (:issue:`16645`) - :func:``DataFrame.to_json`` and ``Series.to_json`` now accept an ``index`` argument which allows the user to exclude the index from the JSON output (:issue:`17394`) +- :func:`Series` / :func:`DataFrame` methods :func:`nlargest` / :func:`nsmallest` now accept the value 'all' for the `keep` argument. This keeps all ties for the nth largests/smallest value (:issue:`16818`). + .. _whatsnew_0220.api_breaking: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 0ceb8966fd3c8..099fec74d266c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -910,8 +910,8 @@ def __init__(self, obj, n, keep): self.n = n self.keep = keep - if self.keep not in ('first', 'last'): - raise ValueError('keep must be either "first", "last"') + if self.keep not in ('first', 'last', 'all'): + raise ValueError('keep must be either "first", "last", or "all"') def nlargest(self): return self.compute('nlargest') @@ -979,7 +979,11 @@ def compute(self, method): kth_val = algos.kth_smallest(arr.copy(), n - 1) ns, = np.nonzero(arr <= kth_val) - inds = ns[arr[ns].argsort(kind='mergesort')][:n] + inds = ns[arr[ns].argsort(kind='mergesort')] + + if self.keep != 'all': + inds = inds[:n] + if self.keep == 'last': # reverse indices inds = narr - 1 - inds diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5f323d0f040bc..d1441da0d810f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3769,10 +3769,13 @@ def nlargest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column 
name or names to order by - keep : {'first', 'last'}, default 'first' + keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - - ``first`` : take the first occurrence. - - ``last`` : take the last occurrence. + - 'first' : take the first occurrence. + - 'last' : take the last occurrence. + - 'all' : keep all ties of nth largest value. + + .. versionadded:: 0.22.0 Returns ------- @@ -3780,14 +3783,28 @@ def nlargest(self, n, columns, keep='first'): Examples -------- - >>> df = DataFrame({'a': [1, 10, 8, 11, -1], - ... 'b': list('abdce'), - ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) - >>> df.nlargest(3, 'a') + >>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2], + ... 'b': list('abdcef'), + ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]}) + + >>> df.nlargest(3, 'a', keep='first') + a b c + 3 11 c 3 + 1 10 b 2 + 2 8 d NaN + + >>> df.nlargest(3, 'a', keep='last') + a b c + 3 11 c 3 + 1 10 b 2 + 4 8 e 4 + + >>> df.nlargest(3, 'a', keep='all') a b c 3 11 c 3 1 10 b 2 2 8 d NaN + 4 8 e 4 """ return algorithms.SelectNFrame(self, n=n, @@ -3804,10 +3821,13 @@ def nsmallest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last'}, default 'first' + keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - - ``first`` : take the first occurrence. - - ``last`` : take the last occurrence. + - 'first' : take the first occurrence. + - 'last' : take the last occurrence. + - 'all' : keep all ties of nth smallest value. + + .. versionadded:: 0.22.0 Returns ------- @@ -3815,14 +3835,28 @@ def nsmallest(self, n, columns, keep='first'): Examples -------- - >>> df = DataFrame({'a': [1, 10, 8, 11, -1], - ... 'b': list('abdce'), - ... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]}) - >>> df.nsmallest(3, 'a') - a b c - 4 -1 e 4 - 0 1 a 1 - 2 8 d NaN + >>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2], + ... 'b': list('abdcef'), + ... 
'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]}) + + >>> df.nsmallest(3, 'a', keep='first') + a b c + 0 1 a 1.0 + 5 2 f 9.0 + 2 8 d NaN + + >>> df.nsmallest(3, 'a', keep='last') + a b c + 0 1 a 1.0 + 5 2 f 9.0 + 4 8 e 4.0 + + >>> df.nsmallest(3, 'a', keep='all') + a b c + 0 1 a 1.0 + 5 2 f 9.0 + 2 8 d NaN + 4 8 e 4.0 """ return algorithms.SelectNFrame(self, n=n, diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 4bba6d7601ae8..c038d76879ce1 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -2202,6 +2202,22 @@ def test_n_duplicate_index(self, df_duplicates, n, order): expected = df.sort_values(order, ascending=False).head(n) tm.assert_frame_equal(result, expected) + def test_keep_all_ties(self): + # GH 16818 + df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3], + 'b': [10, 9, 8, 7, 5, 50, 10, 20]}) + result = df.nlargest(4, 'a', keep='all') + expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3, + 5: 3, 6: 3, 7: 3}, + 'b': {0: 10, 1: 9, 2: 8, 4: 5, + 5: 50, 6: 10, 7: 20}}) + tm.assert_frame_equal(result, expected) + + result = df.nsmallest(2, 'a', keep='all') + expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3}, + 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}}) + tm.assert_frame_equal(result, expected) + def test_series_broadcasting(self): # smoke test for numpy warnings # GH 16378, GH 16306 diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 289b5c01c1263..2e4a84f8bcd6b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1867,6 +1867,18 @@ def test_n(self, n): expected = s.sort_values().head(n) assert_series_equal(result, expected) + def test_keep_all_ties(self): + # GH 16818 + s = Series([10, 9, 8, 7, 7, 7, 7, 6]) + result = s.nlargest(4, keep='all') + expected = Series([10, 9, 8, 7, 7, 7, 7]) + print(result, expected) + assert_series_equal(result, expected) + + result = s.nsmallest(2, 
keep='all') + expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6]) + assert_series_equal(result, expected) + class TestCategoricalSeriesAnalytics(object):
- [x] closes #16818 - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry I made a minor change to `nlargest/nsmallest` to keep all the values of the last `n` when `keep=False`
https://api.github.com/repos/pandas-dev/pandas/pulls/18656
2017-12-06T03:19:06Z
2018-06-26T10:36:51Z
null
2018-06-27T07:14:55Z
remove datetime.pxd
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index 4c4449fb3e291..aa7aa4b528194 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -42,7 +42,8 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'): bytes data, k uint8_t *kb uint64_t *lens - char **vecs, *cdata + char **vecs + char *cdata object val k = <bytes>key.encode(encoding) diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd deleted file mode 100644 index d919fca09c006..0000000000000 --- a/pandas/_libs/src/datetime.pxd +++ /dev/null @@ -1,64 +0,0 @@ -# cython: profile=False -from numpy cimport int64_t, npy_int64, npy_int32 - -from cpython cimport PyUnicode_Check, PyUnicode_AsASCIIString - - -cdef extern from "numpy/ndarrayobject.h": - ctypedef int64_t npy_datetime - -cdef extern from "datetime/np_datetime.h": - ctypedef enum PANDAS_DATETIMEUNIT: - PANDAS_FR_Y - PANDAS_FR_M - PANDAS_FR_W - PANDAS_FR_D - PANDAS_FR_B - PANDAS_FR_h - PANDAS_FR_m - PANDAS_FR_s - PANDAS_FR_ms - PANDAS_FR_us - PANDAS_FR_ns - PANDAS_FR_ps - PANDAS_FR_fs - PANDAS_FR_as - - ctypedef struct pandas_datetimestruct: - npy_int64 year - npy_int32 month, day, hour, min, sec, us, ps, as - - void pandas_datetime_to_datetimestruct(npy_datetime val, - PANDAS_DATETIMEUNIT fr, - pandas_datetimestruct *result) nogil - - -cdef extern from "datetime/np_datetime_strings.h": - int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, - int *out_local, int *out_tzoffset) - -cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, - int* out_local, int* out_tzoffset) except? 
-1: - cdef int result - cdef char *tmp - - if PyUnicode_Check(val): - val = PyUnicode_AsASCIIString(val); - - tmp = val - result = _cstring_to_dts(tmp, len(val), dts, out_local, out_tzoffset) - - if result == -1: - raise ValueError('Unable to parse %s' % str(val)) - return result - -cdef inline int _cstring_to_dts(char *val, int length, - pandas_datetimestruct* dts, - int* out_local, int* out_tzoffset) except? -1: - cdef: - int result - - result = parse_iso_8601_datetime(val, length, - dts, out_local, out_tzoffset) - return result diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 020ac812e1c20..5e08df7dfe27b 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -23,12 +23,11 @@ from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, timedelta, datetime, date) # import datetime C API PyDateTime_IMPORT -# this is our datetime.pxd -from datetime cimport _string_to_dts from tslibs.np_datetime cimport (check_dts_bounds, pandas_datetimestruct, + _string_to_dts, dt64_to_dtstruct, dtstruct_to_dt64, pydatetime_to_dt64, pydate_to_dt64, get_datetime64_value, diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 0257d13157acc..4f6b2f52d4d24 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -20,14 +20,13 @@ PyDateTime_IMPORT from np_datetime cimport (check_dts_bounds, pandas_datetimestruct, + pandas_datetime_to_datetimestruct, _string_to_dts, PANDAS_DATETIMEUNIT, PANDAS_FR_ns, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64, get_datetime64_unit, get_datetime64_value, pydatetime_to_dt64) -from datetime cimport pandas_datetime_to_datetimestruct, _string_to_dts - from util cimport (is_string_object, is_datetime64_object, is_integer_object, is_float_object) diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 8d9a812654ab0..33b8b32bcf2dc 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd 
@@ -50,6 +50,10 @@ cdef extern from "../src/datetime/np_datetime.h": PANDAS_FR_fs PANDAS_FR_as + void pandas_datetime_to_datetimestruct(npy_datetime val, + PANDAS_DATETIMEUNIT fr, + pandas_datetimestruct *result) nogil + int days_per_month_table[2][12] int dayofweek(int y, int m, int d) nogil int is_leapyear(int64_t year) nogil @@ -71,3 +75,6 @@ cdef int64_t pydate_to_dt64(date val, pandas_datetimestruct *dts) cdef npy_datetime get_datetime64_value(object obj) nogil cdef npy_timedelta get_timedelta64_value(object obj) nogil cdef PANDAS_DATETIMEUNIT get_datetime64_unit(object obj) nogil + +cdef int _string_to_dts(object val, pandas_datetimestruct* dts, + int* out_local, int* out_tzoffset) except? -1 diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 72c028161a937..7f861a50f03b8 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -1,7 +1,8 @@ # -*- coding: utf-8 -*- # cython: profile=False -from cpython cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE +from cpython cimport (Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE, + PyUnicode_Check, PyUnicode_AsASCIIString) from cpython.datetime cimport (datetime, date, PyDateTime_IMPORT, @@ -33,6 +34,11 @@ cdef extern from "../src/datetime/np_datetime.h": pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS +cdef extern from "../src/datetime/np_datetime_strings.h": + int parse_iso_8601_datetime(char *str, int len, + pandas_datetimestruct *out, + int *out_local, int *out_tzoffset) + # ---------------------------------------------------------------------- # numpy object inspection @@ -161,3 +167,35 @@ cdef inline int64_t pydate_to_dt64(date val, dts.hour = dts.min = dts.sec = dts.us = 0 dts.ps = dts.as = 0 return dtstruct_to_dt64(dts) + + +cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, + int* out_local, int* out_tzoffset) except? 
-1: + cdef: + int result + char *tmp + + if PyUnicode_Check(val): + val = PyUnicode_AsASCIIString(val) + + tmp = val + result = _cstring_to_dts(tmp, len(val), dts, out_local, out_tzoffset) + + if result == -1: + raise ValueError('Unable to parse %s' % str(val)) + return result + + +cdef inline int _cstring_to_dts(char *val, int length, + pandas_datetimestruct* dts, + int* out_local, int* out_tzoffset) except? -1: + # Note: without this "extra layer" between _string_to_dts + # and parse_iso_8601_datetime, calling _string_to_dts raises + # `SystemError: <class 'str'> returned a result with an error set` + # in Python3 + cdef: + int result + + result = parse_iso_8601_datetime(val, length, + dts, out_local, out_tzoffset) + return result diff --git a/setup.py b/setup.py index 5820b0dd9933c..c58cc8ef99faf 100755 --- a/setup.py +++ b/setup.py @@ -453,8 +453,7 @@ def pxd(name): 'pandas/_libs/src/datetime/np_datetime_strings.h'] np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'] -tseries_depends = np_datetime_headers + ['pandas/_libs/src/datetime.pxd', - 'pandas/_libs/tslibs/np_datetime.pxd'] +tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd'] # some linux distros require it libraries = ['m'] if not is_platform_windows() else []
There is an unrelated 2-line edit in hashing.pyx that gets rid of a compiler warning.
https://api.github.com/repos/pandas-dev/pandas/pulls/18654
2017-12-06T02:16:33Z
2017-12-07T01:06:15Z
2017-12-07T01:06:15Z
2017-12-07T02:10:58Z
BUG: Fix tz-aware DatetimeIndex +/- TimedeltaIndex/timedelta64 array
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index d34c1f3535509..84fc063f70b15 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -293,7 +293,7 @@ Indexing - Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`) - Bug in ``IntervalIndex.symmetric_difference()`` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`) - Bug in indexing a datetimelike ``Index`` that raised ``ValueError`` instead of ``IndexError`` (:issue:`18386`). - +- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`) I/O ^^^ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5c96e4eeff69d..8cc996285fbbd 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -14,7 +14,7 @@ is_integer, is_float, is_bool_dtype, _ensure_int64, is_scalar, is_dtype_equal, - is_list_like) + is_list_like, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) @@ -651,14 +651,14 @@ def __add__(self, other): from pandas.core.index import Index from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset - if isinstance(other, TimedeltaIndex): + if is_timedelta64_dtype(other): return self._add_delta(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): return other._add_delta(self) raise TypeError("cannot add TimedeltaIndex and {typ}" .format(typ=type(other))) - elif isinstance(other, (DateOffset, timedelta, np.timedelta64)): + elif isinstance(other, (DateOffset, timedelta)): return self._add_delta(other) elif is_integer(other): return self.shift(other) @@ -674,7 +674,7 @@ def 
__sub__(self, other): from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset - if isinstance(other, TimedeltaIndex): + if is_timedelta64_dtype(other): return self._add_delta(-other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if not isinstance(other, TimedeltaIndex): @@ -687,7 +687,7 @@ def __sub__(self, other): raise TypeError("cannot subtract {typ1} and {typ2}" .format(typ1=type(self).__name__, typ2=type(other).__name__)) - elif isinstance(other, (DateOffset, timedelta, np.timedelta64)): + elif isinstance(other, (DateOffset, timedelta)): return self._add_delta(-other) elif is_integer(other): return self.shift(-other) @@ -736,7 +736,7 @@ def _add_delta_tdi(self, other): if self.hasnans or other.hasnans: mask = (self._isnan) | (other._isnan) new_values[mask] = iNaT - return new_values.view(self.dtype) + return new_values.view('i8') def isin(self, values): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d0638412fb276..17b3a88cbf544 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -13,6 +13,7 @@ _NS_DTYPE, _INT64_DTYPE, is_object_dtype, is_datetime64_dtype, is_datetimetz, is_dtype_equal, + is_timedelta64_dtype, is_integer, is_float, is_integer_dtype, is_datetime64_ns_dtype, @@ -858,10 +859,13 @@ def _add_delta(self, delta): if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) - elif isinstance(delta, TimedeltaIndex): + elif is_timedelta64_dtype(delta): + if not isinstance(delta, TimedeltaIndex): + delta = TimedeltaIndex(delta) + else: + # update name when delta is Index + name = com._maybe_match_name(self, delta) new_values = self._add_delta_tdi(delta) - # update name when delta is Index - name = com._maybe_match_name(self, delta) elif isinstance(delta, DateOffset): new_values = self._add_offset(delta).asi8 else: diff 
--git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 2f788a116c0e5..a46462e91a866 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -121,6 +121,99 @@ def test_dti_isub_timedeltalike(self, tz, delta): rng -= delta tm.assert_index_equal(rng, expected) + # ------------------------------------------------------------- + # Binary operations DatetimeIndex and TimedeltaIndex/array + def test_dti_add_tdi(self, tz): + # GH 17558 + dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + tdi = pd.timedelta_range('0 days', periods=10) + expected = pd.date_range('2017-01-01', periods=10, tz=tz) + + # add with TimdeltaIndex + result = dti + tdi + tm.assert_index_equal(result, expected) + + result = tdi + dti + tm.assert_index_equal(result, expected) + + # add with timedelta64 array + result = dti + tdi.values + tm.assert_index_equal(result, expected) + + result = tdi.values + dti + tm.assert_index_equal(result, expected) + + def test_dti_iadd_tdi(self, tz): + # GH 17558 + dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + tdi = pd.timedelta_range('0 days', periods=10) + expected = pd.date_range('2017-01-01', periods=10, tz=tz) + + # iadd with TimdeltaIndex + result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + result += tdi + tm.assert_index_equal(result, expected) + + result = pd.timedelta_range('0 days', periods=10) + result += dti + tm.assert_index_equal(result, expected) + + # iadd with timedelta64 array + result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + result += tdi.values + tm.assert_index_equal(result, expected) + + result = pd.timedelta_range('0 days', periods=10) + result += dti + tm.assert_index_equal(result, expected) + + def test_dti_sub_tdi(self, tz): + # GH 17558 + dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + tdi = pd.timedelta_range('0 days', periods=10) + expected = 
pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D') + + # sub with TimedeltaIndex + result = dti - tdi + tm.assert_index_equal(result, expected) + + msg = 'cannot subtract TimedeltaIndex and DatetimeIndex' + with tm.assert_raises_regex(TypeError, msg): + tdi - dti + + # sub with timedelta64 array + result = dti - tdi.values + tm.assert_index_equal(result, expected) + + msg = 'cannot perform __neg__ with this index type:' + with tm.assert_raises_regex(TypeError, msg): + tdi.values - dti + + def test_dti_isub_tdi(self, tz): + # GH 17558 + dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + tdi = pd.timedelta_range('0 days', periods=10) + expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D') + + # isub with TimedeltaIndex + result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + result -= tdi + tm.assert_index_equal(result, expected) + + msg = 'cannot subtract TimedeltaIndex and DatetimeIndex' + with tm.assert_raises_regex(TypeError, msg): + tdi -= dti + + # isub with timedelta64 array + result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10) + result -= tdi.values + tm.assert_index_equal(result, expected) + + msg = '|'.join(['cannot perform __neg__ with this index type:', + 'ufunc subtract cannot use operands with types']) + with tm.assert_raises_regex(TypeError, msg): + tdi.values -= dti + # ------------------------------------------------------------- # Binary Operations DatetimeIndex and datetime-like # TODO: A couple other tests belong in this section. Move them in
- [X] closes #17558 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Added the whatsnew entry under 0.22.0, but could possibly go under 0.21.1. This PR is a prereq for #18558.
https://api.github.com/repos/pandas-dev/pandas/pulls/18653
2017-12-06T01:00:29Z
2017-12-07T01:19:15Z
2017-12-07T01:19:14Z
2017-12-07T01:57:42Z
BUG: Categorical data fails to load from hdf when all columns are NaN
diff --git a/.gitignore b/.gitignore index ff0a6aef47163..b1748ae72b8ba 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,4 @@ doc/build/html/index.html doc/tmp.sv doc/source/styled.xlsx doc/source/templates/ +env/ diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 31902c98d0b6c..fbfbb403b2a17 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -126,6 +126,7 @@ I/O - Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) - Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) - Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) +- Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`) Plotting ^^^^^^^^ diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 74cd2ba7dc4d8..d73417f7b0c95 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2137,10 +2137,17 @@ def convert(self, values, nan_rep, encoding): # if we have stored a NaN in the categories # then strip it; in theory we could have BOTH # -1s in the codes and nulls :< - mask = isna(categories) - if mask.any(): - categories = categories[~mask] - codes[codes != -1] -= mask.astype(int).cumsum().values + if categories is None: + # Handle case of NaN-only categorical columns in which case + # the categories are an empty array; when this is stored, + # pytables cannot write a zero-len array, so on readback + # the categories would be None and `read_hdf()` would fail. 
+ categories = Index([], dtype=np.float64) + else: + mask = isna(categories) + if mask.any(): + categories = categories[~mask] + codes[codes != -1] -= mask.astype(int).cumsum().values self.data = Categorical.from_codes(codes, categories=categories, diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index f239b7fe7855d..85f24e794f12a 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -4928,6 +4928,25 @@ def test_categorical_conversion(self): result = read_hdf(path, 'df', where='obsids=B') tm.assert_frame_equal(result, expected) + def test_categorical_nan_only_columns(self): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. + df = pd.DataFrame({ + 'a': ['a', 'b', 'c', np.nan], + 'b': [np.nan, np.nan, np.nan, np.nan], + 'c': [1, 2, 3, 4], + 'd': pd.Series([None] * 4, dtype=object) + }) + df['a'] = df.a.astype('category') + df['b'] = df.b.astype('category') + df['d'] = df.b.astype('category') + expected = df + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df') + tm.assert_frame_equal(result, expected) + def test_duplicate_column_name(self): df = DataFrame(columns=["a", "a"], data=[[0, 0]])
- [x] closes #18413 - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry: Allow storing NaN-only categorical columns in hdf5 store
https://api.github.com/repos/pandas-dev/pandas/pulls/18652
2017-12-06T00:46:31Z
2017-12-10T18:28:06Z
2017-12-10T18:28:05Z
2017-12-12T02:38:31Z
selected numeric data before correlation
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index f2500bb29d0be..805f57eabbd17 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -77,6 +77,7 @@ Other Enhancements - :func:`Series.fillna` now accepts a Series or a dict as a ``value`` for a categorical dtype (:issue:`17033`) - :func:`pandas.read_clipboard` updated to use qtpy, falling back to PyQt5 and then PyQt4, adding compatibility with Python3 and multiple python-qt bindings (:issue:`17722`) - Improved wording of ``ValueError`` raised in :func:`read_csv` when the ``usecols`` argument cannot match all columns. (:issue:`17301`) +- :func:`DataFrame.corrwith` now silently drops non-numeric columns when passed a Series. Before, an exception was raised (:issue:`18570`). .. _whatsnew_0220.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 313c9ec872179..68cf5dd7161e3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5577,7 +5577,7 @@ def corrwith(self, other, axis=0, drop=False): Parameters ---------- - other : DataFrame + other : DataFrame, Series axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise drop : boolean, default False @@ -5588,10 +5588,11 @@ def corrwith(self, other, axis=0, drop=False): correls : Series """ axis = self._get_axis_number(axis) + this = self._get_numeric_data() + if isinstance(other, Series): - return self.apply(other.corr, axis=axis) + return this.apply(other.corr, axis=axis) - this = self._get_numeric_data() other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index cfdb18cefee64..9ef526034eaf7 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -240,6 +240,16 @@ def test_corrwith_matches_corrcoef(self): tm.assert_almost_equal(c1, c2) assert c1 < 1 + 
def test_corrwith_mixed_dtypes(self): + # GH 18570 + df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3], + 'c': ['a', 'b', 'c', 'd']}) + s = pd.Series([0, 6, 7, 3]) + result = df.corrwith(s) + corrs = [df['a'].corr(s), df['b'].corr(s)] + expected = pd.Series(data=corrs, index=['a', 'b']) + tm.assert_series_equal(result, expected) + def test_bool_describe_in_mixed_frame(self): df = DataFrame({ 'string_data': ['a', 'b', 'c', 'd', 'e'],
- [x] closes #18570 - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry I select the numeric data before the calculation happens with a Series. No whatsnew entry yet.
https://api.github.com/repos/pandas-dev/pandas/pulls/18651
2017-12-06T00:35:04Z
2017-12-06T11:16:51Z
2017-12-06T11:16:51Z
2017-12-06T14:29:36Z
CLN: replace %s syntax with .format in stats/moments.py
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index a0e94aa0c8581..580ee50c442de 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -8,7 +8,7 @@ import numpy as np from pandas.core.dtypes.common import is_scalar from pandas.core.api import DataFrame, Series -from pandas.util._decorators import Substitution, Appender +from pandas.util._decorators import Appender __all__ = ['rolling_count', 'rolling_max', 'rolling_min', 'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov', @@ -29,15 +29,15 @@ # (header, args, kwargs, returns, notes) _doc_template = """ -%s +{header} Parameters ---------- -%s%s +{args}{kwargs} Returns ------- -%s -%s +{returns} +{notes} """ _roll_kw = """window : int @@ -51,7 +51,7 @@ as a frequency string or DateOffset object. center : boolean, default False Set the labels at the center of the window. -how : string, default '%s' +how : string, default '{how}' Method for down- or re-sampling """ @@ -278,10 +278,11 @@ def rolling_count(arg, window, **kwargs): return ensure_compat('rolling', 'count', arg, window=window, **kwargs) -@Substitution("Unbiased moving covariance.", _binary_arg_flex, - _roll_kw % 'None' + _pairwise_kw + _ddof_kw, _flex_retval, - _roll_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Unbiased moving covariance.", + args=_binary_arg_flex, + kwargs=_roll_kw.format(how='None' + _pairwise_kw + _ddof_kw), + returns=_flex_retval, notes=_roll_notes)) def rolling_cov(arg1, arg2=None, window=None, pairwise=None, **kwargs): if window is None and isinstance(arg2, (int, float)): window = arg2 @@ -300,9 +301,10 @@ def rolling_cov(arg1, arg2=None, window=None, pairwise=None, **kwargs): **kwargs) -@Substitution("Moving sample correlation.", _binary_arg_flex, - _roll_kw % 'None' + _pairwise_kw, _flex_retval, _roll_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Moving sample correlation.", args=_binary_arg_flex, + kwargs=_roll_kw.format(how='None' 
+ _pairwise_kw), + returns=_flex_retval, notes=_roll_notes)) def rolling_corr(arg1, arg2=None, window=None, pairwise=None, **kwargs): if window is None and isinstance(arg2, (int, float)): window = arg2 @@ -325,9 +327,10 @@ def rolling_corr(arg1, arg2=None, window=None, pairwise=None, **kwargs): # Exponential moving moments -@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw, - _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Exponentially-weighted moving average", + args=_unary_arg, kwargs=_ewm_kw, + returns=_type_of_input_retval, notes=_ewm_notes)) def ewma(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, freq=None, adjust=True, how=None, ignore_na=False): return ensure_compat('ewm', @@ -344,9 +347,10 @@ def ewma(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, ignore_na=ignore_na) -@Substitution("Exponentially-weighted moving variance", _unary_arg, - _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Exponentially-weighted moving variance", + args=_unary_arg, kwargs=_ewm_kw + _bias_kw, + returns=_type_of_input_retval, notes=_ewm_notes)) def ewmvar(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, bias=False, freq=None, how=None, ignore_na=False, adjust=True): return ensure_compat('ewm', @@ -365,9 +369,10 @@ def ewmvar(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, func_kw=['bias']) -@Substitution("Exponentially-weighted moving std", _unary_arg, - _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Exponentially-weighted moving std", + args=_unary_arg, kwargs=_ewm_kw + _bias_kw, + returns=_type_of_input_retval, notes=_ewm_notes)) def ewmstd(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, bias=False, freq=None, how=None, ignore_na=False, 
adjust=True): return ensure_compat('ewm', @@ -389,9 +394,10 @@ def ewmstd(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0, ewmvol = ewmstd -@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex, - _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Exponentially-weighted moving covariance", + args=_binary_arg_flex, kwargs=_ewm_kw + _pairwise_kw, + returns=_type_of_input_retval, notes=_ewm_notes)) def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None, min_periods=0, bias=False, freq=None, pairwise=None, how=None, ignore_na=False, adjust=True): @@ -421,9 +427,10 @@ def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None, func_kw=['other', 'pairwise', 'bias']) -@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex, - _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes) -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Exponentially-weighted moving correlation", + args=_binary_arg_flex, kwargs=_ewm_kw + _pairwise_kw, + returns=_type_of_input_retval, notes=_ewm_notes)) def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None, min_periods=0, freq=None, pairwise=None, how=None, ignore_na=False, adjust=True): @@ -460,9 +467,10 @@ def _rolling_func(name, desc, how=None, func_kw=None, additional_kw=''): else: how_arg_str = "'{how}".format(how=how) - @Substitution(desc, _unary_arg, _roll_kw % how_arg_str + additional_kw, - _type_of_input_retval, _roll_notes) - @Appender(_doc_template) + @Appender(_doc_template + .format(header=desc, args=_unary_arg, + kwargs=_roll_kw.format(how=how_arg_str + additional_kw), + returns=_type_of_input_retval, notes=_roll_notes)) def f(arg, window, min_periods=None, freq=None, center=False, **kwargs): @@ -672,9 +680,10 @@ def rolling_window(arg, window=None, win_type=None, min_periods=None, def _expanding_func(name, desc, 
func_kw=None, additional_kw=''): - @Substitution(desc, _unary_arg, _expanding_kw + additional_kw, - _type_of_input_retval, "") - @Appender(_doc_template) + @Appender(_doc_template + .format(header=desc, args=_unary_arg, + kwargs=_expanding_kw + additional_kw, + returns=_type_of_input_retval, notes="")) def f(arg, min_periods=1, freq=None, **kwargs): return ensure_compat('expanding', name, @@ -766,9 +775,11 @@ def expanding_quantile(arg, quantile, min_periods=1, freq=None): quantile=quantile) -@Substitution("Unbiased expanding covariance.", _binary_arg_flex, - _expanding_kw + _pairwise_kw + _ddof_kw, _flex_retval, "") -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Unbiased expanding covariance.", + args=_binary_arg_flex, + kwargs=_expanding_kw + _pairwise_kw + _ddof_kw, + returns=_flex_retval, notes="")) def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None, ddof=1): if arg2 is None: @@ -789,9 +800,10 @@ def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, func_kw=['other', 'pairwise', 'ddof']) -@Substitution("Expanding sample correlation.", _binary_arg_flex, - _expanding_kw + _pairwise_kw, _flex_retval, "") -@Appender(_doc_template) +@Appender(_doc_template + .format(header="Expanding sample correlation.", + args=_binary_arg_flex, kwargs=_expanding_kw + _pairwise_kw, + returns=_flex_retval, notes="")) def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None): if arg2 is None: arg2 = arg1
- [x] Progress towards #16130 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/18650
2017-12-06T00:23:27Z
2017-12-06T01:21:34Z
null
2023-05-11T01:16:53Z
TST: Add list of tuples pd.factorize test
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index bf244deec9ffc..64564df4febe8 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -17,6 +17,7 @@ from pandas._libs.hashtable import unique_label_indices from pandas.compat import lrange, range import pandas.core.algorithms as algos +from pandas.core.common import _asarray_tuplesafe import pandas.util.testing as tm from pandas.compat.numpy import np_array_datetime64_compat from pandas.util.testing import assert_almost_equal @@ -190,6 +191,33 @@ def test_factorize_nan(self): assert len(set(key)) == len(set(expected)) tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel) + @pytest.mark.parametrize("data,expected_label,expected_level", [ + ( + [(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), 'nonsense'] + ), + ( + [(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)], + [0, 1, 2, 1, 3], + [(1, 1), (1, 2), (0, 0), (1, 2, 3)] + ), + ( + [(1, 1), (1, 2), (0, 0), (1, 2)], + [0, 1, 2, 1], + [(1, 1), (1, 2), (0, 0)] + ) + ]) + def test_factorize_tuple_list(self, data, expected_label, expected_level): + # GH9454 + result = pd.factorize(data) + + tm.assert_numpy_array_equal(result[0], + np.array(expected_label, dtype=np.intp)) + + expected_level_array = _asarray_tuplesafe(expected_level, dtype=object) + tm.assert_numpy_array_equal(result[1], expected_level_array) + def test_complex_sorting(self): # gh 12666 - check no segfault # Test not valid numpy versions older than 1.11
- [x] closes #9454 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18649
2017-12-05T23:15:30Z
2017-12-09T15:37:47Z
2017-12-09T15:37:47Z
2017-12-09T15:37:50Z
CLN: Remove .to_datetime methods
diff --git a/doc/source/api.rst b/doc/source/api.rst index f3405fcdee608..fefea408748e3 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1489,7 +1489,6 @@ Conversion Index.map Index.ravel Index.tolist - Index.to_datetime Index.to_native_types Index.to_series Index.to_frame @@ -1757,7 +1756,6 @@ Conversion .. autosummary:: :toctree: generated/ - DatetimeIndex.to_datetime DatetimeIndex.to_period DatetimeIndex.to_perioddelta DatetimeIndex.to_pydatetime diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ae272282040b8..745c69adc752c 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -152,6 +152,7 @@ Removal of prior version deprecations/changes - ``Categorical.from_array`` has been removed (:issue:`13854`) - The ``freq`` parameter has been removed from the ``rolling``/``expanding``/``ewm`` methods of DataFrame and Series (deprecated since v0.18). Instead, resample before calling the methods. (:issue:18601) +- ``DatetimeIndex.to_datetime``, ``Timestamp.to_datetime``, ``PeriodIndex.to_datetime``, and ``Index.to_datetime`` have been removed (:issue:`8254`, :issue:`14096`, :issue:`14113`) .. _whatsnew_0220.performance: diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 2e7b861b24fa8..a058b9d7de9c4 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -556,16 +556,6 @@ class NaTType(_NaT): Timestamp with fields replaced """) - def to_datetime(self): - """ - DEPRECATED: use :meth:`to_pydatetime` instead. - - Convert a Timestamp object to a native Python datetime object. - """ - warnings.warn("to_datetime is deprecated. 
Use self.to_pydatetime()", - FutureWarning, stacklevel=2) - return self.to_pydatetime(warn=False) - NaT = NaTType() diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index cf0c0e2c01d60..59044fe314e08 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -181,16 +181,6 @@ cdef class _Timestamp(datetime): elif other.tzinfo is None: raise TypeError('Cannot compare tz-naive and tz-aware timestamps') - cpdef datetime to_datetime(_Timestamp self): - """ - DEPRECATED: use :meth:`to_pydatetime` instead. - - Convert a Timestamp object to a native Python datetime object. - """ - warnings.warn("to_datetime is deprecated. Use self.to_pydatetime()", - FutureWarning, stacklevel=2) - return self.to_pydatetime(warn=False) - cpdef datetime to_pydatetime(_Timestamp self, warn=True): """ Convert a Timestamp object to a native Python datetime object. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 94e9947155c41..938fd7130faa5 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -7,7 +7,6 @@ algos as libalgos, join as libjoin, Timestamp, Timedelta, ) from pandas._libs.lib import is_datetime_array -from pandas._libs.tslibs import parsing from pandas.compat import range, u, set_function_name from pandas.compat.numpy import function as nv @@ -1061,25 +1060,6 @@ def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self - def to_datetime(self, dayfirst=False): - """ - DEPRECATED: use :meth:`pandas.to_datetime` instead. - - For an Index containing strings or datetime.datetime objects, attempt - conversion to DatetimeIndex - """ - warnings.warn("to_datetime is deprecated. 
Use pd.to_datetime(...)", - FutureWarning, stacklevel=2) - - from pandas.core.indexes.datetimes import DatetimeIndex - if self.inferred_type == 'string': - from dateutil.parser import parse - parser = lambda x: parse(x, dayfirst=dayfirst) - parsed = parsing.try_parse_dates(self.values, parser=parser) - return DatetimeIndex(parsed) - else: - return DatetimeIndex(self.values) - def _assert_can_do_setop(self, other): if not is_list_like(other): raise TypeError('Input must be Index or array-like') diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index fb86d25625b6a..d0638412fb276 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -244,7 +244,6 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, round floor ceil - to_datetime to_period to_perioddelta to_pydatetime @@ -899,9 +898,6 @@ def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): format=format, na_rep=na_rep) - def to_datetime(self, dayfirst=False): - return self.copy() - @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index cb0c4a9ce2a86..8b541bdce39ed 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -3,7 +3,6 @@ import numpy as np import warnings - from pandas.core import common as com from pandas.core.dtypes.common import ( is_integer, @@ -611,17 +610,6 @@ def asfreq(self, freq=None, how='E'): return self._simple_new(new_data, self.name, freq=freq) - def to_datetime(self, dayfirst=False): - """ - .. deprecated:: 0.19.0 - Use :meth:`to_timestamp` instead. - - Cast to DatetimeIndex. - """ - warnings.warn("to_datetime is deprecated. 
Use self.to_timestamp(...)", - FutureWarning, stacklevel=2) - return self.to_timestamp() - year = _field_accessor('year', 0, "The year of the period") month = _field_accessor('month', 3, "The month as January=1, December=12") day = _field_accessor('day', 4, "The days of the period") @@ -1214,8 +1202,6 @@ def _make_field_arrays(*fields): def pnow(freq=None): # deprecation, xref #13790 - import warnings - warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() " "are deprecated. Please use Period.now()", FutureWarning, stacklevel=2) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index d03951458f12a..91284613b0331 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -23,8 +23,7 @@ from pandas.util import testing as tm from pandas.util.testing import assert_series_equal, _skip_if_has_locale from pandas import (isna, to_datetime, Timestamp, Series, DataFrame, - Index, DatetimeIndex, NaT, date_range, bdate_range, - compat) + Index, DatetimeIndex, NaT, date_range, compat) class TestTimeConversionFormats(object): @@ -735,24 +734,6 @@ def test_dataframe_dtypes(self, cache): class TestToDatetimeMisc(object): - @pytest.mark.parametrize('cache', [True, False]) - def test_index_to_datetime(self, cache): - idx = Index(['1/1/2000', '1/2/2000', '1/3/2000']) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = idx.to_datetime() - expected = DatetimeIndex(pd.to_datetime(idx.values, cache=cache)) - tm.assert_index_equal(result, expected) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - today = datetime.today() - idx = Index([today], dtype=object) - result = idx.to_datetime() - expected = DatetimeIndex([today]) - tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_iso8601(self, cache): result = to_datetime(["2012-01-01 00:00:00"], 
cache=cache) @@ -888,12 +869,6 @@ def test_to_datetime_list_of_integers(self): tm.assert_index_equal(rng, result) - def test_to_datetime_freq(self): - xp = bdate_range('2000-1-1', periods=10, tz='UTC') - rs = xp.to_datetime() - assert xp.freq == rs.freq - assert xp.tzinfo == rs.tzinfo - def test_to_datetime_overflow(self): # gh-17637 # we are overflowing Timedelta range here diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 3774111f44fb2..67ed725436581 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -386,14 +386,6 @@ def test_to_timestamp_1703(self): result = index.to_timestamp() assert result[0] == Timestamp('1/1/2012') - def test_to_datetime_depr(self): - index = period_range('1/1/2012', periods=4, freq='D') - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = index.to_datetime() - assert result[0] == Timestamp('1/1/2012') - def test_combine_first(self): # GH 3367 didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M') diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index e23911e8d2003..9dcfaeb703d43 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -672,16 +672,6 @@ def test_pprint(self): 'foo': 1}""" assert result == expected - def test_to_datetime_depr(self): - # see gh-8254 - ts = Timestamp('2011-01-01') - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - expected = datetime(2011, 1, 1) - result = ts.to_datetime() - assert result == expected - def test_to_pydatetime_nonzero_nano(self): ts = Timestamp('2011-01-01 9:00:00.123456789')
Removes the following .to_datetime methods * `Index.to_datetime` * `Timestamp.to_datetime` * `PeriodIndex.to_datetime` * `DatetimeIndex.to_datetime` All were deprecated in 0.19.0 xref #8254, #14096, #14113
https://api.github.com/repos/pandas-dev/pandas/pulls/18648
2017-12-05T18:10:28Z
2017-12-06T10:31:36Z
2017-12-06T10:31:36Z
2017-12-06T17:18:01Z
STYLE/CI: implement incremental linting for asv benchmarks
diff --git a/ci/lint.sh b/ci/lint.sh index 5d9fafe6c9064..832e1227d9752 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -23,6 +23,13 @@ if [ "$LINT" ]; then fi echo "Linting setup.py DONE" + echo "Linting asv_bench/benchmarks/" + flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ghijoprst]*.py --ignore=F811 + if [ $? -ne "0" ]; then + RET=1 + fi + echo "Linting asv_bench/benchmarks/*.py DONE" + echo "Linting *.pyx" flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403 if [ $? -ne "0" ]; then
Another try at #18620 @mroeschke since it looks like you are going through the asv files one-by-one, the idea here is that when file X gets cleaned up, it can be un-excluded in the lint.sh command. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18647
2017-12-05T17:48:37Z
2017-12-06T23:19:24Z
2017-12-06T23:19:24Z
2017-12-08T19:38:14Z
BUG: Fixed handling of boolean indexing with 2-d ndarrays
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 0301bf0a23dd5..430637ac6d384 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -314,6 +314,7 @@ Indexing - :func:`DatetimeIndex.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) - Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`) - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) +- Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`) I/O diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 12a4a7fdaedad..faf9f2673b0ba 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2532,10 +2532,10 @@ def __setitem__(self, key, value): if indexer is not None: return self._setitem_slice(indexer, value) - if isinstance(key, (Series, np.ndarray, list, Index)): - self._setitem_array(key, value) - elif isinstance(key, DataFrame): + if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2: self._setitem_frame(key, value) + elif isinstance(key, (Series, np.ndarray, list, Index)): + self._setitem_array(key, value) else: # set column self._set_item(key, value) @@ -2568,8 +2568,17 @@ def _setitem_array(self, key, value): def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. 
# df[df > df2] = 0 + if isinstance(key, np.ndarray): + if key.shape != self.shape: + raise ValueError( + 'Array conditional must be same shape as self' + ) + key = self._constructor(key, **self._construct_axes_dict()) + if key.values.size and not is_bool_dtype(key.values): - raise TypeError('Must pass DataFrame with boolean values only') + raise TypeError( + 'Must pass DataFrame or 2-d ndarray with boolean values only' + ) self._check_inplace_setting(value) self._check_setitem_copy() diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 62bc0eada9d89..882fa634d167d 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -524,9 +524,8 @@ def test_setitem_boolean(self): values[values == 2] = 3 assert_almost_equal(df.values, values) - with tm.assert_raises_regex(TypeError, 'Must pass ' - 'DataFrame with ' - 'boolean values only'): + msg = "Must pass DataFrame or 2-d ndarray with boolean values only" + with tm.assert_raises_regex(TypeError, msg): df[df * 0] = 2 # index with DataFrame @@ -542,6 +541,25 @@ def test_setitem_boolean(self): np.putmask(expected.values, mask.values, df.values * 2) assert_frame_equal(df, expected) + @pytest.mark.parametrize( + "mask_type", + [lambda df: df > np.abs(df) / 2, + lambda df: (df > np.abs(df) / 2).values], + ids=['dataframe', 'array']) + def test_setitem_boolean_mask(self, mask_type): + + # Test for issue #18582 + df = self.frame.copy() + mask = mask_type(df) + + # index with boolean mask + result = df.copy() + result[mask] = np.nan + + expected = df.copy() + expected.values[np.array(mask)] = np.nan + assert_frame_equal(result, expected) + def test_setitem_cast(self): self.frame['D'] = self.frame['D'].astype('i8') assert self.frame['D'].dtype == np.int64
- [x] closes #18582 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18645
2017-12-05T12:57:31Z
2017-12-30T16:16:54Z
2017-12-30T16:16:53Z
2017-12-30T22:11:47Z
TST: Move merge/join tests into sub-dir
diff --git a/pandas/tests/reshape/merge/__init__.py b/pandas/tests/reshape/merge/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/reshape/data/allow_exact_matches.csv b/pandas/tests/reshape/merge/data/allow_exact_matches.csv similarity index 100% rename from pandas/tests/reshape/data/allow_exact_matches.csv rename to pandas/tests/reshape/merge/data/allow_exact_matches.csv diff --git a/pandas/tests/reshape/data/allow_exact_matches_and_tolerance.csv b/pandas/tests/reshape/merge/data/allow_exact_matches_and_tolerance.csv similarity index 100% rename from pandas/tests/reshape/data/allow_exact_matches_and_tolerance.csv rename to pandas/tests/reshape/merge/data/allow_exact_matches_and_tolerance.csv diff --git a/pandas/tests/reshape/data/asof.csv b/pandas/tests/reshape/merge/data/asof.csv similarity index 100% rename from pandas/tests/reshape/data/asof.csv rename to pandas/tests/reshape/merge/data/asof.csv diff --git a/pandas/tests/reshape/data/asof2.csv b/pandas/tests/reshape/merge/data/asof2.csv similarity index 100% rename from pandas/tests/reshape/data/asof2.csv rename to pandas/tests/reshape/merge/data/asof2.csv diff --git a/pandas/tests/reshape/data/quotes.csv b/pandas/tests/reshape/merge/data/quotes.csv similarity index 100% rename from pandas/tests/reshape/data/quotes.csv rename to pandas/tests/reshape/merge/data/quotes.csv diff --git a/pandas/tests/reshape/data/quotes2.csv b/pandas/tests/reshape/merge/data/quotes2.csv similarity index 100% rename from pandas/tests/reshape/data/quotes2.csv rename to pandas/tests/reshape/merge/data/quotes2.csv diff --git a/pandas/tests/reshape/data/tolerance.csv b/pandas/tests/reshape/merge/data/tolerance.csv similarity index 100% rename from pandas/tests/reshape/data/tolerance.csv rename to pandas/tests/reshape/merge/data/tolerance.csv diff --git a/pandas/tests/reshape/data/trades.csv b/pandas/tests/reshape/merge/data/trades.csv similarity index 100% rename from 
pandas/tests/reshape/data/trades.csv rename to pandas/tests/reshape/merge/data/trades.csv diff --git a/pandas/tests/reshape/data/trades2.csv b/pandas/tests/reshape/merge/data/trades2.csv similarity index 100% rename from pandas/tests/reshape/data/trades2.csv rename to pandas/tests/reshape/merge/data/trades2.csv diff --git a/pandas/tests/reshape/test_join.py b/pandas/tests/reshape/merge/test_join.py similarity index 99% rename from pandas/tests/reshape/test_join.py rename to pandas/tests/reshape/merge/test_join.py index 76791b08a26be..a64069fa700b8 100644 --- a/pandas/tests/reshape/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -13,7 +13,7 @@ from pandas._libs import join as libjoin import pandas.util.testing as tm -from pandas.tests.reshape.test_merge import get_test_data, N, NGROUPS +from pandas.tests.reshape.merge.test_merge import get_test_data, N, NGROUPS a_ = np.array diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/merge/test_merge.py similarity index 100% rename from pandas/tests/reshape/test_merge.py rename to pandas/tests/reshape/merge/test_merge.py diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py similarity index 100% rename from pandas/tests/reshape/test_merge_asof.py rename to pandas/tests/reshape/merge/test_merge_asof.py diff --git a/pandas/tests/reshape/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py similarity index 100% rename from pandas/tests/reshape/test_merge_index_as_string.py rename to pandas/tests/reshape/merge/test_merge_index_as_string.py diff --git a/pandas/tests/reshape/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py similarity index 100% rename from pandas/tests/reshape/test_merge_ordered.py rename to pandas/tests/reshape/merge/test_merge_ordered.py diff --git a/setup.py b/setup.py index 004f111115079..5820b0dd9933c 100755 --- a/setup.py +++ b/setup.py @@ -761,6 +761,7 @@ def 
pxd(name): 'pandas.tests.io.formats', 'pandas.tests.groupby', 'pandas.tests.reshape', + 'pandas.tests.reshape.merge', 'pandas.tests.series', 'pandas.tests.scalar', 'pandas.tests.tseries', @@ -800,6 +801,7 @@ def pxd(name): 'pandas.tests.io.formats': ['data/*.csv'], 'pandas.tests.io.msgpack': ['data/*.mp'], 'pandas.tests.reshape': ['data/*.csv'], + 'pandas.tests.reshape.merge': ['data/*.csv'], 'pandas.tests.tseries.offsets': ['data/*.pickle'], 'pandas.io.formats': ['templates/*.tpl'] },
- [ ] closes #18615
https://api.github.com/repos/pandas-dev/pandas/pulls/18643
2017-12-05T10:56:37Z
2017-12-06T00:57:28Z
2017-12-06T00:57:28Z
2017-12-06T09:21:07Z