Each record below contains, in order: title, diff, body, url, created_at, closed_at, merged_at, updated_at.
BUG: TypeError in groupby pct_change when fill_method is None
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 96ea682dd3caf..fb636935f1480 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -869,6 +869,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrame.groupby` when using axis=1 and having a single level columns index (:issue:`30208`) - Bug in :meth:`DataFrame.groupby` when using nunique on axis=1 (:issue:`30253`) - Bug in :meth:`GroupBy.quantile` with multiple list-like q value and integer column names (:issue:`30289`) +- Bug in :meth:`GroupBy.pct_change` and :meth:`SeriesGroupBy.pct_change` causes ``TypeError`` when ``fill_method`` is ``None`` (:issue:`30463`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 6b110a0c80c07..be94fa5484496 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -809,6 +809,9 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None): periods=periods, fill_method=fill_method, limit=limit, freq=freq ) ) + if fill_method is None: # GH30463 + fill_method = "pad" + limit = 0 filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.codes) shifted = fill_grp.shift(periods=periods, freq=freq) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 227547daf3668..150651709047c 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2362,6 +2362,9 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0 axis=axis, ) ) + if fill_method is None: # GH30463 + fill_method = "pad" + limit = 0 filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.codes) shifted = fill_grp.shift(periods=periods, freq=freq) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index c46180c1d11cd..a9bae8ecfcf3d 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -874,27 +874,19 @@ def test_pad_stable_sorting(fill_method): ), ], ) -@pytest.mark.parametrize( - "periods,fill_method,limit", - [ - (1, "ffill", None), - (1, "ffill", 1), - (1, "bfill", None), - (1, "bfill", 1), - (-1, "ffill", None), - (-1, "ffill", 1), - (-1, "bfill", None), - (-1, "bfill", 1), - ], -) +@pytest.mark.parametrize("periods", [1, -1]) +@pytest.mark.parametrize("fill_method", ["ffill", "bfill", None]) +@pytest.mark.parametrize("limit", [None, 1]) def test_pct_change(test_series, freq, periods, fill_method, limit): - # GH 21200, 21621 + # GH 21200, 21621, 30463 vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4] keys = ["a", "b"] key_v = np.repeat(keys, len(vals)) df = DataFrame({"key": key_v, "vals": vals * 2}) - df_g = getattr(df.groupby("key"), fill_method)(limit=limit) + df_g = df + if fill_method is not None: + df_g = getattr(df.groupby("key"), fill_method)(limit=limit) grp = df_g.groupby(df.key) expected = grp["vals"].obj / grp["vals"].shift(periods) - 1
- [x] xref one of two issues in #30463 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
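For reference, a minimal reproduction of the bug this fixes (the data values are made up for illustration). Before this change, `fill_method=None` flowed straight into `getattr(self, fill_method)(limit=limit)` and raised a `TypeError`; the patch maps `None` to `fill_method="pad"` with `limit=0`, which it uses to mean "no filling".

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "b"], "vals": [1.0, np.nan, 2.0, 4.0]})

# Previously raised TypeError (getattr with a None attribute name);
# with this patch it computes pct_change without any NaN filling.
result = df.groupby("key")["vals"].pct_change(fill_method=None)
print(result)
```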
https://api.github.com/repos/pandas-dev/pandas/pulls/30532
2019-12-29T06:11:14Z
2019-12-31T12:46:48Z
2019-12-31T12:46:48Z
2019-12-31T12:58:24Z
CLN: ops cleanups
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fc39b264d1598..cd55ee07dbde9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5148,7 +5148,7 @@ def reorder_levels(self, order, axis=0): # Arithmetic / combination related def _combine_frame(self, other, func, fill_value=None, level=None): - this, other = self.align(other, join="outer", level=level, copy=False) + # at this point we have `self._indexed_same(other)` if fill_value is None: # since _arith_op may be called in a loop, avoid function call @@ -5164,14 +5164,15 @@ def _arith_op(left, right): left, right = ops.fill_binop(left, right, fill_value) return func(left, right) - if ops.should_series_dispatch(this, other, func): + if ops.should_series_dispatch(self, other, func): # iterate over columns - new_data = ops.dispatch_to_series(this, other, _arith_op) + new_data = ops.dispatch_to_series(self, other, _arith_op) else: with np.errstate(all="ignore"): - res_values = _arith_op(this.values, other.values) - new_data = dispatch_fill_zeros(func, this.values, other.values, res_values) - return this._construct_result(new_data) + res_values = _arith_op(self.values, other.values) + new_data = dispatch_fill_zeros(func, self.values, other.values, res_values) + + return new_data def _combine_match_index(self, other, func): # at this point we have `self.index.equals(other.index)` diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index be5e53eaa6721..1b868f7c10602 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -302,7 +302,7 @@ def _get_op_name(op, special): """ opname = op.__name__.strip("_") if special: - opname = "__{opname}__".format(opname=opname) + opname = f"__{opname}__" return opname @@ -385,7 +385,7 @@ def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries) and axis == "columns": - # We only get here if called via _combine_frame_series, + # We only get here if called via _combine_series_frame, # in which case we specifically want to operate row-by-row assert right.index.equals(left.columns) @@ -603,9 +603,7 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N result : DataFrame """ if fill_value is not None: - raise NotImplementedError( - "fill_value {fill} not supported.".format(fill=fill_value) - ) + raise NotImplementedError(f"fill_value {fill_value} not supported.") if axis is None: # default axis is columns @@ -661,15 +659,13 @@ def to_series(right): else: raise ValueError( "Unable to coerce to DataFrame, shape " - "must be {req_shape}: given {given_shape}".format( - req_shape=left.shape, given_shape=right.shape - ) + f"must be {left.shape}: given {right.shape}" ) elif right.ndim > 2: raise ValueError( "Unable to coerce to Series/DataFrame, dim " - "must be <= 2: {dim}".format(dim=right.shape) + f"must be <= 2: {right.shape}" ) elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)): @@ -702,7 +698,11 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): # Another DataFrame pass_op = op if should_series_dispatch(self, other, op) else na_op pass_op = pass_op if not is_logical else op - return self._combine_frame(other, pass_op, fill_value, level) + + left, right = self.align(other, join="outer", level=level, copy=False) + new_data = left._combine_frame(right, pass_op, fill_value) + return left._construct_result(new_data) + elif isinstance(other, ABCSeries): # For these values of `axis`, we end up dispatching to 
Series op, # so do not want the masked op. @@ -763,7 +763,7 @@ def _comp_method_FRAME(cls, op, special): str_rep = _get_opstr(op) op_name = _get_op_name(op, special) - @Appender("Wrapper for comparison method {name}".format(name=op_name)) + @Appender(f"Wrapper for comparison method {op_name}") def f(self, other): other = _align_method_FRAME(self, other, axis=None)
Try to call `construct_result` in fewer places, do reindexing in more consistent places, and convert the remaining `.format` calls to f-strings along the way.
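As context for the reindexing consolidation, a small sketch of the alignment step that now happens once in the arithmetic wrapper (the example frames are invented): `self.align(other, join="outer", ...)` runs before `_combine_frame`, so `_combine_frame` can assume `self._indexed_same(other)`. The visible behavior is unchanged:

```python
import pandas as pd

df1 = pd.DataFrame({"A": [1, 2]}, index=[0, 1])
df2 = pd.DataFrame({"A": [10, 20]}, index=[1, 2])

# Outer-join alignment happens first, so the result index is the
# union [0, 1, 2], with NaN wherever one side is missing a label.
print(df1 + df2)
#       A
# 0   NaN
# 1  12.0
# 2   NaN
```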
https://api.github.com/repos/pandas-dev/pandas/pulls/30530
2019-12-28T21:58:10Z
2019-12-30T13:14:22Z
2019-12-30T13:14:22Z
2019-12-30T16:28:30Z
REF: collect Index setops tests
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 9a5f9e40374a3..306ac84ef1832 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -43,7 +43,7 @@ def test_can_hold_identifiers(self): (lambda idx: ["a", "b"] + idx, "__radd__"), ], ) - def test_disallow_set_ops(self, func, op_name): + def test_disallow_addsub_ops(self, func, op_name): # GH 10039 # set ops (+/-) raise TypeError idx = pd.Index(pd.Categorical(["a", "b"])) diff --git a/pandas/tests/indexes/multi/test_set_ops.py b/pandas/tests/indexes/multi/test_setops.py similarity index 100% rename from pandas/tests/indexes/multi/test_set_ops.py rename to pandas/tests/indexes/multi/test_setops.py diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index c11dda8f67620..da2f04d45fdac 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -105,25 +105,6 @@ def test_no_millisecond_field(self): with pytest.raises(AttributeError, match=msg): DatetimeIndex([]).millisecond - @pytest.mark.parametrize("sort", [None, False]) - def test_difference_freq(self, sort): - # GH14323: difference of Period MUST preserve frequency - # but the ability to union results must be preserved - - index = period_range("20160920", "20160925", freq="D") - - other = period_range("20160921", "20160924", freq="D") - expected = PeriodIndex(["20160920", "20160925"], freq="D") - idx_diff = index.difference(other, sort) - tm.assert_index_equal(idx_diff, expected) - tm.assert_attr_equal("freq", idx_diff, expected) - - other = period_range("20160922", "20160925", freq="D") - idx_diff = index.difference(other, sort) - expected = PeriodIndex(["20160920", "20160921"], freq="D") - tm.assert_index_equal(idx_diff, expected) - tm.assert_attr_equal("freq", idx_diff, expected) - def test_hash_error(self): index = period_range("20010101", periods=10) msg = f"unhashable type: '{type(index).__name__}'" diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index 03e4bd5834166..173d61849b126 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -353,3 +353,22 @@ def test_difference(self, sort): if sort is None: expected = expected.sort_values() tm.assert_index_equal(result_difference, expected) + + @pytest.mark.parametrize("sort", [None, False]) + def test_difference_freq(self, sort): + # GH14323: difference of Period MUST preserve frequency + # but the ability to union results must be preserved + + index = period_range("20160920", "20160925", freq="D") + + other = period_range("20160921", "20160924", freq="D") + expected = PeriodIndex(["20160920", "20160925"], freq="D") + idx_diff = index.difference(other, sort) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = period_range("20160922", "20160925", freq="D") + idx_diff = index.difference(other, sort) + expected = PeriodIndex(["20160920", "20160921"], freq="D") + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index db0cc9828e9e9..f7abdf53e0975 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -1,5 +1,3 @@ -from datetime import datetime, timedelta - import 
numpy as np import pytest @@ -464,176 +462,6 @@ def test_join_self(self, join_type): joined = index.join(index, how=join_type) assert index is joined - @pytest.mark.parametrize("sort", [None, False]) - def test_intersection(self, sort): - # intersect with Int64Index - index = self.create_index() - other = Index(np.arange(1, 6)) - result = index.intersection(other, sort=sort) - expected = Index(np.sort(np.intersect1d(index.values, other.values))) - tm.assert_index_equal(result, expected) - - result = other.intersection(index, sort=sort) - expected = Index( - np.sort(np.asarray(np.intersect1d(index.values, other.values))) - ) - tm.assert_index_equal(result, expected) - - # intersect with increasing RangeIndex - other = RangeIndex(1, 6) - result = index.intersection(other, sort=sort) - expected = Index(np.sort(np.intersect1d(index.values, other.values))) - tm.assert_index_equal(result, expected) - - # intersect with decreasing RangeIndex - other = RangeIndex(5, 0, -1) - result = index.intersection(other, sort=sort) - expected = Index(np.sort(np.intersect1d(index.values, other.values))) - tm.assert_index_equal(result, expected) - - # reversed (GH 17296) - result = other.intersection(index, sort=sort) - tm.assert_index_equal(result, expected) - - # GH 17296: intersect two decreasing RangeIndexes - first = RangeIndex(10, -2, -2) - other = RangeIndex(5, -4, -1) - expected = first.astype(int).intersection(other.astype(int), sort=sort) - result = first.intersection(other, sort=sort).astype(int) - tm.assert_index_equal(result, expected) - - # reversed - result = other.intersection(first, sort=sort).astype(int) - tm.assert_index_equal(result, expected) - - index = RangeIndex(5) - - # intersect of non-overlapping indices - other = RangeIndex(5, 10, 1) - result = index.intersection(other, sort=sort) - expected = RangeIndex(0, 0, 1) - tm.assert_index_equal(result, expected) - - other = RangeIndex(-1, -5, -1) - result = index.intersection(other, sort=sort) - expected = RangeIndex(0, 0, 1) - tm.assert_index_equal(result, expected) - - # intersection of empty indices - other = RangeIndex(0, 0, 1) - result = index.intersection(other, sort=sort) - expected = RangeIndex(0, 0, 1) - tm.assert_index_equal(result, expected) - - result = other.intersection(index, sort=sort) - tm.assert_index_equal(result, expected) - - # intersection of non-overlapping values based on start value and gcd - index = RangeIndex(1, 10, 2) - other = RangeIndex(0, 10, 4) - result = index.intersection(other, sort=sort) - expected = RangeIndex(0, 0, 1) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("sort", [False, None]) - def test_union_noncomparable(self, sort): - # corner case, non-Int64Index - index = self.create_index() - other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) - result = index.union(other, sort=sort) - expected = Index(np.concatenate((index, other))) - tm.assert_index_equal(result, expected) - - result = other.union(index, sort=sort) - expected = Index(np.concatenate((other, index))) - tm.assert_index_equal(result, expected) - - @pytest.fixture( - params=[ - (RI(0, 10, 1), RI(0, 10, 1), RI(0, 10, 1), RI(0, 10, 1)), - (RI(0, 10, 1), RI(5, 20, 1), RI(0, 20, 1), I64(range(20))), - (RI(0, 10, 1), RI(10, 20, 1), RI(0, 20, 1), I64(range(20))), - (RI(0, -10, -1), RI(0, -10, -1), RI(0, -10, -1), RI(0, -10, -1)), - (RI(0, -10, -1), RI(-10, -20, -1), RI(-19, 1, 1), I64(range(0, -20, -1))), - ( - RI(0, 10, 2), - RI(1, 10, 2), - RI(0, 10, 1), - I64(list(range(0, 10, 2)) + 
list(range(1, 10, 2))), - ), - ( - RI(0, 11, 2), - RI(1, 12, 2), - RI(0, 12, 1), - I64(list(range(0, 11, 2)) + list(range(1, 12, 2))), - ), - ( - RI(0, 21, 4), - RI(-2, 24, 4), - RI(-2, 24, 2), - I64(list(range(0, 21, 4)) + list(range(-2, 24, 4))), - ), - ( - RI(0, -20, -2), - RI(-1, -21, -2), - RI(-19, 1, 1), - I64(list(range(0, -20, -2)) + list(range(-1, -21, -2))), - ), - (RI(0, 100, 5), RI(0, 100, 20), RI(0, 100, 5), I64(range(0, 100, 5))), - ( - RI(0, -100, -5), - RI(5, -100, -20), - RI(-95, 10, 5), - I64(list(range(0, -100, -5)) + [5]), - ), - ( - RI(0, -11, -1), - RI(1, -12, -4), - RI(-11, 2, 1), - I64(list(range(0, -11, -1)) + [1, -11]), - ), - (RI(0), RI(0), RI(0), RI(0)), - (RI(0, -10, -2), RI(0), RI(0, -10, -2), RI(0, -10, -2)), - (RI(0, 100, 2), RI(100, 150, 200), RI(0, 102, 2), I64(range(0, 102, 2))), - ( - RI(0, -100, -2), - RI(-100, 50, 102), - RI(-100, 4, 2), - I64(list(range(0, -100, -2)) + [-100, 2]), - ), - ( - RI(0, -100, -1), - RI(0, -50, -3), - RI(-99, 1, 1), - I64(list(range(0, -100, -1))), - ), - (RI(0, 1, 1), RI(5, 6, 10), RI(0, 6, 5), I64([0, 5])), - (RI(0, 10, 5), RI(-5, -6, -20), RI(-5, 10, 5), I64([0, 5, -5])), - (RI(0, 3, 1), RI(4, 5, 1), I64([0, 1, 2, 4]), I64([0, 1, 2, 4])), - (RI(0, 10, 1), I64([]), RI(0, 10, 1), RI(0, 10, 1)), - (RI(0), I64([1, 5, 6]), I64([1, 5, 6]), I64([1, 5, 6])), - ] - ) - def unions(self, request): - """Inputs and expected outputs for RangeIndex.union tests""" - - return request.param - - def test_union_sorted(self, unions): - - idx1, idx2, expected_sorted, expected_notsorted = unions - - res1 = idx1.union(idx2, sort=None) - tm.assert_index_equal(res1, expected_sorted, exact=True) - - res1 = idx1.union(idx2, sort=False) - tm.assert_index_equal(res1, expected_notsorted, exact=True) - - res2 = idx2.union(idx1, sort=None) - res3 = idx1._int64index.union(idx2, sort=None) - tm.assert_index_equal(res2, expected_sorted, exact=True) - tm.assert_index_equal(res3, expected_sorted) - def test_nbytes(self): # memory savings vs int index diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py new file mode 100644 index 0000000000000..5c1e461c9fcf0 --- /dev/null +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -0,0 +1,244 @@ +from datetime import datetime, timedelta + +import numpy as np +import pytest + +from pandas import Index, Int64Index, RangeIndex +import pandas.util.testing as tm + + +class TestRangeIndexSetOps: + @pytest.mark.parametrize("sort", [None, False]) + def test_intersection(self, sort): + # intersect with Int64Index + index = RangeIndex(start=0, stop=20, step=2) + other = Index(np.arange(1, 6)) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected) + + result = other.intersection(index, sort=sort) + expected = Index( + np.sort(np.asarray(np.intersect1d(index.values, other.values))) + ) + tm.assert_index_equal(result, expected) + + # intersect with increasing RangeIndex + other = RangeIndex(1, 6) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected) + + # intersect with decreasing RangeIndex + other = RangeIndex(5, 0, -1) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected) + + # reversed (GH 17296) + result = other.intersection(index, sort=sort) + 
tm.assert_index_equal(result, expected) + + # GH 17296: intersect two decreasing RangeIndexes + first = RangeIndex(10, -2, -2) + other = RangeIndex(5, -4, -1) + expected = first.astype(int).intersection(other.astype(int), sort=sort) + result = first.intersection(other, sort=sort).astype(int) + tm.assert_index_equal(result, expected) + + # reversed + result = other.intersection(first, sort=sort).astype(int) + tm.assert_index_equal(result, expected) + + index = RangeIndex(5) + + # intersect of non-overlapping indices + other = RangeIndex(5, 10, 1) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected) + + other = RangeIndex(-1, -5, -1) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected) + + # intersection of empty indices + other = RangeIndex(0, 0, 1) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected) + + result = other.intersection(index, sort=sort) + tm.assert_index_equal(result, expected) + + # intersection of non-overlapping values based on start value and gcd + index = RangeIndex(1, 10, 2) + other = RangeIndex(0, 10, 4) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("sort", [False, None]) + def test_union_noncomparable(self, sort): + # corner case, non-Int64Index + index = RangeIndex(start=0, stop=20, step=2) + other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) + result = index.union(other, sort=sort) + expected = Index(np.concatenate((index, other))) + tm.assert_index_equal(result, expected) + + result = other.union(index, sort=sort) + expected = Index(np.concatenate((other, index))) + tm.assert_index_equal(result, expected) + + @pytest.fixture( + params=[ + ( + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + ), + ( + RangeIndex(0, 10, 1), + RangeIndex(5, 20, 1), + RangeIndex(0, 20, 1), + Int64Index(range(20)), + ), + ( + RangeIndex(0, 10, 1), + RangeIndex(10, 20, 1), + RangeIndex(0, 20, 1), + Int64Index(range(20)), + ), + ( + RangeIndex(0, -10, -1), + RangeIndex(0, -10, -1), + RangeIndex(0, -10, -1), + RangeIndex(0, -10, -1), + ), + ( + RangeIndex(0, -10, -1), + RangeIndex(-10, -20, -1), + RangeIndex(-19, 1, 1), + Int64Index(range(0, -20, -1)), + ), + ( + RangeIndex(0, 10, 2), + RangeIndex(1, 10, 2), + RangeIndex(0, 10, 1), + Int64Index(list(range(0, 10, 2)) + list(range(1, 10, 2))), + ), + ( + RangeIndex(0, 11, 2), + RangeIndex(1, 12, 2), + RangeIndex(0, 12, 1), + Int64Index(list(range(0, 11, 2)) + list(range(1, 12, 2))), + ), + ( + RangeIndex(0, 21, 4), + RangeIndex(-2, 24, 4), + RangeIndex(-2, 24, 2), + Int64Index(list(range(0, 21, 4)) + list(range(-2, 24, 4))), + ), + ( + RangeIndex(0, -20, -2), + RangeIndex(-1, -21, -2), + RangeIndex(-19, 1, 1), + Int64Index(list(range(0, -20, -2)) + list(range(-1, -21, -2))), + ), + ( + RangeIndex(0, 100, 5), + RangeIndex(0, 100, 20), + RangeIndex(0, 100, 5), + Int64Index(range(0, 100, 5)), + ), + ( + RangeIndex(0, -100, -5), + RangeIndex(5, -100, -20), + RangeIndex(-95, 10, 5), + Int64Index(list(range(0, -100, -5)) + [5]), + ), + ( + RangeIndex(0, -11, -1), + RangeIndex(1, -12, -4), + RangeIndex(-11, 2, 1), + Int64Index(list(range(0, -11, -1)) + [1, -11]), + ), + (RangeIndex(0), RangeIndex(0), RangeIndex(0), RangeIndex(0)), + ( + RangeIndex(0, -10, -2), + 
RangeIndex(0), + RangeIndex(0, -10, -2), + RangeIndex(0, -10, -2), + ), + ( + RangeIndex(0, 100, 2), + RangeIndex(100, 150, 200), + RangeIndex(0, 102, 2), + Int64Index(range(0, 102, 2)), + ), + ( + RangeIndex(0, -100, -2), + RangeIndex(-100, 50, 102), + RangeIndex(-100, 4, 2), + Int64Index(list(range(0, -100, -2)) + [-100, 2]), + ), + ( + RangeIndex(0, -100, -1), + RangeIndex(0, -50, -3), + RangeIndex(-99, 1, 1), + Int64Index(list(range(0, -100, -1))), + ), + ( + RangeIndex(0, 1, 1), + RangeIndex(5, 6, 10), + RangeIndex(0, 6, 5), + Int64Index([0, 5]), + ), + ( + RangeIndex(0, 10, 5), + RangeIndex(-5, -6, -20), + RangeIndex(-5, 10, 5), + Int64Index([0, 5, -5]), + ), + ( + RangeIndex(0, 3, 1), + RangeIndex(4, 5, 1), + Int64Index([0, 1, 2, 4]), + Int64Index([0, 1, 2, 4]), + ), + ( + RangeIndex(0, 10, 1), + Int64Index([]), + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + ), + ( + RangeIndex(0), + Int64Index([1, 5, 6]), + Int64Index([1, 5, 6]), + Int64Index([1, 5, 6]), + ), + ] + ) + def unions(self, request): + """Inputs and expected outputs for RangeIndex.union tests""" + + return request.param + + def test_union_sorted(self, unions): + + idx1, idx2, expected_sorted, expected_notsorted = unions + + res1 = idx1.union(idx2, sort=None) + tm.assert_index_equal(res1, expected_sorted, exact=True) + + res1 = idx1.union(idx2, sort=False) + tm.assert_index_equal(res1, expected_notsorted, exact=True) + + res2 = idx2.union(idx1, sort=None) + res3 = idx1._int64index.union(idx2, sort=None) + tm.assert_index_equal(res2, expected_sorted, exact=True) + tm.assert_index_equal(res3, expected_sorted) diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py index bbdd6c8c7c017..34db7ed419ddb 100644 --- a/pandas/tests/indexes/timedeltas/test_setops.py +++ b/pandas/tests/indexes/timedeltas/test_setops.py @@ -179,3 +179,51 @@ def test_intersection_non_monotonic(self, rng, expected, sort): assert isinstance(result.freq, Hour) else: assert result.freq is None + + +class TestTimedeltaIndexDifference: + @pytest.mark.parametrize("sort", [None, False]) + def test_difference_freq(self, sort): + # GH14323: Difference of TimedeltaIndex should not preserve frequency + + index = timedelta_range("0 days", "5 days", freq="D") + + other = timedelta_range("1 days", "4 days", freq="D") + expected = TimedeltaIndex(["0 days", "5 days"], freq=None) + idx_diff = index.difference(other, sort) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = timedelta_range("2 days", "5 days", freq="D") + idx_diff = index.difference(other, sort) + expected = TimedeltaIndex(["0 days", "1 days"], freq=None) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + @pytest.mark.parametrize("sort", [None, False]) + def test_difference_sort(self, sort): + + index = pd.TimedeltaIndex( + ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"] + ) + + other = timedelta_range("1 days", "4 days", freq="D") + idx_diff = index.difference(other, sort) + + expected = TimedeltaIndex(["5 days", "0 days"], freq=None) + + if sort is None: + expected = expected.sort_values() + + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = timedelta_range("2 days", "5 days", freq="D") + idx_diff = index.difference(other, sort) + expected = TimedeltaIndex(["1 days", "0 days"], freq=None) + + if sort is None: + expected = expected.sort_values() + + 
tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 35575f3349f83..fa74ff2d30368 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -57,52 +57,6 @@ def test_fillna_timedelta(self): ) tm.assert_index_equal(idx.fillna("x"), exp) - @pytest.mark.parametrize("sort", [None, False]) - def test_difference_freq(self, sort): - # GH14323: Difference of TimedeltaIndex should not preserve frequency - - index = timedelta_range("0 days", "5 days", freq="D") - - other = timedelta_range("1 days", "4 days", freq="D") - expected = TimedeltaIndex(["0 days", "5 days"], freq=None) - idx_diff = index.difference(other, sort) - tm.assert_index_equal(idx_diff, expected) - tm.assert_attr_equal("freq", idx_diff, expected) - - other = timedelta_range("2 days", "5 days", freq="D") - idx_diff = index.difference(other, sort) - expected = TimedeltaIndex(["0 days", "1 days"], freq=None) - tm.assert_index_equal(idx_diff, expected) - tm.assert_attr_equal("freq", idx_diff, expected) - - @pytest.mark.parametrize("sort", [None, False]) - def test_difference_sort(self, sort): - - index = pd.TimedeltaIndex( - ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"] - ) - - other = timedelta_range("1 days", "4 days", freq="D") - idx_diff = index.difference(other, sort) - - expected = TimedeltaIndex(["5 days", "0 days"], freq=None) - - if sort is None: - expected = expected.sort_values() - - tm.assert_index_equal(idx_diff, expected) - tm.assert_attr_equal("freq", idx_diff, expected) - - other = timedelta_range("2 days", "5 days", freq="D") - idx_diff = index.difference(other, sort) - expected = TimedeltaIndex(["1 days", "0 days"], freq=None) - - if sort is None: - expected = expected.sort_values() - - tm.assert_index_equal(idx_diff, expected) - tm.assert_attr_equal("freq", idx_diff, expected) - def test_isin(self): index = tm.makeTimedeltaIndex(4)
https://api.github.com/repos/pandas-dev/pandas/pulls/30529
2019-12-28T21:52:27Z
2019-12-30T13:15:30Z
2019-12-30T13:15:30Z
2019-12-30T16:23:22Z
CLN: remove kwargs from (DataFrame|Series).fillna
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fc39b264d1598..fac662337b089 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4014,8 +4014,7 @@ def fillna( inplace=False, limit=None, downcast=None, - **kwargs, - ): + ) -> Optional["DataFrame"]: return super().fillna( value=value, method=method, @@ -4023,7 +4022,6 @@ def fillna( inplace=inplace, limit=limit, downcast=downcast, - **kwargs, ) @Appender(_shared_docs["replace"] % _shared_doc_kwargs) diff --git a/pandas/core/series.py b/pandas/core/series.py index 15fc712672717..36e26e088935c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4005,8 +4005,7 @@ def fillna( inplace=False, limit=None, downcast=None, - **kwargs, - ): + ) -> Optional["Series"]: return super().fillna( value=value, method=method, @@ -4014,7 +4013,6 @@ def fillna( inplace=inplace, limit=limit, downcast=downcast, - **kwargs, ) @Appender(generic._shared_docs["replace"] % _shared_doc_kwargs)
`DataFrame.fillna` and `Series.fillna` have the same parameters as `NDFrame.fillna`, so the `**kwargs` is not needed. This also makes the return type annotation more specific.
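A small, hedged illustration (the misspelled keyword is hypothetical): with `**kwargs` removed, a stray keyword is rejected directly at the `Series.fillna`/`DataFrame.fillna` signature rather than being forwarded to the parent method.

```python
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])

s.fillna(value=0.0, limit=None)  # supported keywords pass through as before

try:
    s.fillna(0.0, metod="pad")  # hypothetical typo of "method"
except TypeError as err:
    print(err)  # fillna() got an unexpected keyword argument 'metod'
```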
https://api.github.com/repos/pandas-dev/pandas/pulls/30528
2019-12-28T16:30:52Z
2019-12-30T13:16:01Z
2019-12-30T13:16:01Z
2019-12-30T13:24:08Z
BUG: pct_change returns wrong result when there are duplicated indices
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a6ba7770dadcc..c1a5c452e1df4 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -765,6 +765,7 @@ Numeric - Bug in :class:`NumericIndex` construction that caused :class:`UInt64Index` to be casted to :class:`Float64Index` when integers in the ``np.uint64`` range were used to index a :class:`DataFrame` (:issue:`28279`) - Bug in :meth:`Series.interpolate` when using method=`index` with an unsorted index, would previously return incorrect results. (:issue:`21037`) - Bug in :meth:`DataFrame.round` where a :class:`DataFrame` with a :class:`CategoricalIndex` of :class:`IntervalIndex` columns would incorrectly raise a ``TypeError`` (:issue:`30063`) +- Bug in :meth:`Series.pct_change` and :meth:`DataFrame.pct_change` when there are duplicated indices (:issue:`30463`) - Bug in :class:`DataFrame` cumulative operations (e.g. cumsum, cummax) incorrect casting to object-dtype (:issue:`19296`) Conversion diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b896721469f1f..55dcc81139276 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9901,11 +9901,11 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwar data = self.fillna(method=fill_method, limit=limit, axis=axis) rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1 - rs = rs.loc[~rs.index.duplicated()] - rs = rs.reindex_like(data) - if freq is None: - mask = isna(com.values_from_object(data)) - np.putmask(rs.values, mask, np.nan) + if freq is not None: + # Shift method is implemented differently when freq is not None + # We want to restore the original index + rs = rs.loc[~rs.index.duplicated()] + rs = rs.reindex_like(data) return rs def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): diff --git a/pandas/tests/frame/methods/test_pct_change.py b/pandas/tests/frame/methods/test_pct_change.py index 0c15533c37f01..ac13a5e146043 100644 --- a/pandas/tests/frame/methods/test_pct_change.py +++ b/pandas/tests/frame/methods/test_pct_change.py @@ -76,3 +76,21 @@ def test_pct_change_periods_freq( rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit) rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit) tm.assert_frame_equal(rs_freq, rs_periods) + + +@pytest.mark.parametrize("fill_method", ["pad", "ffill", None]) +def test_pct_change_with_duplicated_indices(fill_method): + # GH30463 + data = DataFrame( + {0: [np.nan, 1, 2, 3, 9, 18], 1: [0, 1, np.nan, 3, 9, 18]}, index=["a", "b"] * 3 + ) + result = data.pct_change(fill_method=fill_method) + if fill_method is None: + second_column = [np.nan, np.inf, np.nan, np.nan, 2.0, 1.0] + else: + second_column = [np.nan, np.inf, 0.0, 2.0, 2.0, 1.0] + expected = DataFrame( + {0: [np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], 1: second_column}, + index=["a", "b"] * 3, + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_pct_change.py b/pandas/tests/series/methods/test_pct_change.py index abc5c498813ef..aa01543132841 100644 --- a/pandas/tests/series/methods/test_pct_change.py +++ b/pandas/tests/series/methods/test_pct_change.py @@ -68,3 +68,12 @@ def test_pct_change_periods_freq( rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit) rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit) tm.assert_series_equal(rs_freq, rs_periods) + + +@pytest.mark.parametrize("fill_method", ["pad", 
"ffill", None]) +def test_pct_change_with_duplicated_indices(fill_method): + # GH30463 + s = Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3) + result = s.pct_change(fill_method=fill_method) + expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3) + tm.assert_series_equal(result, expected)
- [x] closes #30463 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
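Condensed from the PR's new `Series` test, a quick check of the fixed behavior:

```python
import numpy as np
import pandas as pd

s = pd.Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3)

# With duplicated labels, the old reindex-based path produced wrong
# values; shifting positionally gives the expected result:
result = s.pct_change()
expected = pd.Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3)
pd.testing.assert_series_equal(result, expected)
```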
https://api.github.com/repos/pandas-dev/pandas/pulls/30526
2019-12-28T06:53:53Z
2020-01-02T18:09:58Z
2020-01-02T18:09:57Z
2020-01-03T11:41:19Z
WARN: Ignore NumbaPerformanceWarning in test suite
diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 66e4d4e2e7145..2fbf05f678431 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -8,6 +8,8 @@ @td.skip_if_no("numba", "0.46.0") +@pytest.mark.filterwarnings("ignore:\\nThe keyword argument") +# Filter warnings when parallel=True and the function can't be parallelized by Numba class TestApply: @pytest.mark.parametrize("jit", [True, False]) def test_numba_vs_cython(self, jit, nogil, parallel, nopython):
Followup to #30151; ignoring `NumbaPerformanceWarning` raised in the test suite when `parallel=True` while the function being tested cannot be parallelized. This behavior is fully intended.
https://api.github.com/repos/pandas-dev/pandas/pulls/30525
2019-12-28T04:30:23Z
2019-12-28T16:46:31Z
2019-12-28T16:46:31Z
2019-12-28T18:27:57Z
TST: tests for preserving views
diff --git a/pandas/tests/arrays/test_numpy.py b/pandas/tests/arrays/test_numpy.py index 7a150c35fea09..8828a013aeea1 100644 --- a/pandas/tests/arrays/test_numpy.py +++ b/pandas/tests/arrays/test_numpy.py @@ -226,3 +226,25 @@ def test_setitem_no_coercion(): arr = PandasArray(np.array([1, 2, 3])) with pytest.raises(ValueError, match="int"): arr[0] = "a" + + # With a value that we do coerce, check that we coerce the value + # and not the underlying array. + arr[0] = 2.5 + assert isinstance(arr[0], (int, np.integer)), type(arr[0]) + + +def test_setitem_preserves_views(): + # GH#28150, see also extension test of the same name + arr = PandasArray(np.array([1, 2, 3])) + view1 = arr.view() + view2 = arr[:] + view3 = np.asarray(arr) + + arr[0] = 9 + assert view1[0] == 9 + assert view2[0] == 9 + assert view3[0] == 9 + + arr[-1] = 2.5 + view1[-1] = 5 + assert arr[-1] == 5 diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index bb6bb02b462e2..7d50f176edd67 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -186,3 +186,14 @@ def test_setitem_scalar_key_sequence_raise(self, data): arr = data[:5].copy() with pytest.raises(ValueError): arr[0] = arr[[0, 1]] + + def test_setitem_preserves_views(self, data): + # GH#28150 setitem shouldn't swap the underlying data + assert data[-1] != data[0] # otherwise test would not be meaningful + + view1 = data.view() + view2 = data[:] + + data[0] = data[-1] + assert view1[0] == data[-1] + assert view2[0] == data[-1] diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index 4fdcf930d224f..2411f6cfbd936 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -147,7 +147,9 @@ class TestReshaping(BaseInterval, base.BaseReshapingTests): class TestSetitem(BaseInterval, base.BaseSetitemTests): - pass + @pytest.mark.xfail(reason="GH#27147 setitem changes underlying index") + def test_setitem_preserves_views(self, data): + super().test_setitem_preserves_views(data) class TestPrinting(BaseInterval, base.BasePrintingTests):
- [x] xref #28150 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
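A condensed sketch of the behavior the new `PandasArray` test locks in (import path as of this pandas version): views over the array must keep sharing the underlying buffer after `__setitem__`.

```python
import numpy as np
from pandas.arrays import PandasArray

arr = PandasArray(np.array([1, 2, 3]))
view1 = arr.view()
view2 = arr[:]  # slicing shares the same underlying ndarray

arr[0] = 9      # setitem must write in place, not swap the buffer
assert view1[0] == 9
assert view2[0] == 9
```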
https://api.github.com/repos/pandas-dev/pandas/pulls/30523
2019-12-28T00:53:11Z
2019-12-30T13:17:03Z
2019-12-30T13:17:03Z
2019-12-30T16:26:30Z
POC/TST: dynamic xfail
diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index c46180c1d11cd..dde71ee511526 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -765,9 +765,12 @@ def test_transform_with_non_scalar_group(): ], ) @pytest.mark.parametrize("agg_func", ["count", "rank", "size"]) -def test_transform_numeric_ret(cols, exp, comp_func, agg_func): +def test_transform_numeric_ret(cols, exp, comp_func, agg_func, request): if agg_func == "size" and isinstance(cols, list): - pytest.xfail("'size' transformation not supported with NDFrameGroupy") + # https://github.com/pytest-dev/pytest/issues/6300 + # workaround to xfail fixture/param permutations + reason = "'size' transformation not supported with NDFrameGroupy" + request.node.add_marker(pytest.mark.xfail(reason=reason)) # GH 19200 df = pd.DataFrame(
Motivation: we have a non-trivial number of places where we do `pytest.xfail` within a test (as opposed to via decorator). This has the downside of always being non-strict, so incorrectly-xfailed tests will never show up as xpassing. This uses a workaround to dynamically add the pytest.mark.xfail marker within the test rather than raising pytest.xfail directly. Thoughts?
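A minimal, standalone sketch of the pattern (the test name and reason are invented): mark the running test item via the built-in `request` fixture instead of calling `pytest.xfail()`, so the xfail can participate in strict-xfail accounting.

```python
import pytest

@pytest.mark.parametrize("n", [1, 2])
def test_example(n, request):
    if n == 2:
        # Dynamically attach the marker to this parametrization only;
        # unlike pytest.xfail(), an xpass here can still be reported.
        request.node.add_marker(pytest.mark.xfail(reason="known issue for n=2"))
    assert n == 1
```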
https://api.github.com/repos/pandas-dev/pandas/pulls/30521
2019-12-28T00:07:25Z
2019-12-30T13:18:55Z
2019-12-30T13:18:55Z
2019-12-30T16:25:05Z
ENH: add support for reading binary Excel files (#15914)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 1e27496be3e12..b86c99839f4b2 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -900,6 +900,7 @@ I/O - Bug in :class:`PythonParser` where str and bytes were being mixed when dealing with the decimal field (:issue:`29650`) - :meth:`read_gbq` now accepts ``progress_bar_type`` to display progress bar while the data downloads. (:issue:`29857`) - Bug in :func:`pandas.io.json.json_normalize` where a missing value in the location specified by `record_path` would raise a ``TypeError`` (:issue:`30148`) +- :func:`read_excel` now accepts binary data (:issue:`15914`) Plotting ^^^^^^^^ diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index fe13fce83161d..c3adb0a34b9b2 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -40,7 +40,7 @@ Parameters ---------- -io : str, ExcelFile, xlrd.Book, path object or file-like object +io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.xlsx``. @@ -350,6 +350,8 @@ def __init__(self, filepath_or_buffer): self.book = self.load_workbook(filepath_or_buffer) elif isinstance(filepath_or_buffer, str): self.book = self.load_workbook(filepath_or_buffer) + elif isinstance(filepath_or_buffer, bytes): + self.book = self.load_workbook(BytesIO(filepath_or_buffer)) else: raise ValueError( "Must explicitly set engine if not passing in buffer or path for io." diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 6add99858da68..4f731eb2b272c 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -988,3 +988,13 @@ def test_conflicting_excel_engines(self, read_ext): with pd.ExcelFile("test1" + read_ext) as xl: with pytest.raises(ValueError, match=msg): pd.read_excel(xl, engine="foo") + + def test_excel_read_binary(self, engine, read_ext): + # GH 15914 + expected = pd.read_excel("test1" + read_ext, engine=engine) + + with open("test1" + read_ext, "rb") as f: + data = f.read() + + actual = pd.read_excel(data, engine=engine) + tm.assert_frame_equal(expected, actual)
Hi, this is a quick fix for #15914. After reading the issue I'm not sure if this is meant to be supported, though; please let me know! I ran into this issue when trying to pass a binary file downloaded with `requests` to `read_excel`, so I think it would make sense to support this natively, without the user having to figure out that they need to call `io.BytesIO` first. If so, do we need to update other docstrings? Thank you! - [x] closes #15914 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
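For example, the `requests` workflow described above would reduce to the sketch below (the URL is a placeholder):

```python
import pandas as pd
import requests

resp = requests.get("https://example.com/report.xlsx")
resp.raise_for_status()

# Bytes are now accepted directly; no io.BytesIO wrapper needed.
df = pd.read_excel(resp.content)
```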
https://api.github.com/repos/pandas-dev/pandas/pulls/30519
2019-12-27T22:46:28Z
2020-01-03T11:55:36Z
2020-01-03T11:55:36Z
2020-01-03T11:58:55Z
DEPR: tz-aware Series/DatetimeIndex.__array__ returns an object array of Timestamps
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 7948b3bf2fd2f..db1dce083b7ad 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -652,6 +652,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - Removed the previously deprecated keyword "data" from :func:`parallel_coordinates`, use "frame" instead (:issue:`6956`) - Removed the previously deprecated keyword "colors" from :func:`parallel_coordinates`, use "color" instead (:issue:`6956`) - Removed the previously deprecated keywords "verbose" and "private_key" from :func:`read_gbq` (:issue:`30200`) +- Calling ``np.array`` and ``np.asarray`` on tz-aware :class:`Series` and :class:`DatetimeIndex` will now return an object array of tz-aware :class:`Timestamp` (:issue:`24596`) - .. _whatsnew_1000.performance: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index ddc85ee41c1cf..88145f1870955 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -310,21 +310,6 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): # -------------------------------------------------------------------- def __array__(self, dtype=None): - if ( - dtype is None - and isinstance(self._data, DatetimeArray) - and getattr(self.dtype, "tz", None) - ): - msg = ( - "Converting timezone-aware DatetimeArray to timezone-naive " - "ndarray with 'datetime64[ns]' dtype. In the future, this " - "will return an ndarray with 'object' dtype where each " - "element is a 'pandas.Timestamp' with the correct 'tz'.\n\t" - "To accept the future behavior, pass 'dtype=object'.\n\t" - "To keep the old behavior, pass 'dtype=\"datetime64[ns]\"'." - ) - warnings.warn(msg, FutureWarning, stacklevel=3) - dtype = "M8[ns]" return np.asarray(self._data, dtype=dtype) @property diff --git a/pandas/core/series.py b/pandas/core/series.py index 14826e0a1d5a4..bc45a0e585241 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -33,7 +33,6 @@ ) from pandas.core.dtypes.generic import ( ABCDataFrame, - ABCDatetimeArray, ABCDatetimeIndex, ABCSeries, ABCSparseArray, @@ -740,21 +739,6 @@ def __array__(self, dtype=None): array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ - if ( - dtype is None - and isinstance(self.array, ABCDatetimeArray) - and getattr(self.dtype, "tz", None) - ): - msg = ( - "Converting timezone-aware DatetimeArray to timezone-naive " - "ndarray with 'datetime64[ns]' dtype. In the future, this " - "will return an ndarray with 'object' dtype where each " - "element is a 'pandas.Timestamp' with the correct 'tz'.\n\t" - "To accept the future behavior, pass 'dtype=object'.\n\t" - "To keep the old behavior, pass 'dtype=\"datetime64[ns]\"'." 
- ) - warnings.warn(msg, FutureWarning, stacklevel=3) - dtype = "M8[ns]" return np.asarray(self.array, dtype) # ---------------------------------------------------------------------- diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 5e7c6e4b48682..b1934ecbbceec 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -1,6 +1,5 @@ from datetime import datetime from decimal import Decimal -from warnings import catch_warnings, filterwarnings import numpy as np import pytest @@ -315,23 +314,21 @@ def test_array_equivalent(): assert not array_equivalent( TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]) ) - with catch_warnings(): - filterwarnings("ignore", "Converting timezone", FutureWarning) - assert array_equivalent( - DatetimeIndex([0, np.nan], tz="US/Eastern"), - DatetimeIndex([0, np.nan], tz="US/Eastern"), - ) - assert not array_equivalent( - DatetimeIndex([0, np.nan], tz="US/Eastern"), - DatetimeIndex([1, np.nan], tz="US/Eastern"), - ) - assert not array_equivalent( - DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern") - ) - assert not array_equivalent( - DatetimeIndex([0, np.nan], tz="CET"), - DatetimeIndex([0, np.nan], tz="US/Eastern"), - ) + assert array_equivalent( + DatetimeIndex([0, np.nan], tz="US/Eastern"), + DatetimeIndex([0, np.nan], tz="US/Eastern"), + ) + assert not array_equivalent( + DatetimeIndex([0, np.nan], tz="US/Eastern"), + DatetimeIndex([1, np.nan], tz="US/Eastern"), + ) + assert not array_equivalent( + DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern") + ) + assert not array_equivalent( + DatetimeIndex([0, np.nan], tz="CET"), + DatetimeIndex([0, np.nan], tz="US/Eastern"), + ) assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py index 270a7c70a2e81..ff887fbddf145 100644 --- a/pandas/tests/generic/test_frame.py +++ b/pandas/tests/generic/test_frame.py @@ -222,11 +222,10 @@ def test_to_xarray_index_types(self, index): # idempotency # categoricals are not preserved - # datetimes w/tz are not preserved + # datetimes w/tz are preserved # column names are lost expected = df.copy() expected["f"] = expected["f"].astype(object) - expected["h"] = expected["h"].astype("datetime64[ns]") expected.columns.name = None tm.assert_frame_equal( result.to_dataframe(), @@ -271,7 +270,6 @@ def test_to_xarray(self): result = result.to_dataframe() expected = df.copy() expected["f"] = expected["f"].astype(object) - expected["h"] = expected["h"].astype("datetime64[ns]") expected.columns.name = None tm.assert_frame_equal(result, expected, check_index_type=False) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 03b9502be2735..2ccebe426e024 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -393,15 +393,13 @@ def test_asarray_tz_naive(self): # This shouldn't produce a warning. 
idx = pd.date_range("2000", periods=2) # M8[ns] by default - with tm.assert_produces_warning(None): - result = np.asarray(idx) + result = np.asarray(idx) expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) # optionally, object - with tm.assert_produces_warning(None): - result = np.asarray(idx, dtype=object) + result = np.asarray(idx, dtype=object) expected = np.array([pd.Timestamp("2000-01-01"), pd.Timestamp("2000-01-02")]) tm.assert_numpy_array_equal(result, expected) @@ -410,15 +408,12 @@ def test_asarray_tz_aware(self): tz = "US/Central" idx = pd.date_range("2000", periods=2, tz=tz) expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]") - # We warn by default and return an ndarray[M8[ns]] - with tm.assert_produces_warning(FutureWarning): - result = np.asarray(idx) + result = np.asarray(idx, dtype="datetime64[ns]") tm.assert_numpy_array_equal(result, expected) # Old behavior with no warning - with tm.assert_produces_warning(None): - result = np.asarray(idx, dtype="M8[ns]") + result = np.asarray(idx, dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) @@ -426,8 +421,7 @@ def test_asarray_tz_aware(self): expected = np.array( [pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)] ) - with tm.assert_produces_warning(None): - result = np.asarray(idx, dtype=object) + result = np.asarray(idx, dtype=object) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index c3e5e8b975cda..b317a2ee6f018 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -731,14 +731,12 @@ def test_asarray_tz_naive(self): # This shouldn't produce a warning. ser = pd.Series(pd.date_range("2000", periods=2)) expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") - with tm.assert_produces_warning(None): - result = np.asarray(ser) + result = np.asarray(ser) tm.assert_numpy_array_equal(result, expected) # optionally, object - with tm.assert_produces_warning(None): - result = np.asarray(ser, dtype=object) + result = np.asarray(ser, dtype=object) expected = np.array([pd.Timestamp("2000-01-01"), pd.Timestamp("2000-01-02")]) tm.assert_numpy_array_equal(result, expected) @@ -747,15 +745,12 @@ def test_asarray_tz_aware(self): tz = "US/Central" ser = pd.Series(pd.date_range("2000", periods=2, tz=tz)) expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]") - # We warn by default and return an ndarray[M8[ns]] - with tm.assert_produces_warning(FutureWarning): - result = np.asarray(ser) + result = np.asarray(ser, dtype="datetime64[ns]") tm.assert_numpy_array_equal(result, expected) # Old behavior with no warning - with tm.assert_produces_warning(None): - result = np.asarray(ser, dtype="M8[ns]") + result = np.asarray(ser, dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) @@ -763,7 +758,6 @@ def test_asarray_tz_aware(self): expected = np.array( [pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)] ) - with tm.assert_produces_warning(None): - result = np.asarray(ser, dtype=object) + result = np.asarray(ser, dtype=object) tm.assert_numpy_array_equal(result, expected)
- [x] closes #24716 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
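Condensed from the updated tests, the now-enforced behavior looks like this:

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2000", periods=2, tz="US/Central")

np.asarray(idx)                           # object array of tz-aware Timestamps
np.asarray(idx, dtype="datetime64[ns]")   # old tz-naive values, now opt-in
```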
https://api.github.com/repos/pandas-dev/pandas/pulls/30516
2019-12-27T19:17:05Z
2020-01-03T01:17:46Z
2020-01-03T01:17:45Z
2020-01-03T05:23:39Z
BUG: List indexer on PeriodIndex doesn't coerce strings
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 47f67e9c2a4b3..6eedf9dee5266 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -635,6 +635,7 @@ Indexing - Bug in :meth:`Series.__setitem__` with an :class:`IntervalIndex` and a list-like key of integers (:issue:`33473`) - Bug in :meth:`Series.__getitem__` allowing missing labels with ``np.ndarray``, :class:`Index`, :class:`Series` indexers but not ``list``, these now all raise ``KeyError`` (:issue:`33646`) - Bug in :meth:`DataFrame.truncate` and :meth:`Series.truncate` where index was assumed to be monotone increasing (:issue:`33756`) +- Indexing with a list of strings representing datetimes failed on :class:`DatetimeIndex` or :class:`PeriodIndex`(:issue:`11278`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index e26dc5b9e4fb3..a12d5b64bb06c 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -29,6 +29,8 @@ from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin from pandas.core.base import IndexOpsMixin +import pandas.core.common as com +from pandas.core.construction import array as pd_array, extract_array import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.extension import ( @@ -39,6 +41,7 @@ from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import get_op_result_name from pandas.core.sorting import ensure_key_mapped +from pandas.core.tools.datetimes import DateParseError from pandas.core.tools.timedeltas import to_timedelta from pandas.tseries.offsets import DateOffset, Tick @@ -573,6 +576,22 @@ def _wrap_joined_index(self, joined: np.ndarray, other): return type(self)._simple_new(new_data, name=name) + @doc(Index._convert_arr_indexer) + def _convert_arr_indexer(self, keyarr): + if lib.infer_dtype(keyarr) == "string": + # Weak reasoning that indexer is a list of strings + # representing datetime or timedelta or period + try: + extension_arr = pd_array(keyarr, self.dtype) + except (ValueError, DateParseError): + # Fail to infer keyarr from self.dtype + return keyarr + + converted_arr = extract_array(extension_arr, extract_numpy=True) + else: + converted_arr = com.asarray_tuplesafe(keyarr) + return converted_arr + class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index): """ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 5752f00ca5a18..e51ec33ba8519 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1228,11 +1228,13 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): - self._validate_read_indexer(key, indexer, axis, raise_missing=raise_missing) + self._validate_read_indexer( + keyarr, indexer, axis, raise_missing=raise_missing + ) return ax[indexer], indexer if ax.is_unique and not getattr(ax, "is_overlapping", False): - indexer = ax.get_indexer_for(key) + indexer = ax.get_indexer_for(keyarr) keyarr = ax.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index a1bd6fed32cad..513ca039366cb 100644 --- a/pandas/tests/indexing/test_partial.py +++ 
b/pandas/tests/indexing/test_partial.py @@ -8,7 +8,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Index, Series, date_range +from pandas import DataFrame, Index, Period, Series, Timestamp, date_range, period_range import pandas._testing as tm @@ -535,3 +535,118 @@ def test_partial_set_empty_frame_empty_consistencies(self): df.loc[0, "x"] = 1 expected = DataFrame(dict(x=[1], y=[np.nan])) tm.assert_frame_equal(df, expected, check_dtype=False) + + @pytest.mark.parametrize( + "idx,labels,expected_idx", + [ + ( + period_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-08", "2000-01-12"], + [ + Period("2000-01-04", freq="D"), + Period("2000-01-08", freq="D"), + Period("2000-01-12", freq="D"), + ], + ), + ( + date_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-08", "2000-01-12"], + [ + Timestamp("2000-01-04", freq="D"), + Timestamp("2000-01-08", freq="D"), + Timestamp("2000-01-12", freq="D"), + ], + ), + ( + pd.timedelta_range(start="1 day", periods=20), + ["4D", "8D", "12D"], + [pd.Timedelta("4 day"), pd.Timedelta("8 day"), pd.Timedelta("12 day")], + ), + ], + ) + def test_loc_with_list_of_strings_representing_datetimes( + self, idx, labels, expected_idx + ): + # GH 11278 + s = Series(range(20), index=idx) + df = DataFrame(range(20), index=idx) + + expected_value = [3, 7, 11] + expected_s = Series(expected_value, expected_idx) + expected_df = DataFrame(expected_value, expected_idx) + + tm.assert_series_equal(expected_s, s.loc[labels]) + tm.assert_series_equal(expected_s, s[labels]) + tm.assert_frame_equal(expected_df, df.loc[labels]) + + @pytest.mark.parametrize( + "idx,labels", + [ + ( + period_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-30"], + ), + ( + date_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-30"], + ), + (pd.timedelta_range(start="1 day", periods=20), ["3 day", "30 day"]), + ], + ) + def test_loc_with_list_of_strings_representing_datetimes_missing_value( + self, idx, labels + ): + # GH 11278 + s = Series(range(20), index=idx) + df = DataFrame(range(20), index=idx) + msg = r"with any missing labels" + + with pytest.raises(KeyError, match=msg): + s.loc[labels] + with pytest.raises(KeyError, match=msg): + s[labels] + with pytest.raises(KeyError, match=msg): + df.loc[labels] + + @pytest.mark.parametrize( + "idx,labels,msg", + [ + ( + period_range(start="2000", periods=20, freq="D"), + ["4D", "8D"], + ( + r"None of \[Index\(\['4D', '8D'\], dtype='object'\)\] " + r"are in the \[index\]" + ), + ), + ( + date_range(start="2000", periods=20, freq="D"), + ["4D", "8D"], + ( + r"None of \[Index\(\['4D', '8D'\], dtype='object'\)\] " + r"are in the \[index\]" + ), + ), + ( + pd.timedelta_range(start="1 day", periods=20), + ["2000-01-04", "2000-01-08"], + ( + r"None of \[Index\(\['2000-01-04', '2000-01-08'\], " + r"dtype='object'\)\] are in the \[index\]" + ), + ), + ], + ) + def test_loc_with_list_of_strings_representing_datetimes_not_matched_type( + self, idx, labels, msg + ): + # GH 11278 + s = Series(range(20), index=idx) + df = DataFrame(range(20), index=idx) + + with pytest.raises(KeyError, match=msg): + s.loc[labels] + with pytest.raises(KeyError, match=msg): + s[labels] + with pytest.raises(KeyError, match=msg): + df.loc[labels]
- [x] closes #11278
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Description: In the single-object indexer case, a datetime string is converted to a datetime-like object, because indexing goes through `.xs` and hence `.get_loc`, which casts the label to the index's type. For a list indexer there was no such type check and cast, so this PR adds them. The bug occurred not only for `Series` but also for `DataFrame`, and for `DatetimeIndex` as well as `PeriodIndex`.
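For illustration, a minimal sketch of the behavior this enables (assuming a build with this patch; the values follow the new test above):

```python
import pandas as pd

# Series indexed by a DatetimeIndex; a single datetime string already worked,
# but a list of datetime strings used to raise KeyError before this change.
s = pd.Series(range(20), index=pd.date_range(start="2000", periods=20, freq="D"))

single = s.loc["2000-01-04"]                                # parsed to Timestamp -> 3
subset = s.loc[["2000-01-04", "2000-01-08", "2000-01-12"]]  # now parsed the same way

print(single)  # 3
print(subset)  # values 3, 7, 11 at the three parsed timestamps
```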
https://api.github.com/repos/pandas-dev/pandas/pulls/30515
2019-12-27T18:38:13Z
2020-05-03T16:09:15Z
2020-05-03T16:09:14Z
2020-05-03T16:49:49Z
DEPR: Remove Series.compress
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 7948b3bf2fd2f..42ff3a8875cd6 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -646,7 +646,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - Changed the default "ordered" argument in :class:`CategoricalDtype` from ``None`` to ``False`` (:issue:`26336`) - :meth:`Series.set_axis` and :meth:`DataFrame.set_axis` now require "labels" as the first argument and "axis" as an optional named parameter (:issue:`30089`) - Removed :func:`to_msgpack`, :func:`read_msgpack`, :meth:`DataFrame.to_msgpack`, :meth:`Series.to_msgpack` (:issue:`27103`) -- +- Removed :meth:`Series.compress` (:issue:`21930`) - Removed the previously deprecated keyword "fill_value" from :meth:`Categorical.fillna`, use "value" instead (:issue:`19269`) - Removed the previously deprecated keyword "data" from :func:`andrews_curves`, use "frame" instead (:issue:`6956`) - Removed the previously deprecated keyword "data" from :func:`parallel_coordinates`, use "frame" instead (:issue:`6956`) diff --git a/pandas/core/series.py b/pandas/core/series.py index 14826e0a1d5a4..15fc712672717 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -510,30 +510,6 @@ def ravel(self, order="C"): """ return self._values.ravel(order=order) - def compress(self, condition, *args, **kwargs): - """ - Return selected slices of an array along given axis as a Series. - - .. deprecated:: 0.24.0 - - Returns - ------- - Series - Series without the slices for which condition is false. - - See Also - -------- - numpy.ndarray.compress - """ - msg = ( - "Series.compress(condition) is deprecated. " - "Use 'Series[condition]' or " - "'np.asarray(series).compress(condition)' instead." - ) - warnings.warn(msg, FutureWarning, stacklevel=2) - nv.validate_compress(args, kwargs) - return self[condition] - def __len__(self) -> int: """ Return the length of the Series. 
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 03f12ea13fdaa..a88043c7777c4 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -11,30 +11,6 @@ class TestSeriesAnalytics: - def test_compress(self): - cond = [True, False, True, False, False] - s = Series([1, -1, 5, 8, 7], index=list("abcde"), name="foo") - expected = Series(s.values.compress(cond), index=list("ac"), name="foo") - with tm.assert_produces_warning(FutureWarning): - result = s.compress(cond) - tm.assert_series_equal(result, expected) - - def test_numpy_compress(self): - cond = [True, False, True, False, False] - s = Series([1, -1, 5, 8, 7], index=list("abcde"), name="foo") - expected = Series(s.values.compress(cond), index=list("ac"), name="foo") - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - tm.assert_series_equal(np.compress(cond, s), expected) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - msg = "the 'axis' parameter is not supported" - with pytest.raises(ValueError, match=msg): - np.compress(cond, s, axis=1) - - msg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=msg): - np.compress(cond, s, out=s) - def test_prod_numpy16_bug(self): s = Series([1.0, 1.0, 1.0], index=range(3)) result = s.prod() diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 7437bb0d6caf3..a187a1362297c 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -465,30 +465,6 @@ def f(x): s = Series(np.random.randn(10)) tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F")) - # compress - # GH 6658 - s = Series([0, 1.0, -1], index=list("abc")) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = np.compress(s > 0, s) - tm.assert_series_equal(result, Series([1.0], index=["b"])) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = np.compress(s < -1, s) - # result empty Index(dtype=object) as the same as original - exp = Series([], dtype="float64", index=Index([], dtype="object")) - tm.assert_series_equal(result, exp) - - s = Series([0, 1.0, -1], index=[0.1, 0.2, 0.3]) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = np.compress(s > 0, s) - tm.assert_series_equal(result, Series([1.0], index=[0.2])) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = np.compress(s < -1, s) - # result empty Float64Index as the same as original - exp = Series([], dtype="float64", index=Index([], dtype="float64")) - tm.assert_series_equal(result, exp) - def test_str_accessor_updates_on_inplace(self): s = pd.Series(list("abc")) s.drop([0], inplace=True)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

As noted in https://github.com/pandas-dev/pandas/issues/6581, it sounds like we are okay with breaking compatibility with `np.compress`.
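For anyone migrating, a minimal sketch of the replacements (taken from the wording of the removed deprecation message; not part of this diff):

```python
import numpy as np
import pandas as pd

s = pd.Series([1, -1, 5, 8, 7], index=list("abcde"))
cond = [True, False, True, False, False]

# Boolean indexing is the direct replacement for the removed Series.compress.
result = s[np.asarray(cond)]        # keeps the index: 'a' -> 1, 'c' -> 5

# Or drop the index and keep numpy semantics explicitly.
values_only = np.asarray(s).compress(cond)  # array([1, 5])
```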
https://api.github.com/repos/pandas-dev/pandas/pulls/30514
2019-12-27T18:17:32Z
2019-12-27T19:28:19Z
2019-12-27T19:28:19Z
2019-12-27T19:28:32Z
BUG: hash_pandas_object fails on array containing tuple #28969
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a6ba7770dadcc..d5231e63b7e6e 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -912,6 +912,7 @@ Other - Bug in :meth:`Series.diff` where a boolean series would incorrectly raise a ``TypeError`` (:issue:`17294`) - :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`) - Fix corrupted error message when calling ``pandas.libs._json.encode()`` on a 0d array (:issue:`18878`) +- Bug in ``pd.core.util.hashing.hash_pandas_object`` where arrays containing tuples were incorrectly treated as non-hashable (:issue:`28969`) - Bug in :meth:`DataFrame.append` that raised ``IndexError`` when appending with empty list (:issue:`28769`) - Fix :class:`AbstractHolidayCalendar` to return correct results for years after 2030 (now goes up to 2200) (:issue:`27790`) diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index d735890f7d07e..5298d8c5ed34e 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -70,6 +70,12 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'): # null, stringify and encode data = <bytes>str(val).encode(encoding) + elif isinstance(val, tuple): + # GH#28969 we could have a tuple, but need to ensure that + # the tuple entries are themselves hashable before converting + # to str + hash(val) + data = <bytes>str(val).encode(encoding) else: raise TypeError(f"{val} of type {type(val)} is not a valid type " "for hashing, must be string or null") diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index fa3582755a202..43655fa3ea913 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -85,11 +85,12 @@ def hash_pandas_object( if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False) - if isinstance(obj, ABCIndexClass): + elif isinstance(obj, ABCIndexClass): h = hash_array(obj.values, encoding, hash_key, categorize).astype( "uint64", copy=False ) h = Series(h, index=obj, dtype="uint64", copy=False) + elif isinstance(obj, ABCSeries): h = hash_array(obj.values, encoding, hash_key, categorize).astype( "uint64", copy=False diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index ebbdbd6c29842..ee9c4ed12bd92 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -353,3 +353,24 @@ def test_hash_collisions(): result = hash_array(np.asarray(hashes, dtype=object), "utf8") tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0)) + + +def test_hash_with_tuple(): + # GH#28969 array containing a tuple raises on call to arr.astype(str) + # apparently a numpy bug github.com/numpy/numpy/issues/9441 + + df = pd.DataFrame({"data": [tuple("1"), tuple("2")]}) + result = hash_pandas_object(df) + expected = pd.Series([10345501319357378243, 8331063931016360761], dtype=np.uint64) + tm.assert_series_equal(result, expected) + + df2 = pd.DataFrame({"data": [tuple([1]), tuple([2])]}) + result = hash_pandas_object(df2) + expected = pd.Series([9408946347443669104, 3278256261030523334], dtype=np.uint64) + tm.assert_series_equal(result, expected) + + # require that the elements of such tuples are themselves hashable + + df3 = pd.DataFrame({"data": [tuple([1, []]), tuple([2, {}])]}) + with pytest.raises(TypeError, match="unhashable type: 'list'"): + hash_pandas_object(df3)
- [x] closes #28969
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

At the moment this implements a smoke test that these cases don't raise, but I don't have a canonical answer to test them against. I could hard-code the result I get now. Thoughts?
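A quick demonstration of the new behavior (sketch only; the exact uint64 hashes are hard-coded in the test above, so I omit them here):

```python
import pandas as pd
from pandas.util import hash_pandas_object

# Tuples of hashable elements now hash instead of raising TypeError.
df = pd.DataFrame({"data": [("1",), ("2",)]})
print(hash_pandas_object(df))  # uint64 Series, one hash per row

# Tuples containing unhashable elements still raise, as required.
bad = pd.DataFrame({"data": [(1, []), (2, {})]})
try:
    hash_pandas_object(bad)
except TypeError as err:
    print(err)  # unhashable type: 'list'
```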
https://api.github.com/repos/pandas-dev/pandas/pulls/30508
2019-12-27T04:08:57Z
2019-12-31T13:02:56Z
2019-12-31T13:02:56Z
2019-12-31T16:06:42Z
BUG: pass 2D ndarray and EA-dtype to DataFrame, closes #12513
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 5b052823dfb25..c1c74e046fa3c 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -981,7 +981,8 @@ Other - Fixed ``pow`` operations for :class:`IntegerArray` when the other value is ``0`` or ``1`` (:issue:`29997`) - Bug in :meth:`Series.count` raises if use_inf_as_na is enabled (:issue:`29478`) - Bug in :class:`Index` where a non-hashable name could be set without raising ``TypeError`` (:issue:`29069`) - +- Bug in :class:`DataFrame` constructor when passing a 2D ``ndarray`` and an extension dtype (:issue:`12513`) +- .. _whatsnew_1000.contributors: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 897dbe2e8f788..3a92cfd9bf16d 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -152,9 +152,17 @@ def init_ndarray(values, index, columns, dtype=None, copy=False): return arrays_to_mgr([values], columns, index, columns, dtype=dtype) elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype): # GH#19157 + + if isinstance(values, np.ndarray) and values.ndim > 1: + # GH#12513 a EA dtype passed with a 2D array, split into + # multiple EAs that view the values + values = [values[:, n] for n in range(values.shape[1])] + else: + values = [values] + if columns is None: - columns = [0] - return arrays_to_mgr([values], columns, index, columns, dtype=dtype) + columns = list(range(len(values))) + return arrays_to_mgr(values, columns, index, columns, dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index f3cc11cb7027d..ffdf1435f74e0 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2551,3 +2551,11 @@ def test_from_tzaware_mixed_object_array(self): "datetime64[ns, CET]", ] assert (res.dtypes == expected_dtypes).all() + + def test_from_2d_ndarray_with_dtype(self): + # GH#12513 + array_dim2 = np.arange(10).reshape((5, 2)) + df = pd.DataFrame(array_dim2, dtype="datetime64[ns, UTC]") + + expected = pd.DataFrame(array_dim2).astype("datetime64[ns, UTC]") + tm.assert_frame_equal(df, expected)
- [x] closes #12513
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
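A minimal sketch of the constructor case this enables (mirroring the new test):

```python
import numpy as np
import pandas as pd

# Passing a 2D ndarray together with an extension dtype used to raise;
# each column is now split into its own extension array of that dtype.
arr = np.arange(10).reshape((5, 2))
df = pd.DataFrame(arr, dtype="datetime64[ns, UTC]")

print(df.dtypes)  # both columns: datetime64[ns, UTC]
```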
https://api.github.com/repos/pandas-dev/pandas/pulls/30507
2019-12-27T03:28:12Z
2020-01-01T16:28:29Z
2020-01-01T16:28:29Z
2020-01-01T16:31:44Z
TST: fix maybe_promote dt64tz case, 323 xfails
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 946070f8fad98..fa7b45ec4babd 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -6,6 +6,7 @@ from pandas._libs import lib, tslib, tslibs from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, Period, iNaT +from pandas._libs.tslibs.timezones import tz_compare from pandas.util._validators import validate_bool_kwarg from .common import ( @@ -409,6 +410,14 @@ def maybe_promote(dtype, fill_value=np.nan): elif is_datetime64tz_dtype(dtype): if isna(fill_value): fill_value = NaT + elif not isinstance(fill_value, datetime): + dtype = np.dtype(np.object_) + elif fill_value.tzinfo is None: + dtype = np.dtype(np.object_) + elif not tz_compare(fill_value.tzinfo, dtype.tz): + # TODO: sure we want to cast here? + dtype = np.dtype(np.object_) + elif is_extension_array_dtype(dtype) and isna(fill_value): fill_value = dtype.na_value diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index 0939e35bd64fa..69f8f46356a4d 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -8,7 +8,6 @@ import pytest from pandas._libs.tslibs import NaT -from pandas.compat import is_platform_windows from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( @@ -406,7 +405,6 @@ def test_maybe_promote_any_with_datetime64( _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) -@pytest.mark.xfail(reason="Fails to upcast to object") def test_maybe_promote_datetimetz_with_any_numpy_dtype( tz_aware_fixture, any_numpy_dtype_reduced ): @@ -427,11 +425,6 @@ def test_maybe_promote_datetimetz_with_datetimetz(tz_aware_fixture, tz_aware_fix dtype = DatetimeTZDtype(tz=tz_aware_fixture) fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture2) - from dateutil.tz import tzlocal - - if is_platform_windows() and tz_aware_fixture2 == tzlocal(): - pytest.xfail("Cannot process fill_value with this dtype, see GH 24310") - # create array of given dtype; casts "1" to correct dtype fill_value = pd.Series([10 ** 9], dtype=fill_dtype)[0] @@ -441,7 +434,6 @@ def test_maybe_promote_datetimetz_with_datetimetz(tz_aware_fixture, tz_aware_fix expected_dtype = dtype else: expected_dtype = np.dtype(object) - pytest.xfail("fails to cast to object") _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
This removes about a third of our remaining xfailed tests.
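To make the fixed promotion rules concrete, a sketch using the internal helper (internal API, subject to change):

```python
import pandas as pd
from pandas.core.dtypes.cast import maybe_promote

dtype = pd.DatetimeTZDtype(tz="US/Eastern")

# A tz-naive fill value cannot be held losslessly, so we now upcast to object.
promoted, fill = maybe_promote(dtype, pd.Timestamp("2016-01-01"))
print(promoted)  # object

# A fill value with a matching timezone keeps the tz-aware dtype.
promoted, fill = maybe_promote(dtype, pd.Timestamp("2016-01-01", tz="US/Eastern"))
print(promoted)  # datetime64[ns, US/Eastern]
```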
https://api.github.com/repos/pandas-dev/pandas/pulls/30506
2019-12-27T03:07:41Z
2019-12-30T13:21:21Z
2019-12-30T13:21:21Z
2019-12-30T16:42:36Z
BUG: DTA/TDA/PA iadd/isub should actually be inplace
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 4671170fa79ae..c6c5562af3951 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -716,7 +716,7 @@ Datetimelike - Bug in :class:`DatetimeIndex` addition when adding a non-optimized :class:`DateOffset` incorrectly dropping timezone information (:issue:`30336`) - Bug in :meth:`DataFrame.drop` where attempting to drop non-existent values from a DatetimeIndex would yield a confusing error message (:issue:`30399`) - Bug in :meth:`DataFrame.append` would remove the timezone-awareness of new data (:issue:`30238`) - +- Bug in :class:`DatetimeArray`, :class:`TimedeltaArray`, and :class:`PeriodArray` where inplace addition and subtraction did not actually operate inplace (:issue:`24115`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ff21a66928294..66f0ad2500c54 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1311,14 +1311,23 @@ def __rsub__(self, other): return -(self - other) - # FIXME: DTA/TDA/PA inplace methods should actually be inplace, GH#24115 def __iadd__(self, other): # type: ignore - # alias for __add__ - return self.__add__(other) + result = self + other + self[:] = result[:] + + if not is_period_dtype(self): + # restore freq, which is invalidated by setitem + self._freq = result._freq + return self def __isub__(self, other): # type: ignore - # alias for __sub__ - return self.__sub__(other) + result = self - other + self[:] = result[:] + + if not is_period_dtype(self): + # restore freq, which is invalidated by setitem + self._freq = result._freq + return self # -------------------------------------------------------------- # Comparison Methods diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 84b6d45b78fe8..e9c64d04ec860 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -225,6 +225,19 @@ def test_setitem_raises(self): with pytest.raises(TypeError, match="'value' should be a.* 'object'"): arr[0] = object() + def test_inplace_arithmetic(self): + # GH#24115 check that iadd and isub are actually in-place + data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 + arr = self.array_cls(data, freq="D") + + expected = arr + pd.Timedelta(days=1) + arr += pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + expected = arr - pd.Timedelta(days=1) + arr -= pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + class TestDatetimeArray(SharedTests): index_cls = pd.DatetimeIndex
- [x] closes #24115
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
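A sketch of the in-place semantics this fixes (mirroring the new test; constructing `DatetimeArray` from i8 values is internal and subject to change):

```python
import numpy as np
import pandas as pd
from pandas.core.arrays import DatetimeArray

data = np.arange(5, dtype="i8") * 24 * 3600 * 10 ** 9
arr = DatetimeArray(data, freq="D")
ref = arr  # second reference, to observe the mutation

arr += pd.Timedelta(days=1)

assert ref is arr        # __iadd__ now mutates and returns the same object
assert arr.freq == "D"   # freq is restored after setitem invalidates it
print(arr[0])            # Timestamp('1970-01-02 00:00:00', freq='D')
```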
https://api.github.com/repos/pandas-dev/pandas/pulls/30505
2019-12-27T02:20:50Z
2019-12-27T14:09:57Z
2019-12-27T14:09:57Z
2019-12-27T16:19:07Z
Mark unused parameters in objToJSON
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 5eaebdff8e23f..389e040866f72 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -396,18 +396,21 @@ static PyObject *get_item(PyObject *obj, Py_ssize_t i) { return ret; } -static char *PyBytesToUTF8(JSOBJ _obj, JSONTypeContext *tc, size_t *_outLen) { +static char *PyBytesToUTF8(JSOBJ _obj, JSONTypeContext *Py_UNUSED(tc), + size_t *_outLen) { PyObject *obj = (PyObject *)_obj; *_outLen = PyBytes_GET_SIZE(obj); return PyBytes_AS_STRING(obj); } -static char *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, size_t *_outLen) { +static char *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *Py_UNUSED(tc), + size_t *_outLen) { return (char *)PyUnicode_AsUTF8AndSize(_obj, (Py_ssize_t *)_outLen); } /* returns a char* and mutates the pointer to *len */ -static char *NpyDateTimeToIso(JSOBJ unused, JSONTypeContext *tc, size_t *len) { +static char *NpyDateTimeToIso(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *len) { npy_datetimestruct dts; int ret_code; int64_t longVal = GET_TC(tc)->longValue; @@ -537,7 +540,7 @@ static char *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, size_t *outLen) { // Numpy array iteration functions //============================================================================= -static void NpyArr_freeItemValue(JSOBJ _obj, JSONTypeContext *tc) { +static void NpyArr_freeItemValue(JSOBJ Py_UNUSED(_obj), JSONTypeContext *tc) { if (GET_TC(tc)->npyarr && GET_TC(tc)->itemValue != GET_TC(tc)->npyarr->array) { PRINTMARK(); @@ -546,7 +549,9 @@ static void NpyArr_freeItemValue(JSOBJ _obj, JSONTypeContext *tc) { } } -int NpyArr_iterNextNone(JSOBJ _obj, JSONTypeContext *tc) { return 0; } +int NpyArr_iterNextNone(JSOBJ Py_UNUSED(_obj), JSONTypeContext *Py_UNUSED(tc)) { + return 0; +} void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { PyArrayObject *obj; @@ -603,7 +608,10 @@ void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) { } } -void NpyArrPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc) { PRINTMARK(); } +void NpyArrPassThru_iterBegin(JSOBJ Py_UNUSED(obj), + JSONTypeContext *Py_UNUSED(tc)) { + PRINTMARK(); +} void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; @@ -682,12 +690,13 @@ int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) { return 1; } -JSOBJ NpyArr_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ NpyArr_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PRINTMARK(); return GET_TC(tc)->itemValue; } -char *NpyArr_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *NpyArr_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; npy_intp idx; PRINTMARK(); @@ -741,7 +750,8 @@ int PdBlock_iterNextItem(JSOBJ obj, JSONTypeContext *tc) { return NpyArr_iterNextItem(obj, tc); } -char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *PdBlock_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext *npyarr = blkCtxt->npyCtxts[0]; npy_intp idx; @@ -763,7 +773,7 @@ char *PdBlock_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { return cStr; } -char *PdBlock_iterGetName_Transpose(JSOBJ obj, JSONTypeContext *tc, +char *PdBlock_iterGetName_Transpose(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, size_t *outLen) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext 
*npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; @@ -809,7 +819,7 @@ int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void PdBlockPassThru_iterBegin(JSOBJ obj, JSONTypeContext *tc) { +void PdBlockPassThru_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; PRINTMARK(); @@ -1041,13 +1051,14 @@ int Tuple_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void Tuple_iterEnd(JSOBJ obj, JSONTypeContext *tc) {} +void Tuple_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {} -JSOBJ Tuple_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ Tuple_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *Tuple_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *Tuple_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc), + size_t *Py_UNUSED(outLen)) { return NULL; } @@ -1060,7 +1071,7 @@ void Iter_iterBegin(JSOBJ obj, JSONTypeContext *tc) { GET_TC(tc)->iterator = PyObject_GetIter(obj); } -int Iter_iterNext(JSOBJ obj, JSONTypeContext *tc) { +int Iter_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObject *item; if (GET_TC(tc)->itemValue) { @@ -1078,7 +1089,7 @@ int Iter_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void Iter_iterEnd(JSOBJ obj, JSONTypeContext *tc) { +void Iter_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { if (GET_TC(tc)->itemValue) { Py_DECREF(GET_TC(tc)->itemValue); GET_TC(tc)->itemValue = NULL; @@ -1090,11 +1101,12 @@ void Iter_iterEnd(JSOBJ obj, JSONTypeContext *tc) { } } -JSOBJ Iter_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ Iter_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *Iter_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *Iter_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc), + size_t *Py_UNUSED(outLen)) { return NULL; } @@ -1110,7 +1122,7 @@ void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc) { PRINTMARK(); } -void Dir_iterEnd(JSOBJ obj, JSONTypeContext *tc) { +void Dir_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { if (GET_TC(tc)->itemValue) { Py_DECREF(GET_TC(tc)->itemValue); GET_TC(tc)->itemValue = NULL; @@ -1196,12 +1208,13 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { return 1; } -JSOBJ Dir_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ Dir_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PRINTMARK(); return GET_TC(tc)->itemValue; } -char *Dir_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *Dir_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { PRINTMARK(); *outLen = PyBytes_GET_SIZE(GET_TC(tc)->itemName); return PyBytes_AS_STRING(GET_TC(tc)->itemName); @@ -1227,20 +1240,21 @@ int List_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void List_iterEnd(JSOBJ obj, JSONTypeContext *tc) {} +void List_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {} -JSOBJ List_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ List_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *List_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *List_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc), + size_t *Py_UNUSED(outLen)) { return NULL; } //============================================================================= // pandas Index iteration functions //============================================================================= -void 
Index_iterBegin(JSOBJ obj, JSONTypeContext *tc) { +void Index_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { GET_TC(tc)->index = 0; GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char)); if (!GET_TC(tc)->cStr) { @@ -1276,13 +1290,16 @@ int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void Index_iterEnd(JSOBJ obj, JSONTypeContext *tc) { PRINTMARK(); } +void Index_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) { + PRINTMARK(); +} -JSOBJ Index_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ Index_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *Index_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { *outLen = strlen(GET_TC(tc)->cStr); return GET_TC(tc)->cStr; } @@ -1290,7 +1307,7 @@ char *Index_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { //============================================================================= // pandas Series iteration functions //============================================================================= -void Series_iterBegin(JSOBJ obj, JSONTypeContext *tc) { +void Series_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; GET_TC(tc)->index = 0; GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char)); @@ -1331,17 +1348,18 @@ int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void Series_iterEnd(JSOBJ obj, JSONTypeContext *tc) { +void Series_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; enc->outputFormat = enc->originalOutputFormat; PRINTMARK(); } -JSOBJ Series_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ Series_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *Series_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { *outLen = strlen(GET_TC(tc)->cStr); return GET_TC(tc)->cStr; } @@ -1349,7 +1367,7 @@ char *Series_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { //============================================================================= // pandas DataFrame iteration functions //============================================================================= -void DataFrame_iterBegin(JSOBJ obj, JSONTypeContext *tc) { +void DataFrame_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; GET_TC(tc)->index = 0; GET_TC(tc)->cStr = PyObject_Malloc(20 * sizeof(char)); @@ -1395,17 +1413,18 @@ int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void DataFrame_iterEnd(JSOBJ obj, JSONTypeContext *tc) { +void DataFrame_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; enc->outputFormat = enc->originalOutputFormat; PRINTMARK(); } -JSOBJ DataFrame_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ DataFrame_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *DataFrame_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { *outLen = strlen(GET_TC(tc)->cStr); return GET_TC(tc)->cStr; } @@ -1415,12 +1434,12 @@ char *DataFrame_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { // itemName 
might converted to string (Python_Str). Do refCounting // itemValue is borrowed from object (which is dict). No refCounting //============================================================================= -void Dict_iterBegin(JSOBJ obj, JSONTypeContext *tc) { +void Dict_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { GET_TC(tc)->index = 0; PRINTMARK(); } -int Dict_iterNext(JSOBJ obj, JSONTypeContext *tc) { +int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObject *itemNameTmp; if (GET_TC(tc)->itemName) { @@ -1448,7 +1467,7 @@ int Dict_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 1; } -void Dict_iterEnd(JSOBJ obj, JSONTypeContext *tc) { +void Dict_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { if (GET_TC(tc)->itemName) { Py_DECREF(GET_TC(tc)->itemName); GET_TC(tc)->itemName = NULL; @@ -1457,11 +1476,12 @@ void Dict_iterEnd(JSOBJ obj, JSONTypeContext *tc) { PRINTMARK(); } -JSOBJ Dict_iterGetValue(JSOBJ obj, JSONTypeContext *tc) { +JSOBJ Dict_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; } -char *Dict_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { +char *Dict_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, + size_t *outLen) { *outLen = PyBytes_GET_SIZE(GET_TC(tc)->itemName); return PyBytes_AS_STRING(GET_TC(tc)->itemName); } @@ -2208,7 +2228,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } -void Object_endTypeContext(JSOBJ obj, JSONTypeContext *tc) { +void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PRINTMARK(); if (tc->prv) { Py_XDECREF(GET_TC(tc)->newObj); @@ -2234,11 +2254,11 @@ const char *Object_getStringValue(JSOBJ obj, JSONTypeContext *tc, return GET_TC(tc)->PyTypeToUTF8(obj, tc, _outLen); } -JSINT64 Object_getLongValue(JSOBJ obj, JSONTypeContext *tc) { +JSINT64 Object_getLongValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->longValue; } -double Object_getDoubleValue(JSOBJ obj, JSONTypeContext *tc) { +double Object_getDoubleValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->doubleValue; } @@ -2264,7 +2284,8 @@ char *Object_iterGetName(JSOBJ obj, JSONTypeContext *tc, size_t *outLen) { return GET_TC(tc)->iterGetName(obj, tc, outLen); } -PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { +PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args, + PyObject *kwargs) { static char *kwlist[] = {"obj", "ensure_ascii", "double_precision",
Compiling with `-Wextra` currently produces 58 warnings; this brings that down to 2, which I think helps the reader.
https://api.github.com/repos/pandas-dev/pandas/pulls/30504
2019-12-27T01:57:44Z
2019-12-27T14:12:00Z
2019-12-27T14:12:00Z
2020-01-16T00:33:25Z
REF/TST: method-specific files for test_append
diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py new file mode 100644 index 0000000000000..fac6a9139462f --- /dev/null +++ b/pandas/tests/frame/methods/test_append.py @@ -0,0 +1,179 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame, Series, Timestamp +import pandas.util.testing as tm + + +class TestDataFrameAppend: + def test_append_empty_list(self): + # GH 28769 + df = DataFrame() + result = df.append([]) + expected = df + tm.assert_frame_equal(result, expected) + assert result is not df + + df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) + result = df.append([]) + expected = df + tm.assert_frame_equal(result, expected) + assert result is not df # .append() should return a new object + + def test_append_series_dict(self): + df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) + + series = df.loc[4] + msg = "Indexes have overlapping values" + with pytest.raises(ValueError, match=msg): + df.append(series, verify_integrity=True) + + series.name = None + msg = "Can only append a Series if ignore_index=True" + with pytest.raises(TypeError, match=msg): + df.append(series, verify_integrity=True) + + result = df.append(series[::-1], ignore_index=True) + expected = df.append( + DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True + ) + tm.assert_frame_equal(result, expected) + + # dict + result = df.append(series.to_dict(), ignore_index=True) + tm.assert_frame_equal(result, expected) + + result = df.append(series[::-1][:3], ignore_index=True) + expected = df.append( + DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True + ) + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + # can append when name set + row = df.loc[4] + row.name = 5 + result = df.append(row) + expected = df.append(df[-1:], ignore_index=True) + tm.assert_frame_equal(result, expected) + + def test_append_list_of_series_dicts(self): + df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) + + dicts = [x.to_dict() for idx, x in df.iterrows()] + + result = df.append(dicts, ignore_index=True) + expected = df.append(df, ignore_index=True) + tm.assert_frame_equal(result, expected) + + # different columns + dicts = [ + {"foo": 1, "bar": 2, "baz": 3, "peekaboo": 4}, + {"foo": 5, "bar": 6, "baz": 7, "peekaboo": 8}, + ] + result = df.append(dicts, ignore_index=True, sort=True) + expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) + tm.assert_frame_equal(result, expected) + + def test_append_missing_cols(self): + # GH22252 + # exercise the conditional branch in append method where the data + # to be appended is a list and does not contain all columns that are in + # the target DataFrame + df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) + + dicts = [{"foo": 9}, {"bar": 10}] + with tm.assert_produces_warning(None): + result = df.append(dicts, ignore_index=True, sort=True) + + expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) + tm.assert_frame_equal(result, expected) + + def test_append_empty_dataframe(self): + + # Empty df append empty df + df1 = DataFrame() + df2 = DataFrame() + result = df1.append(df2) + expected = df1.copy() + tm.assert_frame_equal(result, expected) + + # Non-empty df append empty df + df1 = DataFrame(np.random.randn(5, 2)) + df2 = DataFrame() + result = df1.append(df2) + expected = df1.copy() + tm.assert_frame_equal(result, expected) + + # Empty df with 
columns append empty df + df1 = DataFrame(columns=["bar", "foo"]) + df2 = DataFrame() + result = df1.append(df2) + expected = df1.copy() + tm.assert_frame_equal(result, expected) + + # Non-Empty df with columns append empty df + df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"]) + df2 = DataFrame() + result = df1.append(df2) + expected = df1.copy() + tm.assert_frame_equal(result, expected) + + def test_append_dtypes(self): + + # GH 5754 + # row appends of different dtypes (so need to do by-item) + # can sometimes infer the correct type + + df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5)) + df2 = DataFrame() + result = df1.append(df2) + expected = df1.copy() + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) + df2 = DataFrame({"bar": "foo"}, index=range(1, 2)) + result = df1.append(df2) + expected = DataFrame({"bar": [Timestamp("20130101"), "foo"]}) + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) + df2 = DataFrame({"bar": np.nan}, index=range(1, 2)) + result = df1.append(df2) + expected = DataFrame( + {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} + ) + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) + df2 = DataFrame({"bar": np.nan}, index=range(1, 2), dtype=object) + result = df1.append(df2) + expected = DataFrame( + {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} + ) + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"bar": np.nan}, index=range(1)) + df2 = DataFrame({"bar": Timestamp("20130101")}, index=range(1, 2)) + result = df1.append(df2) + expected = DataFrame( + {"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")} + ) + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) + df2 = DataFrame({"bar": 1}, index=range(1, 2), dtype=object) + result = df1.append(df2) + expected = DataFrame({"bar": Series([Timestamp("20130101"), 1])}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "timestamp", ["2019-07-19 07:04:57+0100", "2019-07-19 07:04:57"] + ) + def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): + # GH 30238 + tz = tz_naive_fixture + df = pd.DataFrame([pd.Timestamp(timestamp, tz=tz)]) + result = df.append(df.iloc[0]).iloc[-1] + expected = pd.Series(pd.Timestamp(timestamp, tz=tz), name=0) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 04bc87a243a9b..209b4a800354d 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -62,32 +62,15 @@ def test_cov(self, float_frame, float_string_frame): class TestDataFrameCorr: # DataFrame.corr(), as opposed to DataFrame.corrwith - @staticmethod - def _check_method(frame, method="pearson"): - correls = frame.corr(method=method) - expected = frame["A"].corr(frame["C"], method=method) - tm.assert_almost_equal(correls["A"]["C"], expected) - - @td.skip_if_no_scipy - def test_corr_pearson(self, float_frame): - float_frame["A"][:5] = np.nan - float_frame["B"][5:10] = np.nan - - self._check_method(float_frame, "pearson") - + @pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"]) @td.skip_if_no_scipy - def test_corr_kendall(self, float_frame): + def test_corr_scipy_method(self, float_frame, method): float_frame["A"][:5] 
= np.nan float_frame["B"][5:10] = np.nan - self._check_method(float_frame, "kendall") - - @td.skip_if_no_scipy - def test_corr_spearman(self, float_frame): - float_frame["A"][:5] = np.nan - float_frame["B"][5:10] = np.nan - - self._check_method(float_frame, "spearman") + correls = float_frame.corr(method=method) + expected = float_frame["A"].corr(float_frame["C"], method=method) + tm.assert_almost_equal(correls["A"]["C"], expected) # --------------------------------------------------------------------- diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index f01a030ad0e22..efb0c64a4f7ac 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -113,6 +113,15 @@ def test_rank2(self): exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]}) tm.assert_frame_equal(df.rank(), exp) + def test_rank_does_not_mutate(self): + # GH#18521 + # Check rank does not mutate DataFrame + df = DataFrame(np.random.randn(10, 3), dtype="float64") + expected = df.copy() + df.rank() + result = df + tm.assert_frame_equal(result, expected) + def test_rank_mixed_frame(self, float_string_frame): float_string_frame["datetime"] = datetime.now() float_string_frame["timedelta"] = timedelta(days=1, seconds=1) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index a705fc89a813d..68d49c05eaa37 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1248,15 +1248,6 @@ def test_matmul(self): # --------------------------------------------------------------------- # Unsorted - def test_series_nat_conversion(self): - # GH 18521 - # Check rank does not mutate DataFrame - df = DataFrame(np.random.randn(10, 3), dtype="float64") - expected = df.copy() - df.rank() - result = df - tm.assert_frame_equal(result, expected) - def test_series_broadcasting(self): # smoke test for numpy warnings # GH 16378, GH 16306 diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index ebc4438366001..bfb691a8e75d3 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -128,177 +128,6 @@ def test_concat_tuple_keys(self): ) tm.assert_frame_equal(results, expected) - def test_append_empty_list(self): - # GH 28769 - df = DataFrame() - result = df.append([]) - expected = df - tm.assert_frame_equal(result, expected) - assert result is not df - - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - result = df.append([]) - expected = df - tm.assert_frame_equal(result, expected) - assert result is not df # .append() should return a new object - - def test_append_series_dict(self): - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - - series = df.loc[4] - msg = "Indexes have overlapping values" - with pytest.raises(ValueError, match=msg): - df.append(series, verify_integrity=True) - - series.name = None - msg = "Can only append a Series if ignore_index=True" - with pytest.raises(TypeError, match=msg): - df.append(series, verify_integrity=True) - - result = df.append(series[::-1], ignore_index=True) - expected = df.append( - DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True - ) - tm.assert_frame_equal(result, expected) - - # dict - result = df.append(series.to_dict(), ignore_index=True) - tm.assert_frame_equal(result, expected) - - result = df.append(series[::-1][:3], ignore_index=True) - expected = df.append( - DataFrame({0: 
series[::-1][:3]}).T, ignore_index=True, sort=True - ) - tm.assert_frame_equal(result, expected.loc[:, result.columns]) - - # can append when name set - row = df.loc[4] - row.name = 5 - result = df.append(row) - expected = df.append(df[-1:], ignore_index=True) - tm.assert_frame_equal(result, expected) - - def test_append_list_of_series_dicts(self): - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - - dicts = [x.to_dict() for idx, x in df.iterrows()] - - result = df.append(dicts, ignore_index=True) - expected = df.append(df, ignore_index=True) - tm.assert_frame_equal(result, expected) - - # different columns - dicts = [ - {"foo": 1, "bar": 2, "baz": 3, "peekaboo": 4}, - {"foo": 5, "bar": 6, "baz": 7, "peekaboo": 8}, - ] - result = df.append(dicts, ignore_index=True, sort=True) - expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) - tm.assert_frame_equal(result, expected) - - def test_append_missing_cols(self): - # GH22252 - # exercise the conditional branch in append method where the data - # to be appended is a list and does not contain all columns that are in - # the target DataFrame - df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) - - dicts = [{"foo": 9}, {"bar": 10}] - with tm.assert_produces_warning(None): - result = df.append(dicts, ignore_index=True, sort=True) - - expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) - tm.assert_frame_equal(result, expected) - - def test_append_empty_dataframe(self): - - # Empty df append empty df - df1 = DataFrame() - df2 = DataFrame() - result = df1.append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - # Non-empty df append empty df - df1 = DataFrame(np.random.randn(5, 2)) - df2 = DataFrame() - result = df1.append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - # Empty df with columns append empty df - df1 = DataFrame(columns=["bar", "foo"]) - df2 = DataFrame() - result = df1.append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - # Non-Empty df with columns append empty df - df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"]) - df2 = DataFrame() - result = df1.append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - def test_append_dtypes(self): - - # GH 5754 - # row appends of different dtypes (so need to do by-item) - # can sometimes infer the correct type - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5)) - df2 = DataFrame() - result = df1.append(df2) - expected = df1.copy() - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": "foo"}, index=range(1, 2)) - result = df1.append(df2) - expected = DataFrame({"bar": [Timestamp("20130101"), "foo"]}) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": np.nan}, index=range(1, 2)) - result = df1.append(df2) - expected = DataFrame( - {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} - ) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": np.nan}, index=range(1, 2), dtype=object) - result = df1.append(df2) - expected = DataFrame( - {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} - ) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": np.nan}, index=range(1)) - df2 = DataFrame({"bar": 
Timestamp("20130101")}, index=range(1, 2)) - result = df1.append(df2) - expected = DataFrame( - {"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")} - ) - tm.assert_frame_equal(result, expected) - - df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) - df2 = DataFrame({"bar": 1}, index=range(1, 2), dtype=object) - result = df1.append(df2) - expected = DataFrame({"bar": Series([Timestamp("20130101"), 1])}) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "timestamp", ["2019-07-19 07:04:57+0100", "2019-07-19 07:04:57"] - ) - def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): - # GH 30238 - tz = tz_naive_fixture - df = pd.DataFrame([pd.Timestamp(timestamp, tz=tz)]) - result = df.append(df.iloc[0]).iloc[-1] - expected = pd.Series(pd.Timestamp(timestamp, tz=tz), name=0) - tm.assert_series_equal(result, expected) - def test_update(self): df = DataFrame( [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]] diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py new file mode 100644 index 0000000000000..ec357786f18fb --- /dev/null +++ b/pandas/tests/series/methods/test_append.py @@ -0,0 +1,158 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame, DatetimeIndex, Series, date_range +import pandas.util.testing as tm + + +class TestSeriesAppend: + def test_append(self, datetime_series, string_series, object_series): + appended_series = string_series.append(object_series) + for idx, value in appended_series.items(): + if idx in string_series.index: + assert value == string_series[idx] + elif idx in object_series.index: + assert value == object_series[idx] + else: + raise AssertionError("orphaned index!") + + msg = "Indexes have overlapping values:" + with pytest.raises(ValueError, match=msg): + datetime_series.append(datetime_series, verify_integrity=True) + + def test_append_many(self, datetime_series): + pieces = [datetime_series[:5], datetime_series[5:10], datetime_series[10:]] + + result = pieces[0].append(pieces[1:]) + tm.assert_series_equal(result, datetime_series) + + def test_append_duplicates(self): + # GH 13677 + s1 = pd.Series([1, 2, 3]) + s2 = pd.Series([4, 5, 6]) + exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2]) + tm.assert_series_equal(s1.append(s2), exp) + tm.assert_series_equal(pd.concat([s1, s2]), exp) + + # the result must have RangeIndex + exp = pd.Series([1, 2, 3, 4, 5, 6]) + tm.assert_series_equal( + s1.append(s2, ignore_index=True), exp, check_index_type=True + ) + tm.assert_series_equal( + pd.concat([s1, s2], ignore_index=True), exp, check_index_type=True + ) + + msg = "Indexes have overlapping values:" + with pytest.raises(ValueError, match=msg): + s1.append(s2, verify_integrity=True) + with pytest.raises(ValueError, match=msg): + pd.concat([s1, s2], verify_integrity=True) + + def test_append_tuples(self): + # GH 28410 + s = pd.Series([1, 2, 3]) + list_input = [s, s] + tuple_input = (s, s) + + expected = s.append(list_input) + result = s.append(tuple_input) + + tm.assert_series_equal(expected, result) + + +class TestSeriesAppendWithDatetimeIndex: + def test_append(self): + rng = date_range("5/8/2012 1:45", periods=10, freq="5T") + ts = Series(np.random.randn(len(rng)), rng) + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + + result = ts.append(ts) + result_df = df.append(df) + ex_index = DatetimeIndex(np.tile(rng.values, 2)) + tm.assert_index_equal(result.index, ex_index) + 
tm.assert_index_equal(result_df.index, ex_index) + + appended = rng.append(rng) + tm.assert_index_equal(appended, ex_index) + + appended = rng.append([rng, rng]) + ex_index = DatetimeIndex(np.tile(rng.values, 3)) + tm.assert_index_equal(appended, ex_index) + + # different index names + rng1 = rng.copy() + rng2 = rng.copy() + rng1.name = "foo" + rng2.name = "bar" + assert rng1.append(rng1).name == "foo" + assert rng1.append(rng2).name is None + + def test_append_tz(self): + # see gh-2938 + rng = date_range("5/8/2012 1:45", periods=10, freq="5T", tz="US/Eastern") + rng2 = date_range("5/8/2012 2:35", periods=10, freq="5T", tz="US/Eastern") + rng3 = date_range("5/8/2012 1:45", periods=20, freq="5T", tz="US/Eastern") + ts = Series(np.random.randn(len(rng)), rng) + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + ts2 = Series(np.random.randn(len(rng2)), rng2) + df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) + + result = ts.append(ts2) + result_df = df.append(df2) + tm.assert_index_equal(result.index, rng3) + tm.assert_index_equal(result_df.index, rng3) + + appended = rng.append(rng2) + tm.assert_index_equal(appended, rng3) + + def test_append_tz_explicit_pytz(self): + # see gh-2938 + from pytz import timezone as timezone + + rng = date_range( + "5/8/2012 1:45", periods=10, freq="5T", tz=timezone("US/Eastern") + ) + rng2 = date_range( + "5/8/2012 2:35", periods=10, freq="5T", tz=timezone("US/Eastern") + ) + rng3 = date_range( + "5/8/2012 1:45", periods=20, freq="5T", tz=timezone("US/Eastern") + ) + ts = Series(np.random.randn(len(rng)), rng) + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + ts2 = Series(np.random.randn(len(rng2)), rng2) + df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) + + result = ts.append(ts2) + result_df = df.append(df2) + tm.assert_index_equal(result.index, rng3) + tm.assert_index_equal(result_df.index, rng3) + + appended = rng.append(rng2) + tm.assert_index_equal(appended, rng3) + + def test_append_tz_dateutil(self): + # see gh-2938 + rng = date_range( + "5/8/2012 1:45", periods=10, freq="5T", tz="dateutil/US/Eastern" + ) + rng2 = date_range( + "5/8/2012 2:35", periods=10, freq="5T", tz="dateutil/US/Eastern" + ) + rng3 = date_range( + "5/8/2012 1:45", periods=20, freq="5T", tz="dateutil/US/Eastern" + ) + ts = Series(np.random.randn(len(rng)), rng) + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + ts2 = Series(np.random.randn(len(rng2)), rng2) + df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) + + result = ts.append(ts2) + result_df = df.append(df2) + tm.assert_index_equal(result.index, rng3) + tm.assert_index_equal(result_df.index, rng3) + + appended = rng.append(rng2) + tm.assert_index_equal(appended, rng3) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index c00113a7c47ff..03f12ea13fdaa 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -184,23 +184,6 @@ def test_is_monotonic(self): assert s.is_monotonic is False assert s.is_monotonic_decreasing is True - def test_apply_categorical(self): - values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True) - s = pd.Series(values, name="XX", index=list("abcdefg")) - result = s.apply(lambda x: x.lower()) - - # should be categorical dtype when the number of categories are - # the same - values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True) - exp = pd.Series(values, name="XX", index=list("abcdefg")) - tm.assert_series_equal(result, exp) 
- tm.assert_categorical_equal(result.values, exp.values) - - result = s.apply(lambda x: "A") - exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg")) - tm.assert_series_equal(result, exp) - assert result.dtype == np.object - def test_unstack(self): index = MultiIndex( diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 334c6994eb540..30b8b5c7c8545 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -162,6 +162,23 @@ def test_apply_dict_depr(self): with pytest.raises(SpecificationError, match=msg): tsdf.A.agg({"foo": ["sum", "mean"]}) + def test_apply_categorical(self): + values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True) + ser = pd.Series(values, name="XX", index=list("abcdefg")) + result = ser.apply(lambda x: x.lower()) + + # should be categorical dtype when the number of categories are + # the same + values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True) + exp = pd.Series(values, name="XX", index=list("abcdefg")) + tm.assert_series_equal(result, exp) + tm.assert_categorical_equal(result.values, exp.values) + + result = ser.apply(lambda x: "A") + exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg")) + tm.assert_series_equal(result, exp) + assert result.dtype == np.object + @pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]]) def test_apply_categorical_with_nan_values(self, series): # GH 20714 bug fixed in: GH 24275 diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index c6f4ce364f328..238a413af7a31 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -4,65 +4,11 @@ import pytest import pandas as pd -from pandas import DataFrame, DatetimeIndex, Series, date_range +from pandas import DataFrame, Series import pandas.util.testing as tm class TestSeriesCombine: - def test_append(self, datetime_series, string_series, object_series): - appendedSeries = string_series.append(object_series) - for idx, value in appendedSeries.items(): - if idx in string_series.index: - assert value == string_series[idx] - elif idx in object_series.index: - assert value == object_series[idx] - else: - raise AssertionError("orphaned index!") - - msg = "Indexes have overlapping values:" - with pytest.raises(ValueError, match=msg): - datetime_series.append(datetime_series, verify_integrity=True) - - def test_append_many(self, datetime_series): - pieces = [datetime_series[:5], datetime_series[5:10], datetime_series[10:]] - - result = pieces[0].append(pieces[1:]) - tm.assert_series_equal(result, datetime_series) - - def test_append_duplicates(self): - # GH 13677 - s1 = pd.Series([1, 2, 3]) - s2 = pd.Series([4, 5, 6]) - exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2]) - tm.assert_series_equal(s1.append(s2), exp) - tm.assert_series_equal(pd.concat([s1, s2]), exp) - - # the result must have RangeIndex - exp = pd.Series([1, 2, 3, 4, 5, 6]) - tm.assert_series_equal( - s1.append(s2, ignore_index=True), exp, check_index_type=True - ) - tm.assert_series_equal( - pd.concat([s1, s2], ignore_index=True), exp, check_index_type=True - ) - - msg = "Indexes have overlapping values:" - with pytest.raises(ValueError, match=msg): - s1.append(s2, verify_integrity=True) - with pytest.raises(ValueError, match=msg): - pd.concat([s1, s2], verify_integrity=True) - - def test_append_tuples(self): - # GH 28410 - s = pd.Series([1, 2, 3]) - list_input = [s, s] - 
tuple_input = (s, s) - - expected = s.append(list_input) - result = s.append(tuple_input) - - tm.assert_series_equal(expected, result) - def test_combine_scalar(self): # GH 21248 # Note - combine() with another Series is tested elsewhere because @@ -319,99 +265,3 @@ def test_combine_first_dt64(self): rs = s0.combine_first(s1) xp = Series([datetime(2010, 1, 1), "2011"]) tm.assert_series_equal(rs, xp) - - -class TestTimeseries: - def test_append_concat(self): - rng = date_range("5/8/2012 1:45", periods=10, freq="5T") - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - - result = ts.append(ts) - result_df = df.append(df) - ex_index = DatetimeIndex(np.tile(rng.values, 2)) - tm.assert_index_equal(result.index, ex_index) - tm.assert_index_equal(result_df.index, ex_index) - - appended = rng.append(rng) - tm.assert_index_equal(appended, ex_index) - - appended = rng.append([rng, rng]) - ex_index = DatetimeIndex(np.tile(rng.values, 3)) - tm.assert_index_equal(appended, ex_index) - - # different index names - rng1 = rng.copy() - rng2 = rng.copy() - rng1.name = "foo" - rng2.name = "bar" - assert rng1.append(rng1).name == "foo" - assert rng1.append(rng2).name is None - - def test_append_concat_tz(self): - # see gh-2938 - rng = date_range("5/8/2012 1:45", periods=10, freq="5T", tz="US/Eastern") - rng2 = date_range("5/8/2012 2:35", periods=10, freq="5T", tz="US/Eastern") - rng3 = date_range("5/8/2012 1:45", periods=20, freq="5T", tz="US/Eastern") - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts.append(ts2) - result_df = df.append(df2) - tm.assert_index_equal(result.index, rng3) - tm.assert_index_equal(result_df.index, rng3) - - appended = rng.append(rng2) - tm.assert_index_equal(appended, rng3) - - def test_append_concat_tz_explicit_pytz(self): - # see gh-2938 - from pytz import timezone as timezone - - rng = date_range( - "5/8/2012 1:45", periods=10, freq="5T", tz=timezone("US/Eastern") - ) - rng2 = date_range( - "5/8/2012 2:35", periods=10, freq="5T", tz=timezone("US/Eastern") - ) - rng3 = date_range( - "5/8/2012 1:45", periods=20, freq="5T", tz=timezone("US/Eastern") - ) - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts.append(ts2) - result_df = df.append(df2) - tm.assert_index_equal(result.index, rng3) - tm.assert_index_equal(result_df.index, rng3) - - appended = rng.append(rng2) - tm.assert_index_equal(appended, rng3) - - def test_append_concat_tz_dateutil(self): - # see gh-2938 - rng = date_range( - "5/8/2012 1:45", periods=10, freq="5T", tz="dateutil/US/Eastern" - ) - rng2 = date_range( - "5/8/2012 2:35", periods=10, freq="5T", tz="dateutil/US/Eastern" - ) - rng3 = date_range( - "5/8/2012 1:45", periods=20, freq="5T", tz="dateutil/US/Eastern" - ) - ts = Series(np.random.randn(len(rng)), rng) - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - ts2 = Series(np.random.randn(len(rng2)), rng2) - df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) - - result = ts.append(ts2) - result_df = df.append(df2) - tm.assert_index_equal(result.index, rng3) - tm.assert_index_equal(result_df.index, rng3) - - appended = rng.append(rng2) - tm.assert_index_equal(appended, rng3)
A couple of cleanups and parametrizations.
https://api.github.com/repos/pandas-dev/pandas/pulls/30503
2019-12-27T01:44:27Z
2019-12-27T14:15:04Z
2019-12-27T14:15:04Z
2019-12-27T16:17:01Z
Replace "foo!r" to "repr(foo)" syntax #29886
diff --git a/pandas/_version.py b/pandas/_version.py index dfed9574c7cb0..66e756a4744c8 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -79,17 +79,17 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): if e.errno == errno.ENOENT: continue if verbose: - print("unable to run {dispcmd}".format(dispcmd=dispcmd)) + print(f"unable to run {dispcmd}") print(e) return None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print(f"unable to find command, tried {commands}") return None stdout = p.communicate()[0].strip().decode() if p.returncode != 0: if verbose: - print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd)) + print(f"unable to run {dispcmd} (error)") return None return stdout @@ -101,10 +101,8 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): if not dirname.startswith(parentdir_prefix): if verbose: print( - "guessing rootdir is '{root}', but '{dirname}' " - "doesn't start with prefix '{parentdir_prefix}'".format( - root=root, dirname=dirname, parentdir_prefix=parentdir_prefix - ) + f"guessing rootdir is '{root}', but '{dirname}' " + f"doesn't start with prefix '{parentdir_prefix}'" ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") return { @@ -163,15 +161,15 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: - print("discarding '{}', no digits".format(",".join(refs - tags))) + print(f"discarding '{','.join(refs - tags)}', no digits") if verbose: - print("likely tags: {}".format(",".join(sorted(tags)))) + print(f"likely tags: {','.join(sorted(tags))}") for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: - print("picking {r}".format(r=r)) + print(f"picking {r}") return { "version": r, "full-revisionid": keywords["full"].strip(), @@ -198,7 +196,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if not os.path.exists(os.path.join(root, ".git")): if verbose: - print("no .git in {root}".format(root=root)) + print(f"no .git in {root}") raise NotThisMethod("no .git directory") GITS = ["git"] @@ -240,17 +238,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ( - "unable to parse git-describe output: " - "'{describe_out}'".format(describe_out=describe_out) - ) + pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): - fmt = "tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" - msg = fmt.format(full_tag=full_tag, tag_prefix=tag_prefix) + msg = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" if verbose: print(msg) pieces["error"] = msg @@ -291,12 +285,12 @@ def render_pep440(pieces): rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) - rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"]) + rendered += f"{pieces['distance']:d}.g{pieces['short']}" if pieces["dirty"]: rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.{:d}.g{}".format(pieces["distance"], pieces["short"]) + rendered = f"0+untagged.{pieces['distance']:d}.g{pieces['short']}" if pieces["dirty"]: rendered += ".dirty" return rendered @@ -311,10 +305,10 @@ def render_pep440_pre(pieces): if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] + rendered += f".post.dev{pieces['distance']:d}" else: # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] + rendered = f"0.post.dev{pieces['distance']:d}" return rendered @@ -330,17 +324,17 @@ def render_pep440_post(pieces): if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: - rendered += ".post{:d}".format(pieces["distance"]) + rendered += f".post{pieces['distance']:d}" if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) - rendered += "g{}".format(pieces["short"]) + rendered += f"g{pieces['short']}" else: # exception #1 - rendered = "0.post%d" % pieces["distance"] + rendered = f"0.pos{pieces['distance']:d}" if pieces["dirty"]: rendered += ".dev0" - rendered += "+g{}".format(pieces["short"]) + rendered += f"+g{pieces['short']}" return rendered @@ -353,12 +347,12 @@ def render_pep440_old(pieces): if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] + rendered += f".post{pieces['distance']:d}" if pieces["dirty"]: rendered += ".dev0" else: # exception #1 - rendered = "0.post%d" % pieces["distance"] + rendered = f"0.post{pieces['distance']:d}" if pieces["dirty"]: rendered += ".dev0" return rendered @@ -374,7 +368,7 @@ def render_git_describe(pieces): if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: - rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"]) + rendered += f"-{pieces['distance']:d}-g{pieces['short']}" else: # exception #1 rendered = pieces["short"] @@ -392,7 +386,7 @@ def render_git_describe_long(pieces): if pieces["closest-tag"]: rendered = pieces["closest-tag"] - rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"]) + rendered += f"-{pieces['distance']:d}-g{pieces['short']}" else: # exception #1 rendered = pieces["short"] @@ -426,7 +420,7 @@ def render(pieces, style): elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: - raise ValueError("unknown style '{style}'".format(style=style)) + raise ValueError(f"unknown style '{style}'") return { "version": rendered, diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d3a12ccb77048..0c9d2d54d3065 100644 --- 
a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -255,7 +255,7 @@ def _translate(self): BLANK_VALUE = "" def format_attr(pair): - return "{key}={value}".format(**pair) + return f"{pair['key']}={pair['value']}" # for sparsifying a MultiIndex idx_lengths = _get_level_lengths(self.index) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 6a3137785e6f3..c11dda8f67620 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -126,7 +126,7 @@ def test_difference_freq(self, sort): def test_hash_error(self): index = period_range("20010101", periods=10) - msg = "unhashable type: '{}'".format(type(index).__name__) + msg = f"unhashable type: '{type(index).__name__}'" with pytest.raises(TypeError, match=msg): hash(index) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index fe65820a7c975..d5c2b368a3c6a 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -215,9 +215,7 @@ def teardown_method(self, method): class MySQLMixIn(MixInBase): def drop_table(self, table_name): cur = self.conn.cursor() - cur.execute( - "DROP TABLE IF EXISTS {}".format(sql._get_valid_mysql_name(table_name)) - ) + cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}") self.conn.commit() def _get_all_tables(self): @@ -237,7 +235,7 @@ def _close_conn(self): class SQLiteMixIn(MixInBase): def drop_table(self, table_name): self.conn.execute( - "DROP TABLE IF EXISTS {}".format(sql._get_valid_sqlite_name(table_name)) + f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}" ) self.conn.commit() @@ -405,11 +403,7 @@ def _load_raw_sql(self): def _count_rows(self, table_name): result = ( self._get_exec() - .execute( - "SELECT count(*) AS count_1 FROM {table_name}".format( - table_name=table_name - ) - ) + .execute(f"SELECT count(*) AS count_1 FROM {table_name}") .fetchone() ) return result[0] @@ -1207,7 +1201,7 @@ def _get_sqlite_column_type(self, schema, column): for col in schema.split("\n"): if col.split()[0].strip('""') == column: return col.split()[1] - raise ValueError("Column {column} not found".format(column=column)) + raise ValueError(f"Column {column} not found") def test_sqlite_type_mapping(self): @@ -1272,7 +1266,7 @@ def setup_connect(self): # to test if connection can be made: self.conn.connect() except sqlalchemy.exc.OperationalError: - pytest.skip("Can't connect to {0} server".format(self.flavor)) + pytest.skip(f"Can't connect to {self.flavor} server") def test_read_sql(self): self._read_sql_iris() @@ -1414,7 +1408,7 @@ def check(col): else: raise AssertionError( - "DateCol loaded with incorrect type -> {0}".format(col.dtype) + f"DateCol loaded with incorrect type -> {col.dtype}" ) # GH11216 @@ -2051,15 +2045,13 @@ def psql_insert_copy(table, conn, keys, data_iter): writer.writerows(data_iter) s_buf.seek(0) - columns = ", ".join('"{}"'.format(k) for k in keys) + columns = ", ".join(f'"{k}"' for k in keys) if table.schema: - table_name = "{}.{}".format(table.schema, table.name) + table_name = f"{table.schema}.{table.name}" else: table_name = table.name - sql_query = "COPY {} ({}) FROM STDIN WITH CSV".format( - table_name, columns - ) + sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV" cur.copy_expert(sql=sql_query, file=s_buf) expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]}) @@ -2199,14 +2191,12 @@ def test_datetime_time(self): def _get_index_columns(self, tbl_name): ixs = 
sql.read_sql_query( "SELECT * FROM sqlite_master WHERE type = 'index' " - + "AND tbl_name = '{tbl_name}'".format(tbl_name=tbl_name), + + f"AND tbl_name = '{tbl_name}'", self.conn, ) ix_cols = [] for ix_name in ixs.name: - ix_info = sql.read_sql_query( - "PRAGMA index_info({ix_name})".format(ix_name=ix_name), self.conn - ) + ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn) ix_cols.append(ix_info.name.tolist()) return ix_cols @@ -2217,15 +2207,11 @@ def test_transactions(self): self._transaction_test() def _get_sqlite_column_type(self, table, column): - recs = self.conn.execute("PRAGMA table_info({table})".format(table=table)) + recs = self.conn.execute(f"PRAGMA table_info({table})") for cid, name, ctype, not_null, default, pk in recs: if name == column: return ctype - raise ValueError( - "Table {table}, column {column} not found".format( - table=table, column=column - ) - ) + raise ValueError(f"Table {table}, column {column} not found") def test_dtype(self): if self.flavor == "mysql": @@ -2295,7 +2281,7 @@ def test_illegal_names(self): sql.table_exists(weird_name, self.conn) df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name]) - c_tbl = "test_weird_col_name{ndx:d}".format(ndx=ndx) + c_tbl = f"test_weird_col_name{ndx:d}" df2.to_sql(c_tbl, self.conn) sql.table_exists(c_tbl, self.conn) @@ -2500,7 +2486,7 @@ def test_if_exists(self): df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) table_name = "table_if_exists" - sql_select = "SELECT * FROM {table_name}".format(table_name=table_name) + sql_select = f"SELECT * FROM {table_name}" def clean_up(test_table_to_drop): """ @@ -2788,7 +2774,7 @@ def test_if_exists(self): df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]}) df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]}) table_name = "table_if_exists" - sql_select = "SELECT * FROM {table_name}".format(table_name=table_name) + sql_select = f"SELECT * FROM {table_name}" def clean_up(test_table_to_drop): """ diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 20ecfd0f8e563..b988a72fd2684 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -399,7 +399,7 @@ def test_unit_parser(self, units, np_unit, wrapper): [np.timedelta64(i, "m") for i in np.arange(5).tolist()] ) - str_repr = ["{}{}".format(x, unit) for x in np.arange(5)] + str_repr = [f"{x}{unit}" for x in np.arange(5)] result = to_timedelta(wrapper(str_repr)) tm.assert_index_equal(result, expected) result = TimedeltaIndex(wrapper(str_repr)) @@ -416,9 +416,9 @@ def test_unit_parser(self, units, np_unit, wrapper): if unit == "M": expected = Timedelta(np.timedelta64(2, "m").astype("timedelta64[ns]")) - result = to_timedelta("2{}".format(unit)) + result = to_timedelta(f"2{unit}") assert result == expected - result = Timedelta("2{}".format(unit)) + result = Timedelta(f"2{unit}") assert result == expected @pytest.mark.parametrize("unit", ["Y", "y", "M"]) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 196749a965885..c49cd6930781e 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -275,7 +275,7 @@ def test_datetime64_tz_fillna(self): ["2011-01-01 10:00", pd.NaT, "2011-01-03 10:00", pd.NaT], tz=tz ) s = pd.Series(idx) - assert s.dtype == "datetime64[ns, {0}]".format(tz) + assert s.dtype == 
f"datetime64[ns, {tz}]" tm.assert_series_equal(pd.isna(s), null_loc) result = s.fillna(pd.Timestamp("2011-01-02 10:00")) @@ -1284,7 +1284,7 @@ def test_interpolate_invalid_float_limit(self, nontemporal_method): def test_interp_invalid_method(self, invalid_method): s = Series([1, 3, np.nan, 12, np.nan, 25]) - msg = "method must be one of.* Got '{}' instead".format(invalid_method) + msg = f"method must be one of.* Got '{invalid_method}' instead" with pytest.raises(ValueError, match=msg): s.interpolate(method=invalid_method) @@ -1608,9 +1608,9 @@ def test_interp_non_timedelta_index(self, interp_methods_ind, ind): else: expected_error = ( "Index column must be numeric or datetime type when " - "using {method} method other than linear. " + f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " - "interpolating.".format(method=method) + "interpolating." ) with pytest.raises(ValueError, match=expected_error): df[0].interpolate(method=method, **kwargs)
xref #29886 Fixed the remaining files in: Replace "foo!r" to "repr(foo)" syntax #29886
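The mechanical pattern applied throughout the diff above is `%`/`.format()` templates rewritten as f-strings. A standalone sketch with hypothetical values (none of these names come from the PR):

```python
table, column = "users", "id"  # hypothetical values for illustration

# Old styles removed by this PR:
msg_fmt = "Table {table}, column {column} not found".format(
    table=table, column=column
)
msg_pct = "Table %s, column %s not found" % (table, column)

# New f-string style, evaluated at the point of definition:
msg_new = f"Table {table}, column {column} not found"

assert msg_fmt == msg_pct == msg_new
```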
https://api.github.com/repos/pandas-dev/pandas/pulls/30502
2019-12-27T01:35:59Z
2019-12-27T15:29:37Z
2019-12-27T15:29:37Z
2020-01-04T11:48:03Z
BUG: Fixed strange behaviour of pd.DataFrame.drop() with inplace argu…
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 8cb80c7c92f8e..2d1f3cd52d152 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -385,6 +385,7 @@ Reshaping - Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`) - :meth:`Series.append` will now raise a ``TypeError`` when passed a DataFrame or a sequence containing Dataframe (:issue:`31413`) - :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. Previously the ``replace`` would fail silently (:issue:`18634`) +- Bug where an inplace operation on a Series added a column back to the DataFrame from which it was originally dropped (using inplace=True) (:issue:`30484`) - Bug in :meth:`DataFrame.apply` where callback was called with :class:`Series` parameter even though ``raw=True`` requested. (:issue:`32423`) - Bug in :meth:`DataFrame.pivot_table` losing timezone information when creating a :class:`MultiIndex` level from a column with timezone-aware dtype (:issue:`32558`) - Bug in :meth:`concat` where when passing a non-dict mapping as ``objs`` would raise a ``TypeError`` (:issue:`32863`) diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index c04658565f235..0cf1ac4d107f6 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -93,7 +93,8 @@ def _wrap_inplace_method(method): def f(self, other): result = method(self, other) - + # Delete cacher + self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 958eb5935812e..d1d55d38f4a9a 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -716,3 +716,24 @@ def test_reindex_multi_categorical_time(self): result = df2.reindex(midx) expected = pd.DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "operation", ["__iadd__", "__isub__", "__imul__", "__ipow__"] + ) + @pytest.mark.parametrize("inplace", [False, True]) + def test_inplace_drop_and_operation(self, operation, inplace): + # GH 30484 + df = pd.DataFrame({"x": range(5)}) + expected = df.copy() + df["y"] = range(5) + y = df["y"] + + with tm.assert_produces_warning(None): + if inplace: + df.drop("y", axis=1, inplace=inplace) + else: + df = df.drop("y", axis=1, inplace=inplace) + + # Perform operation and check result + getattr(y, operation)(1) + tm.assert_frame_equal(df, expected)
closes #30484 - [x] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This is my first PR, so pardon me if I did something wrong. To fix this issue I started by creating a failing test based on the example given by the reporter. Afterwards, I made the test pass by removing the "inplace" special methods from add_special_arithmetic_methods, following what was suggested in a comment in the code.
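A minimal reproduction sketch of GH 30484, adapted from the test added in the diff above (pre-fix, the dropped column reappeared):

```python
import pandas as pd

df = pd.DataFrame({"x": range(5)})
df["y"] = range(5)
y = df["y"]  # keep a reference to the column before dropping it

df.drop("y", axis=1, inplace=True)

# Before this fix, the inplace op on the detached Series wrote "y"
# back into df through the cached reference; with the fix, df keeps
# only the "x" column.
y += 1
print(df.columns)  # expected after the fix: Index(['x'], dtype='object')
```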
https://api.github.com/repos/pandas-dev/pandas/pulls/30501
2019-12-27T01:27:01Z
2020-03-26T12:53:14Z
2020-03-26T12:53:14Z
2020-03-26T12:53:19Z
DOC: Make pyplot import explicit in the 10 minutes to pandas page
diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst index 66e500131b316..3055a22129b91 100644 --- a/doc/source/getting_started/10min.rst +++ b/doc/source/getting_started/10min.rst @@ -697,8 +697,9 @@ Plotting See the :ref:`Plotting <visualization>` docs. +We use the standard convention for referencing the matplotlib API: + .. ipython:: python - :suppress: import matplotlib.pyplot as plt plt.close('all')
Beginners don't know what `plt` is, so it is better to show how to import it explicitly. This is a continuation of https://github.com/pandas-dev/pandas/pull/30274 - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
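For reference, a sketch of the convention the page now states explicitly (the plotting call is illustrative, not from the diff):

```python
import matplotlib.pyplot as plt
import pandas as pd

ts = pd.Series(range(10))
ts.plot()   # pandas draws through matplotlib
plt.show()  # `plt` is the alias the doc now introduces before using it
```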
https://api.github.com/repos/pandas-dev/pandas/pulls/30499
2019-12-27T01:19:33Z
2020-01-01T03:18:30Z
2020-01-01T03:18:30Z
2020-01-01T03:18:34Z
BLD: catch missing cython instead of tempita NameError
diff --git a/setup.py b/setup.py index d0b077ea8abe6..af70ee3b30095 100755 --- a/setup.py +++ b/setup.py @@ -504,6 +504,10 @@ def maybe_cythonize(extensions, *args, **kwargs): # See https://github.com/cython/cython/issues/1495 return extensions + elif not cython: + # GH#28836 raise a helpful error message + raise RuntimeError("Cannot cythonize without Cython installed.") + numpy_incl = pkg_resources.resource_filename("numpy", "core/include") # TODO: Is this really necessary here? for ext in extensions:
- [x] closes #28836 I named this branch "bld-misc" in the optimistic hope I could close a bunch of BLD issues, but nope, just this one.
https://api.github.com/repos/pandas-dev/pandas/pulls/30498
2019-12-27T01:04:42Z
2019-12-27T14:41:05Z
2019-12-27T14:41:05Z
2019-12-27T16:16:00Z
CLN: OrderedDict -> Dict
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index 278ad1027d489..88d63071c360f 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -3,8 +3,6 @@ Currently only includes to_coo helpers. """ -from collections import OrderedDict - from pandas.core.indexes.api import Index, MultiIndex from pandas.core.series import Series @@ -46,14 +44,13 @@ def get_indexers(levels): # labels_to_i[:] = np.arange(labels_to_i.shape[0]) def _get_label_to_i_dict(labels, sort_labels=False): - """ Return OrderedDict of unique labels to number. + """ Return dict of unique labels to number. Optionally sort by label. """ labels = Index(map(tuple, labels)).unique().tolist() # squish if sort_labels: labels = sorted(labels) - d = OrderedDict((k, i) for i, k in enumerate(labels)) - return d + return {k: i for i, k in enumerate(labels)} def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): ilabels = list(zip(*[index._get_level_values(i) for i in subset])) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 6b110a0c80c07..2bbc38ffd16fe 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -5,7 +5,7 @@ These are user facing as the result of the ``df.groupby(...)`` operations, which here returns a DataFrameGroupBy object. """ -from collections import OrderedDict, abc, defaultdict, namedtuple +from collections import abc, defaultdict, namedtuple import copy from functools import partial from textwrap import dedent @@ -14,6 +14,7 @@ TYPE_CHECKING, Any, Callable, + Dict, FrozenSet, Iterable, List, @@ -306,7 +307,7 @@ def _aggregate_multiple_funcs(self, arg): arg = zip(columns, arg) - results = OrderedDict() + results = {} for name, func in arg: obj = self @@ -443,7 +444,7 @@ def _get_index() -> Index: return self._reindex_output(result) def _aggregate_named(self, func, *args, **kwargs): - result = OrderedDict() + result = {} for name, group in self: group.name = name @@ -1119,7 +1120,7 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: axis = self.axis obj = self._obj_with_exclusions - result: OrderedDict = OrderedDict() + result: Dict[Union[int, str], Union[NDFrame, np.ndarray]] = {} if axis != obj._info_axis_number: for name, data in self: fres = func(data, *args, **kwargs) @@ -1136,7 +1137,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame: # only for axis==0 obj = self._obj_with_exclusions - result: OrderedDict = OrderedDict() + result: Dict[Union[int, str], NDFrame] = {} cannot_agg = [] for item in obj: data = obj[item] @@ -1874,7 +1875,7 @@ def _normalize_keyword_aggregation(kwargs): Normalize user-provided "named aggregation" kwargs. Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs - to the old OrderedDict[str, List[scalar]]]. + to the old Dict[str, List[scalar]]]. Parameters ---------- @@ -1892,11 +1893,11 @@ def _normalize_keyword_aggregation(kwargs): Examples -------- >>> _normalize_keyword_aggregation({'output': ('input', 'sum')}) - (OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')]) + ({'input': ['sum']}, ('output',), [('input', 'sum')]) """ # Normalize the aggregation functions as Mapping[column, List[func]], # process normally, then fixup the names. 
- # TODO: aggspec type: typing.OrderedDict[str, List[AggScalar]] + # TODO: aggspec type: typing.Dict[str, List[AggScalar]] # May be hitting https://github.com/python/mypy/issues/5958 # saying it doesn't have an attribute __name__ aggspec = defaultdict(list) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index dac9b20104c36..9a79af65dc101 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,4 +1,3 @@ -from collections import OrderedDict import datetime from sys import getsizeof from typing import List, Optional @@ -1639,17 +1638,12 @@ def to_frame(self, index=True, name=None): else: idx_names = self.names - # Guarantee resulting column order + # Guarantee resulting column order - PY36+ dict maintains insertion order result = DataFrame( - OrderedDict( - [ - ( - (level if lvlname is None else lvlname), - self._get_level_values(level), - ) - for lvlname, level in zip(idx_names, range(len(self.levels))) - ] - ), + { + (level if lvlname is None else lvlname): self._get_level_values(level) + for lvlname, level in zip(idx_names, range(len(self.levels))) + }, copy=False, ) diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index b88478b3da181..020b4952f5549 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -2,7 +2,6 @@ Module for formatting output data in HTML. """ -from collections import OrderedDict from textwrap import dedent from typing import IO, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast @@ -138,10 +137,9 @@ def _write_cell( else: start_tag = "<{kind}>".format(kind=kind) - esc: Union[OrderedDict[str, str], Dict] if self.escape: # escape & first to prevent double escaping of & - esc = OrderedDict([("&", r"&amp;"), ("<", r"&lt;"), (">", r"&gt;")]) + esc = {"&": r"&amp;", "<": r"&lt;", ">": r"&gt;"} else: esc = {} diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 42a4a55988b0f..816f3d047997b 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -3,7 +3,6 @@ specific classification into the other test modules. 
""" import codecs -from collections import OrderedDict import csv from datetime import datetime from io import BytesIO, StringIO @@ -1316,9 +1315,7 @@ def test_float_parser(all_parsers): def test_scientific_no_exponent(all_parsers): # see gh-12215 - df = DataFrame.from_dict( - OrderedDict([("w", ["2e"]), ("x", ["3E"]), ("y", ["42e"]), ("z", ["632E"])]) - ) + df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]}) data = df.to_csv(index=False) parser = all_parsers diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index bce62571d55ec..f05b4db516dfe 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import numpy as np from numpy.random import randn import pytest @@ -474,17 +472,13 @@ def test_merge_datetime_index(self, klass): if klass is not None: on_vector = klass(on_vector) - expected = DataFrame( - OrderedDict([("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018])]) - ) + expected = DataFrame({"a": [1, 2, 3], "key_1": [2016, 2017, 2018]}) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( - OrderedDict( - [("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3])] - ) + {"key_0": [2016, 2017, 2018], "a_x": [1, 2, 3], "a_y": [1, 2, 3]} ) result = df.merge(df, on=[df.index.year], how="inner") @@ -788,17 +782,13 @@ def test_merge_datetime_index(self, box): if box is not None: on_vector = box(on_vector) - expected = DataFrame( - OrderedDict([("a", [1, 2, 3]), ("key_1", [2016, 2017, 2018])]) - ) + expected = DataFrame({"a": [1, 2, 3], "key_1": [2016, 2017, 2018]}) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( - OrderedDict( - [("key_0", [2016, 2017, 2018]), ("a_x", [1, 2, 3]), ("a_y", [1, 2, 3])] - ) + {"key_0": [2016, 2017, 2018], "a_x": [1, 2, 3], "a_y": [1, 2, 3]} ) result = df.merge(df, on=[df.index.year], how="inner") diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index bd1d3d2d5bb63..43da011ed7100 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1,4 +1,3 @@ -from collections import OrderedDict from datetime import date, datetime, timedelta from itertools import product @@ -1044,7 +1043,7 @@ def test_pivot_columns_lexsorted(self): assert pivoted.columns.is_monotonic def test_pivot_complex_aggfunc(self): - f = OrderedDict([("D", ["std"]), ("E", ["sum"])]) + f = {"D": ["std"], "E": ["sum"]} expected = self.data.groupby(["A", "B"]).agg(f).unstack("B") result = self.data.pivot_table(index="A", columns="B", aggfunc=f) diff --git a/pandas/tests/util/test_validate_args.py b/pandas/tests/util/test_validate_args.py index 1f1365d62c64e..dfbd8a3f9af19 100644 --- a/pandas/tests/util/test_validate_args.py +++ b/pandas/tests/util/test_validate_args.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import pytest from pandas.util._validators import validate_args @@ -58,11 +56,7 @@ def test_not_all_defaults(i): r"in the pandas implementation of {func}\(\)".format(arg=bad_arg, func=_fname) ) - compat_args = OrderedDict() - compat_args["foo"] = 2 - compat_args["bar"] = -1 - compat_args["baz"] = 3 - + compat_args = {"foo": 2, "bar": -1, "baz": 3} arg_vals = (1, -1, 3) with pytest.raises(ValueError, match=msg): @@ -73,8 +67,5 @@ def test_validation(): # No exceptions should be raised. 
validate_args(_fname, (None,), 2, dict(out=None)) - compat_args = OrderedDict() - compat_args["axis"] = 1 - compat_args["out"] = None - + compat_args = {"axis": 1, "out": None} validate_args(_fname, (1, None), 2, compat_args) diff --git a/pandas/tests/util/test_validate_args_and_kwargs.py b/pandas/tests/util/test_validate_args_and_kwargs.py index 6aa2088c07b5d..eaf5f99b7e8f9 100644 --- a/pandas/tests/util/test_validate_args_and_kwargs.py +++ b/pandas/tests/util/test_validate_args_and_kwargs.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import pytest from pandas.util._validators import validate_args_and_kwargs @@ -52,9 +50,7 @@ def test_missing_args_or_kwargs(args, kwargs): bad_arg = "bar" min_fname_arg_count = 2 - compat_args = OrderedDict() - compat_args["foo"] = -5 - compat_args[bad_arg] = 1 + compat_args = {"foo": -5, bad_arg: 1} msg = ( r"the '{arg}' parameter is not supported " @@ -68,11 +64,7 @@ def test_missing_args_or_kwargs(args, kwargs): def test_duplicate_argument(): min_fname_arg_count = 2 - compat_args = OrderedDict() - compat_args["foo"] = None - compat_args["bar"] = None - compat_args["baz"] = None - + compat_args = {"foo": None, "bar": None, "baz": None} kwargs = {"foo": None, "bar": None} args = (None,) # duplicate value for "foo" @@ -84,10 +76,7 @@ def test_duplicate_argument(): def test_validation(): # No exceptions should be raised. - compat_args = OrderedDict() - compat_args["foo"] = 1 - compat_args["bar"] = None - compat_args["baz"] = -2 + compat_args = {"foo": 1, "bar": None, "baz": -2} kwargs = {"baz": -2} args = (1, None) diff --git a/pandas/tests/util/test_validate_kwargs.py b/pandas/tests/util/test_validate_kwargs.py index 54b5c6ed034a2..a26d96fcda231 100644 --- a/pandas/tests/util/test_validate_kwargs.py +++ b/pandas/tests/util/test_validate_kwargs.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import pytest from pandas.util._validators import validate_bool_kwarg, validate_kwargs @@ -11,9 +9,7 @@ def test_bad_kwarg(): good_arg = "f" bad_arg = good_arg + "o" - compat_args = OrderedDict() - compat_args[good_arg] = "foo" - compat_args[bad_arg + "o"] = "bar" + compat_args = {good_arg: "foo", bad_arg + "o": "bar"} kwargs = {good_arg: "foo", bad_arg: "bar"} msg = fr"{_fname}\(\) got an unexpected keyword argument '{bad_arg}'" @@ -30,10 +26,7 @@ def test_not_all_none(i): r"in the pandas implementation of {func}\(\)".format(arg=bad_arg, func=_fname) ) - compat_args = OrderedDict() - compat_args["foo"] = 1 - compat_args["bar"] = "s" - compat_args["baz"] = None + compat_args = {"foo": 1, "bar": "s", "baz": None} kwarg_keys = ("foo", "bar", "baz") kwarg_vals = (2, "s", None) @@ -46,10 +39,7 @@ def test_not_all_none(i): def test_validation(): # No exceptions should be raised. - compat_args = OrderedDict() - compat_args["f"] = None - compat_args["b"] = 1 - compat_args["ba"] = "s" + compat_args = {"f": None, "b": 1, "ba": "s"} kwargs = dict(f=None, b=1) validate_kwargs(_fname, kwargs, compat_args) diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 6cc14c7804b4a..b69c974661f89 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -84,15 +84,13 @@ def validate_args(fname, args, max_fname_arg_count, compat_args): The maximum number of arguments that the function `fname` can accept, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. - compat_args : Dict - An ordered dictionary of keys and their associated default values. 
+ compat_args : dict + A dictionary of keys and their associated default values. In order to accommodate buggy behaviour in some versions of `numpy`, where a signature displayed keyword arguments but then passed those arguments **positionally** internally when calling downstream - implementations, an ordered dictionary ensures that the original - order of the keyword arguments is enforced. Note that if there is - only one key, a generic dict can be passed in as well. - + implementations, a dict ensures that the original + order of the keyword arguments is enforced. Raises ------ TypeError @@ -168,10 +166,9 @@ def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_ar The minimum number of arguments that the function `fname` requires, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. - compat_args: OrderedDict - A ordered dictionary of keys that `kwargs` is allowed to - have and their associated default values. Note that if there - is only one key, a generic dict can be passed in as well. + compat_args: dict + A dictionary of keys that `kwargs` is allowed to + have and their associated default values. Raises ------ diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index b0eeb7b96e0eb..af0026c85baad 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -15,7 +15,6 @@ """ import argparse import ast -import collections import doctest import functools import glob @@ -422,7 +421,7 @@ def needs_summary(self): @property def doc_parameters(self): - parameters = collections.OrderedDict() + parameters = {} for names, type_, desc in self.doc["Parameters"]: for name in names.split(", "): parameters[name] = (type_, "".join(desc)) @@ -510,7 +509,7 @@ def parameter_desc(self, param): @property def see_also(self): - result = collections.OrderedDict() + result = {} for funcs, desc in self.doc["See Also"]: for func, _ in funcs: result[func] = "".join(desc)
- [x] ref #30469
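Context for why this cleanup is safe (a general Python fact, not stated in the PR): plain dicts preserve insertion order on Python 3.7+ (and on 3.6 as an implementation detail), so the ordering guarantee OrderedDict provided still holds. A quick illustration:

```python
from collections import OrderedDict

od = OrderedDict([("foo", 2), ("bar", -1), ("baz", 3)])
d = {"foo": 2, "bar": -1, "baz": 3}

# Same iteration order, and OrderedDict compares equal to a plain
# dict with the same items (that comparison ignores order).
assert list(od) == list(d) == ["foo", "bar", "baz"]
assert od == d
```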
https://api.github.com/repos/pandas-dev/pandas/pulls/30497
2019-12-27T00:59:18Z
2020-01-02T00:33:07Z
2020-01-02T00:33:07Z
2020-01-02T07:45:43Z
Improve ISO Date Performance for JSON
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index 8f037e94e0095..27096bcaba78b 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -132,6 +132,30 @@ def peakmem_to_json_wide(self, orient, frame): df.to_json(self.fname, orient=orient) +class ToJSONISO(BaseIO): + fname = "__test__.json" + params = [["split", "columns", "index", "values", "records"]] + param_names = ["orient"] + + def setup(self, orient): + N = 10 ** 5 + index = date_range("20000101", periods=N, freq="H") + timedeltas = timedelta_range(start=1, periods=N, freq="s") + datetimes = date_range(start=1, periods=N, freq="s") + self.df = DataFrame( + { + "td_1": timedeltas, + "td_2": timedeltas, + "ts_1": datetimes, + "ts_2": datetimes, + }, + index=index, + ) + + def time_iso_format(self, orient): + self.df.to_json(orient=orient, date_format="iso") + + class ToJSONLines(BaseIO): fname = "__test__.json" diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 389e040866f72..2192539e24626 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -54,7 +54,6 @@ static PyTypeObject *cls_dataframe; static PyTypeObject *cls_series; static PyTypeObject *cls_index; static PyTypeObject *cls_nat; -PyObject *cls_timestamp; PyObject *cls_timedelta; npy_int64 get_nat(void) { return NPY_MIN_INT64; } @@ -166,7 +165,6 @@ void *initObjToJSON(void) { cls_index = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Index"); cls_series = (PyTypeObject *)PyObject_GetAttrString(mod_pandas, "Series"); - cls_timestamp = PyObject_GetAttrString(mod_pandas, "Timestamp"); cls_timedelta = PyObject_GetAttrString(mod_pandas, "Timedelta"); Py_DECREF(mod_pandas); } @@ -408,22 +406,18 @@ static char *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *Py_UNUSED(tc), return (char *)PyUnicode_AsUTF8AndSize(_obj, (Py_ssize_t *)_outLen); } -/* returns a char* and mutates the pointer to *len */ -static char *NpyDateTimeToIso(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, - size_t *len) { +/* Converts the int64_t representation of a datetime to ISO; mutates len */ +static char *int64ToIso(int64_t value, NPY_DATETIMEUNIT base, size_t *len) { npy_datetimestruct dts; int ret_code; - int64_t longVal = GET_TC(tc)->longValue; - pandas_datetime_to_datetimestruct(longVal, NPY_FR_ns, &dts); + pandas_datetime_to_datetimestruct(value, NPY_FR_ns, &dts); - NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; *len = (size_t)get_datetime_iso_8601_strlen(0, base); char *result = PyObject_Malloc(*len); if (result == NULL) { PyErr_NoMemory(); - ((JSONObjectEncoder *)tc->encoder)->errorMsg = ""; return NULL; } @@ -431,7 +425,6 @@ static char *NpyDateTimeToIso(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, if (ret_code != 0) { PyErr_SetString(PyExc_ValueError, "Could not convert datetime value to string"); - ((JSONObjectEncoder *)tc->encoder)->errorMsg = ""; PyObject_Free(result); } @@ -441,30 +434,33 @@ static char *NpyDateTimeToIso(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, return result; } +/* JSON callback. 
returns a char* and mutates the pointer to *len */ +static char *NpyDateTimeToIsoCallback(JSOBJ Py_UNUSED(unused), JSONTypeContext *tc, + size_t *len) { + NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; + return int64ToIso(GET_TC(tc)->longValue, base, len); +} + static npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base) { scaleNanosecToUnit(&dt, base); return dt; } -static char *PyDateTimeToIso(JSOBJ obj, JSONTypeContext *tc, size_t *len) { +/* Convert PyDatetime To ISO C-string. mutates len */ +static char *PyDateTimeToIso(PyDateTime_Date *obj, NPY_DATETIMEUNIT base, + size_t *len) { npy_datetimestruct dts; int ret; - if (!PyDateTime_Check(obj)) { - // TODO: raise TypeError - } - ret = convert_pydatetime_to_datetimestruct(obj, &dts); if (ret != 0) { if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, "Could not convert PyDateTime to numpy datetime"); } - ((JSONObjectEncoder *)tc->encoder)->errorMsg = ""; return NULL; } - NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; *len = (size_t)get_datetime_iso_8601_strlen(0, base); char *result = PyObject_Malloc(*len); ret = make_iso_8601_datetime(&dts, result, *len, base); @@ -473,7 +469,6 @@ static char *PyDateTimeToIso(JSOBJ obj, JSONTypeContext *tc, size_t *len) { PRINTMARK(); PyErr_SetString(PyExc_ValueError, "Could not convert datetime value to string"); - ((JSONObjectEncoder *)tc->encoder)->errorMsg = ""; PyObject_Free(result); return NULL; } @@ -484,6 +479,19 @@ static char *PyDateTimeToIso(JSOBJ obj, JSONTypeContext *tc, size_t *len) { return result; } +/* JSON callback */ +static char *PyDateTimeToIsoCallback(JSOBJ obj, JSONTypeContext *tc, + size_t *len) { + + if (!PyDateTime_Check(obj)) { + PyErr_SetString(PyExc_TypeError, "Expected datetime object"); + return NULL; + } + + NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; + return PyDateTimeToIso(obj, base, len); +} + static npy_datetime PyDateTimeToEpoch(PyObject *obj, NPY_DATETIMEUNIT base) { npy_datetimestruct dts; int ret; @@ -1518,7 +1526,8 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, npy_intp num) { // NOTE this function steals a reference to labels. 
PyObject *item = NULL; - npy_intp i, stride, len; + size_t len; + npy_intp i, stride; char **ret; char *dataptr, *cLabel; int type_num; @@ -1559,8 +1568,7 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, break; } - // TODO: for any matches on type_num (date and timedeltas) should use a - // vectorized solution to convert to epoch or iso formats + // TODO: vectorized timedelta solution if (enc->datetimeIso && (type_num == NPY_TIMEDELTA || PyDelta_Check(item))) { PyObject *td = PyObject_CallFunction(cls_timedelta, "(O)", item); @@ -1583,54 +1591,36 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, cLabel = (char *)PyUnicode_AsUTF8(iso); Py_DECREF(iso); len = strlen(cLabel); - } else if (PyTypeNum_ISDATETIME(type_num) || PyDateTime_Check(item) || - PyDate_Check(item)) { - PyObject *ts = PyObject_CallFunction(cls_timestamp, "(O)", item); - if (ts == NULL) { - Py_DECREF(item); - NpyArr_freeLabels(ret, num); - ret = 0; - break; + } else if (PyTypeNum_ISDATETIME(type_num)) { + NPY_DATETIMEUNIT base = enc->datetimeUnit; + npy_int64 longVal; + PyArray_VectorUnaryFunc *castfunc = + PyArray_GetCastFunc(PyArray_DescrFromType(type_num), NPY_INT64); + if (!castfunc) { + PyErr_Format(PyExc_ValueError, + "Cannot cast numpy dtype %d to long", + enc->npyType); } - + castfunc(dataptr, &longVal, 1, NULL, NULL); if (enc->datetimeIso) { - PyObject *iso = PyObject_CallMethod(ts, "isoformat", NULL); - Py_DECREF(ts); - if (iso == NULL) { - Py_DECREF(item); - NpyArr_freeLabels(ret, num); - ret = 0; - break; + cLabel = int64ToIso(longVal, base, &len); + } else { + if (!scaleNanosecToUnit(&longVal, base)) { + // TODO: This gets hit but somehow doesn't cause errors + // need to clean up (elsewhere in module as well) } - - cLabel = (char *)PyUnicode_AsUTF8(iso); - Py_DECREF(iso); + cLabel = PyObject_Malloc(21); // 21 chars for int64 + sprintf(cLabel, "%" NPY_INT64_FMT, longVal); len = strlen(cLabel); + } + } else if (PyDateTime_Check(item) || PyDate_Check(item)) { + NPY_DATETIMEUNIT base = enc->datetimeUnit; + if (enc->datetimeIso) { + cLabel = PyDateTimeToIso((PyDateTime_Date *)item, base, &len); } else { - npy_int64 value; - // TODO: refactor to not duplicate what goes on in - // beginTypeContext - if (PyObject_HasAttrString(ts, "value")) { - PRINTMARK(); - value = get_long_attr(ts, "value"); - } else { - PRINTMARK(); - value = total_seconds(ts) * - 1000000000LL; // nanoseconds per second - } - Py_DECREF(ts); - - NPY_DATETIMEUNIT unit = enc->datetimeUnit; - if (scaleNanosecToUnit(&value, unit) != 0) { - Py_DECREF(item); - NpyArr_freeLabels(ret, num); - ret = 0; - break; - } - - char buf[21] = {0}; // 21 chars for 2**63 as string - cLabel = buf; - sprintf(buf, "%" NPY_INT64_FMT, value); + cLabel = PyObject_Malloc(21); // 21 chars for int64 + sprintf(cLabel, "%" NPY_DATETIME_FMT, + PyDateTimeToEpoch(item, base)); len = strlen(cLabel); } } else { // Fallback to string representation @@ -1740,7 +1730,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { if (enc->datetimeIso) { PRINTMARK(); - pc->PyTypeToUTF8 = NpyDateTimeToIso; + pc->PyTypeToUTF8 = NpyDateTimeToIsoCallback; // Currently no way to pass longVal to iso function, so use // state management GET_TC(tc)->longValue = longVal; @@ -1815,7 +1805,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { PRINTMARK(); if (enc->datetimeIso) { PRINTMARK(); - pc->PyTypeToUTF8 = PyDateTimeToIso; + pc->PyTypeToUTF8 = PyDateTimeToIsoCallback; tc->type = JT_UTF8; } else { PRINTMARK(); @@ -1841,7 
+1831,7 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { PRINTMARK(); if (enc->datetimeIso) { PRINTMARK(); - pc->PyTypeToUTF8 = PyDateTimeToIso; + pc->PyTypeToUTF8 = PyDateTimeToIsoCallback; tc->type = JT_UTF8; } else { PRINTMARK(); diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 6e27b79458faf..eec4d7d01eab5 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -854,7 +854,7 @@ def test_date_format_frame(self, date, date_unit): json = df.to_json(date_format="iso") result = read_json(json) expected = df.copy() - # expected.index = expected.index.tz_localize("UTC") + expected.index = expected.index.tz_localize("UTC") expected["date"] = expected["date"].dt.tz_localize("UTC") tm.assert_frame_equal(result, expected) @@ -884,7 +884,7 @@ def test_date_format_series(self, date, date_unit): json = ts.to_json(date_format="iso") result = read_json(json, typ="series") expected = ts.copy() - # expected.index = expected.index.tz_localize("UTC") + expected.index = expected.index.tz_localize("UTC") expected = expected.dt.tz_localize("UTC") tm.assert_series_equal(result, expected)
benchmarks below ```sh before after ratio [9c6771c5] [5c0f5682] <master> <json-index-dates> - 231±60ms 188±3ms 0.82 io.json.ToJSON.time_iso_format('split', 'df_td_int_ts') - 496±20ms 218±40ms 0.44 io.json.ToJSON.time_iso_format('index', 'df_int_float_str') - 486±20ms 207±2ms 0.42 io.json.ToJSON.time_iso_format('columns', 'df_int_float_str') - 499±9ms 210±1ms 0.42 io.json.ToJSON.time_iso_format('index', 'df_date_idx') - 503±20ms 206±2ms 0.41 io.json.ToJSON.time_iso_format('columns', 'df_int_floats') - 515±20ms 210±1ms 0.41 io.json.ToJSON.time_iso_format('index', 'df') - 524±80ms 209±0.5ms 0.40 io.json.ToJSON.time_iso_format('index', 'df_int_floats') - 528±10ms 206±2ms 0.39 io.json.ToJSON.time_iso_format('columns', 'df_td_int_ts') - 546±80ms 209±0.7ms 0.38 io.json.ToJSON.time_iso_format('index', 'df_td_int_ts') - 568±60ms 208±3ms 0.37 io.json.ToJSON.time_iso_format('columns', 'df_date_idx') - 598±100ms 206±2ms 0.35 io.json.ToJSON.time_iso_format('columns', 'df') SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE INCREASED. ``` Note that this mostly improves on DTI which on 0.25.3 can't even be written as ISO format, so I didn't add a whatsnew. Timedelta is the big bottleneck remaining
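A small usage sketch of the code path being benchmarked (illustrative only; the frame is made up):

```python
import pandas as pd

rng = pd.date_range("2000-01-01", periods=2, freq="H")
df = pd.DataFrame({"ts": rng}, index=rng)

# The hot path this PR speeds up: datetimes serialized as ISO 8601
# strings rather than epoch integers.
print(df.to_json(orient="index", date_format="iso"))
```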
https://api.github.com/repos/pandas-dev/pandas/pulls/30496
2019-12-26T21:53:14Z
2020-01-02T01:08:40Z
2020-01-02T01:08:40Z
2020-01-02T01:09:59Z
INT: Implement _set_freq for TDI/DTI
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 3bf6dce00a031..868f0553816d0 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -35,7 +35,7 @@ from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.tools.timedeltas import to_timedelta -from pandas.tseries.frequencies import to_offset +from pandas.tseries.frequencies import DateOffset, to_offset _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -71,6 +71,36 @@ def method(self, other): return method +class DatetimeTimedeltaMixin: + """ + Mixin class for methods shared by DatetimeIndex and TimedeltaIndex, + but not PeriodIndex + """ + + def _set_freq(self, freq): + """ + Set the _freq attribute on our underlying DatetimeArray. + + Parameters + ---------- + freq : DateOffset, None, or "infer" + """ + # GH#29843 + if freq is None: + # Always valid + pass + elif len(self) == 0 and isinstance(freq, DateOffset): + # Always valid. In the TimedeltaIndex case, we assume this + # is a Tick offset. + pass + else: + # As an internal method, we can ensure this assertion always holds + assert freq == "infer" + freq = to_offset(self.inferred_freq) + + self._data._freq = freq + + class DatetimeIndexOpsMixin(ExtensionOpsMixin): """ Common ops mixin to support a unified interface datetimelike Index. @@ -592,8 +622,7 @@ def intersection(self, other, sort=False): result = Index.intersection(self, other, sort=sort) if isinstance(result, type(self)): if result.freq is None: - # TODO: find a less code-smelly way to set this - result._data._freq = to_offset(result.inferred_freq) + result._set_freq("infer") return result elif ( @@ -608,8 +637,7 @@ def intersection(self, other, sort=False): # Invalidate the freq of `result`, which may not be correct at # this point, depending on the values. 
- # TODO: find a less code-smelly way to set this - result._data._freq = None + result._set_freq(None) if hasattr(self, "tz"): result = self._shallow_copy( result._values, name=result.name, tz=result.tz, freq=None @@ -617,8 +645,7 @@ def intersection(self, other, sort=False): else: result = self._shallow_copy(result._values, name=result.name, freq=None) if result.freq is None: - # TODO: find a less code-smelly way to set this - result._data._freq = to_offset(result.inferred_freq) + result._set_freq("infer") return result # to make our life easier, "sort" the two ranges diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1fd962dd24656..c81d1076f1015 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -34,6 +34,7 @@ from pandas.core.indexes.datetimelike import ( DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, + DatetimeTimedeltaMixin, ea_passthrough, ) from pandas.core.indexes.numeric import Int64Index @@ -93,7 +94,9 @@ class DatetimeDelegateMixin(DatetimelikeDelegateMixin): typ="method", overwrite=False, ) -class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin): +class DatetimeIndex( + DatetimeTimedeltaMixin, DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin +): """ Immutable ndarray of datetime64 data, represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and @@ -412,7 +415,7 @@ def _convert_for_op(self, value): @Appender(Index.difference.__doc__) def difference(self, other, sort=None): new_idx = super().difference(other, sort=sort) - new_idx._data._freq = None + new_idx._set_freq(None) return new_idx # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 889075ebe4e31..23a42b7173c2c 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -29,6 +29,7 @@ from pandas.core.indexes.datetimelike import ( DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, + DatetimeTimedeltaMixin, ea_passthrough, ) from pandas.core.indexes.numeric import Int64Index @@ -64,7 +65,11 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): overwrite=True, ) class TimedeltaIndex( - DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index, TimedeltaDelegateMixin + DatetimeTimedeltaMixin, + DatetimeIndexOpsMixin, + dtl.TimelikeOps, + Int64Index, + TimedeltaDelegateMixin, ): """ Immutable ndarray of timedelta64 data, represented internally as int64, and @@ -296,8 +301,7 @@ def _union(self, other, sort): result = Index._union(this, other, sort=sort) if isinstance(result, TimedeltaIndex): if result.freq is None: - # TODO: find a less code-smelly way to set this - result._data._freq = to_offset(result.inferred_freq) + result._set_freq("infer") return result def join(self, other, how="left", level=None, return_indexers=False, sort=False): @@ -350,8 +354,7 @@ def intersection(self, other, sort=False): @Appender(Index.difference.__doc__) def difference(self, other, sort=None): new_idx = super().difference(other, sort=sort) - # TODO: find a less code-smelly way to set this - new_idx._data._freq = None + new_idx._set_freq(None) return new_idx def _wrap_joined_index(self, joined, other): diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 2294c846e81c7..bcac5c4d2913b 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1025,8 +1025,7 @@ def _downsample(self, how, **kwargs): if not len(ax): # reset to the 
new freq obj = obj.copy() - # TODO: find a less code-smelly way to set this - obj.index._data._freq = self.freq + obj.index._set_freq(self.freq) return obj # do we have a regular frequency
- [x] closes #29843 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/30495
2019-12-26T20:48:34Z
2019-12-27T09:06:27Z
2019-12-27T09:06:26Z
2019-12-27T16:17:38Z
BUG: compound dtype ndarray passed to Series
diff --git a/pandas/core/series.py b/pandas/core/series.py index ff780356104fa..6d818aa1684c4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -235,6 +235,13 @@ def __init__( copy = False elif isinstance(data, np.ndarray): + if len(data.dtype): + # GH#13296 we are dealing with a compound dtype, which + # should be treated as 2D + raise ValueError( + "Cannot construct a Series from an ndarray with " + "compound dtype. Use DataFrame instead." + ) pass elif isinstance(data, ABCSeries): if name is None: diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index fffb9c577bf3d..20a83ec4cd162 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -67,6 +67,14 @@ def test_invalid_dtype(self): with pytest.raises(TypeError, match=msg): Series([], name="time", dtype=dtype) + def test_invalid_compound_dtype(self): + # GH#13296 + c_dtype = np.dtype([("a", "i8"), ("b", "f4")]) + cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype) + + with pytest.raises(ValueError, match="Use DataFrame instead"): + Series(cdt_arr, index=["A", "B"]) + def test_scalar_conversion(self): # Pass in scalar is disabled
- [x] closes #13296 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
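A minimal before/after sketch based on the test in the diff: constructing a Series from a compound-dtype ndarray now raises, and a DataFrame (one column per field) is the suggested alternative:

```python
import numpy as np
import pandas as pd

c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)

# After this change the following raises
# ValueError: ... Use DataFrame instead.
# pd.Series(cdt_arr, index=["A", "B"])

# A structured array maps naturally onto a DataFrame with columns "a", "b":
df = pd.DataFrame(cdt_arr, index=["A", "B"])
print(df)
```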
https://api.github.com/repos/pandas-dev/pandas/pulls/30494
2019-12-26T20:03:49Z
2019-12-26T21:19:17Z
2019-12-26T21:19:17Z
2019-12-27T00:32:21Z
DOC: standardize wording for changed default args
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 90b970e374a95..4671170fa79ae 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -548,13 +548,13 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - :meth:`DataFrame.hist` and :meth:`Series.hist` no longer allows ``figsize="default"``, specify figure size by passing a tuple instead (:issue:`30003`) - Floordiv of integer-dtyped array by :class:`Timedelta` now raises ``TypeError`` (:issue:`21036`) - :class:`TimedeltaIndex` and :class:`DatetimeIndex` no longer accept non-nanosecond dtype strings like "timedelta64" or "datetime64", use "timedelta64[ns]" and "datetime64[ns]" instead (:issue:`24806`) -- :func:`pandas.api.types.infer_dtype` argument ``skipna`` defaults to ``True`` instead of ``False`` (:issue:`24050`) +- Changed the default "skipna" argument in :func:`pandas.api.types.infer_dtype` from ``False`` to ``True`` (:issue:`24050`) - Removed :attr:`Series.ix` and :attr:`DataFrame.ix` (:issue:`26438`) - Removed :meth:`Index.summary` (:issue:`18217`) - Removed the previously deprecated keyword "fastpath" from the :class:`Index` constructor (:issue:`23110`) - Removed :meth:`Series.get_value`, :meth:`Series.set_value`, :meth:`DataFrame.get_value`, :meth:`DataFrame.set_value` (:issue:`17739`) - Removed :meth:`Series.compound` and :meth:`DataFrame.compound` (:issue:`26405`) -- Changed the the default value of `inplace` in :meth:`DataFrame.set_index` and :meth:`Series.set_axis`. It now defaults to ``False`` (:issue:`27600`) +- Changed the default "inplace" argument in :meth:`DataFrame.set_index` and :meth:`Series.set_axis` from ``None`` to ``False`` (:issue:`27600`) - Removed :attr:`Series.cat.categorical`, :attr:`Series.cat.index`, :attr:`Series.cat.name` (:issue:`24751`) - Removed the previously deprecated keyword "box" from :func:`to_datetime` and :func:`to_timedelta`; in addition these now always returns :class:`DatetimeIndex`, :class:`TimedeltaIndex`, :class:`Index`, :class:`Series`, or :class:`DataFrame` (:issue:`24486`) - :func:`to_timedelta`, :class:`Timedelta`, and :class:`TimedeltaIndex` no longer allow "M", "y", or "Y" for the "unit" argument (:issue:`23264`) @@ -594,7 +594,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - :func:`read_excel` no longer allows an integer value for the parameter ``usecols``, instead pass a list of integers from 0 to ``usecols`` inclusive (:issue:`23635`) - Removed the previously deprecated keyword "convert_datetime64" from :meth:`DataFrame.to_records` (:issue:`18902`) - Removed :meth:`IntervalIndex.from_intervals` in favor of the :class:`IntervalIndex` constructor (:issue:`19263`) -- Changed the default value for the "keep_tz" argument in :meth:`DatetimeIndex.to_series` to ``True`` (:issue:`23739`) +- Changed the default "keep_tz" argument in :meth:`DatetimeIndex.to_series` from ``None`` to ``True`` (:issue:`23739`) - Removed :func:`api.types.is_period` and :func:`api.types.is_datetimetz` (:issue:`23917`) - Ability to read pickles containing :class:`Categorical` instances created with pre-0.16 version of pandas has been removed (:issue:`27538`) - Removed :func:`pandas.tseries.plotting.tsplot` (:issue:`18627`) @@ -603,7 +603,7 @@
- Removed the previously deprecated ``FrozenNDArray`` class in ``pandas.core.indexes.frozen`` (:issue:`29335`) - Removed the previously deprecated keyword "nthreads" from :func:`read_feather`, use "use_threads" instead (:issue:`23053`) - Removed :meth:`Index.is_lexsorted_for_tuple` (:issue:`29305`) -- Removed support for nexted renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`DataFrameGroupBy.aggregate`, :meth:`SeriesGroupBy.aggregate`, :meth:`Rolling.aggregate` (:issue:`29608`) +- Removed support for nested renaming in :meth:`DataFrame.aggregate`, :meth:`Series.aggregate`, :meth:`DataFrameGroupBy.aggregate`, :meth:`SeriesGroupBy.aggregate`, :meth:`Rolling.aggregate` (:issue:`29608`) - Removed :meth:`Series.valid`; use :meth:`Series.dropna` instead (:issue:`18800`) - Removed :attr:`DataFrame.is_copy`, :attr:`Series.is_copy` (:issue:`18812`) - Removed :meth:`DataFrame.get_ftype_counts`, :meth:`Series.get_ftype_counts` (:issue:`18243`) @@ -615,7 +615,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - Removed :meth:`DatetimeIndex.asobject`, :meth:`TimedeltaIndex.asobject`, :meth:`PeriodIndex.asobject`, use ``astype(object)`` instead (:issue:`29801`) - Removed the previously deprecated keyword "order" from :func:`factorize` (:issue:`19751`) - Removed the previously deprecated keyword "encoding" from :func:`read_stata` and :meth:`DataFrame.to_stata` (:issue:`21400`) -- In :func:`concat` the default value for ``sort`` has been changed from ``None`` to ``False`` (:issue:`20613`) +- Changed the default "sort" argument in :func:`concat` from ``None`` to ``False`` (:issue:`20613`) - Removed the previously deprecated keyword "raise_conflict" from :meth:`DataFrame.update`, use "errors" instead (:issue:`23585`) - Removed the previously deprecated keyword "n" from :meth:`DatetimeIndex.shift`, :meth:`TimedeltaIndex.shift`, :meth:`PeriodIndex.shift`, use "periods" instead (:issue:`22458`) - Removed the previously deprecated keywords "how", "fill_method", and "limit" from :meth:`DataFrame.resample` (:issue:`30139`) @@ -631,16 +631,15 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - :meth:`Categorical.ravel` returns a :class:`Categorical` instead of a ``ndarray`` (:issue:`27199`) - The 'outer' method on Numpy ufuncs, e.g. 
``np.subtract.outer`` operating on :class:`Series` objects is no longer supported, and will raise ``NotImplementedError`` (:issue:`27198`) - Removed :meth:`Series.get_dtype_counts` and :meth:`DataFrame.get_dtype_counts` (:issue:`27145`) -- Changed the default ``fill_value`` in :meth:`Categorical.take` from ``True`` to ``False`` (:issue:`20841`) -- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, -- :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` to ``False`` (:issue:`20584`) +- Changed the default "fill_value" argument in :meth:`Categorical.take` from ``True`` to ``False`` (:issue:`20841`) +- Changed the default value for the `raw` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` from ``None`` to ``False`` (:issue:`20584`) - Removed deprecated behavior of :meth:`Series.argmin` and :meth:`Series.argmax`, use :meth:`Series.idxmin` and :meth:`Series.idxmax` for the old behavior (:issue:`16955`) - Passing a tz-aware ``datetime.datetime`` or :class:`Timestamp` into the :class:`Timestamp` constructor with the ``tz`` argument now raises a ``ValueError`` (:issue:`23621`) - Removed :attr:`Series.base`, :attr:`Index.base`, :attr:`Categorical.base`, :attr:`Series.flags`, :attr:`Index.flags`, :attr:`PeriodArray.flags`, :attr:`Series.strides`, :attr:`Index.strides`, :attr:`Series.itemsize`, :attr:`Index.itemsize`, :attr:`Series.data`, :attr:`Index.data` (:issue:`20721`) - Changed :meth:`Timedelta.resolution` to match the behavior of the standard library ``datetime.timedelta.resolution``, for the old behavior, use :meth:`Timedelta.resolution_string` (:issue:`26839`) - Removed :attr:`Timestamp.weekday_name`, :attr:`DatetimeIndex.weekday_name`, and :attr:`Series.dt.weekday_name` (:issue:`18164`) - Removed the previously deprecated keyword "errors" in :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` (:issue:`22644`) -- Changed the default value for ``ordered`` in :class:`CategoricalDtype` from ``None`` to ``False`` (:issue:`26336`) +- Changed the default "ordered" argument in :class:`CategoricalDtype` from ``None`` to ``False`` (:issue:`26336`) - :meth:`Series.set_axis` and :meth:`DataFrame.set_axis` now require "labels" as the first argument and "axis" as an optional named parameter (:issue:`30089`) - Removed :func:`to_msgpack`, :func:`read_msgpack`, :meth:`DataFrame.to_msgpack`, :meth:`Series.to_msgpack` (:issue:`27103`) -
Last of the wording standardizations, plus a few typo fixes; made sure these entries all specify both the old and the new default values.
https://api.github.com/repos/pandas-dev/pandas/pulls/30493
2019-12-26T20:01:30Z
2019-12-26T21:25:22Z
2019-12-26T21:25:22Z
2019-12-27T00:32:47Z
BUG: The setting xrot=0 in DataFrame.hist() doesn't work with by and subplots #30288
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 852694a51e79d..2b45f8a8f0de2 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -828,6 +828,7 @@ Plotting - :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`) - :meth:`DataFrame.plot` now allow a ``backend`` keyword argument to allow changing between backends in one session (:issue:`28619`). - Bug in color validation incorrectly raising for non-color styles (:issue:`29122`). +- Bug in :meth:`DataFrame.hist`, ``xrot=0`` does not work with ``by`` and subplots (:issue:`30288`). Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 8957389ac2b13..f8b2c7ab123d0 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -250,7 +250,8 @@ def _grouped_hist( def plot_group(group, ax): ax.hist(group.dropna().values, bins=bins, **kwargs) - xrot = xrot or rot + if xrot is None: + xrot = rot fig, axes = _grouped_plot( plot_group, diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 6c1c7dfd1a4a4..74d48c10ad9a0 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -253,6 +253,24 @@ def test_tight_layout(self): tm.close() + def test_hist_subplot_xrot(self): + # GH 30288 + df = DataFrame( + { + "length": [1.5, 0.5, 1.2, 0.9, 3], + "animal": ["pig", "rabbit", "pig", "pig", "rabbit"], + } + ) + axes = _check_plot_works( + df.hist, + filterwarnings="always", + column="length", + by="animal", + bins=5, + xrot=0, + ) + self._check_ticks_props(axes, xrot=0) + @td.skip_if_no_mpl class TestDataFrameGroupByPlots(TestPlotBase):
- [x] closes #30288 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
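For context, the root cause is plain Python truthiness: the old `xrot = xrot or rot` silently replaces a user-supplied `xrot=0` because `0` is falsy. A minimal sketch of the pitfall and the fix (variable names are illustrative):

```python
xrot, rot = 0, 30

# Buggy: `or` falls through on any falsy value, so an explicit 0 is lost.
buggy = xrot or rot
print(buggy)  # 30

# Fixed: fall back to `rot` only when xrot was not supplied at all.
fixed = rot if xrot is None else xrot
print(fixed)  # 0
```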
https://api.github.com/repos/pandas-dev/pandas/pulls/30491
2019-12-26T19:39:04Z
2019-12-27T16:29:54Z
2019-12-27T16:29:54Z
2019-12-27T16:34:28Z
CLN: Remove have_pytz (gh-17251)
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 273dc06886088..b71963fdef1c1 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -32,13 +32,9 @@ cdef extern from "datetime.h": cdef int64_t iNaT = util.get_nat() -try: - from dateutil.tz import tzutc as _du_utc - import pytz - UTC = pytz.utc - have_pytz = True -except ImportError: - have_pytz = False +from dateutil.tz import tzutc as _du_utc +import pytz +UTC = pytz.utc PyDateTime_IMPORT diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 1db31387de5a7..506fec28f5041 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -3,8 +3,7 @@ import operator from cpython cimport ( PyObject_RichCompareBool, - Py_EQ, Py_NE, -) + Py_EQ, Py_NE) from numpy cimport (int8_t, int32_t, int64_t, import_array, ndarray, NPY_INT64, NPY_DATETIME, NPY_TIMEDELTA) @@ -24,14 +23,13 @@ cimport util, lib from lib cimport is_null_datetimelike, is_period from pandas._libs import tslib, lib from pandas._libs.tslib import (Timedelta, Timestamp, iNaT, - NaT, have_pytz, _get_utcoffset) + NaT, _get_utcoffset) from tslib cimport ( maybe_get_tz, _is_utc, _is_tzlocal, _get_dst_info, - _nat_scalar_rules, -) + _nat_scalar_rules) from pandas.tseries import offsets from pandas.core.tools.datetimes import parse_time_string @@ -610,9 +608,6 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, ndarray[int64_t] trans, deltas, pos pandas_datetimestruct dts - if not have_pytz: - raise Exception('Could not find pytz module') - if _is_utc(tz): for i in range(n): if stamps[i] == NPY_NAT: diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 44be9ba56b84a..e1ba4169ed629 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -4080,12 +4080,8 @@ def i8_to_pydt(int64_t i8, object tzinfo = None): #---------------------------------------------------------------------- # time zone conversion helpers -try: - import pytz - UTC = pytz.utc - have_pytz = True -except: - have_pytz = False +import pytz +UTC = pytz.utc @cython.boundscheck(False) @@ -4112,9 +4108,6 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): int64_t v, offset, delta pandas_datetimestruct dts - if not have_pytz: - import pytz - if len(vals) == 0: return np.array([], dtype=np.int64) @@ -4229,9 +4222,6 @@ def tz_convert_single(int64_t val, object tz1, object tz2): int64_t v, offset, utc_date pandas_datetimestruct dts - if not have_pytz: - import pytz - if val == NPY_NAT: return val @@ -4444,9 +4434,6 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, assert is_coerce or is_raise - if not have_pytz: - raise Exception("Could not find pytz module") - if tz == UTC or tz is None: return vals
- [x] closes #17251 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
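For readers unfamiliar with the pattern being removed: the Cython modules guarded the pytz import and re-checked a `have_pytz` flag at call time. Since pytz is a hard dependency of pandas, the guard is dead code. A plain-Python sketch of the before/after (not the actual Cython source):

```python
# Before: optional-import guard plus runtime availability checks.
try:
    import pytz
    UTC = pytz.utc
    have_pytz = True
except ImportError:
    have_pytz = False

def tz_convert(vals, tz1, tz2):
    if not have_pytz:
        raise Exception("Could not find pytz module")
    ...  # conversion logic

# After: pytz is required, so a plain import suffices and an ImportError
# at import time is the right failure mode.
import pytz
UTC = pytz.utc
```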
https://api.github.com/repos/pandas-dev/pandas/pulls/17266
2017-08-16T17:33:34Z
2017-08-16T20:52:30Z
2017-08-16T20:52:30Z
2017-10-30T16:24:02Z
ENH: improve 'incompatible tolerance' error message in merge_asof
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 0234a5563326c..412c00dc95ec0 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -126,7 +126,7 @@ def _groupby_and_merge(by, on, left, right, _merge_pieces, try: if k in merged: merged[k] = key - except: + except KeyError: pass pieces.append(merged) @@ -1268,8 +1268,10 @@ def _get_merge_keys(self): else: lt = left_join_keys[-1] - msg = "incompatible tolerance, must be compat " \ - "with type {lt}".format(lt=type(lt)) + msg = ("incompatible tolerance {tolerance}, must be compat " + "with type {lkdtype}".format( + tolerance=type(self.tolerance), + lkdtype=lt.dtype)) if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): if not isinstance(self.tolerance, Timedelta): @@ -1505,12 +1507,12 @@ def _sort_labels(uniques, left, right): # tuplesafe uniques = Index(uniques).values - l = len(left) + llength = len(left) labels = np.concatenate([left, right]) _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) new_labels = _ensure_int64(new_labels) - new_left, new_right = new_labels[:l], new_labels[l:] + new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right
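A hedged illustration of the improved message (the frames are made up; the old message printed only the Python type of the join key, while the new one names both the tolerance's type and the key's dtype):

```python
import pandas as pd

left = pd.DataFrame({"t": pd.to_datetime(["2017-01-01"]), "x": [1]})
right = pd.DataFrame({"t": pd.to_datetime(["2017-01-02"]), "y": [2]})

# An int tolerance is incompatible with a datetime64[ns] join key:
pd.merge_asof(left, right, on="t", tolerance=2)
# MergeError: incompatible tolerance <class 'int'>, must be compat
# with type datetime64[ns]
```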
https://api.github.com/repos/pandas-dev/pandas/pulls/17260
2017-08-15T16:52:59Z
2017-11-10T23:27:45Z
2017-11-10T23:27:45Z
2017-11-10T23:27:45Z
TST: pytest deprecation warnings GH17197
diff --git a/appveyor.yml b/appveyor.yml index 684b859c206b2..65e62f887554e 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -72,7 +72,7 @@ install: - cmd: conda info -a # create our env - - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest pytest-xdist + - cmd: conda create -n pandas python=%PYTHON_VERSION% cython pytest>=3.1.0 pytest-xdist - cmd: activate pandas - SET REQ=ci\requirements-%PYTHON_VERSION%_WIN.run - cmd: echo "installing requirements from %REQ%" diff --git a/ci/install_circle.sh b/ci/install_circle.sh index 00e14b10ebbd6..29ca69970104b 100755 --- a/ci/install_circle.sh +++ b/ci/install_circle.sh @@ -64,7 +64,7 @@ fi # create envbuild deps echo "[create env: ${REQ_BUILD}]" time conda create -n pandas -q --file=${REQ_BUILD} || exit 1 -time conda install -n pandas pytest || exit 1 +time conda install -n pandas pytest>=3.1.0 || exit 1 source activate pandas diff --git a/ci/install_travis.sh b/ci/install_travis.sh index df6969c7cc659..ad8f0bdd8a597 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -103,7 +103,7 @@ if [ -e ${REQ} ]; then time bash $REQ || exit 1 fi -time conda install -n pandas pytest +time conda install -n pandas pytest>=3.1.0 time pip install pytest-xdist if [ "$LINT" ]; then diff --git a/ci/requirements_all.txt b/ci/requirements_all.txt index de37ec4d20be4..b153b6989df86 100644 --- a/ci/requirements_all.txt +++ b/ci/requirements_all.txt @@ -1,4 +1,4 @@ -pytest +pytest>=3.1.0 pytest-cov pytest-xdist flake8 diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index 1e051802ec9f8..c7190c506ba18 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -2,6 +2,6 @@ python-dateutil pytz numpy cython -pytest +pytest>=3.1.0 pytest-cov flake8 diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index b44d0f36b86a1..e172d0d2a71a2 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -598,6 +598,10 @@ Like many packages, *pandas* uses `pytest extensions in `numpy.testing <http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_. +.. note:: + + The earliest supported pytest version is 3.1.0. + Writing tests ~~~~~~~~~~~~~ @@ -654,7 +658,9 @@ Using ``pytest`` Here is an example of a self-contained set of tests that illustrate multiple features that we like to use. - functional style: tests are like ``test_*`` and *only* take arguments that are either fixtures or parameters +- ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``. - using ``parametrize``: allow testing of multiple cases +- to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used - ``fixture``, code for object construction, on a per-test basis - using bare ``assert`` for scalars and truth-testing - ``tm.assert_series_equal`` (and its counter part ``tm.assert_frame_equal``), for pandas object comparisons. 
@@ -673,6 +679,13 @@ We would name this file ``test_cool_feature.py`` and put in an appropriate place def test_dtypes(dtype): assert str(np.dtype(dtype)) == dtype + @pytest.mark.parametrize('dtype', ['float32', + pytest.param('int16', marks=pytest.mark.skip), + pytest.param('int32', + marks=pytest.mark.xfail(reason='to show how it works'))]) + def test_mark(dtype): + assert str(np.dtype(dtype)) == 'float32' + @pytest.fixture def series(): return pd.Series([1, 2, 3]) @@ -695,13 +708,16 @@ A test run of this yields ((pandas) bash-3.2$ pytest test_cool_feature.py -v =========================== test session starts =========================== - platform darwin -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - collected 8 items + platform darwin -- Python 3.6.2, pytest-3.2.1, py-1.4.31, pluggy-0.4.0 + collected 11 items tester.py::test_dtypes[int8] PASSED tester.py::test_dtypes[int16] PASSED tester.py::test_dtypes[int32] PASSED tester.py::test_dtypes[int64] PASSED + tester.py::test_mark[float32] PASSED + tester.py::test_mark[int16] SKIPPED + tester.py::test_mark[int32] xfail tester.py::test_series[int8] PASSED tester.py::test_series[int16] PASSED tester.py::test_series[int32] PASSED @@ -714,8 +730,8 @@ Tests that we have ``parametrized`` are now accessible via the test name, for ex ((pandas) bash-3.2$ pytest test_cool_feature.py -v -k int8 =========================== test session starts =========================== - platform darwin -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - collected 8 items + platform darwin -- Python 3.6.2, pytest-3.2.1, py-1.4.31, pluggy-0.4.0 + collected 11 items test_cool_feature.py::test_dtypes[int8] PASSED test_cool_feature.py::test_series[int8] PASSED diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 7fc091ebb1892..d2874b1606e72 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -38,13 +38,14 @@ @pytest.fixture(params=( - pytest.mark.skipif(engine == 'numexpr' and not _USE_NUMEXPR, - reason='numexpr enabled->{enabled}, ' - 'installed->{installed}'.format( - enabled=_USE_NUMEXPR, - installed=_NUMEXPR_INSTALLED))(engine) - for engine in _engines # noqa -)) + pytest.param(engine, + marks=pytest.mark.skipif( + engine == 'numexpr' and not _USE_NUMEXPR, + reason='numexpr enabled->{enabled}, ' + 'installed->{installed}'.format( + enabled=_USE_NUMEXPR, + installed=_NUMEXPR_INSTALLED))) + for engine in _engines)) # noqa def engine(request): return request.param diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index cfa60248605ad..3344243f8137a 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -23,8 +23,10 @@ def salaries_table(): @pytest.mark.parametrize( "compression,extension", [('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'), - pytest.mark.skipif(not tm._check_if_lzma(), - reason='need backports.lzma to run')(('xz', '.xz'))]) + pytest.param('xz', '.xz', + marks=pytest.mark.skipif(not tm._check_if_lzma(), + reason='need backports.lzma ' + 'to run'))]) @pytest.mark.parametrize('mode', ['explicit', 'infer']) @pytest.mark.parametrize('engine', ['python', 'c']) def test_compressed_urls(salaries_table, compression, extension, mode, engine): diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 856e8d6466526..92147b46097b8 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -2400,8 +2400,10 @@ def check_called(func): 
@pytest.mark.parametrize('engine', [ - pytest.mark.xfail('xlwt', reason='xlwt does not support ' - 'openpyxl-compatible style dicts'), + pytest.param('xlwt', + marks=pytest.mark.xfail(reason='xlwt does not support ' + 'openpyxl-compatible ' + 'style dicts')), 'xlsxwriter', 'openpyxl', ]) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index ff0935c7dcc6f..78c72e2a05566 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -26,10 +26,14 @@ # setup engines & skips @pytest.fixture(params=[ - pytest.mark.skipif(not _HAVE_FASTPARQUET, - reason='fastparquet is not installed')('fastparquet'), - pytest.mark.skipif(not _HAVE_PYARROW, - reason='pyarrow is not installed')('pyarrow')]) + pytest.param('fastparquet', + marks=pytest.mark.skipif(not _HAVE_FASTPARQUET, + reason='fastparquet is ' + 'not installed')), + pytest.param('pyarrow', + marks=pytest.mark.skipif(not _HAVE_PYARROW, + reason='pyarrow is ' + 'not installed'))]) def engine(request): return request.param diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index d94e34c41786b..21a9b05d48126 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -530,8 +530,9 @@ def test_numpy_compat(self): @pytest.mark.parametrize( 'expander', - [1, pytest.mark.xfail( - reason='GH 16425 expanding with offset not supported')('1s')]) + [1, pytest.param('ls', marks=pytest.mark.xfail( + reason='GH 16425 expanding with ' + 'offset not supported'))]) def test_empty_df_expanding(self, expander): # GH 15819 Verifies that datetime and integer expanding windows can be # applied to empty DataFrames
Test parameters with marks have been updated to the current pytest API, `pytest.param(..., marks=...)`. https://docs.pytest.org/en/latest/changelog.html#pytest-3-2-0-2017-07-30 https://docs.pytest.org/en/latest/parametrize.html - [x] closes #17197 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
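A minimal sketch of the two spellings, mirroring the example added to the contributing docs:

```python
import pytest

# Deprecated: applying a mark by calling it on the parameter value, e.g.
#   pytest.mark.skip('int16')
# Current API: wrap the value in pytest.param and pass marks= explicitly.
@pytest.mark.parametrize('dtype', [
    'float32',
    pytest.param('int16', marks=pytest.mark.skip),
    pytest.param('int32', marks=pytest.mark.xfail(reason='to show how it works')),
])
def test_mark(dtype):
    assert dtype == 'float32'
```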
https://api.github.com/repos/pandas-dev/pandas/pulls/17253
2017-08-15T06:52:55Z
2017-08-17T22:39:38Z
2017-08-17T22:39:37Z
2017-08-19T23:57:10Z
CLN: replace %s syntax with .format in pandas.core.reshape
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 20d561738dc78..9e180c624963c 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -220,7 +220,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, if isinstance(objs, (NDFrame, compat.string_types)): raise TypeError('first argument must be an iterable of pandas ' 'objects, you passed an object of type ' - '"{0}"'.format(type(objs).__name__)) + '"{name}"'.format(name=type(objs).__name__)) if join == 'outer': self.intersect = False @@ -309,8 +309,8 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, self._is_series = isinstance(sample, Series) if not 0 <= axis <= sample.ndim: - raise AssertionError("axis must be between 0 and {0}, " - "input was {1}".format(sample.ndim, axis)) + raise AssertionError("axis must be between 0 and {ndim}, input was" + " {axis}".format(ndim=sample.ndim, axis=axis)) # if we have mixed ndims, then convert to highest ndim # creating column numbers as needed @@ -431,8 +431,8 @@ def _get_new_axes(self): new_axes[i] = self._get_comb_axis(i) else: if len(self.join_axes) != ndim - 1: - raise AssertionError("length of join_axes must not be " - "equal to {0}".format(ndim - 1)) + raise AssertionError("length of join_axes must not be equal " + "to {length}".format(length=ndim - 1)) # ufff... indices = compat.lrange(ndim) @@ -451,7 +451,8 @@ def _get_comb_axis(self, i): intersect=self.intersect) except IndexError: types = [type(x).__name__ for x in self.objs] - raise TypeError("Cannot concatenate list of %s" % types) + raise TypeError("Cannot concatenate list of {types}" + .format(types=types)) def _get_concat_axis(self): """ @@ -470,8 +471,8 @@ def _get_concat_axis(self): for i, x in enumerate(self.objs): if not isinstance(x, Series): raise TypeError("Cannot concatenate type 'Series' " - "with object of type " - "%r" % type(x).__name__) + "with object of type {type!r}" + .format(type=type(x).__name__)) if x.name is not None: names[i] = x.name has_names = True @@ -505,8 +506,8 @@ def _maybe_check_integrity(self, concat_index): if self.verify_integrity: if not concat_index.is_unique: overlap = concat_index.get_duplicates() - raise ValueError('Indexes have overlapping values: %s' - % str(overlap)) + raise ValueError('Indexes have overlapping values: ' + '{overlap!s}'.format(overlap=overlap)) def _concat_indexes(indexes): @@ -547,8 +548,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): try: i = level.get_loc(key) except KeyError: - raise ValueError('Key %s not in level %s' - % (str(key), str(level))) + raise ValueError('Key {key!s} not in level {level!s}' + .format(key=key, level=level)) to_concat.append(np.repeat(i, len(index))) label_list.append(np.concatenate(to_concat)) @@ -597,8 +598,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): mask = mapped == -1 if mask.any(): - raise ValueError('Values not found in passed level: %s' - % str(hlevel[mask])) + raise ValueError('Values not found in passed level: {hlevel!s}' + .format(hlevel=hlevel[mask])) new_labels.append(np.repeat(mapped, n)) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 00828b8241f4c..947300a28e510 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -534,28 +534,27 @@ def __init__(self, left, right, how='inner', on=None, 'indicator option can only accept boolean or string arguments') if not isinstance(left, DataFrame): - raise ValueError( - 'can not merge DataFrame with 
instance of ' - 'type {0}'.format(type(left))) + raise ValueError('can not merge DataFrame with instance of ' + 'type {left}'.format(left=type(left))) if not isinstance(right, DataFrame): - raise ValueError( - 'can not merge DataFrame with instance of ' - 'type {0}'.format(type(right))) + raise ValueError('can not merge DataFrame with instance of ' + 'type {right}'.format(right=type(right))) if not is_bool(left_index): raise ValueError( 'left_index parameter must be of type bool, not ' - '{0}'.format(type(left_index))) + '{left_index}'.format(left_index=type(left_index))) if not is_bool(right_index): raise ValueError( 'right_index parameter must be of type bool, not ' - '{0}'.format(type(right_index))) + '{right_index}'.format(right_index=type(right_index))) # warn user when merging between different levels if left.columns.nlevels != right.columns.nlevels: msg = ('merging between different levels can give an unintended ' - 'result ({0} levels on the left, {1} on the right)') - msg = msg.format(left.columns.nlevels, right.columns.nlevels) + 'result ({left} levels on the left, {right} on the right)' + ).format(left=left.columns.nlevels, + right=right.columns.nlevels) warnings.warn(msg, UserWarning) self._validate_specification() @@ -613,7 +612,8 @@ def _indicator_pre_merge(self, left, right): for i in ['_left_indicator', '_right_indicator']: if i in columns: raise ValueError("Cannot use `indicator=True` option when " - "data contains a column named {}".format(i)) + "data contains a column named {name}" + .format(name=i)) if self.indicator_name in columns: raise ValueError( "Cannot use name of an existing column for indicator column") @@ -717,7 +717,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): if name in result: result[name] = key_col else: - result.insert(i, name or 'key_%d' % i, key_col) + result.insert(i, name or 'key_{i}'.format(i=i), key_col) def _get_join_indexers(self): """ return the join indexers """ @@ -952,8 +952,8 @@ def _validate_specification(self): if len(common_cols) == 0: raise MergeError('No common columns to perform merge on') if not common_cols.is_unique: - raise MergeError("Data columns not unique: %s" - % repr(common_cols)) + raise MergeError("Data columns not unique: {common!r}" + .format(common=common_cols)) self.left_on = self.right_on = common_cols elif self.on is not None: if self.left_on is not None or self.right_on is not None: @@ -1119,12 +1119,14 @@ def get_result(self): def _asof_function(direction, on_type): - return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None) + name = 'asof_join_{dir}_{on}'.format(dir=direction, on=on_type) + return getattr(libjoin, name, None) def _asof_by_function(direction, on_type, by_type): - return getattr(libjoin, 'asof_join_%s_%s_by_%s' % - (direction, on_type, by_type), None) + name = 'asof_join_{dir}_{on}_by_{by}'.format( + dir=direction, on=on_type, by=by_type) + return getattr(libjoin, name, None) _type_casters = { @@ -1153,7 +1155,7 @@ def _get_cython_type(dtype): type_name = _get_dtype(dtype).name ctype = _cython_types.get(type_name, 'object') if ctype == 'error': - raise MergeError('unsupported type: ' + type_name) + raise MergeError('unsupported type: {type}'.format(type=type_name)) return ctype @@ -1235,7 +1237,8 @@ def _validate_specification(self): # check 'direction' is valid if self.direction not in ['backward', 'forward', 'nearest']: - raise MergeError('direction invalid: ' + self.direction) + raise MergeError('direction invalid: {direction}' + 
.format(direction=self.direction)) @property def _asof_key(self): @@ -1264,7 +1267,7 @@ def _get_merge_keys(self): lt = left_join_keys[-1] msg = "incompatible tolerance, must be compat " \ - "with type {0}".format(type(lt)) + "with type {lt}".format(lt=type(lt)) if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): if not isinstance(self.tolerance, Timedelta): @@ -1283,8 +1286,8 @@ def _get_merge_keys(self): # validate allow_exact_matches if not is_bool(self.allow_exact_matches): - raise MergeError("allow_exact_matches must be boolean, " - "passed {0}".format(self.allow_exact_matches)) + msg = "allow_exact_matches must be boolean, passed {passed}" + raise MergeError(msg.format(passed=self.allow_exact_matches)) return left_join_keys, right_join_keys, join_names @@ -1306,11 +1309,11 @@ def flip(xs): tolerance = self.tolerance # we required sortedness in the join keys - msg = " keys must be sorted" + msg = "{side} keys must be sorted" if not Index(left_values).is_monotonic: - raise ValueError('left' + msg) + raise ValueError(msg.format(side='left')) if not Index(right_values).is_monotonic: - raise ValueError('right' + msg) + raise ValueError(msg.format(side='right')) # initial type conversion as needed if needs_i8_conversion(left_values): diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index e08c307bba818..f07123ca18489 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -145,10 +145,10 @@ def _add_margins(table, data, values, rows, cols, aggfunc, if not isinstance(margins_name, compat.string_types): raise ValueError('margins_name argument must be a string') - exception_msg = 'Conflicting name "{0}" in margins'.format(margins_name) + msg = 'Conflicting name "{name}" in margins'.format(name=margins_name) for level in table.index.names: if margins_name in table.index.get_level_values(level): - raise ValueError(exception_msg) + raise ValueError(msg) grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name) @@ -156,7 +156,7 @@ def _add_margins(table, data, values, rows, cols, aggfunc, if hasattr(table, 'columns'): for level in table.columns.names[1:]: if margins_name in table.columns.get_level_values(level): - raise ValueError(exception_msg) + raise ValueError(msg) if len(rows) > 1: key = (margins_name,) + ('',) * (len(rows) - 1) @@ -553,7 +553,7 @@ def _get_names(arrs, names, prefix='row'): if isinstance(arr, ABCSeries) and arr.name is not None: names.append(arr.name) else: - names.append('%s_%d' % (prefix, i)) + names.append('{prefix}_{i}'.format(prefix=prefix, i=i)) else: if len(names) != len(arrs): raise AssertionError('arrays and names must have the same length') diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index b7638471f2ad0..455da9246783c 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1,6 +1,6 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 -from pandas.compat import range, zip +from pandas.compat import range, text_type, zip from pandas import compat import itertools import re @@ -91,8 +91,8 @@ def __init__(self, values, index, level=-1, value_columns=None, if isinstance(self.index, MultiIndex): if index._reference_duplicate_name(level): - msg = ("Ambiguous reference to {0}. The index " - "names are not unique.".format(level)) + msg = ("Ambiguous reference to {level}. 
The index " + "names are not unique.".format(level=level)) raise ValueError(msg) self.level = self.index._get_level_number(level) @@ -229,7 +229,7 @@ def get_new_values(self): sorted_values = sorted_values.astype(name, copy=False) # fill in our values & mask - f = getattr(_reshape, "unstack_{}".format(name)) + f = getattr(_reshape, "unstack_{name}".format(name=name)) f(sorted_values, mask.view('u1'), stride, @@ -516,8 +516,8 @@ def factorize(index): N, K = frame.shape if isinstance(frame.columns, MultiIndex): if frame.columns._reference_duplicate_name(level): - msg = ("Ambiguous reference to {0}. The column " - "names are not unique.".format(level)) + msg = ("Ambiguous reference to {level}. The column " + "names are not unique.".format(level=level)) raise ValueError(msg) # Will also convert negative level numbers and check if out of bounds. @@ -747,7 +747,7 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None, if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: - var_name = ['variable_%s' % i + var_name = ['variable_{i}'.format(i=i) for i in range(len(frame.columns.names))] else: var_name = [frame.columns.name if frame.columns.name is not None @@ -1027,7 +1027,8 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'): in a typicaly case. """ def get_var_names(df, stub, sep, suffix): - regex = "^{0}{1}{2}".format(re.escape(stub), re.escape(sep), suffix) + regex = "^{stub}{sep}{suffix}".format( + stub=re.escape(stub), sep=re.escape(sep), suffix=suffix) return df.filter(regex=regex).columns.tolist() def melt_stub(df, stub, i, j, value_vars, sep): @@ -1180,13 +1181,14 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, # validate prefixes and separator to avoid silently dropping cols def check_len(item, name): - length_msg = ("Length of '{0}' ({1}) did not match the length of " - "the columns being encoded ({2}).") + len_msg = ("Length of '{name}' ({len_item}) did not match the " + "length of the columns being encoded ({len_enc}).") if is_list_like(item): if not len(item) == len(columns_to_encode): - raise ValueError(length_msg.format(name, len(item), - len(columns_to_encode))) + len_msg = len_msg.format(name=name, len_item=len(item), + len_enc=len(columns_to_encode)) + raise ValueError(len_msg) check_len(prefix, 'prefix') check_len(prefix_sep, 'prefix_sep') @@ -1253,7 +1255,10 @@ def get_empty_Frame(data, sparse): number_of_cols = len(levels) if prefix is not None: - dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels] + dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type) + else '{prefix}{sep}{level}' for v in levels] + dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v) + for dummy_str, v in zip(dummy_strs, levels)] else: dummy_cols = levels diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 1cb39faa2e869..2f5538556fa6d 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -229,9 +229,9 @@ def _bins_to_cuts(x, bins, right=True, labels=None, unique_bins = algos.unique(bins) if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == 'raise': - raise ValueError("Bin edges must be unique: {}.\nYou " + raise ValueError("Bin edges must be unique: {bins!r}.\nYou " "can drop duplicate edges by setting " - "the 'duplicates' kwarg".format(repr(bins))) + "the 'duplicates' kwarg".format(bins=bins)) else: bins = unique_bins
Progress towards #16130 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Replaced `%s` formatting with `.format` in `pandas.core.reshape`, and made some of the existing positional `.format` calls more explicit by switching them to named fields.
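As a quick illustration of the conversion (this pair is adapted from one of the messages in `concat.py`):

```python
types = ['DataFrame', 'dict']

# Old: positional %-interpolation.
old = "Cannot concatenate list of %s" % types

# New: named-field str.format, as used throughout this change.
new = "Cannot concatenate list of {types}".format(types=types)

assert old == new
```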
https://api.github.com/repos/pandas-dev/pandas/pulls/17252
2017-08-15T02:58:14Z
2017-08-15T18:26:07Z
2017-08-15T18:26:06Z
2017-08-15T18:34:23Z
BUG: Have object dtype for empty Categorical.categories
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index e21ee8d7d31f5..a7996ab76a1ca 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -379,6 +379,9 @@ Numeric Categorical ^^^^^^^^^^^ - Bug in :func:`Series.isin` when called with a categorical (:issue`16639`) +- Bug in the categorical constructor with empty values and categories causing + the ``.categories`` to be an empty ``Float64Index`` rather than an empty + ``Index`` with object dtype (:issue:`17248`) Other diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 230361931125e..1c2a29333001c 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -290,7 +290,10 @@ def __init__(self, values, categories=None, ordered=False, fastpath=False): # On list with NaNs, int values will be converted to float. Use # "object" dtype to prevent this. In the end objects will be # casted to int/... in the category assignment step. - dtype = 'object' if isna(values).any() else None + if len(values) == 0 or isna(values).any(): + dtype = 'object' + else: + dtype = None values = _sanitize_array(values, None, dtype=dtype) if categories is None: diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index da1b309f5a621..c66775f4690cc 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -776,7 +776,7 @@ def test_from_arrays_empty(self): arrays = [[]] * N names = list('ABC')[:N] result = MultiIndex.from_arrays(arrays=arrays, names=names) - expected = MultiIndex(levels=[np.array([])] * N, labels=[[]] * N, + expected = MultiIndex(levels=[[]] * N, labels=[[]] * N, names=names) tm.assert_index_equal(result, expected) @@ -829,7 +829,7 @@ def test_from_product_empty(self): # 1 level result = MultiIndex.from_product([[]], names=['A']) - expected = pd.Float64Index([], name='A') + expected = pd.Index([], name='A') tm.assert_index_equal(result, expected) # 2 levels @@ -838,7 +838,7 @@ def test_from_product_empty(self): names = ['A', 'B'] for first, second in zip(l1, l2): result = MultiIndex.from_product([first, second], names=names) - expected = MultiIndex(levels=[np.array(first), np.array(second)], + expected = MultiIndex(levels=[first, second], labels=[[], []], names=names) tm.assert_index_equal(result, expected) @@ -847,8 +847,7 @@ def test_from_product_empty(self): for N in range(4): lvl2 = lrange(N) result = MultiIndex.from_product([[], lvl2, []], names=names) - expected = MultiIndex(levels=[np.array(A) - for A in [[], lvl2, []]], + expected = MultiIndex(levels=[[], lvl2, []], labels=[[], [], []], names=names) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 46fea86c45925..52cd18126859a 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -680,7 +680,7 @@ def test_concat_categorical_empty(self): tm.assert_series_equal(s1.append(s2, ignore_index=True), s2) s1 = pd.Series([], dtype='category') - s2 = pd.Series([]) + s2 = pd.Series([], dtype='object') # different dtype => not-category tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) diff --git a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py index fe8d54005ba9b..eb80fb54b4016 100644 --- a/pandas/tests/reshape/test_union_categoricals.py +++ b/pandas/tests/reshape/test_union_categoricals.py @@ -107,17 +107,11 @@ def test_union_categoricals_empty(self): exp = 
Categorical([]) tm.assert_categorical_equal(res, exp) - res = union_categoricals([pd.Categorical([]), - pd.Categorical([1.0])]) - exp = Categorical([1.0]) + res = union_categoricals([Categorical([]), + Categorical(['1'])]) + exp = Categorical(['1']) tm.assert_categorical_equal(res, exp) - # to make dtype equal - nanc = pd.Categorical(np.array([np.nan], dtype=np.float64)) - res = union_categoricals([nanc, - pd.Categorical([])]) - tm.assert_categorical_equal(res, nanc) - def test_union_categorical_same_category(self): # check fastpath c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4]) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index eecdd672095b0..032fb698c47a6 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -112,6 +112,16 @@ def test_setitem_listlike(self): result = c.codes[np.array([100000]).astype(np.int64)] tm.assert_numpy_array_equal(result, np.array([5], dtype='int8')) + def test_constructor_empty(self): + # GH 17248 + c = Categorical([]) + expected = Index([]) + tm.assert_index_equal(c.categories, expected) + + c = Categorical([], categories=[1, 2, 3]) + expected = pd.Int64Index([1, 2, 3]) + tm.assert_index_equal(c.categories, expected) + def test_constructor_unsortable(self): # it works!
Closes https://github.com/pandas-dev/pandas/issues/17248
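A short sketch of the change in behavior (output shown as expected after this fix):

```python
import pandas as pd

c = pd.Categorical([])
print(c.categories)
# Before: Float64Index([], dtype='float64')
# After:  Index([], dtype='object')

# Explicitly passed categories keep their own dtype:
c = pd.Categorical([], categories=[1, 2, 3])
print(c.categories)  # Int64Index([1, 2, 3], dtype='int64')
```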
https://api.github.com/repos/pandas-dev/pandas/pulls/17249
2017-08-14T17:52:25Z
2017-08-19T11:27:06Z
2017-08-19T11:27:06Z
2017-08-19T11:27:09Z
Fix apparent typo in tests
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 7ff9c2b23cbfb..9764b65d330af 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1112,9 +1112,9 @@ def test_parsers(self): result3 = Timestamp('NaT') result4 = DatetimeIndex(['NaT'])[0] assert result1 is tslib.NaT - assert result1 is tslib.NaT - assert result1 is tslib.NaT - assert result1 is tslib.NaT + assert result2 is tslib.NaT + assert result3 is tslib.NaT + assert result4 is tslib.NaT def test_parsers_quarter_invalid(self):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17247
2017-08-14T17:22:38Z
2017-08-14T18:19:38Z
2017-08-14T18:19:38Z
2017-10-30T16:24:01Z
Make pd.Period immutable
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index f760d0b6359a2..604d275511fa0 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -291,6 +291,8 @@ Other API Changes - Moved definition of ``MergeError`` to the ``pandas.errors`` module. - The signature of :func:`Series.set_axis` and :func:`DataFrame.set_axis` has been changed from ``set_axis(axis, labels)`` to ``set_axis(labels, axis=0)``, for consistency with the rest of the API. The old signature is deprecated and will show a ``FutureWarning`` (:issue:`14636`) - :func:`Series.argmin` and :func:`Series.argmax` will now raise a ``TypeError`` when used with ``object`` dtypes, instead of a ``ValueError`` (:issue:`13595`) +- :class:`Period` is now immutable, and will now raise an ``AttributeError`` when a user tries to assign a new value to the ``ordinal`` or ``freq`` attributes (:issue:`17116`). + .. _whatsnew_0210.deprecations: diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index 6ba7ec0270f30..a1d04fea89151 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -29,7 +29,9 @@ from datetime cimport ( PANDAS_FR_ns, INT32_MIN) + cimport util, lib + from lib cimport is_null_datetimelike, is_period from pandas._libs import tslib, lib from pandas._libs.tslib import (Timedelta, Timestamp, iNaT, @@ -668,13 +670,17 @@ class IncompatibleFrequency(ValueError): cdef class _Period(object): - cdef public: + cdef readonly: int64_t ordinal object freq _comparables = ['name', 'freqstr'] _typ = 'period' + def __cinit__(self, ordinal, freq): + self.ordinal = ordinal + self.freq = freq + @classmethod def _maybe_convert_freq(cls, object freq): @@ -698,9 +704,8 @@ cdef class _Period(object): if ordinal == iNaT: return NaT else: - self = _Period.__new__(cls) - self.ordinal = ordinal - self.freq = cls._maybe_convert_freq(freq) + freq = cls._maybe_convert_freq(freq) + self = _Period.__new__(cls, ordinal, freq) return self def __richcmp__(self, other, op): @@ -752,7 +757,7 @@ cdef class _Period(object): def __add__(self, other): if isinstance(self, Period): if isinstance(other, (timedelta, np.timedelta64, - offsets.Tick, offsets.DateOffset, + offsets.DateOffset, Timedelta)): return self._add_delta(other) elif other is NaT: @@ -770,7 +775,7 @@ cdef class _Period(object): def __sub__(self, other): if isinstance(self, Period): if isinstance(other, (timedelta, np.timedelta64, - offsets.Tick, offsets.DateOffset, + offsets.DateOffset, Timedelta)): neg_other = -other return self + neg_other diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index 931d6b2b8f1f0..a167c9c738b0b 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -1406,3 +1406,14 @@ def test_period_ops_offset(self): with tm.assert_raises_regex(period.IncompatibleFrequency, msg): p - offsets.Hour(2) + + +def test_period_immutable(): + # see gh-17116 + per = pd.Period('2014Q1') + with pytest.raises(AttributeError): + per.ordinal = 14 + + freq = per.freq + with pytest.raises(AttributeError): + per.freq = 2 * freq
Follows @jreback's suggestion in #17116 to define `_Period.ordinal` and `_Period.freq` as `cdef readonly` attributes. This requires massaging `__new__`/`__cinit__` a bit. Adds a quick test that these attributes can't be reassigned. Note that this doesn't make `Period` fully immutable, because `per.freq` is a `DateOffset` object whose own attributes can still be mutated. - [ ] closes # - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
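A quick demonstration of the new behavior (the exact exception text depends on the Cython version):

```python
import pandas as pd

per = pd.Period('2014Q1')

# Both attributes now reject assignment from Python.
for attr, value in [('ordinal', 14), ('freq', 2 * per.freq)]:
    try:
        setattr(per, attr, value)
    except AttributeError:
        print('{attr} is read-only'.format(attr=attr))
```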
https://api.github.com/repos/pandas-dev/pandas/pulls/17239
2017-08-13T03:05:09Z
2017-08-24T10:03:19Z
2017-08-24T10:03:18Z
2017-08-24T16:18:22Z
Fix bugs in IntervalIndex.is_non_overlapping_monotonic
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index b8f142700b830..c0512137d9437 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -311,6 +311,8 @@ Conversion - Bug in assignment against datetime-like data with ``int`` may incorrectly convert to datetime-like (:issue:`14145`) - Bug in assignment against ``int64`` data with ``np.ndarray`` with ``float64`` dtype may keep ``int64`` dtype (:issue:`14001`) - Fix :func:`DataFrame.memory_usage` to support PyPy. Objects on PyPy do not have a fixed size, so an approximation is used instead (:issue:`17228`) +- Fixed the return type of ``IntervalIndex.is_non_overlapping_monotonic`` to be a Python ``bool`` for consistency with similar attributes/methods. Previously returned a ``numpy.bool_``. (:issue:`17237`) +- Bug in ``IntervalIndex.is_non_overlapping_monotonic`` when intervals are closed on both sides and overlap at a point (:issue:`16560`) Indexing diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index aa2ad21ae37fd..e90378184e3f3 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -556,8 +556,17 @@ def is_non_overlapping_monotonic(self): # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) # we already require left <= right - return ((self.right[:-1] <= self.left[1:]).all() or - (self.left[:-1] >= self.right[1:]).all()) + + # strict inequality for closed == 'both'; equality implies overlapping + # at a point when both sides of intervals are included + if self.closed == 'both': + return bool((self.right[:-1] < self.left[1:]).all() or + (self.left[:-1] > self.right[1:]).all()) + + # non-strict inequality when closed != 'both'; at least one side is + # not included in the intervals, so equality does not imply overlapping + return bool((self.right[:-1] <= self.left[1:]).all() or + (self.left[:-1] >= self.right[1:]).all()) @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index fe86a2121761a..18eefc3fbdca6 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -371,8 +371,9 @@ def slice_locs_cases(self, breaks): assert index.slice_locs(1, 1) == (1, 1) assert index.slice_locs(1, 2) == (1, 2) - index = IntervalIndex.from_breaks([0, 1, 2], closed='both') - assert index.slice_locs(1, 1) == (0, 2) + index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], + closed='both') + assert index.slice_locs(1, 1) == (0, 1) assert index.slice_locs(1, 2) == (0, 2) def test_slice_locs_int64(self): @@ -681,6 +682,42 @@ def f(): pytest.raises(ValueError, f) + def test_is_non_overlapping_monotonic(self): + # Should be True in all cases + tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] + for closed in ('left', 'right', 'neither', 'both'): + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is True + + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is True + + # Should be False in all cases (overlapping) + tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] + for closed in ('left', 'right', 'neither', 'both'): + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False + + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert 
idx.is_non_overlapping_monotonic is False + + # Should be False in all cases (non-monotonic) + tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] + for closed in ('left', 'right', 'neither', 'both'): + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False + + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is False + + # Should be False for closed='both', overwise True (GH16560) + idx = IntervalIndex.from_breaks(range(4), closed='both') + assert idx.is_non_overlapping_monotonic is False + + for closed in ('left', 'right', 'neither'): + idx = IntervalIndex.from_breaks(range(4), closed=closed) + assert idx.is_non_overlapping_monotonic is True + class TestIntervalRange(object):
- [X] closes #17237 - [X] closes #16560 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry I had to modify one of the existing tests, which used an `IntervalIndex` that overlapped at endpoints but was assumed to be non-overlapping. The test failed due to an intermediate check against `is_non_overlapping_monotonic`, which now correctly reports that the index in question is in fact overlapping. The test was changed to use a similar index with `closed='both'` that is genuinely non-overlapping. Note that #16588 also fixes #16560, but that PR appears to have gone stale, with no update in over two months. Both #17237 and #16560 touch the same block of code, so it seemed easiest to fix both simultaneously.
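To make the point-overlap case concrete (outputs reflect the fixed behavior):

```python
import pandas as pd

# Consecutive intervals [0, 1], [1, 2], [2, 3] share their endpoints.
both = pd.IntervalIndex.from_breaks([0, 1, 2, 3], closed='both')
right = pd.IntervalIndex.from_breaks([0, 1, 2, 3], closed='right')

# With closed='both' each shared endpoint belongs to two intervals, so the
# index overlaps at a point; with closed='right' it does not.
print(both.is_non_overlapping_monotonic)   # False
print(right.is_non_overlapping_monotonic)  # True
```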
https://api.github.com/repos/pandas-dev/pandas/pulls/17238
2017-08-12T22:44:26Z
2017-08-15T20:42:40Z
2017-08-15T20:42:40Z
2017-09-09T04:57:20Z
API: Have MultiIndex constructors always return a MI
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 33b7e128ef8bf..87896778bea14 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -273,6 +273,30 @@ named ``.isna()`` and ``.notna()``, these are included for classes ``Categorical The configuration option ``pd.options.mode.use_inf_as_null`` is deprecated, and ``pd.options.mode.use_inf_as_na`` is added as a replacement. +.. _whatsnew_210.api.multiindex_single: + +MultiIndex Constructor with a Single Level +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``MultiIndex`` constructors no longer squeeze a MultiIndex with all +length-one levels down to a regular ``Index``. This affects all the +``MultiIndex`` constructors. (:issue:`17178`) + +Previous behavior: + +.. code-block:: ipython + + In [2]: pd.MultiIndex.from_tuples([('a',), ('b',)]) + Out[2]: Index(['a', 'b'], dtype='object') + +Length 1 levels are no longer special-cased. They behave exactly as if you had +length 2+ levels, so a :class:`MultiIndex` is always returned from all of the +``MultiIndex`` constructors: + +.. ipython:: python + + pd.MultiIndex.from_tuples([('a',), ('b',)]) + .. _whatsnew_0210.api: Other API Changes diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b5b3df64d24c0..5991ec825c841 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -67,7 +67,8 @@ _dict_compat, standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import (Index, MultiIndex, _ensure_index, + _ensure_index_from_sequences) from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import (BlockManager, @@ -1155,9 +1156,9 @@ def from_records(cls, data, index=None, exclude=None, columns=None, else: try: to_remove = [arr_columns.get_loc(field) for field in index] - - result_index = MultiIndex.from_arrays( - [arrays[i] for i in to_remove], names=index) + index_data = [arrays[i] for i in to_remove] + result_index = _ensure_index_from_sequences(index_data, + names=index) exclude.update(index) except Exception: @@ -3000,7 +3001,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, to_remove.append(col) arrays.append(level) - index = MultiIndex.from_arrays(arrays, names=names) + index = _ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index.get_duplicates() diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 323d50166e7b6..d20a0b0a2c73d 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -1,6 +1,9 @@ -from pandas.core.indexes.base import (Index, _new_Index, # noqa - _ensure_index, _get_na_value, - InvalidIndexError) +from pandas.core.indexes.base import (Index, + _new_Index, + _ensure_index, + _ensure_index_from_sequences, + _get_na_value, + InvalidIndexError) # noqa from pandas.core.indexes.category import CategoricalIndex # noqa from pandas.core.indexes.multi import MultiIndex # noqa from pandas.core.indexes.interval import IntervalIndex # noqa @@ -22,7 +25,8 @@ 'InvalidIndexError', 'TimedeltaIndex', 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', - '_ensure_index', '_get_na_value', '_get_combined_index', + '_ensure_index', '_ensure_index_from_sequences', '_get_na_value', + '_get_combined_index', '_get_objs_combined_axis', '_union_indexes', '_get_consensus_names', '_all_indexes_same'] diff --git 
a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 31cf1e48b8529..6a30eaefaaae7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4012,7 +4012,76 @@ def invalid_op(self, other=None): Index._add_comparison_methods() +def _ensure_index_from_sequences(sequences, names=None): + """Construct an index from sequences of data. + + A single sequence returns an Index. Many sequences returns a + MultiIndex. + + Parameters + ---------- + sequences : sequence of sequences + names : sequence of str + + Returns + ------- + index : Index or MultiIndex + + Examples + -------- + >>> _ensure_index_from_sequences([[1, 2, 3]], names=['name']) + Int64Index([1, 2, 3], dtype='int64', name='name') + + >>> _ensure_index_from_sequences([['a', 'a'], ['a', 'b']], + names=['L1', 'L2']) + MultiIndex(levels=[['a'], ['a', 'b']], + labels=[[0, 0], [0, 1]], + names=['L1', 'L2']) + + See Also + -------- + _ensure_index + """ + from .multi import MultiIndex + + if len(sequences) == 1: + if names is not None: + names = names[0] + return Index(sequences[0], name=names) + else: + return MultiIndex.from_arrays(sequences, names=names) + + def _ensure_index(index_like, copy=False): + """ + Ensure that we have an index from some index-like object + + Parameters + ---------- + index : sequence + An Index or other sequence + copy : bool + + Returns + ------- + index : Index or MultiIndex + + Examples + -------- + >>> _ensure_index(['a', 'b']) + Index(['a', 'b'], dtype='object') + + >>> _ensure_index([('a', 'a'), ('b', 'c')]) + Index([('a', 'a'), ('b', 'c')], dtype='object') + + >>> _ensure_index([['a', 'a'], ['b', 'c']]) + MultiIndex(levels=[['a'], ['b', 'c']], + labels=[[0, 0], [0, 1]]) + + See Also + -------- + _ensure_index_from_sequences + """ if isinstance(index_like, Index): if copy: index_like = index_like.copy() diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index ea45b4700172f..d7d5b6d128a2c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -91,12 +91,6 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None, raise ValueError('Length of levels and labels must be the same.') if len(levels) == 0: raise ValueError('Must pass non-zero number of levels/labels') - if len(levels) == 1: - if names: - name = names[0] - else: - name = None - return Index(levels[0], name=name, copy=True).take(labels[0]) result = object.__new__(MultiIndex) @@ -1084,10 +1078,6 @@ def from_arrays(cls, arrays, sortorder=None, names=None): MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables """ - if len(arrays) == 1: - name = None if names is None else names[0] - return Index(arrays[0], name=name) - # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 455da9246783c..b4abba8026b35 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -31,7 +31,7 @@ from pandas.core.frame import _shared_docs from pandas.util._decorators import Appender -from pandas.core.index import MultiIndex, _get_na_value +from pandas.core.index import Index, MultiIndex, _get_na_value class _Unstacker(object): @@ -311,10 +311,14 @@ def _unstack_multiple(data, clocs): recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels, xnull=False) - dummy_index = MultiIndex(levels=rlevels + [obs_ids], - labels=rlabels + [comp_ids], - names=rnames + ['__placeholder__'], - 
verify_integrity=False) + if rlocs == []: + # Everything is in clocs, so the dummy df has a regular index + dummy_index = Index(obs_ids, name='__placeholder__') + else: + dummy_index = MultiIndex(levels=rlevels + [obs_ids], + labels=rlabels + [comp_ids], + names=rnames + ['__placeholder__'], + verify_integrity=False) if isinstance(data, Series): dummy = data.copy() @@ -446,7 +450,12 @@ def _slow_pivot(index, columns, values): def unstack(obj, level, fill_value=None): if isinstance(level, (tuple, list)): - return _unstack_multiple(obj, level) + if len(level) != 1: + # _unstack_multiple only handles MultiIndexes, + # and isn't needed for a single level + return _unstack_multiple(obj, level) + else: + level = level[0] if isinstance(obj, DataFrame): if isinstance(obj.index, MultiIndex): diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py index ea108e3e89935..d2b9583d8efe5 100644 --- a/pandas/core/sparse/scipy_sparse.py +++ b/pandas/core/sparse/scipy_sparse.py @@ -71,7 +71,11 @@ def robust_get_level_values(i): labels_to_i = Series(labels_to_i) if len(subset) > 1: labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index) - labels_to_i.index.names = [index.names[i] for i in subset] + labels_to_i.index.names = [index.names[i] for i in subset] + else: + labels_to_i.index = Index(x[0] for x in labels_to_i.index) + labels_to_i.index.name = index.names[subset[0]] + labels_to_i.name = 'value' return (labels_to_i) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 2f95e510bba5e..48bc2ee05dd68 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1452,7 +1452,12 @@ def cons_row(x): if expand: result = list(result) - return MultiIndex.from_tuples(result, names=name) + out = MultiIndex.from_tuples(result, names=name) + if out.nlevels == 1: + # We had all tuples of length-one, which are + # better represented as a regular Index. 
+ out = out.get_level_values(0) + return out else: return Index(result, name=name) else: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a9821be3fa5e2..8b1a921536a1d 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -23,7 +23,8 @@ is_scalar, is_categorical_dtype) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import astype_nansafe -from pandas.core.index import Index, MultiIndex, RangeIndex +from pandas.core.index import (Index, MultiIndex, RangeIndex, + _ensure_index_from_sequences) from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.categorical import Categorical @@ -1444,7 +1445,8 @@ def _agg_index(self, index, try_parse_dates=True): arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues) arrays.append(arr) - index = MultiIndex.from_arrays(arrays, names=self.index_names) + names = self.index_names + index = _ensure_index_from_sequences(arrays, names) return index @@ -1808,7 +1810,7 @@ def read(self, nrows=None): try_parse_dates=True) arrays.append(values) - index = MultiIndex.from_arrays(arrays) + index = _ensure_index_from_sequences(arrays) if self.usecols is not None: names = self._filter_usecols(names) @@ -3138,9 +3140,8 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): if index_col is None or index_col is False: index = Index([]) else: - index = [Series([], dtype=dtype[index_name]) - for index_name in index_names] - index = MultiIndex.from_arrays(index, names=index_names) + data = [Series([], dtype=dtype[name]) for name in index_names] + index = _ensure_index_from_sequences(data, names=index_names) index_col.sort() for i, n in enumerate(index_col): columns.pop(n - i) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 07e98c326bcaa..aa32e75ba0d58 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -17,7 +17,7 @@ DataFrame, Float64Index, Int64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, isna) -from pandas.core.index import _get_combined_index +from pandas.core.index import _get_combined_index, _ensure_index_from_sequences from pandas.util.testing import assert_almost_equal from pandas.compat.numpy import np_datetime64_compat @@ -2112,3 +2112,19 @@ def test_intersect_str_dates(self): res = i2.intersection(i1) assert len(res) == 0 + + +class TestIndexUtils(object): + + @pytest.mark.parametrize('data, names, expected', [ + ([[1, 2, 3]], None, Index([1, 2, 3])), + ([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')), + ([['a', 'a'], ['c', 'd']], None, + MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])), + ([['a', 'a'], ['c', 'd']], ['L1', 'L2'], + MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]], + names=['L1', 'L2'])), + ]) + def test_ensure_index_from_sequences(self, data, names, expected): + result = _ensure_index_from_sequences(data, names) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index c66775f4690cc..798d244468961 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -537,15 +537,12 @@ def test_astype(self): self.index.astype(np.dtype(int)) def test_constructor_single_level(self): - single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], - labels=[[0, 1, 2, 3]], names=['first']) - assert isinstance(single_level, Index) - assert not isinstance(single_level, MultiIndex) - assert single_level.name == 'first' - - single_level = 
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], - labels=[[0, 1, 2, 3]]) - assert single_level.name is None + result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']], + labels=[[0, 1, 2, 3]], names=['first']) + assert isinstance(result, MultiIndex) + expected = Index(['foo', 'bar', 'baz', 'qux'], name='first') + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ['first'] def test_constructor_no_levels(self): tm.assert_raises_regex(ValueError, "non-zero number " @@ -768,8 +765,9 @@ def test_from_arrays_empty(self): # 1 level result = MultiIndex.from_arrays(arrays=[[]], names=['A']) + assert isinstance(result, MultiIndex) expected = Index([], name='A') - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result.levels[0], expected) # N levels for N in [2, 3]: @@ -830,7 +828,7 @@ def test_from_product_empty(self): # 1 level result = MultiIndex.from_product([[]], names=['A']) expected = pd.Index([], name='A') - tm.assert_index_equal(result, expected) + tm.assert_index_equal(result.levels[0], expected) # 2 levels l1 = [[], ['foo', 'bar', 'baz'], []] diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 5a17cb6d7dc47..7dac83953ad8f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1909,7 +1909,11 @@ def keyfunc(x): # convert tuples to index if nentries == 1: + # we have a single level of tuples, i.e. a regular Index index = Index(tuples[0], name=names[0]) + elif nlevels == 1: + name = None if names is None else names[0] + index = Index((x[0] for x in tuples), name=name) else: index = MultiIndex.from_tuples(tuples, names=names) return index
This removes the special case in which the ``MultiIndex`` constructors returned an Index when all the levels were length-1; they now return a MultiIndex with a single level. This is a backwards-incompatible change with no clear method for deprecation, so we're making a clean break.

Old behavior:

```python
In [1]: import pandas as pd

In [2]: pd.MultiIndex.from_tuples([('a',), ('b',)])
Out[2]: Index(['a', 'b'], dtype='object')
```

New behavior:

```python
In [1]: import pandas as pd

In [2]: pd.MultiIndex.from_tuples([('a',), ('b',)])
Out[2]: MultiIndex(levels=[['a', 'b']], labels=[[0, 1]])
```

Closes #17178
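For downstream code that relied on the old squeezing, a single-level ``MultiIndex`` can still be collapsed to a flat ``Index`` explicitly, which is what this patch itself does in `pandas/core/strings.py`. A minimal sketch:

```python
import pandas as pd

mi = pd.MultiIndex.from_tuples([('a',), ('b',)])

# Collapse a single-level MultiIndex back to a flat Index, mirroring the
# nlevels == 1 branch this diff adds to pandas/core/strings.py.
if mi.nlevels == 1:
    idx = mi.get_level_values(0)

print(idx)  # Index(['a', 'b'], dtype='object')
```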
https://api.github.com/repos/pandas-dev/pandas/pulls/17236
2017-08-12T11:11:23Z
2017-08-30T20:30:54Z
2017-08-30T20:30:54Z
2017-08-30T20:30:57Z
Remove import of pandas as pd in core.window
diff --git a/pandas/core/window.py b/pandas/core/window.py index 5866f1e8a76bd..4bd959f52673c 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -17,7 +17,8 @@ ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex, - ABCPeriodIndex) + ABCPeriodIndex, + ABCDateOffset) from pandas.core.dtypes.common import ( is_integer, is_bool, @@ -28,13 +29,12 @@ is_list_like, _ensure_float64, is_scalar) -import pandas as pd from pandas.core.base import (PandasObject, SelectionMixin, GroupByMixin) import pandas.core.common as com import pandas._libs.window as _window -from pandas.tseries.offsets import DateOffset + from pandas import compat from pandas.compat.numpy import function as nv from pandas.util._decorators import (Substitution, Appender, @@ -254,7 +254,8 @@ def _wrap_result(self, result, block=None, obj=None): # coerce if necessary if block is not None: if is_timedelta64_dtype(block.values.dtype): - result = pd.to_timedelta( + from pandas import to_timedelta + result = to_timedelta( result.ravel(), unit='ns').values.reshape(result.shape) if result.ndim == 1: @@ -275,7 +276,7 @@ def _wrap_results(self, results, blocks, obj): obj : conformed data (may be resampled) """ - from pandas import Series + from pandas import Series, concat from pandas.core.index import _ensure_index final = [] @@ -290,8 +291,7 @@ def _wrap_results(self, results, blocks, obj): # we want to put it back into the results # in the same location columns = self._selected_obj.columns - if self.on is not None \ - and not self._on.equals(obj.index): + if self.on is not None and not self._on.equals(obj.index): name = self._on.name final.append(Series(self._on, index=obj.index, name=name)) @@ -309,8 +309,7 @@ def _wrap_results(self, results, blocks, obj): if not len(final): return obj.astype('float64') - return pd.concat(final, axis=1).reindex(columns=columns, - copy=False) + return concat(final, axis=1).reindex(columns=columns, copy=False) def _center_window(self, result, window): """ center the result in the window """ @@ -318,10 +317,9 @@ def _center_window(self, result, window): raise ValueError("Requested axis is larger then no. 
of argument " "dimensions") - from pandas import Series, DataFrame offset = _offset(window, True) if offset > 0: - if isinstance(result, (Series, DataFrame)): + if isinstance(result, (ABCSeries, ABCDataFrame)): result = result.slice_shift(-offset, axis=self.axis) else: lead_indexer = [slice(None)] * result.ndim @@ -1085,7 +1083,8 @@ def _on(self): return self.obj.index elif (isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns): - return pd.Index(self.obj[self.on]) + from pandas import Index + return Index(self.obj[self.on]) else: raise ValueError("invalid on specified as {0}, " "must be a column (if DataFrame) " @@ -1096,7 +1095,7 @@ def validate(self): # we allow rolling on a datetimelike index if ((self.obj.empty or self.is_datetimelike) and - isinstance(self.window, (compat.string_types, DateOffset, + isinstance(self.window, (compat.string_types, ABCDateOffset, timedelta))): self._validate_monotonic() @@ -1871,19 +1870,19 @@ def _cov(x, y): def _flex_binary_moment(arg1, arg2, f, pairwise=False): - from pandas import Series, DataFrame - if not (isinstance(arg1, (np.ndarray, Series, DataFrame)) and - isinstance(arg2, (np.ndarray, Series, DataFrame))): + if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and + isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))): raise TypeError("arguments to moment function must be of type " "np.ndarray/Series/DataFrame") - if (isinstance(arg1, (np.ndarray, Series)) and - isinstance(arg2, (np.ndarray, Series))): + if (isinstance(arg1, (np.ndarray, ABCSeries)) and + isinstance(arg2, (np.ndarray, ABCSeries))): X, Y = _prep_binary(arg1, arg2) return f(X, Y) - elif isinstance(arg1, DataFrame): + elif isinstance(arg1, ABCDataFrame): + from pandas import DataFrame def dataframe_from_int_dict(data, frame_template): result = DataFrame(data, index=frame_template.index) @@ -1892,7 +1891,7 @@ def dataframe_from_int_dict(data, frame_template): return result results = {} - if isinstance(arg2, DataFrame): + if isinstance(arg2, ABCDataFrame): if pairwise is False: if arg1 is arg2: # special case in order to handle duplicate column names @@ -1929,7 +1928,7 @@ def dataframe_from_int_dict(data, frame_template): # TODO: not the most efficient (perf-wise) # though not bad code-wise - from pandas import Panel, MultiIndex + from pandas import Panel, MultiIndex, concat with warnings.catch_warnings(record=True): p = Panel.from_dict(results).swapaxes('items', 'major') @@ -1939,7 +1938,7 @@ def dataframe_from_int_dict(data, frame_template): p.minor_axis = arg2.columns[p.minor_axis] if len(p.items): - result = pd.concat( + result = concat( [p.iloc[i].T for i in range(len(p.items))], keys=p.items) else: @@ -2034,8 +2033,7 @@ def _zsqrt(x): result = np.sqrt(x) mask = x < 0 - from pandas import DataFrame - if isinstance(x, DataFrame): + if isinstance(x, ABCDataFrame): if mask.values.any(): result[mask] = 0 else: @@ -2060,8 +2058,7 @@ def _prep_binary(arg1, arg2): def rolling(obj, win_type=None, **kwds): - from pandas import Series, DataFrame - if not isinstance(obj, (Series, DataFrame)): + if not isinstance(obj, (ABCSeries, ABCDataFrame)): raise TypeError('invalid type: %s' % type(obj)) if win_type is not None: @@ -2074,8 +2071,7 @@ def rolling(obj, win_type=None, **kwds): def expanding(obj, **kwds): - from pandas import Series, DataFrame - if not isinstance(obj, (Series, DataFrame)): + if not isinstance(obj, (ABCSeries, ABCDataFrame)): raise TypeError('invalid type: %s' % type(obj)) return Expanding(obj, **kwds) @@ -2085,8 +2081,7 @@ def expanding(obj, 
**kwds): def ewm(obj, **kwds): - from pandas import Series, DataFrame - if not isinstance(obj, (Series, DataFrame)): + if not isinstance(obj, (ABCSeries, ABCDataFrame)): raise TypeError('invalid type: %s' % type(obj)) return EWM(obj, **kwds)
Do `isinstance` checks against `ABCFoo`.

- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
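The point of the pattern is that the `ABC*` classes identify pandas objects structurally, so modules such as `pandas.core.window` no longer need a module-level `import pandas as pd` and the circular-import risk that comes with it. A minimal sketch, shaped after the `rolling()` function this diff touches:

```python
# ABC* classes stand in for the concrete Series/DataFrame types without
# importing the heavyweight modules that define them.
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame


def rolling(obj, win_type=None, **kwds):
    # Equivalent to isinstance(obj, (pd.Series, pd.DataFrame)), but with no
    # module-level dependency on pandas.core.series / pandas.core.frame.
    if not isinstance(obj, (ABCSeries, ABCDataFrame)):
        raise TypeError('invalid type: %s' % type(obj))
```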
https://api.github.com/repos/pandas-dev/pandas/pulls/17233
2017-08-12T01:25:39Z
2017-08-12T17:30:27Z
2017-08-12T17:30:27Z
2017-08-12T18:26:12Z
TST: Move more frame tests to SharedWithSparse
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 8c4c13b66ffa9..53a1b9525a0dd 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -28,6 +28,20 @@ class SharedWithSparse(object): + """ + A collection of tests DataFrame and SparseDataFrame can share. + + In generic tests on this class, use ``self._assert_frame_equal()`` and + ``self._assert_series_equal()`` which are implemented in sub-classes + and dispatch correctly. + """ + def _assert_frame_equal(self, left, right): + """Dispatch to frame class dependent assertion""" + raise NotImplementedError + + def _assert_series_equal(self, left, right): + """Dispatch to series class dependent assertion""" + raise NotImplementedError def test_copy_index_name_checking(self): # don't want to be able to modify the index stored elsewhere after @@ -76,11 +90,6 @@ def test_add_prefix_suffix(self): expected = pd.Index(['{}%'.format(c) for c in self.frame.columns]) tm.assert_index_equal(with_pct_suffix.columns, expected) - -class TestDataFrameMisc(SharedWithSparse, TestData): - - klass = DataFrame - def test_get_axis(self): f = self.frame assert f._get_axis_number(0) == 0 @@ -118,13 +127,13 @@ def test_column_contains_typeerror(self): pass def test_not_hashable(self): - df = pd.DataFrame([1]) + df = self.klass([1]) pytest.raises(TypeError, hash, df) pytest.raises(TypeError, hash, self.empty) def test_new_empty_index(self): - df1 = DataFrame(randn(0, 3)) - df2 = DataFrame(randn(0, 3)) + df1 = self.klass(randn(0, 3)) + df2 = self.klass(randn(0, 3)) df1.index.name = 'foo' assert df2.index.name is None @@ -135,7 +144,7 @@ def test_array_interface(self): assert result.index is self.frame.index assert result.columns is self.frame.columns - assert_frame_equal(result, self.frame.apply(np.sqrt)) + self._assert_frame_equal(result, self.frame.apply(np.sqrt)) def test_get_agg_axis(self): cols = self.frame._get_agg_axis(0) @@ -160,36 +169,36 @@ def test_nonzero(self): assert not df.empty def test_iteritems(self): - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) + df = self.klass([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) for k, v in compat.iteritems(df): - assert type(v) == Series + assert type(v) == self.klass._constructor_sliced def test_iter(self): assert tm.equalContents(list(self.frame), self.frame.columns) def test_iterrows(self): - for i, (k, v) in enumerate(self.frame.iterrows()): - exp = self.frame.xs(self.frame.index[i]) - assert_series_equal(v, exp) + for k, v in self.frame.iterrows(): + exp = self.frame.loc[k] + self._assert_series_equal(v, exp) - for i, (k, v) in enumerate(self.mixed_frame.iterrows()): - exp = self.mixed_frame.xs(self.mixed_frame.index[i]) - assert_series_equal(v, exp) + for k, v in self.mixed_frame.iterrows(): + exp = self.mixed_frame.loc[k] + self._assert_series_equal(v, exp) def test_itertuples(self): for i, tup in enumerate(self.frame.itertuples()): - s = Series(tup[1:]) + s = self.klass._constructor_sliced(tup[1:]) s.name = tup[0] expected = self.frame.iloc[i, :].reset_index(drop=True) - assert_series_equal(s, expected) + self._assert_series_equal(s, expected) - df = DataFrame({'floats': np.random.randn(5), - 'ints': lrange(5)}, columns=['floats', 'ints']) + df = self.klass({'floats': np.random.randn(5), + 'ints': lrange(5)}, columns=['floats', 'ints']) for tup in df.itertuples(index=False): assert isinstance(tup[1], np.integer) - df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]}) + df = self.klass(data={"a": [1, 2, 3], "b": [4, 5, 6]}) dfaa = 
df[['a', 'a']] assert (list(dfaa.itertuples()) == @@ -237,7 +246,7 @@ def test_as_matrix(self): mat = self.mixed_frame.as_matrix(['foo', 'A']) assert mat[0, 0] == 'bar' - df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]}) + df = self.klass({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]}) mat = df.as_matrix() assert mat[0, 0] == 1j @@ -246,20 +255,6 @@ def test_as_matrix(self): expected = self.frame.reindex(columns=['A', 'B']).values assert_almost_equal(mat, expected) - def test_values(self): - self.frame.values[:, 0] = 5. - assert (self.frame.values[:, 0] == 5).all() - - def test_deepcopy(self): - cp = deepcopy(self.frame) - series = cp['A'] - series[:] = 10 - for idx, value in compat.iteritems(series): - assert self.frame['A'][idx] != value - - # --------------------------------------------------------------------- - # Transposing - def test_transpose(self): frame = self.frame dft = frame.T @@ -272,23 +267,17 @@ def test_transpose(self): # mixed type index, data = tm.getMixedTypeDict() - mixed = DataFrame(data, index=index) + mixed = self.klass(data, index=index) mixed_T = mixed.T for col, s in compat.iteritems(mixed_T): assert s.dtype == np.object_ - def test_transpose_get_view(self): - dft = self.frame.T - dft.values[:, 5:10] = 5 - - assert (self.frame.values[5:10] == 5).all() - def test_swapaxes(self): - df = DataFrame(np.random.randn(10, 5)) - assert_frame_equal(df.T, df.swapaxes(0, 1)) - assert_frame_equal(df.T, df.swapaxes(1, 0)) - assert_frame_equal(df, df.swapaxes(0, 0)) + df = self.klass(np.random.randn(10, 5)) + self._assert_frame_equal(df.T, df.swapaxes(0, 1)) + self._assert_frame_equal(df.T, df.swapaxes(1, 0)) + self._assert_frame_equal(df, df.swapaxes(0, 0)) pytest.raises(ValueError, df.swapaxes, 2, 5) def test_axis_aliases(self): @@ -308,8 +297,8 @@ def test_more_asMatrix(self): assert values.shape[1] == len(self.mixed_frame.columns) def test_repr_with_mi_nat(self): - df = DataFrame({'X': [1, 2]}, - index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']]) + df = self.klass({'X': [1, 2]}, + index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']]) res = repr(df) exp = ' X\nNaT a 1\n2013-01-01 b 2' assert res == exp @@ -324,31 +313,56 @@ def test_series_put_names(self): assert v.name == k def test_empty_nonzero(self): - df = DataFrame([1, 2, 3]) + df = self.klass([1, 2, 3]) assert not df.empty - df = pd.DataFrame(index=[1], columns=[1]) + df = self.klass(index=[1], columns=[1]) assert not df.empty - df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna() + df = self.klass(index=['a', 'b'], columns=['c', 'd']).dropna() assert df.empty assert df.T.empty - empty_frames = [pd.DataFrame(), - pd.DataFrame(index=[1]), - pd.DataFrame(columns=[1]), - pd.DataFrame({1: []})] + empty_frames = [self.klass(), + self.klass(index=[1]), + self.klass(columns=[1]), + self.klass({1: []})] for df in empty_frames: assert df.empty assert df.T.empty def test_with_datetimelikes(self): - df = DataFrame({'A': date_range('20130101', periods=10), - 'B': timedelta_range('1 day', periods=10)}) + df = self.klass({'A': date_range('20130101', periods=10), + 'B': timedelta_range('1 day', periods=10)}) t = df.T result = t.get_dtype_counts() expected = Series({'object': 10}) tm.assert_series_equal(result, expected) + +class TestDataFrameMisc(SharedWithSparse, TestData): + + klass = DataFrame + # SharedWithSparse tests use generic, klass-agnostic assertion + _assert_frame_equal = staticmethod(assert_frame_equal) + _assert_series_equal = staticmethod(assert_series_equal) + + def test_values(self): + 
self.frame.values[:, 0] = 5. + assert (self.frame.values[:, 0] == 5).all() + + def test_deepcopy(self): + cp = deepcopy(self.frame) + series = cp['A'] + series[:] = 10 + for idx, value in compat.iteritems(series): + assert self.frame['A'][idx] != value + + def test_transpose_get_view(self): + dft = self.frame.T + dft.values[:, 5:10] = 5 + + assert (self.frame.values[5:10] == 5).all() + def test_inplace_return_self(self): # re #1893 diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index f0f8954e5785b..004af5066fe83 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -29,6 +29,10 @@ class TestSparseDataFrame(SharedWithSparse): klass = SparseDataFrame + # SharedWithSparse tests use generic, klass-agnostic assertion + _assert_frame_equal = staticmethod(tm.assert_sp_frame_equal) + _assert_series_equal = staticmethod(tm.assert_sp_series_equal) + def setup_method(self, method): self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6], 'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6], @@ -43,6 +47,8 @@ def setup_method(self, method): self.frame = SparseDataFrame(self.data, index=self.dates) self.iframe = SparseDataFrame(self.data, index=self.dates, default_kind='integer') + self.mixed_frame = self.frame.copy(False) + self.mixed_frame['foo'] = pd.SparseArray(['bar'] * len(self.dates)) values = self.frame.values.copy() values[np.isnan(values)] = 0
- [ ] closes #xxxx
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Doing as suggested in https://github.com/pandas-dev/pandas/pull/17050#issuecomment-317203306.
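The core of the refactor is the dispatch pattern the diff adds to `SharedWithSparse`: shared tests construct frames through `self.klass` and compare through class-bound assertion functions, so each concrete test class only has to bind the right types. A stripped-down sketch of that pattern (not the actual test module, and using the era-appropriate `pandas.util.testing`):

```python
import numpy as np
import pandas as pd
import pandas.util.testing as tm


class SharedTests(object):
    klass = None  # concrete frame class, bound by each subclass

    def _assert_frame_equal(self, left, right):
        # dispatch point: subclasses bind the class-appropriate assertion
        raise NotImplementedError

    def test_swapaxes(self):
        # identical test body runs for dense and sparse frames alike
        df = self.klass(np.random.randn(10, 5))
        self._assert_frame_equal(df.T, df.swapaxes(0, 1))


class TestDataFrameMisc(SharedTests):
    klass = pd.DataFrame
    _assert_frame_equal = staticmethod(tm.assert_frame_equal)
```

A `SparseDataFrame` sibling would bind `tm.assert_sp_frame_equal` the same way, as the diff to `pandas/tests/sparse/test_frame.py` shows.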
https://api.github.com/repos/pandas-dev/pandas/pulls/17227
2017-08-11T11:40:33Z
2017-08-12T17:33:04Z
2017-08-12T17:33:04Z
2017-08-12T17:33:07Z
CLN: replace %s syntax with .format in pandas.util
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index f346995c0a1a4..bb7ffe45c689b 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -27,8 +27,9 @@ def deprecate(name, alternative, alt_name=None, klass=None, klass = klass or FutureWarning def wrapper(*args, **kwargs): - warnings.warn("%s is deprecated. Use %s instead" % (name, alt_name), - klass, stacklevel=stacklevel) + msg = "{name} is deprecated. Use {alt_name} instead".format( + name=name, alt_name=alt_name) + warnings.warn(msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) return wrapper @@ -90,19 +91,24 @@ def wrapper(*args, **kwargs): old_arg_value) else: new_arg_value = mapping(old_arg_value) - msg = "the %s=%r keyword is deprecated, " \ - "use %s=%r instead" % \ - (old_arg_name, old_arg_value, - new_arg_name, new_arg_value) + msg = ("the {old_name}={old_val!r} keyword is deprecated, " + "use {new_name}={new_val!r} instead" + ).format(old_name=old_arg_name, + old_val=old_arg_value, + new_name=new_arg_name, + new_val=new_arg_value) else: new_arg_value = old_arg_value - msg = "the '%s' keyword is deprecated, " \ - "use '%s' instead" % (old_arg_name, new_arg_name) + msg = ("the '{old_name}' keyword is deprecated, " + "use '{new_name}' instead" + ).format(old_name=old_arg_name, + new_name=new_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name, None) is not None: - msg = ("Can only specify '%s' or '%s', not both" % - (old_arg_name, new_arg_name)) + msg = ("Can only specify '{old_name}' or '{new_name}', " + "not both").format(old_name=old_arg_name, + new_name=new_arg_name) raise TypeError(msg) else: kwargs[new_arg_name] = new_arg_value diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py index 9ecd4b10365c8..83c1433bf5c39 100644 --- a/pandas/util/_print_versions.py +++ b/pandas/util/_print_versions.py @@ -38,18 +38,17 @@ def get_sys_info(): (sysname, nodename, release, version, machine, processor) = platform.uname() blob.extend([ - ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]), + ("python", '.'.join(map(str, sys.version_info))), ("python-bits", struct.calcsize("P") * 8), - ("OS", "%s" % (sysname)), - ("OS-release", "%s" % (release)), - # ("Version", "%s" % (version)), - ("machine", "%s" % (machine)), - ("processor", "%s" % (processor)), - ("byteorder", "%s" % sys.byteorder), - ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")), - ("LANG", "%s" % os.environ.get('LANG', "None")), - ("LOCALE", "%s.%s" % locale.getlocale()), - + ("OS", "{sysname}".format(sysname=sysname)), + ("OS-release", "{release}".format(release=release)), + # ("Version", "{version}".format(version=version)), + ("machine", "{machine}".format(machine=machine)), + ("processor", "{processor}".format(processor=processor)), + ("byteorder", "{byteorder}".format(byteorder=sys.byteorder)), + ("LC_ALL", "{lc}".format(lc=os.environ.get('LC_ALL', "None"))), + ("LANG", "{lang}".format(lang=os.environ.get('LANG', "None"))), + ("LOCALE", '.'.join(map(str, locale.getlocale()))), ]) except: pass @@ -131,11 +130,11 @@ def show_versions(as_json=False): print("------------------") for k, stat in sys_info: - print("%s: %s" % (k, stat)) + print("{k}: {stat}".format(k=k, stat=stat)) print("") for k, stat in deps_blob: - print("%s: %s" % (k, stat)) + print("{k}: {stat}".format(k=k, stat=stat)) def main(): diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 6b19904f4a665..2661e4a98aedf 100644 --- a/pandas/util/_validators.py +++ 
b/pandas/util/_validators.py @@ -220,7 +220,7 @@ def validate_args_and_kwargs(fname, args, kwargs, def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): - raise ValueError('For argument "%s" expected type bool, ' - 'received type %s.' % - (arg_name, type(value).__name__)) + raise ValueError('For argument "{arg}" expected type bool, received ' + 'type {typ}.'.format(arg=arg_name, + typ=type(value).__name__)) return value diff --git a/pandas/util/testing.py b/pandas/util/testing.py index d6ba9561340cc..a000e189dfaa9 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -104,7 +104,7 @@ def round_trip_pickle(obj, path=None): """ if path is None: - path = u('__%s__.pickle' % rands(10)) + path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10))) with ensure_clean(path) as path: pd.to_pickle(obj, path) return pd.read_pickle(path) @@ -244,13 +244,15 @@ def _check_isinstance(left, right, cls): AssertionError : Either `left` or `right` is not an instance of `cls`. """ - err_msg = "{0} Expected type {1}, found {2} instead" + err_msg = "{name} Expected type {exp_type}, found {act_type} instead" cls_name = cls.__name__ if not isinstance(left, cls): - raise AssertionError(err_msg.format(cls_name, cls, type(left))) + raise AssertionError(err_msg.format(name=cls_name, exp_type=cls, + act_type=type(left))) if not isinstance(right, cls): - raise AssertionError(err_msg.format(cls_name, cls, type(right))) + raise AssertionError(err_msg.format(name=cls_name, exp_type=cls, + act_type=type(right))) def assert_dict_equal(left, right, compare_keys=True): @@ -371,7 +373,7 @@ def _skip_if_no_xarray(): if v < LooseVersion('0.7.0'): import pytest - pytest.skip("xarray not version is too low: {0}".format(v)) + pytest.skip("xarray version is too low: {version}".format(version=v)) def _skip_if_windows_python_3(): @@ -436,7 +438,7 @@ def _skip_if_has_locale(): lang, _ = locale.getlocale() if lang is not None: import pytest - pytest.skip("Specific locale is set {0}".format(lang)) + pytest.skip("Specific locale is set {lang}".format(lang=lang)) def _skip_if_not_us_locale(): @@ -444,7 +446,7 @@ def _skip_if_not_us_locale(): lang, _ = locale.getlocale() if lang != 'en_US': import pytest - pytest.skip("Specific locale is set {0}".format(lang)) + pytest.skip("Specific locale is set {lang}".format(lang=lang)) def _skip_if_no_mock(): @@ -505,8 +507,8 @@ def _default_locale_getter(): try: raw_locales = check_output(['locale -a'], shell=True) except subprocess.CalledProcessError as e: - raise type(e)("%s, the 'locale -a' command cannot be found on your " - "system" % e) + raise type(e)("{exception}, the 'locale -a' command cannot be found " + "on your system".format(exception=e)) return raw_locales @@ -563,7 +565,8 @@ def get_locales(prefix=None, normalize=True, if prefix is None: return _valid_locales(out_locales, normalize) - found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales)) + found = re.compile('{prefix}.*'.format(prefix=prefix)) \ + .findall('\n'.join(out_locales)) return _valid_locales(found, normalize) @@ -818,13 +821,13 @@ def ensure_clean(filename=None, return_filelike=False): try: os.close(fd) except Exception as e: - print("Couldn't close file descriptor: %d (file: %s)" % - (fd, filename)) + print("Couldn't close file descriptor: {fdesc} (file: {fname})" + .format(fdesc=fd, fname=filename)) try: if os.path.exists(filename): os.remove(filename) except Exception as e: - print("Exception 
on removing file: %s" % e) + print("Exception on removing file: {error}".format(error=e)) def get_data_path(f=''): @@ -900,15 +903,17 @@ def _get_ilevel_values(index, level): # level comparison if left.nlevels != right.nlevels: - raise_assert_detail(obj, '{0} levels are different'.format(obj), - '{0}, {1}'.format(left.nlevels, left), - '{0}, {1}'.format(right.nlevels, right)) + msg1 = '{obj} levels are different'.format(obj=obj) + msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left) + msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right) + raise_assert_detail(obj, msg1, msg2, msg3) # length comparison if len(left) != len(right): - raise_assert_detail(obj, '{0} length are different'.format(obj), - '{0}, {1}'.format(len(left), left), - '{0}, {1}'.format(len(right), right)) + msg1 = '{obj} length are different'.format(obj=obj) + msg2 = '{length}, {left}'.format(length=len(left), left=left) + msg3 = '{length}, {right}'.format(length=len(right), right=right) + raise_assert_detail(obj, msg1, msg2, msg3) # MultiIndex special comparison for little-friendly error messages if left.nlevels > 1: @@ -917,7 +922,7 @@ def _get_ilevel_values(index, level): llevel = _get_ilevel_values(left, level) rlevel = _get_ilevel_values(right, level) - lobj = 'MultiIndex level [{0}]'.format(level) + lobj = 'MultiIndex level [{level}]'.format(level=level) assert_index_equal(llevel, rlevel, exact=exact, check_names=check_names, check_less_precise=check_less_precise, @@ -929,8 +934,8 @@ def _get_ilevel_values(index, level): if not left.equals(right): diff = np.sum((left.values != right.values) .astype(int)) * 100.0 / len(left) - msg = '{0} values are different ({1} %)'\ - .format(obj, np.round(diff, 5)) + msg = '{obj} values are different ({pct} %)'.format( + obj=obj, pct=np.round(diff, 5)) raise_assert_detail(obj, msg, left, right) else: _testing.assert_almost_equal(left.values, right.values, @@ -950,7 +955,7 @@ def _get_ilevel_values(index, level): if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): assert_categorical_equal(left.values, right.values, - obj='{0} category'.format(obj)) + obj='{obj} category'.format(obj=obj)) def assert_class_equal(left, right, exact=True, obj='Input'): @@ -971,12 +976,12 @@ def repr_class(x): # allow equivalence of Int64Index/RangeIndex types = set([type(left).__name__, type(right).__name__]) if len(types - set(['Int64Index', 'RangeIndex'])): - msg = '{0} classes are not equivalent'.format(obj) + msg = '{obj} classes are not equivalent'.format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) elif exact: if type(left) != type(right): - msg = '{0} classes are different'.format(obj) + msg = '{obj} classes are different'.format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) @@ -1016,23 +1021,22 @@ def assert_attr_equal(attr, left, right, obj='Attributes'): if result: return True else: - raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr), - left_attr, right_attr) + msg = 'Attribute "{attr}" are different'.format(attr=attr) + raise_assert_detail(obj, msg, left_attr, right_attr) def assert_is_valid_plot_return_object(objs): import matplotlib.pyplot as plt if isinstance(objs, (pd.Series, np.ndarray)): for el in objs.ravel(): - msg = ('one of \'objs\' is not a matplotlib Axes instance, ' - 'type encountered {0!r}') - assert isinstance(el, (plt.Axes, dict)), msg.format( - el.__class__.__name__) + msg = ('one of \'objs\' is not a matplotlib Axes instance, type ' 
+ 'encountered {name!r}').format(name=el.__class__.__name__) + assert isinstance(el, (plt.Axes, dict)), msg else: assert isinstance(objs, (plt.Artist, tuple, dict)), \ ('objs is neither an ndarray of Artist instances nor a ' - 'single Artist instance, tuple, or dict, "objs" is a {0!r} ' - ''.format(objs.__class__.__name__)) + 'single Artist instance, tuple, or dict, "objs" is a {name!r}' + ).format(name=objs.__class__.__name__) def isiterable(obj): @@ -1069,17 +1073,17 @@ def assert_categorical_equal(left, right, check_dtype=True, if check_category_order: assert_index_equal(left.categories, right.categories, - obj='{0}.categories'.format(obj)) + obj='{obj}.categories'.format(obj=obj)) assert_numpy_array_equal(left.codes, right.codes, check_dtype=check_dtype, - obj='{0}.codes'.format(obj)) + obj='{obj}.codes'.format(obj=obj)) else: assert_index_equal(left.categories.sort_values(), right.categories.sort_values(), - obj='{0}.categories'.format(obj)) + obj='{obj}.categories'.format(obj=obj)) assert_index_equal(left.categories.take(left.codes), right.categories.take(right.codes), - obj='{0}.values'.format(obj)) + obj='{obj}.values'.format(obj=obj)) assert_attr_equal('ordered', left, right, obj=obj) @@ -1090,14 +1094,14 @@ def raise_assert_detail(obj, message, left, right, diff=None): if isinstance(right, np.ndarray): right = pprint_thing(right) - msg = """{0} are different + msg = """{obj} are different -{1} -[left]: {2} -[right]: {3}""".format(obj, message, left, right) +{message} +[left]: {left} +[right]: {right}""".format(obj=obj, message=message, left=left, right=right) if diff is not None: - msg = msg + "\n[diff]: {diff}".format(diff=diff) + msg += "\n[diff]: {diff}".format(diff=diff) raise AssertionError(msg) @@ -1138,18 +1142,20 @@ def _get_base(obj): if check_same == 'same': if left_base is not right_base: - msg = "%r is not %r" % (left_base, right_base) + msg = "{left!r} is not {right!r}".format( + left=left_base, right=right_base) raise AssertionError(msg) elif check_same == 'copy': if left_base is right_base: - msg = "%r is %r" % (left_base, right_base) + msg = "{left!r} is {right!r}".format( + left=left_base, right=right_base) raise AssertionError(msg) def _raise(left, right, err_msg): if err_msg is None: if left.shape != right.shape: - raise_assert_detail(obj, '{0} shapes are different' - .format(obj), left.shape, right.shape) + raise_assert_detail(obj, '{obj} shapes are different' + .format(obj=obj), left.shape, right.shape) diff = 0 for l, r in zip(left, right): @@ -1158,8 +1164,8 @@ def _raise(left, right, err_msg): diff += 1 diff = diff * 100.0 / left.size - msg = '{0} values are different ({1} %)'\ - .format(obj, np.round(diff, 5)) + msg = '{obj} values are different ({pct} %)'.format( + obj=obj, pct=np.round(diff, 5)) raise_assert_detail(obj, msg, left, right) raise AssertionError(err_msg) @@ -1226,9 +1232,9 @@ def assert_series_equal(left, right, check_dtype=True, # length comparison if len(left) != len(right): - raise_assert_detail(obj, 'Series length are different', - '{0}, {1}'.format(len(left), left.index), - '{0}, {1}'.format(len(right), right.index)) + msg1 = '{len}, {left}'.format(len=len(left), left=left.index) + msg2 = '{len}, {right}'.format(len=len(right), right=right.index) + raise_assert_detail(obj, 'Series length are different', msg1, msg2) # index comparison assert_index_equal(left.index, right.index, exact=check_index_type, @@ -1236,7 +1242,7 @@ def assert_series_equal(left, right, check_dtype=True, check_less_precise=check_less_precise, 
check_exact=check_exact, check_categorical=check_categorical, - obj='{0}.index'.format(obj)) + obj='{obj}.index'.format(obj=obj)) if check_dtype: assert_attr_equal('dtype', left, right) @@ -1244,7 +1250,7 @@ def assert_series_equal(left, right, check_dtype=True, if check_exact: assert_numpy_array_equal(left.get_values(), right.get_values(), check_dtype=check_dtype, - obj='{0}'.format(obj),) + obj='{obj}'.format(obj=obj),) elif check_datetimelike_compat: # we want to check only if we have compat dtypes # e.g. integer and M|m are NOT compat, but we can simply check @@ -1257,8 +1263,9 @@ def assert_series_equal(left, right, check_dtype=True, # datetimelike may have different objects (e.g. datetime.datetime # vs Timestamp) but will compare equal if not Index(left.values).equals(Index(right.values)): - msg = '[datetimelike_compat=True] {0} is not equal to {1}.' - raise AssertionError(msg.format(left.values, right.values)) + msg = ('[datetimelike_compat=True] {left} is not equal to ' + '{right}.').format(left=left.values, right=right.values) + raise AssertionError(msg) else: assert_numpy_array_equal(left.get_values(), right.get_values(), check_dtype=check_dtype) @@ -1266,13 +1273,13 @@ def assert_series_equal(left, right, check_dtype=True, # TODO: big hack here l = pd.IntervalIndex(left) r = pd.IntervalIndex(right) - assert_index_equal(l, r, obj='{0}.index'.format(obj)) + assert_index_equal(l, r, obj='{obj}.index'.format(obj=obj)) else: _testing.assert_almost_equal(left.get_values(), right.get_values(), check_less_precise=check_less_precise, check_dtype=check_dtype, - obj='{0}'.format(obj)) + obj='{obj}'.format(obj=obj)) # metadata comparison if check_names: @@ -1281,7 +1288,7 @@ def assert_series_equal(left, right, check_dtype=True, if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): assert_categorical_equal(left.values, right.values, - obj='{0} category'.format(obj)) + obj='{obj} category'.format(obj=obj)) # This could be refactored to use the NDFrame.equals method @@ -1348,8 +1355,8 @@ def assert_frame_equal(left, right, check_dtype=True, if left.shape != right.shape: raise_assert_detail(obj, 'DataFrame shape mismatch', - '({0}, {1})'.format(*left.shape), - '({0}, {1})'.format(*right.shape)) + '{shape!r}'.format(shape=left.shape), + '{shape!r}'.format(shape=right.shape)) if check_like: left, right = left.reindex_like(right), right @@ -1360,7 +1367,7 @@ def assert_frame_equal(left, right, check_dtype=True, check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, - obj='{0}.index'.format(obj)) + obj='{obj}.index'.format(obj=obj)) # column comparison assert_index_equal(left.columns, right.columns, exact=check_column_type, @@ -1368,7 +1375,7 @@ def assert_frame_equal(left, right, check_dtype=True, check_less_precise=check_less_precise, check_exact=check_exact, check_categorical=check_categorical, - obj='{0}.columns'.format(obj)) + obj='{obj}.columns'.format(obj=obj)) # compare by blocks if by_blocks: @@ -1393,7 +1400,7 @@ def assert_frame_equal(left, right, check_dtype=True, check_exact=check_exact, check_names=check_names, check_datetimelike_compat=check_datetimelike_compat, check_categorical=check_categorical, - obj='DataFrame.iloc[:, {0}]'.format(i)) + obj='DataFrame.iloc[:, {idx}]'.format(idx=i)) def assert_panelnd_equal(left, right, @@ -1448,13 +1455,15 @@ def assert_panelnd_equal(left, right, # can potentially be slow for i, item in enumerate(left._get_axis(0)): - assert item in right, "non-matching item (right) 
'%s'" % item + msg = "non-matching item (right) '{item}'".format(item=item) + assert item in right, msg litem = left.iloc[i] ritem = right.iloc[i] assert_func(litem, ritem, check_less_precise=check_less_precise) for i, item in enumerate(right._get_axis(0)): - assert item in left, "non-matching item (left) '%s'" % item + msg = "non-matching item (left) '{item}'".format(item=item) + assert item in left, msg # TODO: strangely check_names fails in py3 ? @@ -1526,7 +1535,7 @@ def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True, assert_class_equal(left, right, obj=obj) assert_index_equal(left.index, right.index, - obj='{0}.index'.format(obj)) + obj='{obj}.index'.format(obj=obj)) assert_sp_array_equal(left.block.values, right.block.values) @@ -1563,9 +1572,9 @@ def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True, assert_class_equal(left, right, obj=obj) assert_index_equal(left.index, right.index, - obj='{0}.index'.format(obj)) + obj='{obj}.index'.format(obj=obj)) assert_index_equal(left.columns, right.columns, - obj='{0}.columns'.format(obj)) + obj='{obj}.columns'.format(obj=obj)) for col, series in compat.iteritems(left): assert (col in right) @@ -1599,7 +1608,7 @@ def assert_sp_list_equal(left, right): def assert_contains_all(iterable, dic): for k in iterable: - assert k in dic, "Did not contain item: '%r'" % k + assert k in dic, "Did not contain item: '{key!r}'".format(key=k) def assert_copy(iter1, iter2, **eql_kwargs): @@ -1613,10 +1622,10 @@ def assert_copy(iter1, iter2, **eql_kwargs): """ for elem1, elem2 in zip(iter1, iter2): assert_almost_equal(elem1, elem2, **eql_kwargs) - assert elem1 is not elem2, ("Expected object %r and " - "object %r to be different " - "objects, were same." - % (type(elem1), type(elem2))) + msg = ("Expected object {obj1!r} and object {obj2!r} to be " + "different objects, but they were the same object." + ).format(obj1=type(elem1), obj2=type(elem2)) + assert elem1 is not elem2, msg def getCols(k): @@ -1870,8 +1879,9 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, idx.name = names[0] return idx elif idx_type is not None: - raise ValueError('"%s" is not a legal value for `idx_type`, use ' - '"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type) + raise ValueError('"{idx_type}" is not a legal value for `idx_type`, ' + 'use "i"/"f"/"s"/"u"/"dt/"p"/"td".' 
+ .format(idx_type=idx_type)) if len(ndupe_l) < nlevels: ndupe_l.extend([1] * (nlevels - len(ndupe_l))) @@ -1890,7 +1900,7 @@ def keyfunc(x): div_factor = nentries // ndupe_l[i] + 1 cnt = Counter() for j in range(div_factor): - label = prefix + '_l%d_g' % i + str(j) + label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j) cnt[label] = ndupe_l[i] # cute Counter trick result = list(sorted(cnt.elements(), key=keyfunc))[:nentries] @@ -1983,7 +1993,7 @@ def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True, # by default, generate data based on location if data_gen_f is None: - data_gen_f = lambda r, c: "R%dC%d" % (r, c) + data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c) data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)] @@ -2112,13 +2122,13 @@ def skip_if_no_package(pkg_name, min_version=None, max_version=None, import pytest if app: - msg = '%s requires %s' % (app, pkg_name) + msg = '{app} requires {pkg_name}'.format(app=app, pkg_name=pkg_name) else: - msg = 'module requires %s' % pkg_name + msg = 'module requires {pkg_name}'.format(pkg_name=pkg_name) if min_version: - msg += ' with version >= %s' % (min_version,) + msg += ' with version >= {min_version}'.format(min_version=min_version) if max_version: - msg += ' with version < %s' % (max_version,) + msg += ' with version < {max_version}'.format(max_version=max_version) try: mod = __import__(pkg_name) except ImportError: @@ -2126,7 +2136,8 @@ def skip_if_no_package(pkg_name, min_version=None, max_version=None, try: have_version = mod.__version__ except AttributeError: - pytest.skip('Cannot find version for %s' % pkg_name) + pytest.skip('Cannot find version for {pkg_name}' + .format(pkg_name=pkg_name)) if min_version and checker(have_version) < checker(min_version): pytest.skip(msg) if max_version and checker(have_version) >= checker(max_version): @@ -2332,7 +2343,7 @@ def wrapper(*args, **kwargs): if errno in skip_errnos: skip("Skipping test due to known errno" - " and error %s" % e) + " and error {error}".format(error=e)) try: e_str = traceback.format_exc(e) @@ -2341,7 +2352,7 @@ def wrapper(*args, **kwargs): if any([m.lower() in e_str.lower() for m in _skip_on_messages]): skip("Skipping test because exception " - "message is known and error %s" % e) + "message is known and error {error}".format(error=e)) if not isinstance(e, error_classes): raise @@ -2350,7 +2361,7 @@ def wrapper(*args, **kwargs): raise else: skip("Skipping test due to lack of connectivity" - " and error %s" % e) + " and error {error}".format(e)) return wrapper @@ -2488,7 +2499,7 @@ def __exit__(self, exc_type, exc_value, trace_back): if not exc_type: exp_name = getattr(expected, "__name__", str(expected)) - raise AssertionError("{0} not raised.".format(exp_name)) + raise AssertionError("{name} not raised.".format(name=exp_name)) return self.exception_matches(exc_type, exc_value, trace_back) @@ -2523,8 +2534,9 @@ def exception_matches(self, exc_type, exc_value, trace_back): val = str(exc_value) if not self.regexp.search(val): - e = AssertionError('"%s" does not match "%s"' % - (self.regexp.pattern, str(val))) + msg = '"{pat}" does not match "{val}"'.format( + pat=self.regexp.pattern, val=val) + e = AssertionError(msg) raise_with_traceback(e, trace_back) return True @@ -2591,18 +2603,20 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always", from inspect import getframeinfo, stack caller = getframeinfo(stack()[2][0]) msg = ("Warning not set with correct stacklevel. 
" - "File where warning is raised: {0} != {1}. " - "Warning message: {2}".format( - actual_warning.filename, caller.filename, - actual_warning.message)) + "File where warning is raised: {actual} != " + "{caller}. Warning message: {message}" + ).format(actual=actual_warning.filename, + caller=caller.filename, + message=actual_warning.message) assert actual_warning.filename == caller.filename, msg else: extra_warnings.append(actual_warning.category.__name__) if expected_warning: - assert saw_warning, ("Did not see expected warning of class %r." - % expected_warning.__name__) - assert not extra_warnings, ("Caused unexpected warning(s): %r." - % extra_warnings) + msg = "Did not see expected warning of class {name!r}.".format( + name=expected_warning.__name__) + assert saw_warning, msg + assert not extra_warnings, ("Caused unexpected warning(s): {extra!r}." + ).format(extra=extra_warnings) class RNGContext(object):
Progress towards #16130

- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

Replaced `%s` syntax with `.format` in pandas.util. Additionally, made some of the existing positional `.format` code more explicit. Left the `Substitution` class in _decorators.py as-is, since it appears to impact docstrings across the entire codebase.
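The rewrite is mechanical; the gain is that every placeholder is named at the call site. One of the touched messages, shown in both styles as a self-contained sketch (the variable values are illustrative only):

```python
name, alt_name = 'pandas.json', 'pandas.io.json'  # illustrative values

# before: positional %-interpolation; the reader has to count arguments
msg = "%s is deprecated. Use %s instead" % (name, alt_name)

# after: named str.format fields document themselves and can be reordered
msg = "{name} is deprecated. Use {alt_name} instead".format(
    name=name, alt_name=alt_name)

print(msg)  # pandas.json is deprecated. Use pandas.io.json instead
```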
https://api.github.com/repos/pandas-dev/pandas/pulls/17224
2017-08-11T03:21:21Z
2017-08-11T10:22:47Z
2017-08-11T10:22:47Z
2017-08-11T18:32:49Z
DOC: Add 'See also' sections in type conversion functions
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bd3297f66a469..5a7f37bba91aa 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3681,6 +3681,9 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs): See also -------- + pandas.to_datetime : Convert argument to datetime. + pandas.to_timedelta : Convert argument to timedelta. + pandas.to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. """ if is_dict_like(dtype): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index eebf78d7619eb..6ff4302937d07 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -335,6 +335,10 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, 1 1960-01-03 2 1960-01-04 + See also + -------- + pandas.DataFrame.astype : Cast argument to a specified dtype. + pandas.to_timedelta : Convert argument to timedelta. """ from pandas.core.indexes.datetimes import DatetimeIndex diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index eda88a2f7e474..c584e29f682dd 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -84,6 +84,13 @@ def to_numeric(arg, errors='raise', downcast=None): 2 2.0 3 -3.0 dtype: float64 + + See also + -------- + pandas.DataFrame.astype : Cast argument to a specified dtype. + pandas.to_datetime : Convert argument to datetime. + pandas.to_timedelta : Convert argument to timedelta. + numpy.ndarray.astype : Cast a numpy array to a specified type. """ if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): raise ValueError('invalid downcasting method provided') diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index fe03f89fdb2c5..f2d99d26a87b8 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -61,6 +61,11 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'): >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) + + See also + -------- + pandas.DataFrame.astype : Cast argument to a specified dtype. + pandas.to_datetime : Convert argument to datetime. """ unit = _validate_timedelta_unit(unit)
Minor continuation of #17203, which also resolves the last paragraph of #15550 (*"Furthermore I think we could update the .astype docs (and the .to_*) ones to cross reference."*).
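For reference, the added `See also` blocks follow the numpydoc convention used throughout pandas: a qualified name, a colon, and a one-line description. A hypothetical docstring showing the shape of the entries this PR adds:

```python
def to_example(arg):
    """Convert ``arg`` to an example type (hypothetical function).

    See also
    --------
    pandas.DataFrame.astype : Cast argument to a specified dtype.
    pandas.to_datetime : Convert argument to datetime.
    """
    return arg
```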
https://api.github.com/repos/pandas-dev/pandas/pulls/17223
2017-08-11T02:34:21Z
2017-08-11T10:23:34Z
2017-08-11T10:23:33Z
2017-09-11T21:12:03Z
MAINT: Remove extra the's in deprecation messages
diff --git a/pandas/json.py b/pandas/json.py index 0b87aa22394b9..16d6580c87951 100644 --- a/pandas/json.py +++ b/pandas/json.py @@ -3,5 +3,5 @@ import warnings warnings.warn("The pandas.json module is deprecated and will be " "removed in a future version. Please import from " - "the pandas.io.json instead", FutureWarning, stacklevel=2) + "pandas.io.json instead", FutureWarning, stacklevel=2) from pandas._libs.json import dumps, loads diff --git a/pandas/parser.py b/pandas/parser.py index c0c3bf3179a2d..f43a408c943d0 100644 --- a/pandas/parser.py +++ b/pandas/parser.py @@ -3,6 +3,6 @@ import warnings warnings.warn("The pandas.parser module is deprecated and will be " "removed in a future version. Please import from " - "the pandas.io.parser instead", FutureWarning, stacklevel=2) + "pandas.io.parser instead", FutureWarning, stacklevel=2) from pandas._libs.parsers import na_values from pandas.io.common import CParserError
https://api.github.com/repos/pandas-dev/pandas/pulls/17222
2017-08-11T00:12:53Z
2017-08-11T00:13:03Z
2017-08-11T00:13:03Z
2017-08-21T15:24:49Z
Remove import exception handling
diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 55c0506acb132..797c12139656d 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -13,10 +13,7 @@ class CompressionTests(object): def test_zip(self): - try: - import zipfile - except ImportError: - pytest.skip('need zipfile to run') + import zipfile with open(self.csv1, 'rb') as data_file: data = data_file.read() @@ -65,10 +62,7 @@ def test_zip(self): f, compression='zip') def test_gzip(self): - try: - import gzip - except ImportError: - pytest.skip('need gzip to run') + import gzip with open(self.csv1, 'rb') as data_file: data = data_file.read() @@ -94,10 +88,7 @@ def test_gzip(self): tm.assert_frame_equal(result, expected) def test_bz2(self): - try: - import bz2 - except ImportError: - pytest.skip('need bz2 to run') + import bz2 with open(self.csv1, 'rb') as data_file: data = data_file.read()
These are standard-library modules (`zipfile`, `gzip`, `bz2`), so the imports should succeed on all versions of Python that pandas supports, and the `ImportError` fallbacks are dead code.
https://api.github.com/repos/pandas-dev/pandas/pulls/17218
2017-08-10T18:58:52Z
2017-08-10T20:43:37Z
2017-08-10T20:43:37Z
2017-08-10T21:01:56Z
REF: _get_objs_combined_axis
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2c82fe4c348d5..467ef52de234e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6126,12 +6126,10 @@ def _list_to_arrays(data, columns, coerce_float=False, dtype=None): def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): - from pandas.core.index import _get_combined_index + from pandas.core.index import _get_objs_combined_axis if columns is None: - columns = _get_combined_index([ - s.index for s in data if getattr(s, 'index', None) is not None - ]) + columns = _get_objs_combined_axis(data) indexer_cache = {} diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index d90c681abc03f..db73a6878258a 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -23,11 +23,22 @@ 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', '_ensure_index', '_get_na_value', '_get_combined_index', + '_get_objs_combined_axis', '_get_distinct_indexes', '_union_indexes', '_get_consensus_names', '_all_indexes_same'] +def _get_objs_combined_axis(objs, intersect=False, axis=0): + # Extract combined index: return intersection or union (depending on the + # value of "intersect") of indexes on given axis, or None if all objects + # lack indexes (e.g. they are numpy arrays) + obs_idxes = [obj._get_axis(axis) for obj in objs + if hasattr(obj, '_get_axis')] + if obs_idxes: + return _get_combined_index(obs_idxes, intersect=intersect) + + def _get_combined_index(indexes, intersect=False): # TODO: handle index names! indexes = _get_distinct_indexes(indexes) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index e4515efe109c5..a3e35492ad9af 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -26,7 +26,7 @@ from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, - _get_combined_index) + _get_objs_combined_axis) from pandas.io.formats.printing import pprint_thing from pandas.core.indexing import maybe_droplevels from pandas.core.internals import (BlockManager, @@ -1448,7 +1448,6 @@ def _extract_axis(self, data, axis=0, intersect=False): index = Index([]) elif len(data) > 0: raw_lengths = [] - indexes = [] have_raw_arrays = False have_frames = False @@ -1456,13 +1455,13 @@ def _extract_axis(self, data, axis=0, intersect=False): for v in data.values(): if isinstance(v, self._constructor_sliced): have_frames = True - indexes.append(v._get_axis(axis)) elif v is not None: have_raw_arrays = True raw_lengths.append(v.shape[axis]) if have_frames: - index = _get_combined_index(indexes, intersect=intersect) + index = _get_objs_combined_axis(data.values(), axis=axis, + intersect=intersect) if have_raw_arrays: lengths = list(set(raw_lengths)) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index e199ec2710367..20d561738dc78 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -4,7 +4,7 @@ import numpy as np from pandas import compat, DataFrame, Series, Index, MultiIndex -from pandas.core.index import (_get_combined_index, +from pandas.core.index import (_get_objs_combined_axis, _ensure_index, _get_consensus_names, _all_indexes_same) from pandas.core.categorical import (_factorize_from_iterable, @@ -445,16 +445,13 @@ def _get_new_axes(self): return new_axes def _get_comb_axis(self, i): - if self._is_series: - all_indexes = [x.index for x in self.objs] - else: - try: - all_indexes = [x._data.axes[i] for x in self.objs] - except 
IndexError: - types = [type(x).__name__ for x in self.objs] - raise TypeError("Cannot concatenate list of %s" % types) - - return _get_combined_index(all_indexes, intersect=self.intersect) + data_axis = self.objs[0]._get_block_manager_axis(i) + try: + return _get_objs_combined_axis(self.objs, axis=data_axis, + intersect=self.intersect) + except IndexError: + types = [type(x).__name__ for x in self.objs] + raise TypeError("Cannot concatenate list of %s" % types) def _get_concat_axis(self): """ diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index e61adf3aac30a..e08c307bba818 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -8,7 +8,7 @@ from pandas.core.series import Series from pandas.core.groupby import Grouper from pandas.core.reshape.util import cartesian_product -from pandas.core.index import Index, _get_combined_index +from pandas.core.index import Index, _get_objs_combined_axis from pandas.compat import range, lrange, zip from pandas import compat import pandas.core.common as com @@ -440,12 +440,7 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') - obs_idxes = [obj.index for objs in (index, columns) for obj in objs - if hasattr(obj, 'index')] - if obs_idxes: - common_idx = _get_combined_index(obs_idxes, intersect=True) - else: - common_idx = None + common_idx = _get_objs_combined_axis(index + columns, intersect=True) data = {} data.update(zip(rownames, index))
- [x] tests passed
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`

As proposed [here](https://github.com/pandas-dev/pandas/pull/17011#discussion_r128045375), with the addition of the ``axis`` argument.
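For context, a minimal sketch of the new helper in isolation (an internal API; the sample series are made up):

```python
import pandas as pd
from pandas.core.index import _get_objs_combined_axis

s1 = pd.Series([1, 2], index=['a', 'b'])
s2 = pd.Series([3, 4], index=['b', 'c'])

# Union of the indexes on axis 0; objects without _get_axis (plain
# lists, numpy arrays) are skipped, and None is returned if all are
_get_objs_combined_axis([s1, s2, [5, 6]])
# -> Index(['a', 'b', 'c'], dtype='object')

# Intersection instead of union
_get_objs_combined_axis([s1, s2], intersect=True)
# -> Index(['b'], dtype='object')
```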
https://api.github.com/repos/pandas-dev/pandas/pulls/17217
2017-08-10T18:27:44Z
2017-08-12T17:33:52Z
2017-08-12T17:33:52Z
2017-08-13T00:44:57Z
FIX: define `DataFrame.items` for all versions of python
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 6008ea5d4cbcd..c5fe89282bf52 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -128,6 +128,10 @@ Other Enhancements - :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`) - `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`). - :func:`pd.read_sas()` now recognizes much more of the most frequently used date (datetime) formats in SAS7BDAT files (:issue:`15871`). +- :func:`DataFrame.items` and :func:`Series.items` is now present in both Python 2 and 3 and is lazy in all cases (:issue:`13918`, :issue:`17213`) + + + .. _whatsnew_0210.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 467ef52de234e..b5b3df64d24c0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -802,8 +802,7 @@ def itertuples(self, index=True, name="Pandas"): # fallback to regular tuples return zip(*arrays) - if compat.PY3: # pragma: no cover - items = iteritems + items = iteritems def __len__(self): """Returns length of info axis, but here we use the index """ diff --git a/pandas/core/series.py b/pandas/core/series.py index c8282450b77a9..75dc3d6403650 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1110,8 +1110,7 @@ def iteritems(self): """ return zip(iter(self.index), iter(self)) - if compat.PY3: # pragma: no cover - items = iteritems + items = iteritems # ---------------------------------------------------------------------- # Misc public methods diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 53a1b9525a0dd..a62fcb506a34b 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -171,7 +171,16 @@ def test_nonzero(self): def test_iteritems(self): df = self.klass([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) for k, v in compat.iteritems(df): - assert type(v) == self.klass._constructor_sliced + assert isinstance(v, self.klass._constructor_sliced) + + def test_items(self): + # issue #17213, #13918 + cols = ['a', 'b', 'c'] + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols) + for c, (k, v) in zip(cols, df.items()): + assert c == k + assert isinstance(v, Series) + assert (df[k] == v).all() def test_iter(self): assert tm.equalContents(list(self.frame), self.frame.columns) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 8e22dd38030ee..b7fbe803f8d3b 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -301,6 +301,16 @@ def test_iteritems(self): # assert is lazy (genrators don't define reverse, lists do) assert not hasattr(self.series.iteritems(), 'reverse') + def test_items(self): + for idx, val in self.series.items(): + assert val == self.series[idx] + + for idx, val in self.ts.items(): + assert val == self.ts[idx] + + # assert is lazy (genrators don't define reverse, lists do) + assert not hasattr(self.series.items(), 'reverse') + def test_raise_on_info(self): s = Series(np.random.randn(10)) with pytest.raises(AttributeError):
Closes #13918, #17213

This leaves a slight semantic difference between `dict.items` and `DataFrame.items` in python2: `dict.items` returns a list there, but no code currently expects `DataFrame.items` to return one, and in python3 there is no `dict.iteritems`. This eases writing 'native' python3 code that also runs under python2. Do you want a whatsnew for this?

- [x] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
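A quick sketch of what this enables, assuming a toy frame:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 4], 'b': [2, 5]})

# .items() is now defined on Python 2 as well, and lazily yields
# (column label, Series) pairs
for label, col in df.items():
    print(label, col.tolist())

# the laziness check from the new tests: generators lack .reverse()
assert not hasattr(pd.Series([1, 2]).items(), 'reverse')
```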
https://api.github.com/repos/pandas-dev/pandas/pulls/17214
2017-08-10T15:31:38Z
2017-08-19T21:59:20Z
2017-08-19T21:59:19Z
2017-08-19T22:02:21Z
ENH/PERF: Remove frequency inference from .dt accessor
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index efe713639fec9..b7151ad2eaa99 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -510,3 +510,17 @@ def time_begin_incr_rng(self): def time_begin_decr_rng(self): self.rng - self.semi_month_begin + + +class DatetimeAccessor(object): + def setup(self): + self.N = 100000 + self.series = pd.Series( + pd.date_range(start='1/1/2000', periods=self.N, freq='T') + ) + + def time_dt_accessor(self): + self.series.dt + + def time_dt_accessor_normalize(self): + self.series.dt.normalize() diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 8b2c4d16f4e1a..e21ee8d7d31f5 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -296,6 +296,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Improved performance of instantiating :class:`SparseDataFrame` (:issue:`16773`) +- :attr:`Series.dt` no longer performs frequency inference, yielding a large speedup when accessing the attribute (:issue:`17210`) .. _whatsnew_0210.bug_fixes: diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index ce3143b342cec..88297ac70984d 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -61,23 +61,20 @@ def maybe_to_datetimelike(data, copy=False): data = orig.values.categories if is_datetime64_dtype(data.dtype): - return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), + return DatetimeProperties(DatetimeIndex(data, copy=copy), index, name=name, orig=orig) elif is_datetime64tz_dtype(data.dtype): - return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer', - ambiguous='infer'), + return DatetimeProperties(DatetimeIndex(data, copy=copy), index, data.name, orig=orig) elif is_timedelta64_dtype(data.dtype): - return TimedeltaProperties(TimedeltaIndex(data, copy=copy, - freq='infer'), index, + return TimedeltaProperties(TimedeltaIndex(data, copy=copy), index, name=name, orig=orig) else: if is_period_arraylike(data): return PeriodProperties(PeriodIndex(data, copy=copy), index, name=name, orig=orig) if is_datetime_arraylike(data): - return DatetimeProperties(DatetimeIndex(data, copy=copy, - freq='infer'), index, + return DatetimeProperties(DatetimeIndex(data, copy=copy), index, name=name, orig=orig) raise TypeError("cannot convert an object of type {0} to a " @@ -162,6 +159,10 @@ class DatetimeProperties(Properties): def to_pydatetime(self): return self.values.to_pydatetime() + @property + def freq(self): + return self.values.inferred_freq + DatetimeProperties._add_delegate_accessors( delegate=DatetimeIndex, @@ -202,6 +203,10 @@ def components(self): """ return self.values.components.set_index(self.index) + @property + def freq(self): + return self.values.inferred_freq + TimedeltaProperties._add_delegate_accessors( delegate=TimedeltaIndex,
null
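A minimal illustration of the change, assuming a hypothetical minutely series. Accessing `.dt` no longer infers frequency eagerly, while the new `freq` property still delegates to `inferred_freq`:

```python
import pandas as pd

s = pd.Series(pd.date_range('2000-01-01', periods=100000, freq='T'))

# accessing .dt no longer triggers frequency inference, so this is cheap
s.dt.normalize()

# inference is still available on demand via the new property
s.dt.freq  # -> 'T' (delegates to inferred_freq)
```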
https://api.github.com/repos/pandas-dev/pandas/pulls/17210
2017-08-10T03:12:44Z
2017-08-14T10:31:42Z
2017-08-14T10:31:42Z
2017-08-14T11:56:14Z
CLN: replace %s syntax with .format in core.computation
diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 1c75301082297..691eaebfd5fc1 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -98,12 +98,11 @@ def _align_core(terms): ordm = np.log10(max(1, abs(reindexer_size - term_axis_size))) if ordm >= 1 and reindexer_size >= 10000: - warnings.warn('Alignment difference on axis {0} is larger ' - 'than an order of magnitude on term {1!r}, ' - 'by more than {2:.4g}; performance may ' - 'suffer'.format(axis, terms[i].name, ordm), - category=PerformanceWarning, - stacklevel=6) + w = ('Alignment difference on axis {axis} is larger ' + 'than an order of magnitude on term {term!r}, by ' + 'more than {ordm:.4g}; performance may suffer' + ).format(axis=axis, term=terms[i].name, ordm=ordm) + warnings.warn(w, category=PerformanceWarning, stacklevel=6) if transpose: f = partial(ti.reindex, index=reindexer, copy=False) diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index f45d0355e7442..155ff554cf99c 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -33,8 +33,9 @@ def _check_ne_builtin_clash(expr): if overlap: s = ', '.join(map(repr, overlap)) - raise NumExprClobberingError('Variables in expression "%s" ' - 'overlap with builtins: (%s)' % (expr, s)) + raise NumExprClobberingError('Variables in expression "{expr}" ' + 'overlap with builtins: ({s})' + .format(expr=expr, s=s)) class AbstractEngine(object): diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index ef15e886fd554..d391764794c1c 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -40,8 +40,9 @@ def _check_engine(engine): engine = 'python' if engine not in _engines: - raise KeyError('Invalid engine {0!r} passed, valid engines are' - ' {1}'.format(engine, list(_engines.keys()))) + valid = list(_engines.keys()) + raise KeyError('Invalid engine {engine!r} passed, valid engines are' + ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) @@ -69,8 +70,8 @@ def _check_parser(parser): * If an invalid parser is passed """ if parser not in _parsers: - raise KeyError('Invalid parser {0!r} passed, valid parsers are' - ' {1}'.format(parser, _parsers.keys())) + raise KeyError('Invalid parser {parser!r} passed, valid parsers are' + ' {valid}'.format(parser=parser, valid=_parsers.keys())) def _check_resolvers(resolvers): @@ -78,8 +79,8 @@ def _check_resolvers(resolvers): for resolver in resolvers: if not hasattr(resolver, '__getitem__'): name = type(resolver).__name__ - raise TypeError('Resolver of type %r does not implement ' - 'the __getitem__ method' % name) + raise TypeError('Resolver of type {name!r} does not implement ' + 'the __getitem__ method'.format(name=name)) def _check_expression(expr): diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 73c27f4d772ca..ae956bce11329 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -189,8 +189,8 @@ def _filter_nodes(superclass, all_nodes=_all_nodes): # and we don't want `stmt` and friends in their so get only the class whose # names are capitalized _base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes -_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes & - _base_supported_nodes) +_msg = 'cannot both support and not support 
{intersection}'.format( + intersection=_unsupported_nodes & _base_supported_nodes) assert not _unsupported_nodes & _base_supported_nodes, _msg @@ -200,8 +200,8 @@ def _node_not_implemented(node_name, cls): """ def f(self, *args, **kwargs): - raise NotImplementedError("{0!r} nodes are not " - "implemented".format(node_name)) + raise NotImplementedError("{name!r} nodes are not " + "implemented".format(name=node_name)) return f @@ -217,7 +217,7 @@ def disallowed(cls): cls.unsupported_nodes = () for node in nodes: new_method = _node_not_implemented(node, cls) - name = 'visit_{0}'.format(node) + name = 'visit_{node}'.format(node=node) cls.unsupported_nodes += (name,) setattr(cls, name, new_method) return cls @@ -251,13 +251,14 @@ def add_ops(op_classes): """Decorator to add default implementation of ops.""" def f(cls): for op_attr_name, op_class in compat.iteritems(op_classes): - ops = getattr(cls, '{0}_ops'.format(op_attr_name)) - ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name)) + ops = getattr(cls, '{name}_ops'.format(name=op_attr_name)) + ops_map = getattr(cls, '{name}_op_nodes_map'.format( + name=op_attr_name)) for op in ops: op_node = ops_map[op] if op_node is not None: made_op = _op_maker(op_class, op) - setattr(cls, 'visit_{0}'.format(op_node), made_op) + setattr(cls, 'visit_{node}'.format(node=op_node), made_op) return cls return f @@ -388,9 +389,10 @@ def _maybe_evaluate_binop(self, op, op_class, lhs, rhs, res = op(lhs, rhs) if res.has_invalid_return_type: - raise TypeError("unsupported operand type(s) for {0}:" - " '{1}' and '{2}'".format(res.op, lhs.type, - rhs.type)) + raise TypeError("unsupported operand type(s) for {op}:" + " '{lhs}' and '{rhs}'".format(op=res.op, + lhs=lhs.type, + rhs=rhs.type)) if self.engine != 'pytables': if (res.op in _cmp_ops_syms and @@ -527,7 +529,8 @@ def visit_Attribute(self, node, **kwargs): if isinstance(value, ast.Name) and value.id == attr: return resolved - raise ValueError("Invalid Attribute context {0}".format(ctx.__name__)) + raise ValueError("Invalid Attribute context {name}" + .format(name=ctx.__name__)) def visit_Call_35(self, node, side=None, **kwargs): """ in 3.5 the starargs attribute was changed to be more flexible, @@ -549,7 +552,8 @@ def visit_Call_35(self, node, side=None, **kwargs): raise if res is None: - raise ValueError("Invalid function call {0}".format(node.func.id)) + raise ValueError("Invalid function call {func}" + .format(func=node.func.id)) if hasattr(res, 'value'): res = res.value @@ -558,8 +562,8 @@ def visit_Call_35(self, node, side=None, **kwargs): new_args = [self.visit(arg) for arg in node.args] if node.keywords: - raise TypeError("Function \"{0}\" does not support keyword " - "arguments".format(res.name)) + raise TypeError("Function \"{name}\" does not support keyword " + "arguments".format(name=res.name)) return res(*new_args, **kwargs) @@ -570,7 +574,7 @@ def visit_Call_35(self, node, side=None, **kwargs): for key in node.keywords: if not isinstance(key, ast.keyword): raise ValueError("keyword error in function call " - "'{0}'".format(node.func.id)) + "'{func}'".format(func=node.func.id)) if key.arg: # TODO: bug? 
@@ -598,7 +602,8 @@ def visit_Call_legacy(self, node, side=None, **kwargs): raise if res is None: - raise ValueError("Invalid function call {0}".format(node.func.id)) + raise ValueError("Invalid function call {func}" + .format(func=node.func.id)) if hasattr(res, 'value'): res = res.value @@ -609,8 +614,8 @@ def visit_Call_legacy(self, node, side=None, **kwargs): args += self.visit(node.starargs) if node.keywords or node.kwargs: - raise TypeError("Function \"{0}\" does not support keyword " - "arguments".format(res.name)) + raise TypeError("Function \"{name}\" does not support keyword " + "arguments".format(name=res.name)) return res(*args, **kwargs) @@ -623,7 +628,7 @@ def visit_Call_legacy(self, node, side=None, **kwargs): for key in node.keywords: if not isinstance(key, ast.keyword): raise ValueError("keyword error in function call " - "'{0}'".format(node.func.id)) + "'{func}'".format(func=node.func.id)) keywords[key.arg] = self.visit(key.value).value if node.kwargs is not None: keywords.update(self.visit(node.kwargs).value) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 83d02af65cc85..af068bd1f32b3 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -103,7 +103,7 @@ def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True, a_value = getattr(a, "values", a) b_value = getattr(b, "values", b) - result = ne.evaluate('a_value %s b_value' % op_str, + result = ne.evaluate('a_value {op} b_value'.format(op=op_str), local_dict={'a_value': a_value, 'b_value': b_value}, casting='safe', truediv=truediv, @@ -177,15 +177,15 @@ def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')), if _has_bool_dtype(a) and _has_bool_dtype(b): if op_str in unsupported: - warnings.warn("evaluating in Python space because the %r operator" - " is not supported by numexpr for the bool " - "dtype, use %r instead" % (op_str, - unsupported[op_str])) + warnings.warn("evaluating in Python space because the {op!r} " + "operator is not supported by numexpr for " + "the bool dtype, use {alt_op!r} instead" + .format(op=op_str, alt_op=unsupported[op_str])) return False if op_str in not_allowed: - raise NotImplementedError("operator %r not implemented for bool " - "dtypes" % op_str) + raise NotImplementedError("operator {op!r} not implemented for " + "bool dtypes".format(op=op_str)) return True diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 5870090856ff9..4b3c608a88be8 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -41,7 +41,8 @@ def _resolve_name(self): # must be a queryables if self.side == 'left': if self.name not in self.env.queryables: - raise NameError('name {0!r} is not defined'.format(self.name)) + raise NameError('name {name!r} is not defined' + .format(name=self.name)) return self.name # resolve the rhs (and allow it to be None) @@ -161,7 +162,7 @@ def metadata(self): def generate(self, v): """ create and return the op string for this TermValue """ val = v.tostring(self.encoding) - return "(%s %s %s)" % (self.lhs, self.op, val) + return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val) def convert_value(self, v): """ convert the expression that is in the term to something that is @@ -215,9 +216,8 @@ def stringify(value): # string quoting return TermValue(v, stringify(v), u('string')) else: - raise TypeError(("Cannot compare {v} of type {typ}" - " to {kind} column").format(v=v, 
typ=type(v), - kind=kind)) + raise TypeError("Cannot compare {v} of type {typ} to {kind} column" + .format(v=v, typ=type(v), kind=kind)) def convert_values(self): pass @@ -226,8 +226,8 @@ def convert_values(self): class FilterBinOp(BinOp): def __unicode__(self): - return pprint_thing("[Filter : [{0}] -> " - "[{1}]".format(self.filter[0], self.filter[1])) + return pprint_thing("[Filter : [{lhs}] -> [{op}]" + .format(lhs=self.filter[0], op=self.filter[1])) def invert(self): """ invert the filter """ @@ -244,7 +244,8 @@ def format(self): def evaluate(self): if not self.is_valid: - raise ValueError("query term is not valid [%s]" % self) + raise ValueError("query term is not valid [{slf}]" + .format(slf=self)) rhs = self.conform(self.rhs) values = [TermValue(v, v, self.kind) for v in rhs] @@ -273,9 +274,8 @@ def evaluate(self): pd.Index([v.value for v in values])) else: - raise TypeError( - "passing a filterable condition to a non-table indexer [%s]" % - self) + raise TypeError("passing a filterable condition to a non-table " + "indexer [{slf}]".format(slf=self)) return self @@ -298,7 +298,8 @@ def evaluate(self): class ConditionBinOp(BinOp): def __unicode__(self): - return pprint_thing("[Condition : [{0}]]".format(self.condition)) + return pprint_thing("[Condition : [{cond}]]" + .format(cond=self.condition)) def invert(self): """ invert the condition """ @@ -315,7 +316,8 @@ def format(self): def evaluate(self): if not self.is_valid: - raise ValueError("query term is not valid [%s]" % self) + raise ValueError("query term is not valid [{slf}]" + .format(slf=self)) # convert values if we are in the table if not self.is_in_table: @@ -330,7 +332,7 @@ def evaluate(self): # too many values to create the expression? if len(values) <= self._max_selectors: vs = [self.generate(v) for v in values] - self.condition = "(%s)" % ' | '.join(vs) + self.condition = "({cond})".format(cond=' | '.join(vs)) # use a filter after reading else: @@ -344,10 +346,9 @@ def evaluate(self): class JointConditionBinOp(ConditionBinOp): def evaluate(self): - self.condition = "(%s %s %s)" % ( - self.lhs.condition, - self.op, - self.rhs.condition) + self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition, + op=self.op, + rhs=self.rhs.condition) return self @@ -382,7 +383,8 @@ class ExprVisitor(BaseExprVisitor): def __init__(self, env, engine, parser, **kwargs): super(ExprVisitor, self).__init__(env, engine, parser) for bin_op in self.binary_ops: - setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]), + bin_node = self.binary_op_nodes_map[bin_op] + setattr(self, 'visit_{node}'.format(node=bin_node), lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs)) @@ -415,8 +417,8 @@ def visit_Subscript(self, node, **kwargs): try: return self.const_type(value[slobj], self.env) except TypeError: - raise ValueError("cannot subscript {0!r} with " - "{1!r}".format(value, slobj)) + raise ValueError("cannot subscript {value!r} with " + "{slobj!r}".format(value=value, slobj=slobj)) def visit_Attribute(self, node, **kwargs): attr = node.attr @@ -441,7 +443,8 @@ def visit_Attribute(self, node, **kwargs): if isinstance(value, ast.Name) and value.id == attr: return resolved - raise ValueError("Invalid Attribute context {0}".format(ctx.__name__)) + raise ValueError("Invalid Attribute context {name}" + .format(name=ctx.__name__)) def translate_In(self, op): return ast.Eq() if isinstance(op, ast.In) else op @@ -529,7 +532,7 @@ def __init__(self, where, queryables=None, encoding=None, scope_level=0): else: w = 
_validate_where(w) where[idx] = w - where = ' & ' .join(["(%s)" % w for w in where]) # noqa + where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa self.expr = where self.env = Scope(scope_level + 1, local_dict=local_dict) @@ -552,13 +555,15 @@ def evaluate(self): try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: - raise ValueError("cannot process expression [{0}], [{1}] is not a " - "valid condition".format(self.expr, self)) + raise ValueError("cannot process expression [{expr}], [{slf}] " + "is not a valid condition".format(expr=self.expr, + slf=self)) try: self.filter = self.terms.prune(FilterBinOp) except AttributeError: - raise ValueError("cannot process expression [{0}], [{1}] is not a " - "valid filter".format(self.expr, self)) + raise ValueError("cannot process expression [{expr}], [{slf}] " + "is not a valid filter".format(expr=self.expr, + slf=self)) return self.condition, self.filter @@ -578,7 +583,7 @@ def tostring(self, encoding): if self.kind == u'string': if encoding is not None: return self.converted - return '"%s"' % self.converted + return '"{converted}"'.format(converted=self.converted) elif self.kind == u'float': # python 2 str(float) is not always # round-trippable so use repr() diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 5a589473f64b7..6a298f5137eb1 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -137,8 +137,10 @@ def __init__(self, level, global_dict=None, local_dict=None, resolvers=(), def __unicode__(self): scope_keys = _get_pretty_string(list(self.scope.keys())) res_keys = _get_pretty_string(list(self.resolvers.keys())) - return '%s(scope=%s, resolvers=%s)' % (type(self).__name__, scope_keys, - res_keys) + unicode_str = '{name}(scope={scope_keys}, resolvers={res_keys})' + return unicode_str.format(name=type(self).__name__, + scope_keys=scope_keys, + res_keys=res_keys) @property def has_resolvers(self): @@ -269,8 +271,9 @@ def add_tmp(self, value): name : basestring The name of the temporary variable created. """ - name = '{0}_{1}_{2}'.format(type(value).__name__, self.ntemps, - _raw_hex_id(self)) + name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__, + num=self.ntemps, + hex_id=_raw_hex_id(self)) # add to inner most scope assert name not in self.temps
Progress towards #16130

- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

Replaced `%s` syntax with `.format` in core.computation. Additionally, made some of the existing positional `.format` code more explicit by changing things like `'{0}'.format(foo)` to `'{foo}'.format(foo=foo)`. Neither tests nor a whatsnew entry seem relevant here (and it's not obvious what a test would look like), but I'd be happy to add either if wanted.
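The two transformations, distilled from the hunks above into a standalone sketch (toy values):

```python
op_str, node = '/', 'Div'

# %-interpolation becomes str.format (e.g. expressions._bool_arith_check)
old = "operator %r not implemented for bool dtypes" % op_str
new = "operator {op!r} not implemented for bool dtypes".format(op=op_str)

# and positional format fields gain explicit names (e.g. expr.py's visitors)
old = 'visit_{0}'.format(node)
new = 'visit_{node}'.format(node=node)
```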
https://api.github.com/repos/pandas-dev/pandas/pulls/17209
2017-08-10T03:00:36Z
2017-08-10T10:26:58Z
2017-08-10T10:26:58Z
2017-08-10T16:29:28Z
Minor touch-ups to GitHub PULL_REQUEST_TEMPLATE
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e8b6ee21ad104..4e1e9ce017408 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,4 @@ - - [ ] closes #xxxx - - [ ] tests added / passed - - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - - [ ] whatsnew entry +- [ ] closes #xxxx +- [ ] tests added / passed +- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` +- [ ] whatsnew entry
Previously, `PULL_REQUEST_TEMPLATE.md` included spaces before each item in the [GitHub task list](https://github.com/blog/1375-task-lists-in-gfm-issues-pulls-comments), which caused the tasks to become nested upon GUI reordering. I also made minor changes to other aspects of `PULL_REQUEST_TEMPLATE.md`: I removed the `-u` arg from the `git diff` command since it's the default. In `contributing.rst`, I switched `-u` to its long form `--patch`, since it's later juxtaposed with `--name-only`.
https://api.github.com/repos/pandas-dev/pandas/pulls/17207
2017-08-09T19:17:05Z
2017-08-09T20:19:28Z
2017-08-09T20:19:28Z
2017-08-09T20:24:04Z
Infer compression from non-string paths
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index b8f142700b830..4032a7d22d4a2 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -126,6 +126,7 @@ Other Enhancements - :func:`date_range` now accepts 'Y' in addition to 'A' as an alias for end of year (:issue:`9313`) - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`. - :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`) +- `read_*` methods can now infer compression from non-string paths, such as ``pathlib.Path`` objects (:issue:`17206`). .. _whatsnew_0210.api_breaking: diff --git a/pandas/io/common.py b/pandas/io/common.py index cbfc33dbebb81..69a7e69ea724b 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -272,13 +272,15 @@ def _infer_compression(filepath_or_buffer, compression): if compression is None: return None - # Cannot infer compression of a buffer. Hence assume no compression. - is_path = isinstance(filepath_or_buffer, compat.string_types) - if compression == 'infer' and not is_path: - return None - - # Infer compression from the filename/URL extension + # Infer compression if compression == 'infer': + # Convert all path types (e.g. pathlib.Path) to strings + filepath_or_buffer = _stringify_path(filepath_or_buffer) + if not isinstance(filepath_or_buffer, compat.string_types): + # Cannot infer compression of a buffer, assume no compression + return None + + # Infer compression from the filename/URL extension for compression, extension in _compression_to_extension.items(): if filepath_or_buffer.endswith(extension): return compression diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9c76d3126890c..05a04f268f72b 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -208,11 +208,11 @@ <http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_ for more information on ``iterator`` and ``chunksize``. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' - For on-the-fly decompression of on-disk data. If 'infer', then use gzip, - bz2, zip or xz if filepath_or_buffer is a string ending in '.gz', '.bz2', - '.zip', or 'xz', respectively, and no decompression otherwise. If using - 'zip', the ZIP file must contain only one data file to be read in. - Set to None for no decompression. + For on-the-fly decompression of on-disk data. If 'infer' and + `filepath_or_buffer` is path-like, then detect compression from the + following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no + decompression). If using 'zip', the ZIP file must contain only one data + file to be read in. Set to None for no decompression. .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression. diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 6f345092c514d..143b76575e36b 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -62,8 +62,8 @@ def read_pickle(path, compression='infer'): File path compression : {'infer', 'gzip', 'bz2', 'xz', 'zip', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use - gzip, bz2, xz or zip if path is a string ending in '.gz', '.bz2', 'xz', - or 'zip' respectively, and no decompression otherwise. + gzip, bz2, xz or zip if path ends in '.gz', '.bz2', '.xz', + or '.zip' respectively, and no decompression otherwise. 
Set to None for no decompression. .. versionadded:: 0.20.0 diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index b527e3c5dc254..30904593fedc4 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -14,16 +14,6 @@ from pandas import read_csv, concat -try: - from pathlib import Path -except ImportError: - pass - -try: - from py.path import local as LocalPath -except ImportError: - pass - class CustomFSPath(object): """For testing fspath on unknown objects""" @@ -34,6 +24,21 @@ def __fspath__(self): return self.path +# Functions that consume a string path and return a string or path-like object +path_types = [str, CustomFSPath] + +try: + from pathlib import Path + path_types.append(Path) +except ImportError: + pass + +try: + from py.path import local as LocalPath + path_types.append(LocalPath) +except ImportError: + pass + HERE = os.path.dirname(__file__) @@ -83,6 +88,19 @@ def test_stringify_path_fspath(self): result = common._stringify_path(p) assert result == 'foo/bar.csv' + @pytest.mark.parametrize('extension,expected', [ + ('', None), + ('.gz', 'gzip'), + ('.bz2', 'bz2'), + ('.zip', 'zip'), + ('.xz', 'xz'), + ]) + @pytest.mark.parametrize('path_type', path_types) + def test_infer_compression_from_path(self, extension, expected, path_type): + path = path_type('foo/bar.csv' + extension) + compression = common._infer_compression(path, compression='infer') + assert compression == expected + def test_get_filepath_or_buffer_with_path(self): filename = '~/sometest' filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
Currently, `compression='infer'` only works on paths that are strings. However, pandas supports non-string paths, such as `pathlib.Path` objects. This PR adds support for inferring compression for all path types (for io interfaces that use `io.common._infer_compression`).

## Checklist

- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [x] No issue opened
- [x] tests added / passed
- [x] whatsnew entry
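A minimal sketch of the new behaviour, assuming hypothetical compressed files on disk:

```python
import pandas as pd
from pathlib import Path

# compression='infer' (the default) previously fell back to "no
# compression" for any non-string path; now the suffix decides
df = pd.read_csv(Path('data/tips.csv.gz'))   # gzip inferred
df = pd.read_csv(Path('data/tips.csv.bz2'))  # bz2 inferred
```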
https://api.github.com/repos/pandas-dev/pandas/pulls/17206
2017-08-09T18:18:27Z
2017-08-15T20:23:44Z
2017-08-15T20:23:44Z
2017-08-21T15:25:33Z
CLN/ASV clean-up frame stat ops benchmarks
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py index 12fbb2478c2a5..1e1eb167b46bf 100644 --- a/asv_bench/benchmarks/stat_ops.py +++ b/asv_bench/benchmarks/stat_ops.py @@ -1,92 +1,36 @@ from .pandas_vb_common import * -class stat_ops_frame_mean_float_axis_0(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) - - def time_stat_ops_frame_mean_float_axis_0(self): - self.df.mean() - - -class stat_ops_frame_mean_float_axis_1(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) - - def time_stat_ops_frame_mean_float_axis_1(self): - self.df.mean(1) - - -class stat_ops_frame_mean_int_axis_0(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) - - def time_stat_ops_frame_mean_int_axis_0(self): - self.dfi.mean() - - -class stat_ops_frame_mean_int_axis_1(object): - goal_time = 0.2 +def _set_use_bottleneck_False(): + try: + pd.options.compute.use_bottleneck = False + except: + from pandas.core import nanops + nanops._USE_BOTTLENECK = False - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) - - def time_stat_ops_frame_mean_int_axis_1(self): - self.dfi.mean(1) - - -class stat_ops_frame_sum_float_axis_0(object): - goal_time = 0.2 - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) - - def time_stat_ops_frame_sum_float_axis_0(self): - self.df.sum() - - -class stat_ops_frame_sum_float_axis_1(object): +class FrameOps(object): goal_time = 0.2 - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) + param_names = ['op', 'use_bottleneck', 'dtype', 'axis'] + params = [['mean', 'sum', 'median'], + [True, False], + ['float', 'int'], + [0, 1]] - def time_stat_ops_frame_sum_float_axis_1(self): - self.df.sum(1) + def setup(self, op, use_bottleneck, dtype, axis): + if dtype == 'float': + self.df = DataFrame(np.random.randn(100000, 4)) + elif dtype == 'int': + self.df = DataFrame(np.random.randint(1000, size=(100000, 4))) + if not use_bottleneck: + _set_use_bottleneck_False() -class stat_ops_frame_sum_int_axis_0(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) - - def time_stat_ops_frame_sum_int_axis_0(self): - self.dfi.sum() - - -class stat_ops_frame_sum_int_axis_1(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randn(100000, 4)) - self.dfi = DataFrame(np.random.randint(1000, size=self.df.shape)) + self.func = getattr(self.df, op) - def time_stat_ops_frame_sum_int_axis_1(self): - self.dfi.sum(1) + def time_op(self, op, use_bottleneck, dtype, axis): + self.func(axis=axis) class stat_ops_level_frame_sum(object):
Small clean-up: the frame stat ops benchmarks are collapsed into a single parametrized `FrameOps` class that times each op both with and without bottleneck. Also includes a benchmark for https://github.com/pandas-dev/pandas/pull/16509
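For reference, a sketch of the toggle the benchmark relies on, mirroring the fallback in the diff for pandas versions that predate the public option:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(100000, 4))

# disable bottleneck the way the benchmark does, with the fallback
# for pandas versions without the compute.use_bottleneck option
try:
    pd.options.compute.use_bottleneck = False
except Exception:
    from pandas.core import nanops
    nanops._USE_BOTTLENECK = False

df.mean(axis=0)  # timed per (op, use_bottleneck, dtype, axis) combo
```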
https://api.github.com/repos/pandas-dev/pandas/pulls/17205
2017-08-09T12:07:57Z
2017-08-10T10:37:40Z
2017-08-10T10:37:40Z
2017-08-10T10:37:43Z
DOCS: Updated NDFrame.astype docs
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2d52eed81d22b..bd3297f66a469 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3610,8 +3610,7 @@ def blocks(self): mapping={True: 'raise', False: 'ignore'}) def astype(self, dtype, copy=True, errors='raise', **kwargs): """ - Cast object to input numpy.dtype - Return a copy when copy = True (be really careful with this!) + Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- @@ -3620,6 +3619,10 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs): the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. + copy : bool, default True. + Return a copy when ``copy=True`` (be very careful setting + ``copy=False`` as changes to values then may propagate to other + pandas objects). errors : {'raise', 'ignore'}, default 'raise'. Control raising of exceptions on invalid data for provided dtype. @@ -3636,6 +3639,49 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs): Returns ------- casted : type of caller + + Examples + -------- + >>> ser = pd.Series([1, 2], dtype='int32') + >>> ser + 0 1 + 1 2 + dtype: int32 + >>> ser.astype('int64') + 0 1 + 1 2 + dtype: int64 + + Convert to categorical type: + + >>> ser.astype('category') + 0 1 + 1 2 + dtype: category + Categories (2, int64): [1, 2] + + Convert to ordered categorical type with custom ordering: + + >>> ser.astype('category', ordered=True, categories=[2, 1]) + 0 1 + 1 2 + dtype: category + Categories (2, int64): [2 < 1] + + Note that using ``copy=False`` and changing data on a new + pandas object may propagate changes: + + >>> s1 = pd.Series([1,2]) + >>> s2 = s1.astype('int', copy=False) + >>> s2[0] = 10 + >>> s1 # note that s1[0] has changed too + 0 10 + 1 2 + dtype: int64 + + See also + -------- + numpy.ndarray.astype : Cast a numpy array to a specified type. """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series
I've had a couple of situations where I've doubted whether it's ``ser.astype('category')`` or ``ser.astype('categorical')``, and the docstring doesn't say. A few examples help here and illustrate usage of the method. Also includes some cleanup of the docstring.
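In short, the question the new examples settle (mirroring the added docstring):

```python
import pandas as pd

ser = pd.Series([1, 2], dtype='int32')

# it's 'category', not 'categorical'
ser.astype('category')

# ordered categorical with a custom ordering, as in the new docstring
ser.astype('category', ordered=True, categories=[2, 1])

# and the copy=False caveat: writes can propagate back
s1 = pd.Series([1, 2])
s2 = s1.astype('int64', copy=False)
s2[0] = 10  # s1[0] is now 10 as well
```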
https://api.github.com/repos/pandas-dev/pandas/pulls/17203
2017-08-08T21:57:23Z
2017-08-09T11:24:37Z
2017-08-09T11:24:37Z
2017-08-09T17:52:26Z
COMPAT: reading json with lines=True from s3, xref #17200
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 4c6cdb9846305..84583a8b6af67 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -88,7 +88,7 @@ I/O - :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) - Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`). - Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) - +- Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`) Plotting ^^^^^^^^ diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 11bf3a9363953..21736673350d8 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -5,7 +5,7 @@ import pandas._libs.json as json from pandas._libs.tslib import iNaT -from pandas.compat import StringIO, long, u +from pandas.compat import StringIO, long, u, to_str from pandas import compat, isna from pandas import Series, DataFrame, to_datetime, MultiIndex from pandas.io.common import (get_filepath_or_buffer, _get_handle, @@ -458,8 +458,10 @@ def read(self): if self.lines and self.chunksize: obj = concat(self) elif self.lines: + + data = to_str(self.data) obj = self._get_object_parser( - self._combine_lines(self.data.split('\n')) + self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) @@ -612,7 +614,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: dtype = np.dtype(dtype) return data.astype(dtype), True - except: + except (TypeError, ValueError): return data, False if convert_dates: @@ -628,7 +630,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass if data.dtype.kind == 'f': @@ -639,7 +641,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass # do't coerce 0-len data @@ -651,7 +653,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, if (new_data == data).all(): data = new_data result = True - except: + except (TypeError, ValueError): pass # coerce ints to 64 @@ -661,7 +663,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('int64') result = True - except: + except (TypeError, ValueError): pass return data, result @@ -680,7 +682,7 @@ def _try_convert_to_date(self, data): if new_data.dtype == 'object': try: new_data = data.astype('int64') - except: + except (TypeError, ValueError): pass # ignore numbers that are out of range @@ -697,7 +699,7 @@ def _try_convert_to_date(self, data): unit=date_unit) except ValueError: continue - except: + except Exception: break return new_data, True return data, False diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py new file mode 100644 index 0000000000000..828d5d0ccd3c6 --- /dev/null +++ b/pandas/tests/io/conftest.py @@ -0,0 +1,74 @@ +import os + +import moto +import pytest +from pandas.io.parsers import read_table + +HERE = os.path.dirname(__file__) + + +@pytest.fixture(scope='module') +def tips_file(): + """Path to the tips dataset""" + return os.path.join(HERE, 'parser', 'data', 'tips.csv') + + +@pytest.fixture(scope='module') +def jsonl_file(): + """Path a JSONL dataset""" + return os.path.join(HERE, 'parser', 'data', 'items.jsonl') + + +@pytest.fixture(scope='module') +def salaries_table(): + """DataFrame 
with the salaries dataset""" + path = os.path.join(HERE, 'parser', 'data', 'salaries.csv') + return read_table(path) + + +@pytest.fixture(scope='module') +def s3_resource(tips_file, jsonl_file): + """Fixture for mocking S3 interaction. + + The primary bucket name is "pandas-test". The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + + A private bucket "cant_get_it" is also created. The boto3 s3 resource + is yielded by the fixture. + """ + pytest.importorskip('s3fs') + moto.mock_s3().start() + + test_s3_files = [ + ('tips.csv', tips_file), + ('tips.csv.gz', tips_file + '.gz'), + ('tips.csv.bz2', tips_file + '.bz2'), + ('items.jsonl', jsonl_file), + ] + + def add_tips_files(bucket_name): + for s3_key, file_name in test_s3_files: + with open(file_name, 'rb') as f: + conn.Bucket(bucket_name).put_object( + Key=s3_key, + Body=f) + + boto3 = pytest.importorskip('boto3') + # see gh-16135 + bucket = 'pandas-test' + + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + add_tips_files(bucket) + + conn.create_bucket(Bucket='cant_get_it', ACL='private') + add_tips_files('cant_get_it') + + yield conn + + moto.mock_s3().stop() diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 1c895f7e9e89a..fe447534efdc7 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -4,7 +4,6 @@ from pandas.compat import (range, lrange, StringIO, OrderedDict, is_platform_32bit) import os - import numpy as np from pandas import (Series, DataFrame, DatetimeIndex, Timestamp, read_json, compat) @@ -1032,6 +1031,70 @@ def test_tz_range_is_utc(self): df = DataFrame({'DT': dti}) assert dumps(df, iso_dates=True) == dfexp + def test_read_inline_jsonl(self): + # GH9180 + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_s3_jsonl(self, s3_resource): + pytest.importorskip('s3fs') + # GH17200 + + result = read_json('s3n://pandas-test/items.jsonl', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_local_jsonl(self): + # GH17200 + with ensure_clean('tmp_items.json') as path: + with open(path, 'w') as infile: + infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') + result = read_json(path, lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_jsonl_unicode_chars(self): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + # simulate string + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_to_jsonl(self): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b']) + result = 
df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], + columns=["a\\", 'b']) + result = df.to_json(orient="records", lines=True) + expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n' + '{"a\\\\":"foo\\"","b":"bar"}') + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + def test_latin_encoding(self): if compat.PY2: tm.assert_raises_regex( diff --git a/pandas/tests/io/parser/data/items.jsonl b/pandas/tests/io/parser/data/items.jsonl new file mode 100644 index 0000000000000..f784d37befa82 --- /dev/null +++ b/pandas/tests/io/parser/data/items.jsonl @@ -0,0 +1,2 @@ +{"a": 1, "b": 2} +{"b":2, "a" :1} diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 27cc708889fa2..d00d3f31ce189 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -4,10 +4,7 @@ Tests parsers ability to read and parse non-local files and hence require a network connection to be read. """ -import os - import pytest -import moto import pandas.util.testing as tm from pandas import DataFrame @@ -15,51 +12,6 @@ from pandas.compat import BytesIO -@pytest.fixture(scope='module') -def tips_file(): - return os.path.join(tm.get_data_path(), 'tips.csv') - - -@pytest.fixture(scope='module') -def salaries_table(): - path = os.path.join(tm.get_data_path(), 'salaries.csv') - return read_table(path) - - -@pytest.fixture(scope='module') -def s3_resource(tips_file): - pytest.importorskip('s3fs') - moto.mock_s3().start() - - test_s3_files = [ - ('tips.csv', tips_file), - ('tips.csv.gz', tips_file + '.gz'), - ('tips.csv.bz2', tips_file + '.bz2'), - ] - - def add_tips_files(bucket_name): - for s3_key, file_name in test_s3_files: - with open(file_name, 'rb') as f: - conn.Bucket(bucket_name).put_object( - Key=s3_key, - Body=f) - - boto3 = pytest.importorskip('boto3') - # see gh-16135 - bucket = 'pandas-test' - - conn = boto3.resource("s3", region_name="us-east-1") - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) - - conn.create_bucket(Bucket='cant_get_it', ACL='private') - add_tips_files('cant_get_it') - - yield conn - - moto.mock_s3().stop() - - @pytest.mark.network @pytest.mark.parametrize( "compression,extension",
Attempt to decode the bytes array with the `encoding` passed to the call.

- [ ] closes #17200
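The usage this fixes, as exercised by the new test (the bucket and key below exist only in the moto-mocked fixture):

```python
import pandas as pd

# the S3 payload arrives as bytes; to_str() now decodes it before the
# '\n' split, so line-delimited JSON from S3 parses on Python 3
df = pd.read_json('s3n://pandas-test/items.jsonl', lines=True)
```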
https://api.github.com/repos/pandas-dev/pandas/pulls/17201
2017-08-08T18:39:45Z
2017-11-27T11:34:57Z
2017-11-27T11:34:57Z
2017-12-11T20:21:52Z
Define Series.plot and Series.hist in class definition
diff --git a/pandas/core/series.py b/pandas/core/series.py index e42ba3908a29a..61508c11cae4b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -76,6 +76,8 @@ from pandas._libs import index as libindex, tslib as libts, lib, iNaT from pandas.core.config import get_option +import pandas.plotting._core as gfx + __all__ = ['Series'] _shared_doc_kwargs = dict( @@ -2952,12 +2954,23 @@ def _dir_additions(self): pass return rv + # ---------------------------------------------------------------------- + # Add plotting methods to Series + plot = base.AccessorProperty(gfx.SeriesPlotMethods, + gfx.SeriesPlotMethods) + hist = gfx.hist_series + Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0}) Series._add_numeric_operations() Series._add_series_only_operations() Series._add_series_or_dataframe_operations() +# Add arithmetic! +ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs) +ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs) + + # ----------------------------------------------------------------------------- # Supplementary functions @@ -3129,17 +3142,3 @@ def create_from_value(value, index, dtype): subarr = np.array(data, dtype=object, copy=copy) return subarr - - -# ---------------------------------------------------------------------- -# Add plotting methods to Series - -import pandas.plotting._core as _gfx # noqa - -Series.plot = base.AccessorProperty(_gfx.SeriesPlotMethods, - _gfx.SeriesPlotMethods) -Series.hist = _gfx.hist_series - -# Add arithmetic! -ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs) -ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 97295dfa7baf1..47d15195315ba 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -18,6 +18,7 @@ is_period_arraylike, is_nested_list_like ) +from pandas.core.dtypes.generic import ABCSeries from pandas.compat import lrange import pandas.compat as compat @@ -25,7 +26,6 @@ import pandas.core.common as com from pandas.core.index import Index -from pandas.core.series import Series from pandas.core.indexes.datetimes import date_range import pandas.core.tools.datetimes as tools import pandas.tseries.frequencies as frequencies @@ -175,7 +175,7 @@ def _dt_to_float_ordinal(dt): preserving hours, minutes, seconds and microseconds. Return value is a :func:`float`. 
""" - if (isinstance(dt, (np.ndarray, Index, Series) + if (isinstance(dt, (np.ndarray, Index, ABCSeries) ) and is_datetime64_ns_dtype(dt)): base = dates.epoch2num(dt.asi8 / 1.0E9) else: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index b8d7cebe8a274..e5b9497993172 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -18,10 +18,12 @@ is_number, is_hashable, is_iterator) +from pandas.core.dtypes.generic import ABCSeries + from pandas.core.common import AbstractMethodError, _try_sort from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex -from pandas.core.series import Series + from pandas.core.indexes.period import PeriodIndex from pandas.compat import range, lrange, map, zip, string_types import pandas.compat as compat @@ -334,7 +336,7 @@ def result(self): def _compute_plot_data(self): data = self.data - if isinstance(data, Series): + if isinstance(data, ABCSeries): label = self.label if label is None and data.name is None: label = 'None' @@ -1575,6 +1577,7 @@ def maybe_color_bp(self, bp): def _make_plot(self): if self.subplots: + from pandas.core.series import Series self._return_obj = Series() for i, (label, y) in enumerate(self._iter_data()): @@ -2338,6 +2341,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, figsize=figsize, layout=layout) axes = _flatten(axes) + from pandas.core.series import Series ret = Series() for (key, group), ax in zip(grouped, axes): d = group.boxplot(ax=ax, column=column, fontsize=fontsize, @@ -2409,7 +2413,6 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, _axes = _flatten(axes) - result = Series() ax_values = [] for i, col in enumerate(columns): @@ -2422,6 +2425,7 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, ax_values.append(re_plotf) ax.grid(grid) + from pandas.core.series import Series result = Series(ax_values, index=columns) # Return axes in multiplot case, maybe revisit later # 985 diff --git a/pandas/plotting/_tools.py b/pandas/plotting/_tools.py index 0c2314087525c..389e238ccb96e 100644 --- a/pandas/plotting/_tools.py +++ b/pandas/plotting/_tools.py @@ -8,8 +8,8 @@ import numpy as np from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.generic import ABCSeries from pandas.core.index import Index -from pandas.core.series import Series from pandas.compat import range @@ -25,8 +25,7 @@ def format_date_labels(ax, rot): pass -def table(ax, data, rowLabels=None, colLabels=None, - **kwargs): +def table(ax, data, rowLabels=None, colLabels=None, **kwargs): """ Helper function to convert DataFrame and Series to matplotlib.table @@ -45,7 +44,7 @@ def table(ax, data, rowLabels=None, colLabels=None, matplotlib table object """ from pandas import DataFrame - if isinstance(data, Series): + if isinstance(data, ABCSeries): data = DataFrame(data, columns=[data.name]) elif isinstance(data, DataFrame): pass
Avoid fragile circular import between `core.series` and `plotting._core`, `plotting._tools`, `plotting._converter`. Ends the saga of #16913, #16931.

In the status quo, the tail end of `core.series` does:

```
import pandas.plotting._core as _gfx  # noqa

Series.plot = base.AccessorProperty(_gfx.SeriesPlotMethods,
                                    _gfx.SeriesPlotMethods)
Series.hist = _gfx.hist_series
```

This PR moves the import of `plotting._core` to the top of the module and defines `plot` and `hist` inside the `class Series` block. Initially this caused a bunch of test failures because `plotting._core` itself imports `Series` (and `plotting._tools`, which also imports `Series`). #16913 and #16931 removed these circular imports, but mysteriously that didn't solve the problem. The one that I missed last time around was `pandas.plotting.__init__`, which imports `pandas.plotting._converter`, which imports `Series`. Changing that to `ABCSeries` in addition to the earlier ones finally fixes the problem.

- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [ ] whatsnew entry
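The decoupling pattern the diff applies, shown in isolation (the helper name here is made up):

```python
# before: importing the concrete class at module import time
#     from pandas.core.series import Series
#     isinstance(data, Series)
# after: check against the abstract type, which pulls in no cycle
from pandas.core.dtypes.generic import ABCSeries

def wraps_series(data):
    # same runtime check, no import of pandas.core.series needed
    return isinstance(data, ABCSeries)
```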
https://api.github.com/repos/pandas-dev/pandas/pulls/17199
2017-08-08T16:00:54Z
2017-08-08T23:28:43Z
2017-08-08T23:28:43Z
2017-10-30T16:24:28Z
BUG: support pandas objects in iloc with old numpy versions
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 2f61b71d06019..2c6b1c5bc1897 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -278,6 +278,7 @@ Indexing - Fixes bug where indexing with ``np.inf`` caused an ``OverflowError`` to be raised (:issue:`16957`) - Bug in reindexing on an empty ``CategoricalIndex`` (:issue:`16770`) - Fixes ``DataFrame.loc`` for setting with alignment and tz-aware ``DatetimeIndex`` (:issue:`16889`) +- Avoids ``IndexError`` when passing an Index or Series to ``.iloc`` with older numpy (:issue:`17193`) I/O ^^^ diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 0f85c4e046e5a..7977ef7e7efb9 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -861,6 +861,9 @@ def _is_empty_indexer(indexer): # set else: + if _np_version_under1p9: + # Work around GH 6168 to support old numpy + indexer = getattr(indexer, 'values', indexer) values[indexer] = value # coerce and try to infer the dtypes of the result diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 1ba9f3101e7b6..31fee303a41e2 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -282,6 +282,19 @@ def test_iloc_setitem_list(self): index=["A", "B", "C"], columns=["A", "B", "C"]) tm.assert_frame_equal(df, expected) + def test_iloc_setitem_pandas_object(self): + # GH 17193, affecting old numpy (1.7 and 1.8) + s_orig = Series([0, 1, 2, 3]) + expected = Series([0, -1, -2, 3]) + + s = s_orig.copy() + s.iloc[Series([1, 2])] = [-1, -2] + tm.assert_series_equal(s, expected) + + s = s_orig.copy() + s.iloc[pd.Index([1, 2])] = [-1, -2] + tm.assert_series_equal(s, expected) + def test_iloc_setitem_dups(self): # GH 6766
- [x] closes #17193
- [x] tests added / passed
- [x] passes ``git diff master -u -- "*.py" | flake8 --diff``
- [x] whatsnew entry
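The cases from the new test, which previously raised ``IndexError`` under numpy 1.7/1.8:

```python
import pandas as pd

s = pd.Series([0, 1, 2, 3])

# pandas now unwraps the indexer's .values before the numpy setitem,
# working around GH 6168 on old numpy
s.iloc[pd.Series([1, 2])] = [-1, -2]
s.iloc[pd.Index([1, 2])] = [-1, -2]
```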
https://api.github.com/repos/pandas-dev/pandas/pulls/17194
2017-08-07T22:07:19Z
2017-08-08T23:30:09Z
2017-08-08T23:30:09Z
2017-08-18T15:28:27Z
TST: Partial Boolean DataFrame Indexing (#17170)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 3ecd1f3029cad..f1f51f26df55c 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -861,6 +861,20 @@ def test_maybe_numeric_slice(self): expected = [1] assert result == expected + def test_partial_boolean_frame_indexing(self): + # GH 17170 + df = pd.DataFrame(np.arange(9.).reshape(3, 3), + index=list('abc'), + columns=list('ABC')) + index_df = pd.DataFrame(1, index=list('ab'), columns=list('AB')) + result = df[index_df.notnull()] + expected = pd.DataFrame(np.array([[0., 1., np.nan], + [3., 4., np.nan], + [np.nan] * 3]), + index=list('abc'), + columns=list('ABC')) + tm.assert_frame_equal(result, expected) + class TestSeriesNoneCoercion(object): EXPECTED_RESULTS = [
- [x] closes #17170
- [x] tests added / passed
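The behaviour the new test pins down, as a standalone sketch:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(9.).reshape(3, 3),
                  index=list('abc'), columns=list('ABC'))
mask = pd.DataFrame(1, index=list('ab'), columns=list('AB'))

# the mask covers only a sub-block of df; positions outside it come
# back as NaN instead of raising
df[mask.notnull()]
```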
https://api.github.com/repos/pandas-dev/pandas/pulls/17186
2017-08-07T04:56:42Z
2017-08-07T18:04:08Z
2017-08-07T18:04:08Z
2017-12-20T02:04:15Z
move pivot_table doc-string to DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 027a427555253..26de1a9c0b1d3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4146,6 +4146,92 @@ def pivot(self, index=None, columns=None, values=None): from pandas.core.reshape.reshape import pivot return pivot(self, index=index, columns=columns, values=values) + _shared_docs['pivot_table'] = """ + Create a spreadsheet-style pivot table as a DataFrame. The levels in + the pivot table will be stored in MultiIndex objects (hierarchical + indexes) on the index and columns of the result DataFrame + + Parameters + ----------%s + values : column to aggregate, optional + index : column, Grouper, array, or list of the previous + If an array is passed, it must be the same length as the data. The + list can contain any of the other types (except list). + Keys to group by on the pivot table index. If an array is passed, + it is being used as the same manner as column values. + columns : column, Grouper, array, or list of the previous + If an array is passed, it must be the same length as the data. The + list can contain any of the other types (except list). + Keys to group by on the pivot table column. If an array is passed, + it is being used as the same manner as column values. + aggfunc : function or list of functions, default numpy.mean + If list of functions passed, the resulting pivot table will have + hierarchical columns whose top level are the function names + (inferred from the function objects themselves) + fill_value : scalar, default None + Value to replace missing values with + margins : boolean, default False + Add all row / columns (e.g. for subtotal / grand totals) + dropna : boolean, default True + Do not include columns whose entries are all NaN + margins_name : string, default 'All' + Name of the row / column that will contain the totals + when margins is True. + + Examples + -------- + >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", + ... "bar", "bar", "bar", "bar"], + ... "B": ["one", "one", "one", "two", "two", + ... "one", "one", "two", "two"], + ... "C": ["small", "large", "large", "small", + ... "small", "large", "small", "small", + ... "large"], + ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]}) + >>> df + A B C D + 0 foo one small 1 + 1 foo one large 2 + 2 foo one large 2 + 3 foo two small 3 + 4 foo two small 3 + 5 bar one large 4 + 6 bar one small 5 + 7 bar two small 6 + 8 bar two large 7 + + >>> table = pivot_table(df, values='D', index=['A', 'B'], + ... columns=['C'], aggfunc=np.sum) + >>> table + ... 
# doctest: +NORMALIZE_WHITESPACE + C large small + A B + bar one 4.0 5.0 + two 7.0 6.0 + foo one 4.0 1.0 + two NaN 6.0 + + Returns + ------- + table : DataFrame + + See also + -------- + DataFrame.pivot : pivot without aggregation that can handle + non-numeric data + """ + + @Substitution('') + @Appender(_shared_docs['pivot_table']) + def pivot_table(self, values=None, index=None, columns=None, + aggfunc='mean', fill_value=None, margins=False, + dropna=True, margins_name='All'): + from pandas.core.reshape.pivot import pivot_table + return pivot_table(self, values=values, index=index, columns=columns, + aggfunc=aggfunc, fill_value=fill_value, + margins=margins, dropna=dropna, + margins_name=margins_name) + def stack(self, level=-1, dropna=True): """ Pivot a level of the (possibly hierarchical) column labels, returning a diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index d4ea49c130add..e61adf3aac30a 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -2,95 +2,30 @@ from pandas.core.dtypes.common import is_list_like, is_scalar +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries + from pandas.core.reshape.concat import concat -from pandas import Series, DataFrame, MultiIndex, Index +from pandas.core.series import Series from pandas.core.groupby import Grouper from pandas.core.reshape.util import cartesian_product -from pandas.core.index import _get_combined_index +from pandas.core.index import Index, _get_combined_index from pandas.compat import range, lrange, zip from pandas import compat import pandas.core.common as com +from pandas.util._decorators import Appender, Substitution + +from pandas.core.frame import _shared_docs +# Note: We need to make sure `frame` is imported before `pivot`, otherwise +# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency + import numpy as np +@Substitution('\ndata : DataFrame') +@Appender(_shared_docs['pivot_table'], indents=1) def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All'): - """ - Create a spreadsheet-style pivot table as a DataFrame. The levels in the - pivot table will be stored in MultiIndex objects (hierarchical indexes) on - the index and columns of the result DataFrame - - Parameters - ---------- - data : DataFrame - values : column to aggregate, optional - index : column, Grouper, array, or list of the previous - If an array is passed, it must be the same length as the data. The list - can contain any of the other types (except list). - Keys to group by on the pivot table index. If an array is passed, it - is being used as the same manner as column values. - columns : column, Grouper, array, or list of the previous - If an array is passed, it must be the same length as the data. The list - can contain any of the other types (except list). - Keys to group by on the pivot table column. If an array is passed, it - is being used as the same manner as column values. - aggfunc : function or list of functions, default numpy.mean - If list of functions passed, the resulting pivot table will have - hierarchical columns whose top level are the function names (inferred - from the function objects themselves) - fill_value : scalar, default None - Value to replace missing values with - margins : boolean, default False - Add all row / columns (e.g. 
for subtotal / grand totals) - dropna : boolean, default True - Do not include columns whose entries are all NaN - margins_name : string, default 'All' - Name of the row / column that will contain the totals - when margins is True. - - Examples - -------- - >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", - ... "bar", "bar", "bar", "bar"], - ... "B": ["one", "one", "one", "two", "two", - ... "one", "one", "two", "two"], - ... "C": ["small", "large", "large", "small", - ... "small", "large", "small", "small", - ... "large"], - ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]}) - >>> df - A B C D - 0 foo one small 1 - 1 foo one large 2 - 2 foo one large 2 - 3 foo two small 3 - 4 foo two small 3 - 5 bar one large 4 - 6 bar one small 5 - 7 bar two small 6 - 8 bar two large 7 - - >>> table = pivot_table(df, values='D', index=['A', 'B'], - ... columns=['C'], aggfunc=np.sum) - >>> table - ... # doctest: +NORMALIZE_WHITESPACE - C large small - A B - bar one 4.0 5.0 - two 7.0 6.0 - foo one 4.0 1.0 - two NaN 6.0 - - Returns - ------- - table : DataFrame - - See also - -------- - DataFrame.pivot : pivot without aggregation that can handle - non-numeric data - """ index = _convert_by(index) columns = _convert_by(columns) @@ -162,6 +97,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', table = agged.unstack(to_unstack) if not dropna: + from pandas import MultiIndex try: m = MultiIndex.from_arrays(cartesian_product(table.index.levels), names=table.index.names) @@ -176,7 +112,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', except AttributeError: pass # it's a single level or a series - if isinstance(table, DataFrame): + if isinstance(table, ABCDataFrame): table = table.sort_index(axis=1) if fill_value is not None: @@ -197,16 +133,13 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', if len(index) == 0 and len(columns) > 0: table = table.T - # GH 15193 Makse sure empty columns are removed if dropna=True - if isinstance(table, DataFrame) and dropna: + # GH 15193 Make sure empty columns are removed if dropna=True + if isinstance(table, ABCDataFrame) and dropna: table = table.dropna(how='all', axis=1) return table -DataFrame.pivot_table = pivot_table - - def _add_margins(table, data, values, rows, cols, aggfunc, margins_name='All', fill_value=None): if not isinstance(margins_name, compat.string_types): @@ -230,7 +163,7 @@ def _add_margins(table, data, values, rows, cols, aggfunc, else: key = margins_name - if not values and isinstance(table, Series): + if not values and isinstance(table, ABCSeries): # If there are no values and the table is a series, then there is only # one column in the data. Compute grand margin and return it. 
return table.append(Series({key: grand_margin[margins_name]})) @@ -257,6 +190,7 @@ def _add_margins(table, data, values, rows, cols, aggfunc, else: row_margin[k] = grand_margin[k[0]] + from pandas import DataFrame margin_dummy = DataFrame(row_margin, columns=[key]).T row_names = result.index.names @@ -402,7 +336,7 @@ def _convert_by(by): if by is None: by = [] elif (is_scalar(by) or - isinstance(by, (np.ndarray, Index, Series, Grouper)) or + isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) or hasattr(by, '__call__')): by = [by] else: @@ -523,6 +457,7 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") + from pandas import DataFrame df = DataFrame(data, index=common_idx) if values is None: df['__dummy__'] = 0 @@ -620,7 +555,7 @@ def _get_names(arrs, names, prefix='row'): if names is None: names = [] for i, arr in enumerate(arrs): - if isinstance(arr, Series) and arr.name is not None: + if isinstance(arr, ABCSeries) and arr.name is not None: names.append(arr.name) else: names.append('%s_%d' % (prefix, i))
Moves the `pivot_table` docstring to `DataFrame` instead of pinning it on in `core.reshape.pivot`.

At the moment `core.reshape.pivot.pivot_table` is pinned to `DataFrame` by writing `DataFrame.pivot_table = pivot_table`. This PR defines `pivot_table` directly in `DataFrame` and has it delegate to the function. It's the same pattern already used by `DataFrame.pivot`. I think this is the last method pinned onto `DataFrame` this way; two more are left in `Series`. (A toy sketch of the two patterns is below the checklist.)

- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [ ] whatsnew entry
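A toy sketch of the two patterns (hypothetical `Frame`/`Frame2` classes, heavily simplified):

```python
# before: free function defined elsewhere and pinned on after the fact
class Frame(object):
    pass

def pivot_table(data, values=None):
    """The docstring lives on the free function."""
    return ('pivoted', data, values)

Frame.pivot_table = pivot_table  # monkey-patch style pinning

# after: method defined in the class body, delegating to the function;
# the method (and its docstring) now lives where readers expect it
class Frame2(object):
    def pivot_table(self, values=None):
        return pivot_table(self, values=values)

print(Frame().pivot_table(values='D')[0])   # 'pivoted'
print(Frame2().pivot_table(values='D')[0])  # 'pivoted'
```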
https://api.github.com/repos/pandas-dev/pandas/pulls/17174
2017-08-04T18:45:32Z
2017-08-11T10:36:14Z
2017-08-11T10:36:14Z
2017-10-30T16:24:24Z
use == to test String equality
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 0a4426b55b323..09603fd6fdcce 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -10,10 +10,10 @@ def get_engine(engine): """ return our implementation """ - if engine is 'auto': + if engine == 'auto': engine = get_option('io.parquet.engine') - if engine is 'auto': + if engine == 'auto': # try engines in this order try: return PyArrowImpl()
@jreback quick fix following f4330611ff5ac1cbb4a89c4a7dab3d0900f9e64a (see [lgtm report](https://lgtm.com/projects/g/pydata/pandas/rev/f4330611ff5ac1cbb4a89c4a7dab3d0900f9e64a)).

Using `is` instead of `==` might work in simple cases such as this one, because CPython happens to intern short strings, but `is` checks identity rather than value and will produce unintended results pretty quickly:

```
>>> a = "f"
>>> b = "f"
>>> a == b
True
>>> a is b
True
```

but:

```
>>> a = "f "
>>> b = "f "
>>> a == b
True
>>> a is b
False
```
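The identity check is also easy to defeat with a string that is merely *equal* to `'auto'` — CPython does not intern strings built at run time:

```python
engine = "".join(["au", "to"])  # equal to 'auto', but a distinct object

print(engine == 'auto')  # True
print(engine is 'auto')  # False -- the old `get_engine` check would skip the branch
```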
https://api.github.com/repos/pandas-dev/pandas/pulls/17171
2017-08-04T11:20:41Z
2017-08-07T10:46:07Z
2017-08-07T10:46:07Z
2017-08-07T10:46:09Z
Remove unused get methods that would raise AttributeError if called
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 37fc1c01061ec..e62f88f47897c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -323,10 +323,6 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, fill_value=fill_value, mask_info=mask_info) return self.make_block(new_values, fastpath=True) - def get(self, item): - loc = self.items.get_loc(item) - return self.values[loc] - def iget(self, i): return self.values[i] @@ -1658,13 +1654,6 @@ def set(self, locs, values, check=False): assert locs.tolist() == [0] self.values = values - def get(self, item): - if self.ndim == 1: - loc = self.items.get_loc(item) - return self.values[loc] - else: - return self.values - def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False, mgr=None): """ @@ -4722,8 +4711,6 @@ def _concat_indexes(indexes): def _block2d_to_blocknd(values, placement, shape, labels, ref_items): """ pivot to the labels shape """ - from pandas.core.internals import make_block - panel_shape = (len(placement),) + shape # TODO: lexsort depth needs to be 2!!
- [x] closes #17159
- [ ] tests added / passed
- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17169
2017-08-04T07:13:48Z
2017-08-07T13:10:37Z
2017-08-07T13:10:37Z
2017-08-07T22:53:03Z
Add chunksize param to read_json when lines=True
diff --git a/asv_bench/benchmarks/io_bench.py b/asv_bench/benchmarks/io_bench.py index 52064d2cdb8a2..93273955a29b9 100644 --- a/asv_bench/benchmarks/io_bench.py +++ b/asv_bench/benchmarks/io_bench.py @@ -1,3 +1,4 @@ +import os from .pandas_vb_common import * from pandas import concat, Timestamp, compat try: @@ -192,3 +193,32 @@ def time_read_nrows(self, compression, engine): ext = ".bz2" pd.read_csv(self.big_fname + ext, nrows=10, compression=compression, engine=engine) + + +class read_json_lines(object): + goal_time = 0.2 + fname = "__test__.json" + + def setup(self): + self.N = 100000 + self.C = 5 + self.df = DataFrame(dict([('float{0}'.format(i), randn(self.N)) for i in range(self.C)])) + self.df.to_json(self.fname,orient="records",lines=True) + + def teardown(self): + try: + os.remove(self.fname) + except: + pass + + def time_read_json_lines(self): + pd.read_json(self.fname, lines=True) + + def time_read_json_lines_chunk(self): + pd.concat(pd.read_json(self.fname, lines=True, chunksize=self.N//4)) + + def peakmem_read_json_lines(self): + pd.read_json(self.fname, lines=True) + + def peakmem_read_json_lines_chunk(self): + pd.concat(pd.read_json(self.fname, lines=True, chunksize=self.N//4)) diff --git a/doc/source/io.rst b/doc/source/io.rst index d6abed6e9d1ad..4eba9687efc58 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1845,6 +1845,7 @@ is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series`` seconds, milliseconds, microseconds or nanoseconds respectively. - ``lines`` : reads file as one json object per line. - ``encoding`` : The encoding to use to decode py3 bytes. +- ``chunksize`` : when used in combination with ``lines=True``, return a JsonReader which reads in ``chunksize`` lines per iteration. The parser will raise one of ``ValueError/TypeError/AssertionError`` if the JSON is not parseable. @@ -2049,6 +2050,10 @@ Line delimited json pandas is able to read and write line-delimited json files that are common in data processing pipelines using Hadoop or Spark. +.. versionadded:: 0.21.0 + +For line-delimited json files, pandas can also return an iterator which reads in ``chunksize`` lines at a time. This can be useful for large files or to read from a stream. + .. ipython:: python jsonl = ''' @@ -2059,6 +2064,11 @@ using Hadoop or Spark. df df.to_json(orient='records', lines=True) + # reader is an iterator that returns `chunksize` lines each iteration + reader = pd.read_json(StringIO(jsonl), lines=True, chunksize=1) + reader + for chunk in reader: + print(chunk) .. _io.table_schema: diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 50f11c38bae23..d5d508d02cb73 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -162,6 +162,7 @@ Other Enhancements - :func:`MultiIndex.is_monotonic_decreasing` has been implemented. Previously returned ``False`` in all cases. (:issue:`16554`) - :func:`Categorical.rename_categories` now accepts a dict-like argument as `new_categories` and only updates the categories found in that dict. (:issue:`17336`) - :func:`read_excel` raises ``ImportError`` with a better message if ``xlrd`` is not installed. (:issue:`17613`) +- :func:`read_json` now accepts a ``chunksize`` parameter that can be used when ``lines=True``. If ``chunksize`` is passed, read_json now returns an iterator which reads in ``chunksize`` lines with each iteration. 
(:issue:`17048`) - :meth:`DataFrame.assign` will preserve the original order of ``**kwargs`` for Python 3.6+ users instead of sorting the column names diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 5dae6099446d0..ab74b265b6a06 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -1,4 +1,5 @@ # pylint: disable-msg=E1101,W0613,W0603 +from itertools import islice import os import numpy as np @@ -8,8 +9,10 @@ from pandas import compat, isna from pandas import Series, DataFrame, to_datetime, MultiIndex from pandas.io.common import (get_filepath_or_buffer, _get_handle, - _stringify_path) + _stringify_path, BaseIterator) +from pandas.io.parsers import _validate_integer from pandas.core.common import AbstractMethodError +from pandas.core.reshape.concat import concat from pandas.io.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits from .table_schema import build_table_schema @@ -175,7 +178,7 @@ def write(self): def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, convert_axes=True, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None, encoding=None, - lines=False): + lines=False, chunksize=None): """ Convert a JSON string to pandas object @@ -264,6 +267,16 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, .. versionadded:: 0.19.0 + chunksize: integer, default None + Return JsonReader object for iteration. + See the `line-delimted json docs + <http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_ + for more information on ``chunksize``. + This can only be passed if `lines=True`. + If this is None, the file will be read into memory all at once. + + .. versionadded:: 0.21.0 + Returns ------- result : Series or DataFrame, depending on the value of `typ`. @@ -323,47 +336,167 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf, encoding=encoding) - if isinstance(filepath_or_buffer, compat.string_types): - try: - exists = os.path.exists(filepath_or_buffer) - - # if the filepath is too long will raise here - # 5874 - except (TypeError, ValueError): - exists = False - - if exists: - fh, handles = _get_handle(filepath_or_buffer, 'r', - encoding=encoding) - json = fh.read() - fh.close() + + json_reader = JsonReader( + filepath_or_buffer, orient=orient, typ=typ, dtype=dtype, + convert_axes=convert_axes, convert_dates=convert_dates, + keep_default_dates=keep_default_dates, numpy=numpy, + precise_float=precise_float, date_unit=date_unit, encoding=encoding, + lines=lines, chunksize=chunksize + ) + + if chunksize: + return json_reader + + return json_reader.read() + + +class JsonReader(BaseIterator): + """ + JsonReader provides an interface for reading in a JSON file. + + If initialized with ``lines=True`` and ``chunksize``, can be iterated over + ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the + whole document. 
+ """ + def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes, + convert_dates, keep_default_dates, numpy, precise_float, + date_unit, encoding, lines, chunksize): + + self.path_or_buf = filepath_or_buffer + self.orient = orient + self.typ = typ + self.dtype = dtype + self.convert_axes = convert_axes + self.convert_dates = convert_dates + self.keep_default_dates = keep_default_dates + self.numpy = numpy + self.precise_float = precise_float + self.date_unit = date_unit + self.encoding = encoding + self.lines = lines + self.chunksize = chunksize + self.nrows_seen = 0 + self.should_close = False + + if self.chunksize is not None: + self.chunksize = _validate_integer("chunksize", self.chunksize, 1) + if not self.lines: + raise ValueError("chunksize can only be passed if lines=True") + + data = self._get_data_from_filepath(filepath_or_buffer) + self.data = self._preprocess_data(data) + + def _preprocess_data(self, data): + """ + At this point, the data either has a `read` attribute (e.g. a file + object or a StringIO) or is a string that is a JSON document. + + If self.chunksize, we prepare the data for the `__next__` method. + Otherwise, we read it into memory for the `read` method. + """ + if hasattr(data, 'read') and not self.chunksize: + data = data.read() + if not hasattr(data, 'read') and self.chunksize: + data = StringIO(data) + + return data + + def _get_data_from_filepath(self, filepath_or_buffer): + """ + read_json accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, StringIO) + 3. JSON string + + This method turns (1) into (2) to simplify the rest of the processing. + It returns input types (2) and (3) unchanged. + """ + + data = filepath_or_buffer + + if isinstance(data, compat.string_types): + try: + exists = os.path.exists(filepath_or_buffer) + + # gh-5874: if the filepath is too long will raise here + except (TypeError, ValueError): + pass + + else: + if exists: + data, _ = _get_handle(filepath_or_buffer, 'r', + encoding=self.encoding) + self.should_close = True + self.open_stream = data + + return data + + def _combine_lines(self, lines): + """Combines a list of JSON objects into one JSON object""" + lines = filter(None, map(lambda x: x.strip(), lines)) + return '[' + ','.join(lines) + ']' + + def read(self): + """Read the whole JSON input into a pandas object""" + if self.lines and self.chunksize: + obj = concat(self) + elif self.lines: + obj = self._get_object_parser( + self._combine_lines(self.data.split('\n')) + ) else: - json = filepath_or_buffer - elif hasattr(filepath_or_buffer, 'read'): - json = filepath_or_buffer.read() - else: - json = filepath_or_buffer + obj = self._get_object_parser(self.data) + self.close() + return obj + + def _get_object_parser(self, json): + """parses a json document into a pandas object""" + typ = self.typ + dtype = self.dtype + kwargs = { + "orient": self.orient, "dtype": self.dtype, + "convert_axes": self.convert_axes, + "convert_dates": self.convert_dates, + "keep_default_dates": self.keep_default_dates, "numpy": self.numpy, + "precise_float": self.precise_float, "date_unit": self.date_unit + } + obj = None + if typ == 'frame': + obj = FrameParser(json, **kwargs).parse() + + if typ == 'series' or obj is None: + if not isinstance(dtype, bool): + dtype = dict(data=dtype) + obj = SeriesParser(json, **kwargs).parse() + + return obj + + def close(self): + """ + If we opened a stream earlier, in _get_data_from_filepath, we should + close it. 
If an open stream or file was passed, we leave it open. + """ + if self.should_close: + try: + self.open_stream.close() + except (IOError, AttributeError): + pass - if lines: - # If given a json lines file, we break the string into lines, add - # commas and put it in a json list to make a valid json object. - lines = list(StringIO(json.strip())) - json = '[' + ','.join(lines) + ']' - - obj = None - if typ == 'frame': - obj = FrameParser(json, orient, dtype, convert_axes, convert_dates, - keep_default_dates, numpy, precise_float, - date_unit).parse() - - if typ == 'series' or obj is None: - if not isinstance(dtype, bool): - dtype = dict(data=dtype) - obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates, - keep_default_dates, numpy, precise_float, - date_unit).parse() - - return obj + def __next__(self): + lines = list(islice(self.data, self.chunksize)) + if lines: + lines_json = self._combine_lines(lines) + obj = self._get_object_parser(lines_json) + + # Make sure that the returned objects have the right index. + obj.index = range(self.nrows_seen, self.nrows_seen + len(obj)) + self.nrows_seen += len(obj) + + return obj + + self.close() + raise StopIteration class Parser(object): diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 671d4248818e4..de4afec883efd 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -985,53 +985,6 @@ def test_tz_range_is_utc(self): df = DataFrame({'DT': dti}) assert dumps(df, iso_dates=True) == dfexp - def test_read_jsonl(self): - # GH9180 - result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) - expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) - assert_frame_equal(result, expected) - - def test_read_jsonl_unicode_chars(self): - # GH15132: non-ascii unicode characters - # \u201d == RIGHT DOUBLE QUOTATION MARK - - # simulate file handle - json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' - json = StringIO(json) - result = read_json(json, lines=True) - expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], - columns=['a', 'b']) - assert_frame_equal(result, expected) - - # simulate string - json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' - result = read_json(json, lines=True) - expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], - columns=['a', 'b']) - assert_frame_equal(result, expected) - - def test_to_jsonl(self): - # GH9180 - df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) - result = df.to_json(orient="records", lines=True) - expected = '{"a":1,"b":2}\n{"a":1,"b":2}' - assert result == expected - - df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b']) - result = df.to_json(orient="records", lines=True) - expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' - assert result == expected - assert_frame_equal(pd.read_json(result, lines=True), df) - - # GH15096: escaped characters in columns and data - df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], - columns=["a\\", 'b']) - result = df.to_json(orient="records", lines=True) - expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n' - '{"a\\\\":"foo\\"","b":"bar"}') - assert result == expected - assert_frame_equal(pd.read_json(result, lines=True), df) - def test_latin_encoding(self): if compat.PY2: tm.assert_raises_regex( diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py new file mode 100644 index 0000000000000..d14355b07cf20 --- /dev/null +++ b/pandas/tests/io/json/test_readlines.py @@ 
-0,0 +1,168 @@ +# -*- coding: utf-8 -*- +import pytest +import pandas as pd +from pandas import DataFrame, read_json +from pandas.compat import StringIO +from pandas.io.json.json import JsonReader +import pandas.util.testing as tm +from pandas.util.testing import (assert_frame_equal, assert_series_equal, + ensure_clean) + + +@pytest.fixture +def lines_json_df(): + df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + return df.to_json(lines=True, orient="records") + + +def test_read_jsonl(): + # GH9180 + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + +def test_read_jsonl_unicode_chars(): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + # simulate string + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + +def test_to_jsonl(): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + assert result == expected + assert_frame_equal(read_json(result, lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], + columns=["a\\", 'b']) + result = df.to_json(orient="records", lines=True) + expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n' + '{"a\\\\":"foo\\"","b":"bar"}') + assert result == expected + assert_frame_equal(read_json(result, lines=True), df) + + +@pytest.mark.parametrize("chunksize", [1, 1.0]) +def test_readjson_chunks(lines_json_df, chunksize): + # Basic test that read_json(chunks=True) gives the same result as + # read_json(chunks=False) + # GH17048: memory usage when lines=True + + unchunked = read_json(StringIO(lines_json_df), lines=True) + reader = read_json(StringIO(lines_json_df), lines=True, + chunksize=chunksize) + chunked = pd.concat(reader) + + assert_frame_equal(chunked, unchunked) + + +def test_readjson_chunksize_requires_lines(lines_json_df): + msg = "chunksize can only be passed if lines=True" + with tm.assert_raises_regex(ValueError, msg): + pd.read_json(StringIO(lines_json_df), lines=False, chunksize=2) + + +def test_readjson_chunks_series(): + # Test reading line-format JSON to Series with chunksize param + s = pd.Series({'A': 1, 'B': 2}) + + strio = StringIO(s.to_json(lines=True, orient="records")) + unchunked = pd.read_json(strio, lines=True, typ='Series') + + strio = StringIO(s.to_json(lines=True, orient="records")) + chunked = pd.concat(pd.read_json( + strio, lines=True, typ='Series', chunksize=1 + )) + + assert_series_equal(chunked, unchunked) + + +def test_readjson_each_chunk(lines_json_df): + # Other tests check that the final result of read_json(chunksize=True) + # is correct. This checks the intermediate chunks. 
+ chunks = list( + pd.read_json(StringIO(lines_json_df), lines=True, chunksize=2) + ) + assert chunks[0].shape == (2, 2) + assert chunks[1].shape == (1, 2) + + +def test_readjson_chunks_from_file(): + with ensure_clean('test.json') as path: + df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + df.to_json(path, lines=True, orient="records") + chunked = pd.concat(pd.read_json(path, lines=True, chunksize=1)) + unchunked = pd.read_json(path, lines=True) + assert_frame_equal(unchunked, chunked) + + +@pytest.mark.parametrize("chunksize", [None, 1]) +def test_readjson_chunks_closes(chunksize): + with ensure_clean('test.json') as path: + df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + df.to_json(path, lines=True, orient="records") + reader = JsonReader( + path, orient=None, typ="frame", dtype=True, convert_axes=True, + convert_dates=True, keep_default_dates=True, numpy=False, + precise_float=False, date_unit=None, encoding=None, + lines=True, chunksize=chunksize) + reader.read() + assert reader.open_stream.closed, "didn't close stream with \ + chunksize = %s" % chunksize + + +@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"]) +def test_readjson_invalid_chunksize(lines_json_df, chunksize): + msg = r"'chunksize' must be an integer >=1" + + with tm.assert_raises_regex(ValueError, msg): + pd.read_json(StringIO(lines_json_df), lines=True, + chunksize=chunksize) + + +@pytest.mark.parametrize("chunksize", [None, 1, 2]) +def test_readjson_chunks_multiple_empty_lines(chunksize): + j = """ + + {"A":1,"B":4} + + + + {"A":2,"B":5} + + + + + + + + {"A":3,"B":6} + """ + orig = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + test = pd.read_json(j, lines=True, chunksize=chunksize) + if chunksize is not None: + test = pd.concat(test) + tm.assert_frame_equal(orig, test, obj="chunksize: %s" % chunksize)
Previous behavior: read the whole file into memory, then split it into lines.

New behavior: if `lines=True` and `chunksize` is passed, read `chunksize` lines at a time and concat the chunks. This only covers some kinds of input to `read_json`. When `chunksize` is passed, `read_json` becomes slower but more memory-efficient.

Closes #17048. Tests and style-check pass; tests for the new behavior live in `pandas/tests/io/json/test_readlines.py`.
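Minimal usage of the new parameter (mirroring the docs added above):

```python
import pandas as pd
from io import StringIO

jsonl = '{"a": 1, "b": 2}\n{"a": 3, "b": 4}\n'

# with chunksize, read_json returns a JsonReader instead of a DataFrame;
# each iteration yields a frame built from `chunksize` lines
reader = pd.read_json(StringIO(jsonl), lines=True, chunksize=1)
df = pd.concat(reader)
print(df)
```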
https://api.github.com/repos/pandas-dev/pandas/pulls/17168
2017-08-03T21:20:14Z
2017-09-28T23:42:02Z
2017-09-28T23:42:02Z
2017-09-29T05:06:55Z
Implement _make_accessor classmethod for PandasDelegate
diff --git a/pandas/core/base.py b/pandas/core/base.py index eb785b18bd02b..8f21e3125a27e 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -165,6 +165,12 @@ def __setattr__(self, key, value): class PandasDelegate(PandasObject): """ an abstract base class for delegating methods/properties """ + @classmethod + def _make_accessor(cls, data): + raise AbstractMethodError("_make_accessor should be implemented" + "by subclass and return an instance" + "of `cls`.") + def _delegate_property_get(self, name, *args, **kwargs): raise TypeError("You cannot access the " "property {name}".format(name=name)) @@ -231,9 +237,10 @@ class AccessorProperty(object): """Descriptor for implementing accessor properties like Series.str """ - def __init__(self, accessor_cls, construct_accessor): + def __init__(self, accessor_cls, construct_accessor=None): self.accessor_cls = accessor_cls - self.construct_accessor = construct_accessor + self.construct_accessor = (construct_accessor or + accessor_cls._make_accessor) self.__doc__ = accessor_cls.__doc__ def __get__(self, instance, owner=None): diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 1392ad2f011db..230361931125e 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2061,6 +2061,13 @@ def _delegate_method(self, name, *args, **kwargs): if res is not None: return Series(res, index=self.index) + @classmethod + def _make_accessor(cls, data): + if not is_categorical_dtype(data.dtype): + raise AttributeError("Can only use .cat accessor with a " + "'category' dtype") + return CategoricalAccessor(data.values, data.index) + CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=["categories", diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index f1fb9a8ad93a7..ce3143b342cec 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -243,3 +243,11 @@ class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties): # the Series.dt class property. For Series objects, .dt will always be one # of the more specific classes above. 
__doc__ = DatetimeProperties.__doc__ + + @classmethod + def _make_accessor(cls, data): + try: + return maybe_to_datetimelike(data) + except Exception: + raise AttributeError("Can only use .dt accessor with " + "datetimelike values") diff --git a/pandas/core/series.py b/pandas/core/series.py index 60d268c89a9d7..5f76fe1bdf7c7 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -55,8 +55,7 @@ from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical, CategoricalAccessor import pandas.core.strings as strings -from pandas.core.indexes.accessors import ( - maybe_to_datetimelike, CombinedDatetimelikeProperties) +from pandas.core.indexes.accessors import CombinedDatetimelikeProperties from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexes.period import PeriodIndex @@ -2912,27 +2911,11 @@ def to_period(self, freq=None, copy=True): # ------------------------------------------------------------------------- # Datetimelike delegation methods - - def _make_dt_accessor(self): - try: - return maybe_to_datetimelike(self) - except Exception: - raise AttributeError("Can only use .dt accessor with datetimelike " - "values") - - dt = base.AccessorProperty(CombinedDatetimelikeProperties, - _make_dt_accessor) + dt = base.AccessorProperty(CombinedDatetimelikeProperties) # ------------------------------------------------------------------------- # Categorical methods - - def _make_cat_accessor(self): - if not is_categorical_dtype(self.dtype): - raise AttributeError("Can only use .cat accessor with a " - "'category' dtype") - return CategoricalAccessor(self.values, self.index) - - cat = base.AccessorProperty(CategoricalAccessor, _make_cat_accessor) + cat = base.AccessorProperty(CategoricalAccessor) def _dir_deletions(self): return self._accessors diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 30465561a911c..0b1db0277eee3 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1890,18 +1890,14 @@ def rindex(self, sub, start=0, end=None): docstring=_shared_docs['ismethods'] % _shared_docs['isdecimal']) - -class StringAccessorMixin(object): - """ Mixin to add a `.str` acessor to the class.""" - - # string methods - def _make_str_accessor(self): + @classmethod + def _make_accessor(cls, data): from pandas.core.index import Index - if (isinstance(self, ABCSeries) and - not ((is_categorical_dtype(self.dtype) and - is_object_dtype(self.values.categories)) or - (is_object_dtype(self.dtype)))): + if (isinstance(data, ABCSeries) and + not ((is_categorical_dtype(data.dtype) and + is_object_dtype(data.values.categories)) or + (is_object_dtype(data.dtype)))): # it's neither a string series not a categorical series with # strings inside the categories. # this really should exclude all series with any non-string values @@ -1910,23 +1906,27 @@ def _make_str_accessor(self): raise AttributeError("Can only use .str accessor with string " "values, which use np.object_ dtype in " "pandas") - elif isinstance(self, Index): + elif isinstance(data, Index): # can't use ABCIndex to exclude non-str # see scc/inferrence.pyx which can contain string values allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer') - if self.inferred_type not in allowed_types: + if data.inferred_type not in allowed_types: message = ("Can only use .str accessor with string values " "(i.e. 
inferred_type is 'string', 'unicode' or " "'mixed')") raise AttributeError(message) - if self.nlevels > 1: + if data.nlevels > 1: message = ("Can only use .str accessor with Index, not " "MultiIndex") raise AttributeError(message) - return StringMethods(self) + return StringMethods(data) + + +class StringAccessorMixin(object): + """ Mixin to add a `.str` acessor to the class.""" - str = AccessorProperty(StringMethods, _make_str_accessor) + str = AccessorProperty(StringMethods) def _dir_additions(self): return set()
This is an absolutely minimal subset of the refactor in #17042. It should be obvious and uncontroversial.

- Define `_make_accessor` as a classmethod of `PandasDelegate`.
- Make the `AccessorProperty.__init__` argument `construct_accessor` default to `accessor_cls._make_accessor`.
- Remove the methods `_make_cat_accessor` and `_make_dt_accessor` from `Series`, as they do not belong in that namespace; remove `_make_str_accessor` from both the `Series` and `Index` namespaces.

(A toy sketch of the resulting pattern is below the checklist.)

- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [ ] whatsnew entry
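A toy version of the resulting pattern — the descriptor mirrors the diff above; `Thing`/`UpperAccessor` are hypothetical stand-ins for `Series`/`StringMethods`:

```python
class AccessorProperty(object):
    """Descriptor for accessor properties; defaults to cls._make_accessor."""

    def __init__(self, accessor_cls, construct_accessor=None):
        self.accessor_cls = accessor_cls
        self.construct_accessor = (construct_accessor or
                                   accessor_cls._make_accessor)
        self.__doc__ = accessor_cls.__doc__

    def __get__(self, instance, owner=None):
        if instance is None:
            # accessed on the class -> return the accessor class itself
            return self.accessor_cls
        return self.construct_accessor(instance)


class UpperAccessor(object):
    """Stand-in for StringMethods / CategoricalAccessor / ..."""

    def __init__(self, data):
        self._data = data

    @classmethod
    def _make_accessor(cls, data):
        # validation lives with the accessor, not in the host namespace
        if not isinstance(data.value, str):
            raise AttributeError("Can only use .up with string values")
        return cls(data)

    def shout(self):
        return self._data.value.upper()


class Thing(object):
    def __init__(self, value):
        self.value = value

    up = AccessorProperty(UpperAccessor)  # no second argument needed


print(Thing('abc').up.shout())  # ABC
```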
https://api.github.com/repos/pandas-dev/pandas/pulls/17166
2017-08-03T17:09:20Z
2017-08-08T23:49:00Z
2017-08-08T23:49:00Z
2017-12-08T19:41:10Z
Create ABCDateOffset
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 90608c18ae503..618bcf6495155 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -52,6 +52,8 @@ def _check(cls, inst): ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")) ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", )) +ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ", + ("dateoffset",)) class _ABCGeneric(type): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 4aecc75d95971..de6221987a59a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -13,7 +13,11 @@ from pandas import compat -from pandas.core.dtypes.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex +from pandas.core.dtypes.generic import ( + ABCSeries, + ABCMultiIndex, + ABCPeriodIndex, + ABCDateOffset) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.core.dtypes.common import ( _ensure_int64, @@ -3814,8 +3818,6 @@ def _validate_for_numeric_binop(self, other, op, opstr): internal method called by ops """ - from pandas.tseries.offsets import DateOffset - # if we are an inheritor of numeric, # but not actually numeric (e.g. DatetimeIndex/PeriodInde) if not self._is_numeric_dtype: @@ -3843,7 +3845,7 @@ def _validate_for_numeric_binop(self, other, op, opstr): if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") - elif isinstance(other, (DateOffset, np.timedelta64, + elif isinstance(other, (ABCDateOffset, np.timedelta64, Timedelta, datetime.timedelta)): # higher up to handle pass @@ -3862,12 +3864,10 @@ def _add_numeric_methods_binary(cls): def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index): def _evaluate_numeric_binop(self, other): - - from pandas.tseries.offsets import DateOffset other = self._validate_for_numeric_binop(other, op, opstr) # handle time-based others - if isinstance(other, (DateOffset, np.timedelta64, + if isinstance(other, (ABCDateOffset, np.timedelta64, Timedelta, datetime.timedelta)): return self._evaluate_with_timedelta_like(other, op, opstr) elif isinstance(other, (Timestamp, np.datetime64)): diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 4e08e1483d617..82101414e4aa6 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -35,7 +35,11 @@ is_scalar, _ensure_object) from pandas.core.dtypes.cast import maybe_upcast_putmask, find_common_type -from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCPeriodIndex +from pandas.core.dtypes.generic import ( + ABCSeries, + ABCIndex, + ABCPeriodIndex, + ABCDateOffset) # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory @@ -605,10 +609,10 @@ def f(x): def _is_offset(self, arr_or_obj): """ check if obj or all elements of list-like is DateOffset """ - if isinstance(arr_or_obj, pd.DateOffset): + if isinstance(arr_or_obj, ABCDateOffset): return True elif is_list_like(arr_or_obj) and len(arr_or_obj): - return all(isinstance(x, pd.DateOffset) for x in arr_or_obj) + return all(isinstance(x, ABCDateOffset) for x in arr_or_obj) return False diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index a1f323aff7c1a..eebf78d7619eb 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -17,7 +17,7 @@ is_numeric_dtype) from pandas.core.dtypes.generic import ( ABCIndexClass, 
ABCSeries, - ABCDataFrame) + ABCDataFrame, ABCDateOffset) from pandas.core.dtypes.missing import notna from pandas.core import algorithms @@ -720,8 +720,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): if not isinstance(arg, compat.string_types): return arg - from pandas.tseries.offsets import DateOffset - if isinstance(freq, DateOffset): + if isinstance(freq, ABCDateOffset): freq = freq.rule_code if dayfirst is None: diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index ec850cc34e23b..82444d6c94157 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -40,6 +40,12 @@ def test_abc_types(self): assert isinstance(self.categorical, gt.ABCCategorical) assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod) + assert isinstance(pd.DateOffset(), gt.ABCDateOffset) + assert isinstance(pd.Period('2012', freq='A-DEC').freq, + gt.ABCDateOffset) + assert not isinstance(pd.Period('2012', freq='A-DEC'), + gt.ABCDateOffset) + def test_setattr_warnings(): # GH5904 - Suggestion: Warning for DataFrame colname-methodname clash diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 2a120a0696836..56ef703e67ca0 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -184,6 +184,7 @@ def __add__(date): ) _use_relativedelta = False _adjust_dst = False + _typ = "dateoffset" # default for prior pickles normalize = False
`DateOffset` is one of a small number of commonly-used classes for which no `ABCFoo` class exists. Putting this in place is cheap and gives us an option to use it down the road.

Replaced `isinstance(x, DateOffset)` with `isinstance(x, ABCDateOffset)` in a few places where doing so lets us avoid run-time imports. Other than that this holds off on actually _using_ `ABCDateOffset`, as the `ABCFoo` isinstance checks do appear to be appreciably slower than the non-ABC versions. (A sketch of how these ABC checks work is below the checklist.)

- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [ ] whatsnew entry
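Roughly how these ABCs are built in `pandas.core.dtypes.generic` — `isinstance` reduces to a cheap attribute check, so no import of the concrete class is needed at call time (simplified sketch, not the exact source):

```python
def create_pandas_abc_type(name, attr, comp):
    # isinstance(x, ABCFoo) just inspects x.<attr>; nothing is imported
    def _check(cls, inst):
        return getattr(inst, attr, None) in comp

    dct = {'__instancecheck__': classmethod(_check),
           '__subclasscheck__': classmethod(_check)}
    meta = type('ABCBase', (type,), dct)
    return meta(name, tuple(), dct)


ABCDateOffset = create_pandas_abc_type('ABCDateOffset', '_typ',
                                       ('dateoffset',))


class DateOffset(object):
    _typ = 'dateoffset'  # the attribute this PR adds to tseries.offsets


print(isinstance(DateOffset(), ABCDateOffset))      # True
print(isinstance('not an offset', ABCDateOffset))   # False
```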
https://api.github.com/repos/pandas-dev/pandas/pulls/17165
2017-08-03T16:47:10Z
2017-08-09T10:27:35Z
2017-08-09T10:27:35Z
2017-10-30T16:24:26Z
ENH: Support strings containing '%' in add_prefix/add_suffix (#17151)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 2a2e08c2ccf5d..cb906374840c4 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -81,6 +81,7 @@ Other Enhancements - :func:`date_range` now accepts 'YS' in addition to 'AS' as an alias for start of year (:issue:`9313`) - :func:`date_range` now accepts 'Y' in addition to 'A' as an alias for end of year (:issue:`9313`) - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` and :func:`DataFrame.to_parquet` method, see :ref:`here <io.parquet>`. +- :func:`DataFrame.add_prefix` and :func:`DataFrame.add_suffix` now accept strings containing the '%' character. (:issue:`17151`) .. _whatsnew_0210.api_breaking: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 25c367fcbd968..37fc1c01061ec 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -5,6 +5,7 @@ import operator from datetime import datetime, timedelta, date from collections import defaultdict +from functools import partial import numpy as np @@ -2959,11 +2960,11 @@ def rename_axis(self, mapper, axis, copy=True, level=None): return obj def add_prefix(self, prefix): - f = (str(prefix) + '%s').__mod__ + f = partial('{prefix}{}'.format, prefix=prefix) return self.rename_axis(f, axis=0) def add_suffix(self, suffix): - f = ('%s' + str(suffix)).__mod__ + f = partial('{}{suffix}'.format, suffix=suffix) return self.rename_axis(f, axis=0) @property diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index f63918c97c614..8c4c13b66ffa9 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -68,6 +68,14 @@ def test_add_prefix_suffix(self): expected = pd.Index(['%s#foo' % c for c in self.frame.columns]) tm.assert_index_equal(with_suffix.columns, expected) + with_pct_prefix = self.frame.add_prefix('%') + expected = pd.Index(['%{}'.format(c) for c in self.frame.columns]) + tm.assert_index_equal(with_pct_prefix.columns, expected) + + with_pct_suffix = self.frame.add_suffix('%') + expected = pd.Index(['{}%'.format(c) for c in self.frame.columns]) + tm.assert_index_equal(with_pct_suffix.columns, expected) + class TestDataFrameMisc(SharedWithSparse, TestData):
- [X] closes #17151
- [X] tests added / passed
- [X] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [X] whatsnew entry

Updated the `DataFrame.add_prefix` and `DataFrame.add_suffix` methods to use the new-style `.format` syntax for string formatting. These previously used old-style `%` formatting, which raises when the prefix/suffix to add contains the `'%'` character.
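The failure mode and the fix in miniature — old-style `%` formatting parses a `%` in the affix as a format directive, while `str.format` treats it as opaque text:

```python
from functools import partial

# old: what add_suffix('%') used to build
f = ('%s' + '%').__mod__
try:
    f('col')
except ValueError as exc:
    print(exc)  # incomplete format

# new: str.format leaves the suffix alone
g = partial('{}{suffix}'.format, suffix='%')
print(g('col'))  # col%
```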
https://api.github.com/repos/pandas-dev/pandas/pulls/17162
2017-08-03T05:08:23Z
2017-08-03T21:06:06Z
2017-08-03T21:06:06Z
2017-08-08T12:12:14Z
BUG: Rolling apply on DataFrame with Datetime index returns NaN
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index cc9ab81ce0955..6aee779151003 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -349,6 +349,8 @@ Groupby/Resample/Rolling - Bug in ``.rolling(...).quantile()`` which incorrectly used different defaults than :func:`Series.quantile()` and :func:`DataFrame.quantile()` (:issue:`9413`, :issue:`16211`) - Bug in ``groupby.transform()`` that would coerce boolean dtypes back to float (:issue:`16875`) - Bug in ``Series.resample(...).apply()`` where an empty ``Series`` modified the source index and did not return the name of a ``Series`` (:issue:`14313`) +- Bug in ``.rolling(...).apply(...)`` with a ``DataFrame`` with a ``DatetimeIndex``, a ``window`` of a timedelta-convertible and ``min_periods >= 1` (:issue:`15305`) + Sparse ^^^^^^ diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 2450eea5500cd..bdd371871b6e1 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1428,15 +1428,16 @@ def roll_generic(ndarray[float64_t, cast=True] input, if n == 0: return input + counts = roll_sum(np.concatenate([np.isfinite(input).astype(float), + np.array([0.] * offset)]), + win, minp, index, closed)[offset:] + start, end, N, win, minp, is_variable = get_window_indexer(input, win, minp, index, closed, floor=0) - output = np.empty(N, dtype=float) - counts = roll_sum(np.concatenate([np.isfinite(input).astype(float), - np.array([0.] * offset)]), - win, minp, index, closed)[offset:] + output = np.empty(N, dtype=float) if is_variable: diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 5ab33bd6cc5e1..d94e34c41786b 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -423,6 +423,26 @@ def test_constructor_with_timedelta_window(self): expected = df.rolling('3D').sum() tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( + 'window', [timedelta(days=3), pd.Timedelta(days=3), '3D']) + def test_constructor_with_timedelta_window_and_minperiods(self, window): + # GH 15305 + n = 10 + df = pd.DataFrame({'value': np.arange(n)}, + index=pd.date_range('2017-08-08', + periods=n, + freq="D")) + expected = pd.DataFrame({'value': np.append([np.NaN, 1.], + np.arange(3., 27., 3))}, + index=pd.date_range('2017-08-08', + periods=n, + freq="D")) + result_roll_sum = df.rolling(window=window, min_periods=2).sum() + result_roll_generic = df.rolling(window=window, + min_periods=2).apply(sum) + tm.assert_frame_equal(result_roll_sum, expected) + tm.assert_frame_equal(result_roll_generic, expected) + def test_numpy_compat(self): # see gh-12811 r = rwindow.Rolling(Series([2, 4, 6]), window=2)
- [x] closes #15305
- [x] changing the order of calling `roll_sum` and `get_window_indexer` in `roll_generic` fixes #15305

The bug is caused by the return values of `get_window_indexer`: it changes the value of `win`, which is not supposed to be passed on to `roll_sum`.
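A repro distilled from the new test — before the fix, ``apply`` returned all-``NaN`` while ``sum`` was already correct:

```python
import numpy as np
import pandas as pd

n = 10
df = pd.DataFrame({'value': np.arange(n)},
                  index=pd.date_range('2017-08-08', periods=n, freq='D'))

roll_sum = df.rolling(window='3D', min_periods=2).sum()
roll_apply = df.rolling(window='3D', min_periods=2).apply(sum)

# expected values for both: NaN, 1, 3, 6, 9, ...
print(roll_sum.equals(roll_apply))  # True once the ordering is fixed
```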
https://api.github.com/repos/pandas-dev/pandas/pulls/17156
2017-08-02T20:29:11Z
2017-08-10T12:32:47Z
2017-08-10T12:32:47Z
2017-08-10T12:32:50Z
TST: test for categorical index monotonicity
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 411428e001c81..4aecc75d95971 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1195,7 +1195,7 @@ def _mpl_repr(self): @property def is_monotonic(self): """ alias for is_monotonic_increasing (deprecated) """ - return self._engine.is_monotonic_increasing + return self.is_monotonic_increasing @property def is_monotonic_increasing(self): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e8427f847dd2d..ac4698b570d17 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -316,10 +316,19 @@ def _engine(self): # we are going to look things up with the codes themselves return self._engine_type(lambda: self.codes.astype('i8'), len(self)) + # introspection @cache_readonly def is_unique(self): return not self.duplicated().any() + @property + def is_monotonic_increasing(self): + return Index(self.codes).is_monotonic_increasing + + @property + def is_monotonic_decreasing(self): + return Index(self.codes).is_monotonic_decreasing + @Appender(base._shared_docs['unique'] % _index_doc_kwargs) def unique(self): result = base.IndexOpsMixin.unique(self) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e2777cb56374e..2f4e437c0ae61 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -28,7 +28,8 @@ try: import bottleneck as bn ver = bn.__version__ - _BOTTLENCK_INSTALLED = ver >= LooseVersion(_MIN_BOTTLENECK_VERSION) + _BOTTLENECK_INSTALLED = (LooseVersion(ver) >= + LooseVersion(_MIN_BOTTLENECK_VERSION)) if not _BOTTLENECK_INSTALLED: warnings.warn( diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index a3d72fdb88239..64bd6df361aeb 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -427,6 +427,38 @@ def test_reindex_empty_index(self): tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) + def test_is_monotonic(self): + c = CategoricalIndex([1, 2, 3]) + assert c.is_monotonic_increasing + assert not c.is_monotonic_decreasing + + c = CategoricalIndex([1, 2, 3], ordered=True) + assert c.is_monotonic_increasing + assert not c.is_monotonic_decreasing + + c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1]) + assert not c.is_monotonic_increasing + assert c.is_monotonic_decreasing + + c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1]) + assert not c.is_monotonic_increasing + assert not c.is_monotonic_decreasing + + c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True) + assert not c.is_monotonic_increasing + assert c.is_monotonic_decreasing + + # non lexsorted categories + categories = [9, 0, 1, 2, 3] + + c = CategoricalIndex([9, 0], categories=categories) + assert c.is_monotonic_increasing + assert not c.is_monotonic_decreasing + + c = CategoricalIndex([0, 1], categories=categories) + assert c.is_monotonic_increasing + assert not c.is_monotonic_decreasing + def test_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo')
- xref occasional failures here: https://travis-ci.org/MacPython/pandas-wheels/jobs/259998677
- fixes the incorrect bottleneck version detection from #17142

(A small example of the monotonicity semantics being tested is below.)
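The semantics being pinned down: monotonicity of a ``CategoricalIndex`` follows the *codes* (i.e. the category order), not the raw values:

```python
import pandas as pd

c = pd.CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
print(c.is_monotonic_increasing)  # False: the codes are [2, 1, 0]
print(c.is_monotonic_decreasing)  # True

# non-lexsorted categories: [9, 0] is "increasing" because its codes are [0, 1]
c = pd.CategoricalIndex([9, 0], categories=[9, 0, 1, 2, 3])
print(c.is_monotonic_increasing)  # True
```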
https://api.github.com/repos/pandas-dev/pandas/pulls/17152
2017-08-02T10:17:42Z
2017-08-03T01:02:13Z
2017-08-03T01:02:13Z
2017-08-03T01:03:30Z
DOC: Additions/updates to documentation
diff --git a/README.md b/README.md index dc74828ba9863..ac043f5586498 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ <tr> <td>Conda</td> <td> - <a href="http://pandas.pydata.org"> + <a href="https://pandas.pydata.org"> <img src="http://pubbadges.s3-website-us-east-1.amazonaws.com/pkgs-downloads-pandas.png" alt="conda default downloads" /> </a> </td> @@ -61,7 +61,7 @@ <tr> <td>Conda-forge</td> <td> - <a href="http://pandas.pydata.org"> + <a href="https://pandas.pydata.org"> <img src="https://anaconda.org/conda-forge/pandas/badges/downloads.svg" alt="conda-forge downloads" /> </a> </td> @@ -123,31 +123,31 @@ Here are just a few of the things that pandas does well: moving window linear regressions, date shifting and lagging, etc. - [missing-data]: http://pandas.pydata.org/pandas-docs/stable/missing_data.html#working-with-missing-data - [insertion-deletion]: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#column-selection-addition-deletion - [alignment]: http://pandas.pydata.org/pandas-docs/stable/dsintro.html?highlight=alignment#intro-to-data-structures - [groupby]: http://pandas.pydata.org/pandas-docs/stable/groupby.html#group-by-split-apply-combine - [conversion]: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe - [slicing]: http://pandas.pydata.org/pandas-docs/stable/indexing.html#slicing-ranges - [fancy-indexing]: http://pandas.pydata.org/pandas-docs/stable/indexing.html#advanced-indexing-with-ix - [subsetting]: http://pandas.pydata.org/pandas-docs/stable/indexing.html#boolean-indexing - [merging]: http://pandas.pydata.org/pandas-docs/stable/merging.html#database-style-dataframe-joining-merging - [joining]: http://pandas.pydata.org/pandas-docs/stable/merging.html#joining-on-index - [reshape]: http://pandas.pydata.org/pandas-docs/stable/reshaping.html#reshaping-and-pivot-tables - [pivot-table]: http://pandas.pydata.org/pandas-docs/stable/reshaping.html#pivot-tables-and-cross-tabulations - [mi]: http://pandas.pydata.org/pandas-docs/stable/indexing.html#hierarchical-indexing-multiindex - [flat-files]: http://pandas.pydata.org/pandas-docs/stable/io.html#csv-text-files - [excel]: http://pandas.pydata.org/pandas-docs/stable/io.html#excel-files - [db]: http://pandas.pydata.org/pandas-docs/stable/io.html#sql-queries - [hdfstore]: http://pandas.pydata.org/pandas-docs/stable/io.html#hdf5-pytables - [timeseries]: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#time-series-date-functionality + [missing-data]: https://pandas.pydata.org/pandas-docs/stable/missing_data.html#working-with-missing-data + [insertion-deletion]: https://pandas.pydata.org/pandas-docs/stable/dsintro.html#column-selection-addition-deletion + [alignment]: https://pandas.pydata.org/pandas-docs/stable/dsintro.html?highlight=alignment#intro-to-data-structures + [groupby]: https://pandas.pydata.org/pandas-docs/stable/groupby.html#group-by-split-apply-combine + [conversion]: https://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe + [slicing]: https://pandas.pydata.org/pandas-docs/stable/indexing.html#slicing-ranges + [fancy-indexing]: https://pandas.pydata.org/pandas-docs/stable/indexing.html#advanced-indexing-with-ix + [subsetting]: https://pandas.pydata.org/pandas-docs/stable/indexing.html#boolean-indexing + [merging]: https://pandas.pydata.org/pandas-docs/stable/merging.html#database-style-dataframe-joining-merging + [joining]: https://pandas.pydata.org/pandas-docs/stable/merging.html#joining-on-index + [reshape]: 
https://pandas.pydata.org/pandas-docs/stable/reshaping.html#reshaping-and-pivot-tables + [pivot-table]: https://pandas.pydata.org/pandas-docs/stable/reshaping.html#pivot-tables-and-cross-tabulations + [mi]: https://pandas.pydata.org/pandas-docs/stable/indexing.html#hierarchical-indexing-multiindex + [flat-files]: https://pandas.pydata.org/pandas-docs/stable/io.html#csv-text-files + [excel]: https://pandas.pydata.org/pandas-docs/stable/io.html#excel-files + [db]: https://pandas.pydata.org/pandas-docs/stable/io.html#sql-queries + [hdfstore]: https://pandas.pydata.org/pandas-docs/stable/io.html#hdf5-pytables + [timeseries]: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#time-series-date-functionality ## Where to get it The source code is currently hosted on GitHub at: -http://github.com/pandas-dev/pandas +https://github.com/pandas-dev/pandas Binary installers for the latest released version are available at the [Python -package index](http://pypi.python.org/pypi/pandas/) and on conda. +package index](https://pypi.python.org/pypi/pandas) and on conda. ```sh # conda @@ -161,11 +161,11 @@ pip install pandas ## Dependencies - [NumPy](http://www.numpy.org): 1.7.0 or higher -- [python-dateutil](http://labix.org/python-dateutil): 1.5 or higher -- [pytz](http://pytz.sourceforge.net) +- [python-dateutil](https://labix.org/python-dateutil): 1.5 or higher +- [pytz](https://pythonhosted.org/pytz) - Needed for time zone support with ``pandas.date_range`` -See the [full installation instructions](http://pandas.pydata.org/pandas-docs/stable/install.html#dependencies) +See the [full installation instructions](https://pandas.pydata.org/pandas-docs/stable/install.html#dependencies) for recommended and optional dependencies. ## Installation from sources @@ -197,13 +197,13 @@ mode](https://pip.pypa.io/en/latest/reference/pip_install.html#editable-installs pip install -e . ``` -See the full instructions for [installing from source](http://pandas.pydata.org/pandas-docs/stable/install.html#installing-from-source). +See the full instructions for [installing from source](https://pandas.pydata.org/pandas-docs/stable/install.html#installing-from-source). ## License -BSD +[BSD 3](LICENSE) ## Documentation -The official documentation is hosted on PyData.org: http://pandas.pydata.org/pandas-docs/stable/ +The official documentation is hosted on PyData.org: https://pandas.pydata.org/pandas-docs/stable The Sphinx documentation should provide a good starting point for learning how to use the library. Expect the docs to continue to expand as time goes on. @@ -223,7 +223,7 @@ Most development discussion is taking place on github in this repo. Further, the ## Contributing to pandas All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome. -A detailed overview on how to contribute can be found in the **[contributing guide.](http://pandas.pydata.org/pandas-docs/stable/contributing.html)** +A detailed overview on how to contribute can be found in the **[contributing guide.](https://pandas.pydata.org/pandas-docs/stable/contributing.html)** If you are simply looking to start working with the pandas codebase, navigate to the [GitHub “issues” tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. 
There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [Difficulty Novice](https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22) where you could start out. diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index a3a90f514f142..a3062b4086673 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -144,7 +144,7 @@ To evaluate single-element pandas objects in a boolean context, use the method ` Bitwise boolean ~~~~~~~~~~~~~~~ -Bitwise boolean operators like ``==`` and ``!=`` will return a boolean ``Series``, +Bitwise boolean operators like ``==`` and ``!=`` return a boolean ``Series``, which is almost always what you want anyways. .. code-block:: python @@ -194,7 +194,7 @@ For lack of ``NA`` (missing) support from the ground up in NumPy and Python in general, we were given the difficult choice between either - A *masked array* solution: an array of data and an array of boolean values - indicating whether a value + indicating whether a value is there or is missing - Using a special sentinel value, bit pattern, or set of sentinel values to denote ``NA`` across the dtypes @@ -247,16 +247,16 @@ dtype in order to store the NAs. These are summarized by this table: ``integer``, cast to ``float64`` ``boolean``, cast to ``object`` -While this may seem like a heavy trade-off, I have found very few -cases where this is an issue in practice. Some explanation for the motivation -here in the next section. +While this may seem like a heavy trade-off, I have found very few cases where +this is an issue in practice i.e. storing values greater than 2**53. Some +explanation for the motivation is in the next section. Why not make NumPy like R? ~~~~~~~~~~~~~~~~~~~~~~~~~~ Many people have suggested that NumPy should simply emulate the ``NA`` support present in the more domain-specific statistical programming language `R -<http://r-project.org>`__. Part of the reason is the NumPy type hierarchy: +<https://r-project.org>`__. Part of the reason is the NumPy type hierarchy: .. csv-table:: :header: "Typeclass","Dtypes" @@ -305,7 +305,7 @@ the ``DataFrame.copy`` method. If you are doing a lot of copying of DataFrame objects shared among threads, we recommend holding locks inside the threads where the data copying occurs. -See `this link <http://stackoverflow.com/questions/13592618/python-pandas-dataframe-thread-safe>`__ +See `this link <https://stackoverflow.com/questions/13592618/python-pandas-dataframe-thread-safe>`__ for more information. @@ -332,5 +332,5 @@ using something similar to the following: s = pd.Series(newx) See `the NumPy documentation on byte order -<http://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more +<https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more details.
- Minor grammar fixes - Added some extra details - Added link to LICENSE - Switched links to explicit HTTPS - Minor URL cleanup
https://api.github.com/repos/pandas-dev/pandas/pulls/17150
2017-08-02T07:21:12Z
2017-08-02T09:47:00Z
2017-08-02T09:47:00Z
2017-08-02T09:47:02Z
BUG: resample and apply modify the index type for empty Series
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index fad6647d4de8d..72175f877532b 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -299,6 +299,7 @@ Groupby/Resample/Rolling - Bug in :func:`infer_freq` causing indices with 2-day gaps during the working week to be wrongly inferred as business daily (:issue:`16624`) - Bug in ``.rolling(...).quantile()`` which incorrectly used different defaults than :func:`Series.quantile()` and :func:`DataFrame.quantile()` (:issue:`9413`, :issue:`16211`) - Bug in ``groupby.transform()`` that would coerce boolean dtypes back to float (:issue:`16875`) +- Bug in ``Series.resample(...).apply()`` where an empty ``Series`` modified the source index and did not return the name of a ``Series`` (:issue:`14313`) Sparse ^^^^^^ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index a8a48624fb885..96e7a6a3b3904 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -17,7 +17,7 @@ from pandas.core.indexes.period import PeriodIndex, period_range import pandas.core.common as com import pandas.core.algorithms as algos -from pandas.core.dtypes.generic import ABCDataFrame +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries import pandas.compat as compat from pandas.compat.numpy import function as nv @@ -439,6 +439,11 @@ def _wrap_result(self, result): if isinstance(result, com.ABCSeries) and self._selection is not None: result.name = self._selection + if isinstance(result, ABCSeries) and result.empty: + obj = self.obj + result.index = obj.index._shallow_copy(freq=to_offset(self.freq)) + result.name = getattr(obj, 'name', None) + return result def pad(self, limit=None): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 08fa7992e8da1..d938d5bf9f3ab 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -852,6 +852,16 @@ def test_resample_loffset_arg_type(self): assert_frame_equal(result_agg, expected) assert_frame_equal(result_how, expected) + def test_apply_to_empty_series(self): + # GH 14313 + series = self.create_series()[:0] + + for freq in ['M', 'D', 'H']: + result = series.resample(freq).apply(lambda x: 1) + expected = series.resample(freq).apply(np.sum) + + assert_series_equal(result, expected, check_dtype=False) + class TestDatetimeIndex(Base): _index_factory = lambda x: date_range @@ -2794,6 +2804,14 @@ def test_evenly_divisible_with_no_extra_bins(self): result = df.resample('7D').sum() assert_frame_equal(result, expected) + def test_apply_to_empty_series(self): + # GH 14313 + series = self.create_series()[:0] + + for freq in ['M', 'D', 'H']: + with pytest.raises(TypeError): + series.resample(freq).apply(lambda x: 1) + class TestTimedeltaIndex(Base): _index_factory = lambda x: timedelta_range
- [x] closes #14313 - [x] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [x] whatsnew entry
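For illustration, a minimal sketch of the fixed behaviour, adapted from the test added above (the empty float Series and the 'M' frequency stand in for one of the parametrized cases):

```python
import numpy as np
import pandas as pd

# an empty Series with a DatetimeIndex, as in the regression test
series = pd.Series([], index=pd.DatetimeIndex([]), dtype=float)

# with the fix, resampling an empty Series no longer mutates the
# source index type, and apply() matches the equivalent aggregation
result = series.resample('M').apply(lambda x: 1)
expected = series.resample('M').apply(np.sum)

pd.testing.assert_series_equal(result, expected, check_dtype=False)
```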
https://api.github.com/repos/pandas-dev/pandas/pulls/17149
2017-08-02T05:20:41Z
2017-08-09T10:37:21Z
2017-08-09T10:37:21Z
2017-08-09T10:37:24Z
DOC: Clean up instructions in ISSUE_TEMPLATE
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 1f614b54b1f71..6ab03c9907475 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -8,11 +8,18 @@ [this should explain **why** the current behaviour is a problem and why the expected output is a better solution.] +**Note**: We receive a lot of issues on our GitHub tracker, so it is very possible that your issue has been posted before. +Please check first before submitting so that we do not have to handle and close duplicates! + +**Note**: Many problems can be resolved by simply upgrading `pandas` to the latest version. Before submitting, please check +if that solution works for you. If possible, you may want to check if `master` addresses this issue, but that is not necessary. + #### Expected Output #### Output of ``pd.show_versions()`` <details> -# Paste the output here pd.show_versions() here + +[paste the output of ``pd.show_versions()`` here below this line] </details>
* Minor grammar fixes + clarification on where to paste the output of `pd.show_versions()` to ensure that it renders properly. * Additional notes about ways to filter out duplicates and / or resolve issues by upgrading.
https://api.github.com/repos/pandas-dev/pandas/pulls/17146
2017-08-01T16:33:54Z
2017-08-01T22:33:00Z
2017-08-01T22:33:00Z
2017-08-03T08:42:27Z
REF: repr - allow block to override values that get formatted
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 25c367fcbd968..880fc7db4b6eb 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -159,6 +159,10 @@ def internal_values(self, dtype=None): """ return self.values + def formatting_values(self): + """Return the internal values used by the DataFrame/SeriesFormatter""" + return self.internal_values() + def get_values(self, dtype=None): """ return an internal format, currently just the ndarray @@ -4316,6 +4320,10 @@ def external_values(self): def internal_values(self): return self._block.internal_values() + def formatting_values(self): + """Return the internal values used by the DataFrame/SeriesFormatter""" + return self._block.formatting_values() + def get_values(self): """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) diff --git a/pandas/core/series.py b/pandas/core/series.py index 60d268c89a9d7..2b852d3b6916c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -398,6 +398,12 @@ def _values(self): """ return the internal repr of this data """ return self._data.internal_values() + def _formatting_values(self): + """Return the values that can be formatted (used by SeriesFormatter + and DataFrameFormatter) + """ + return self._data.formatting_values() + def get_values(self): """ same as values (but handles sparseness conversions); is a view """ return self._data.get_values() diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 2b322431bd301..733fd3bd39b52 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -237,7 +237,8 @@ def _get_formatted_index(self): return fmt_index, have_header def _get_formatted_values(self): - return format_array(self.tr_series._values, None, + values_to_format = self.tr_series._formatting_values() + return format_array(values_to_format, None, float_format=self.float_format, na_rep=self.na_rep) def to_string(self): @@ -694,7 +695,8 @@ def to_latex(self, column_format=None, longtable=False, encoding=None, def _format_col(self, i): frame = self.tr_frame formatter = self._get_formatter(i) - return format_array(frame.iloc[:, i]._values, formatter, + values_to_format = frame.iloc[:, i]._formatting_values() + return format_array(values_to_format, formatter, float_format=self.float_format, na_rep=self.na_rep, space=self.col_space, decimal=self.decimal) diff --git a/pandas/tests/internals/__init__.py b/pandas/tests/internals/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/internals/test_external_block.py b/pandas/tests/internals/test_external_block.py new file mode 100644 index 0000000000000..cccde76c3e1d9 --- /dev/null +++ b/pandas/tests/internals/test_external_block.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# pylint: disable=W0102 + +import numpy as np + +import pandas as pd +from pandas.core.internals import Block, BlockManager, SingleBlockManager + + +class CustomBlock(Block): + + def formatting_values(self): + return np.array(["Val: {}".format(i) for i in self.values]) + + +def test_custom_repr(): + values = np.arange(3, dtype='int64') + + # series + block = CustomBlock(values, placement=slice(0, 3)) + + s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3))) + assert repr(s) == '0 Val: 0\n1 Val: 1\n2 Val: 2\ndtype: int64' + + # dataframe + block = CustomBlock(values.reshape(1, -1), placement=slice(0, 1)) + blk_mgr = BlockManager([block], [['col'], range(3)]) + df = pd.DataFrame(blk_mgr) + assert repr(df) == ' col\n0 Val: 0\n1 Val: 1\n2 
Val: 2' diff --git a/pandas/tests/test_internals.py b/pandas/tests/internals/test_internals.py similarity index 100% rename from pandas/tests/test_internals.py rename to pandas/tests/internals/test_internals.py diff --git a/setup.py b/setup.py index d5791862cfb19..a912b25328954 100755 --- a/setup.py +++ b/setup.py @@ -670,6 +670,7 @@ def pxd(name): 'pandas.tests.indexes.datetimes', 'pandas.tests.indexes.timedeltas', 'pandas.tests.indexes.period', + 'pandas.tests.internals', 'pandas.tests.io', 'pandas.tests.io.json', 'pandas.tests.io.parser',
This change allows an externally defined Block to override which values are used by the SeriesFormatter and DataFrameFormatter. By default, the 'internal values' are used (currently via `Series._values`, which calls `Block.internal_values()`), so for pandas itself nothing should change. xref https://github.com/pandas-dev/pandas/pull/17144
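A rough sketch of the hook in action, mirroring the test added in the diff (this pokes at pandas internals, so it is tied to the internals API of this era rather than any public interface):

```python
import numpy as np
import pandas as pd
from pandas.core.internals import Block, SingleBlockManager

class CustomBlock(Block):
    # override the new hook: the repr shows decorated strings while
    # the stored values remain plain int64
    def formatting_values(self):
        return np.array(["Val: {}".format(i) for i in self.values])

block = CustomBlock(np.arange(3, dtype='int64'), placement=slice(0, 3))
s = pd.Series(SingleBlockManager(block, pd.RangeIndex(3)))
print(repr(s))
# 0    Val: 0
# 1    Val: 1
# 2    Val: 2
# dtype: int64
```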
https://api.github.com/repos/pandas-dev/pandas/pulls/17143
2017-08-01T14:38:25Z
2017-08-04T07:44:53Z
2017-08-04T07:44:53Z
2019-06-24T16:20:26Z
CI: remove no-longer-available conda versions
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 8cf6f2ce636da..dcc1656ce3dd7 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -52,9 +52,6 @@ conda update -q conda echo echo "[add channels]" -# add the pandas channel to take priority -# to add extra packages -conda config --add channels pandas || exit 1 conda config --remove channels defaults || exit 1 conda config --add channels defaults || exit 1 diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip index eb796368e7820..876d9e978fa84 100644 --- a/ci/requirements-2.7.pip +++ b/ci/requirements-2.7.pip @@ -1,5 +1,7 @@ blosc pandas-gbq +html5lib +beautifulsoup4 pathlib backports.lzma py diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run index 62e31e4ae24e3..7152cb2c8b605 100644 --- a/ci/requirements-2.7.run +++ b/ci/requirements-2.7.run @@ -10,13 +10,11 @@ xlrd=0.9.2 sqlalchemy=0.9.6 lxml=3.2.1 scipy -xlsxwriter=0.4.6 +xlsxwriter=0.5.2 s3fs bottleneck -psycopg2=2.5.2 +psycopg2 patsy pymysql=0.6.3 -html5lib=1.0b2 -beautiful-soup=4.2.1 jinja2=2.8 xarray=0.8.0 diff --git a/ci/requirements-2.7_COMPAT.pip b/ci/requirements-2.7_COMPAT.pip index 9533a630d06a4..13cd35a923124 100644 --- a/ci/requirements-2.7_COMPAT.pip +++ b/ci/requirements-2.7_COMPAT.pip @@ -1,2 +1,4 @@ +html5lib==1.0b2 +beautifulsoup4==4.2.0 openpyxl argparse diff --git a/ci/requirements-2.7_COMPAT.run b/ci/requirements-2.7_COMPAT.run index d27b6a72c2d15..b94f4ab7b27d1 100644 --- a/ci/requirements-2.7_COMPAT.run +++ b/ci/requirements-2.7_COMPAT.run @@ -4,13 +4,10 @@ pytz=2013b scipy=0.11.0 xlwt=0.7.5 xlrd=0.9.2 -bottleneck=0.8.0 numexpr=2.2.2 pytables=3.0.0 -html5lib=1.0b2 -beautiful-soup=4.2.0 -psycopg2=2.5.1 +psycopg2 pymysql=0.6.0 sqlalchemy=0.7.8 -xlsxwriter=0.4.6 +xlsxwriter=0.5.2 jinja2=2.8 diff --git a/ci/requirements-2.7_LOCALE.pip b/ci/requirements-2.7_LOCALE.pip index cf8e6b8b3d3a6..1b825bbf492ca 100644 --- a/ci/requirements-2.7_LOCALE.pip +++ b/ci/requirements-2.7_LOCALE.pip @@ -1 +1,3 @@ +html5lib==1.0b2 +beautifulsoup4==4.2.1 blosc diff --git a/ci/requirements-2.7_LOCALE.run b/ci/requirements-2.7_LOCALE.run index 5d7cc31b7d55e..8e360cf74b081 100644 --- a/ci/requirements-2.7_LOCALE.run +++ b/ci/requirements-2.7_LOCALE.run @@ -3,12 +3,9 @@ pytz=2013b numpy=1.8.2 xlwt=0.7.5 openpyxl=1.6.2 -xlsxwriter=0.4.6 +xlsxwriter=0.5.2 xlrd=0.9.2 -bottleneck=0.8.0 matplotlib=1.3.1 sqlalchemy=0.8.1 -html5lib=1.0b2 lxml=3.2.1 scipy -beautiful-soup=4.2.1 diff --git a/ci/requirements-2.7_SLOW.run b/ci/requirements-2.7_SLOW.run index c2d2a14285ad6..0a549554f5219 100644 --- a/ci/requirements-2.7_SLOW.run +++ b/ci/requirements-2.7_SLOW.run @@ -13,7 +13,6 @@ pytables sqlalchemy lxml s3fs -bottleneck psycopg2 pymysql html5lib diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index ef66ebeb336f3..00db27d3f2704 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -16,7 +16,7 @@ sqlalchemy pymysql feather-format pyarrow -# psycopg2 (not avail on defaults ATM) +psycopg2 beautifulsoup4 s3fs xarray diff --git a/ci/requirements-3.6_LOCALE.run b/ci/requirements-3.6_LOCALE.run index ae456f4f9f38a..ad54284c6f7e3 100644 --- a/ci/requirements-3.6_LOCALE.run +++ b/ci/requirements-3.6_LOCALE.run @@ -15,7 +15,7 @@ jinja2 sqlalchemy pymysql # feather-format (not available on defaults ATM) -# psycopg2 (not avail on defaults ATM) +psycopg2 beautifulsoup4 s3fs xarray diff --git a/ci/requirements-3.6_LOCALE_SLOW.run b/ci/requirements-3.6_LOCALE_SLOW.run index 28131031f0bbd..ad54284c6f7e3 100644 --- 
a/ci/requirements-3.6_LOCALE_SLOW.run +++ b/ci/requirements-3.6_LOCALE_SLOW.run @@ -15,7 +15,7 @@ jinja2 sqlalchemy pymysql # feather-format (not available on defaults ATM) -# psycopg2 (not available on defaults ATM) +psycopg2 beautifulsoup4 s3fs xarray diff --git a/doc/source/install.rst b/doc/source/install.rst index 48d51e1200447..c185a7cf4b875 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -217,7 +217,8 @@ Recommended Dependencies If installed, must be Version 2.4.6 or higher. * `bottleneck <http://berkeleyanalytics.com/bottleneck>`__: for accelerating certain types of ``nan`` - evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. + evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed, + must be Version 1.0.0 or higher. .. note:: diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 1931ffff4b217..589e88dc4aaf6 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -205,6 +205,7 @@ Other API Changes ^^^^^^^^^^^^^^^^^ - Support has been dropped for Python 3.4 (:issue:`15251`) +- Support has been dropped for bottleneck < 1.0.0 (:issue:`15214`) - The Categorical constructor no longer accepts a scalar for the ``categories`` keyword. (:issue:`16022`) - Accessing a non-existent attribute on a closed :class:`~pandas.HDFStore` will now raise an ``AttributeError`` rather than a ``ClosedFileError`` (:issue:`16301`) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 5bebb8eb65b23..e2777cb56374e 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1,6 +1,8 @@ import itertools import functools import operator +import warnings +from distutils.version import LooseVersion import numpy as np from pandas import compat @@ -20,11 +22,24 @@ from pandas.core.config import get_option from pandas.core.common import _values_from_object +_BOTTLENECK_INSTALLED = False +_MIN_BOTTLENECK_VERSION = '1.0.0' + try: import bottleneck as bn - _BOTTLENECK_INSTALLED = True + ver = bn.__version__ + _BOTTLENCK_INSTALLED = ver >= LooseVersion(_MIN_BOTTLENECK_VERSION) + + if not _BOTTLENECK_INSTALLED: + warnings.warn( + "The installed version of bottleneck {ver} is not supported " + "in pandas and will be not be used\nThe minimum supported " + "version is {min_ver}\n".format( + ver=ver, min_ver=_MIN_BOTTLENECK_VERSION), UserWarning) + except ImportError: # pragma: no cover - _BOTTLENECK_INSTALLED = False + pass + _USE_BOTTLENECK = False
closes #15214: removes support for bottleneck < 1.0.0, changes the CI minimum for xlsxwriter to 0.5.2, and removes our ``pandas`` conda channel as well (which held some really old versions of things).
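The version gate added to ``pandas/core/nanops.py`` follows a common pattern for optional dependencies. A cleaned-up sketch (the warning text is paraphrased, and the misspelled ``_BOTTLENCK_INSTALLED`` in the hunk above is written as ``_BOTTLENECK_INSTALLED`` here so the gate actually takes effect):

```python
import warnings
from distutils.version import LooseVersion

_MIN_BOTTLENECK_VERSION = '1.0.0'
_BOTTLENECK_INSTALLED = False

try:
    import bottleneck as bn
    # only use bottleneck when it is new enough
    _BOTTLENECK_INSTALLED = (LooseVersion(bn.__version__) >=
                             LooseVersion(_MIN_BOTTLENECK_VERSION))
    if not _BOTTLENECK_INSTALLED:
        warnings.warn("bottleneck {} is too old; the minimum supported "
                      "version is {}".format(bn.__version__,
                                             _MIN_BOTTLENECK_VERSION),
                      UserWarning)
except ImportError:  # pragma: no cover
    pass  # bottleneck is optional; fall back to numpy
```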
https://api.github.com/repos/pandas-dev/pandas/pulls/17142
2017-08-01T10:43:10Z
2017-08-01T20:09:23Z
2017-08-01T20:09:23Z
2017-08-01T20:09:23Z
Add missing space to the NotImplementedError's message for compound d…
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fbd26655798bd..ec44dce0da9bc 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -152,7 +152,7 @@ def _validate_dtype(self, dtype): # a compound dtype if dtype.kind == 'V': raise NotImplementedError("compound dtypes are not implemented" - "in the {0} constructor" + " in the {0} constructor" .format(self.__class__.__name__)) return dtype
https://api.github.com/repos/pandas-dev/pandas/pulls/17140
2017-08-01T07:46:09Z
2017-08-01T22:36:40Z
2017-08-01T22:36:40Z
2017-08-01T22:39:54Z
COMPAT: make sure use_inf_as_null is deprecated
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 3e753aacf7c71..875ab8249f953 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -412,8 +412,11 @@ def use_inf_as_na_cb(key): _use_inf_as_na(key) -cf.register_option('mode.use_inf_as_na', False, use_inf_as_na_doc, - cb=use_inf_as_na_cb) +with cf.config_prefix('mode'): + cf.register_option('use_inf_as_na', False, use_inf_as_na_doc, + cb=use_inf_as_na_cb) + cf.register_option('use_inf_as_null', False, use_inf_as_null_doc, + cb=use_inf_as_na_cb) cf.deprecate_option('mode.use_inf_as_null', msg=use_inf_as_null_doc, rkey='mode.use_inf_as_na') diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 2d20ac9685914..01bf7274fd384 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -410,6 +410,22 @@ def test_isna_for_inf(self): tm.assert_series_equal(r, e) tm.assert_series_equal(dr, de) + @tm.capture_stdout + def test_isnull_for_inf_deprecated(self): + # gh-17115 + s = Series(['a', np.inf, np.nan, 1.0]) + with tm.assert_produces_warning(DeprecationWarning, + check_stacklevel=False): + pd.set_option('mode.use_inf_as_null', True) + r = s.isna() + dr = s.dropna() + pd.reset_option('mode.use_inf_as_null') + + e = Series([False, True, True, False]) + de = Series(['a', 1.0], index=[0, 3]) + tm.assert_series_equal(r, e) + tm.assert_series_equal(dr, de) + def test_fillna(self): ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
closes #17115
https://api.github.com/repos/pandas-dev/pandas/pulls/17126
2017-07-31T11:08:36Z
2017-08-01T18:19:16Z
2017-08-01T18:19:16Z
2017-08-01T18:20:43Z
DOC: (de)type the return value of concat (#17079)
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 96603b6adc3b0..e199ec2710367 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -65,7 +65,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, Returns ------- - concatenated : type of objects + concatenated : object, type of objs Notes -----
The return value was being parsed by PyCharm as being of type "type". - [ ] closes #17079 - [ ] tests added / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17119
2017-07-30T08:31:39Z
2017-08-01T22:38:19Z
2017-08-01T22:38:18Z
2017-08-18T15:28:36Z
Unify Index._dir_* with Series implementation
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py new file mode 100644 index 0000000000000..9f8556d1e6961 --- /dev/null +++ b/pandas/core/accessor.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +""" + +accessor.py contains base classes for implementing accessor properties +that can be mixed into or pinned onto other pandas classes. + +""" + + +class DirNamesMixin(object): + _accessors = frozenset([]) + + def _dir_deletions(self): + """ delete unwanted __dir__ for this object """ + return self._accessors + + def _dir_additions(self): + """ add addtional __dir__ for this object """ + rv = set() + for accessor in self._accessors: + try: + getattr(self, accessor) + rv.add(accessor) + except AttributeError: + pass + return rv + + def __dir__(self): + """ + Provide method name lookup and completion + Only provide 'public' methods + """ + rv = set(dir(type(self))) + rv = (rv - self._dir_deletions()) | self._dir_additions() + return sorted(rv) diff --git a/pandas/core/base.py b/pandas/core/base.py index 8f21e3125a27e..b15431464b166 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -18,6 +18,7 @@ from pandas.util._decorators import (Appender, cache_readonly, deprecate_kwarg, Substitution) from pandas.core.common import AbstractMethodError +from pandas.core.accessor import DirNamesMixin _shared_docs = dict() _indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='', @@ -72,7 +73,7 @@ def __repr__(self): return str(self) -class PandasObject(StringMixin): +class PandasObject(StringMixin, DirNamesMixin): """baseclass for various pandas objects""" @@ -91,23 +92,6 @@ def __unicode__(self): # Should be overwritten by base classes return object.__repr__(self) - def _dir_additions(self): - """ add addtional __dir__ for this object """ - return set() - - def _dir_deletions(self): - """ delete unwanted __dir__ for this object """ - return set() - - def __dir__(self): - """ - Provide method name lookup and completion - Only provide 'public' methods - """ - rv = set(dir(type(self))) - rv = (rv - self._dir_deletions()) | self._dir_additions() - return sorted(rv) - def _reset_cache(self, key=None): """ Reset cached properties. If ``key`` is passed, only clears that key. @@ -140,7 +124,7 @@ class NoNewAttributesMixin(object): Prevents additional attributes via xxx.attribute = "something" after a call to `self.__freeze()`. Mainly used to prevent the user from using - wrong attrirbutes on a accessor (`Series.cat/.str/.dt`). + wrong attributes on a accessor (`Series.cat/.str/.dt`). If you really want to add a new attribute at a later time, you need to use `object.__setattr__(self, key, value)`. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2d52eed81d22b..04debef2fcac0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -192,8 +192,9 @@ def __unicode__(self): def _dir_additions(self): """ add the string-like attributes from the info_axis """ - return set([c for c in self._info_axis - if isinstance(c, string_types) and isidentifier(c)]) + additions = set([c for c in self._info_axis + if isinstance(c, string_types) and isidentifier(c)]) + return super(NDFrame, self)._dir_additions().union(additions) @property def _constructor_sliced(self): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 4aecc75d95971..d614b69c85afa 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -52,7 +52,7 @@ import pandas.core.sorting as sorting from pandas.io.formats.printing import pprint_thing from pandas.core.ops import _comp_method_OBJECT_ARRAY -from pandas.core.strings import StringAccessorMixin +from pandas.core import strings from pandas.core.config import get_option @@ -98,7 +98,7 @@ def _new_Index(cls, d): return cls.__new__(cls, **d) -class Index(IndexOpsMixin, StringAccessorMixin, PandasObject): +class Index(IndexOpsMixin, PandasObject): """ Immutable ndarray implementing an ordered, sliceable set. The basic object storing axis labels for all pandas objects @@ -151,6 +151,11 @@ class Index(IndexOpsMixin, StringAccessorMixin, PandasObject): _engine_type = libindex.ObjectEngine + _accessors = frozenset(['str']) + + # String Methods + str = base.AccessorProperty(strings.StringMethods) + def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, tupleize_cols=True, **kwargs): diff --git a/pandas/core/series.py b/pandas/core/series.py index c8282450b77a9..06504a4059a2f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -114,8 +114,7 @@ def wrapper(self): # Series class -class Series(base.IndexOpsMixin, strings.StringAccessorMixin, - generic.NDFrame,): +class Series(base.IndexOpsMixin, generic.NDFrame): """ One-dimensional ndarray with axis labels (including time series). 
@@ -2924,18 +2923,8 @@ def to_period(self, freq=None, copy=True): # Categorical methods cat = base.AccessorProperty(CategoricalAccessor) - def _dir_deletions(self): - return self._accessors - - def _dir_additions(self): - rv = set() - for accessor in self._accessors: - try: - getattr(self, accessor) - rv.add(accessor) - except AttributeError: - pass - return rv + # String Methods + str = base.AccessorProperty(strings.StringMethods) # ---------------------------------------------------------------------- # Add plotting methods to Series diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 0b1db0277eee3..2f95e510bba5e 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -16,7 +16,7 @@ from pandas.core.algorithms import take_1d import pandas.compat as compat -from pandas.core.base import AccessorProperty, NoNewAttributesMixin +from pandas.core.base import NoNewAttributesMixin from pandas.util._decorators import Appender import re import pandas._libs.lib as lib @@ -1920,20 +1920,4 @@ def _make_accessor(cls, data): message = ("Can only use .str accessor with Index, not " "MultiIndex") raise AttributeError(message) - return StringMethods(data) - - -class StringAccessorMixin(object): - """ Mixin to add a `.str` acessor to the class.""" - - str = AccessorProperty(StringMethods) - - def _dir_additions(self): - return set() - - def _dir_deletions(self): - try: - getattr(self, 'str') - except AttributeError: - return set(['str']) - return set() + return cls(data)
Have `Index` use the same implementation of `_dir_additions` and `_dir_deletions` that `Series` currently uses. Move that implementation into a shared base class. @jreback suggested that shared base class `DirNamesMixin` be moved from `core.base` to `core.accessor`. <s>De-duplicate `_dir_additions` and `_dir_deletions` code. `Index` currently gets its implementations of these two methods from `strings.StringAccessorMixin`, but the implementation is just a special case of the implementation in `Series`. That implementation is essentially the general case, so this PR moves it up to a base class `DirNamesMixin`. (I'm open to putting this elsewhere in the hierarchy.)</s> This PR removes `StringAccessorMixin`, discussed in #17042. <s>For `DatetimeIndex`, `TimedeltaIndex`, and `PeriodIndex`, this adds a property `dt` that just returns `self`. This way a user can access e.g. `item.year` symmetrically without having to check whether `item` is a `Series/Index`. Holding off on writing tests until a consensus is reached.</s> - [ ] closes #xxxx - [ ] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
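To make the mixin's behaviour concrete, a toy sketch (the `Toy` class and its `_is_text` flag are hypothetical; `DirNamesMixin` is the class this PR adds under `pandas.core.accessor`, so the import assumes a build that includes it):

```python
from pandas.core.accessor import DirNamesMixin

class Toy(DirNamesMixin):
    _accessors = frozenset(['str'])

    @property
    def str(self):
        # like Series.str: the accessor only resolves for some data
        if not getattr(self, '_is_text', False):
            raise AttributeError("Can only use .str with text data")
        return "string methods"

t = Toy()
assert 'str' not in dir(t)  # accessor raised AttributeError -> hidden
t._is_text = True
assert 'str' in dir(t)      # accessor resolves -> advertised
```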
https://api.github.com/repos/pandas-dev/pandas/pulls/17117
2017-07-29T22:54:44Z
2017-08-29T13:23:39Z
2017-08-29T13:23:39Z
2017-10-30T16:23:53Z
DOC: further clean-up null/na changes
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index c8138d795b836..fe20a7eb2b786 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -511,7 +511,7 @@ optional ``level`` parameter which applies only if the object has a :header: "Function", "Description" :widths: 20, 80 - ``count``, Number of non-na observations + ``count``, Number of non-NA observations ``sum``, Sum of values ``mean``, Mean of values ``mad``, Mean absolute deviation @@ -541,7 +541,7 @@ will exclude NAs on Series input by default: np.mean(df['one'].values) ``Series`` also has a method :meth:`~Series.nunique` which will return the -number of unique non-na values: +number of unique non-NA values: .. ipython:: python diff --git a/doc/source/io.rst b/doc/source/io.rst index 149c86aead135..bf68a0cae1d27 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -137,7 +137,6 @@ usecols : array-like or callable, default ``None`` Using this parameter results in much faster parsing time and lower memory usage. as_recarray : boolean, default ``False`` - .. deprecated:: 0.18.2 Please call ``pd.read_csv(...).to_records()`` instead. @@ -193,7 +192,6 @@ skiprows : list-like or integer, default ``None`` skipfooter : int, default ``0`` Number of lines at bottom of file to skip (unsupported with engine='c'). skip_footer : int, default ``0`` - .. deprecated:: 0.19.0 Use the ``skipfooter`` parameter instead, as they are identical @@ -208,13 +206,11 @@ low_memory : boolean, default ``True`` use the ``chunksize`` or ``iterator`` parameter to return the data in chunks. (Only valid with C parser) buffer_lines : int, default None - .. deprecated:: 0.19.0 Argument removed because its value is not respected by the parser compact_ints : boolean, default False - .. deprecated:: 0.19.0 Argument moved to ``pd.to_numeric`` @@ -223,7 +219,6 @@ compact_ints : boolean, default False parser will attempt to cast it as the smallest integer ``dtype`` possible, either signed or unsigned depending on the specification from the ``use_unsigned`` parameter. use_unsigned : boolean, default False - .. deprecated:: 0.18.2 Argument moved to ``pd.to_numeric`` diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 5c10df25051a2..d54288baa389b 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -36,7 +36,7 @@ When / why does data become missing? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some might quibble over our usage of *missing*. By "missing" we simply mean -**NA** or "not present for whatever reason". Many data sets simply arrive with +**NA** ("not available") or "not present for whatever reason". Many data sets simply arrive with missing data, either because it exists and was not collected or it never existed. For example, in a collection of financial time series, some of the time series might start on different dates. Thus, values prior to the start date diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt index cf5369466308c..f0db1d82252c1 100644 --- a/doc/source/whatsnew/v0.10.0.txt +++ b/doc/source/whatsnew/v0.10.0.txt @@ -128,15 +128,45 @@ labeled the aggregated group with the end of the interval: the next day). ``notnull``. That they ever were was a relic of early pandas. This behavior can be re-enabled globally by the ``mode.use_inf_as_null`` option: -.. ipython:: python +.. 
code-block:: ipython - s = pd.Series([1.5, np.inf, 3.4, -np.inf]) - pd.isnull(s) - s.fillna(0) - pd.set_option('use_inf_as_null', True) - pd.isnull(s) - s.fillna(0) - pd.reset_option('use_inf_as_null') + In [6]: s = pd.Series([1.5, np.inf, 3.4, -np.inf]) + + In [7]: pd.isnull(s) + Out[7]: + 0 False + 1 False + 2 False + 3 False + Length: 4, dtype: bool + + In [8]: s.fillna(0) + Out[8]: + 0 1.500000 + 1 inf + 2 3.400000 + 3 -inf + Length: 4, dtype: float64 + + In [9]: pd.set_option('use_inf_as_null', True) + + In [10]: pd.isnull(s) + Out[10]: + 0 False + 1 True + 2 False + 3 True + Length: 4, dtype: bool + + In [11]: s.fillna(0) + Out[11]: + 0 1.5 + 1 0.0 + 2 3.4 + 3 0.0 + Length: 4, dtype: float64 + + In [12]: pd.reset_option('use_inf_as_null') - Methods with the ``inplace`` option now all return ``None`` instead of the calling object. E.g. code written like ``df = df.fillna(0, inplace=True)`` diff --git a/doc/source/whatsnew/v0.4.x.txt b/doc/source/whatsnew/v0.4.x.txt index 237ea84425051..ed9352059a6dc 100644 --- a/doc/source/whatsnew/v0.4.x.txt +++ b/doc/source/whatsnew/v0.4.x.txt @@ -9,7 +9,7 @@ New Features - Added Python 3 support using 2to3 (:issue:`200`) - :ref:`Added <dsintro.name_attribute>` ``name`` attribute to ``Series``, now prints as part of ``Series.__repr__`` -- :ref:`Added <missing.isnull>` instance methods ``isnull`` and ``notnull`` to +- :ref:`Added <missing.isna>` instance methods ``isnull`` and ``notnull`` to Series (:issue:`209`, :issue:`203`) - :ref:`Added <basics.align>` ``Series.align`` method for aligning two series with choice of join method (ENH56_) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 76e30a6fb9d52..3e753aacf7c71 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -398,8 +398,8 @@ def table_schema_cb(key): use_inf_as_na_doc = """ : boolean - True means treat None, NaN, INF, -INF as na (old way), - False means None and NaN are null, but INF, -INF are not na + True means treat None, NaN, INF, -INF as NA (old way), + False means None and NaN are null, but INF, -INF are not NA (new way). """
The old whatsnew example was failing, so it was converted to a static code-block (plus some additional minor fixes).
https://api.github.com/repos/pandas-dev/pandas/pulls/17113
2017-07-29T19:02:19Z
2017-07-29T21:58:03Z
2017-07-29T21:58:03Z
2017-07-29T22:18:09Z
API: Localize Series when calling to_datetime with utc=True (#6415)
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 273cbd8357f85..e0963a1908bbc 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -298,6 +298,36 @@ length 2+ levels, so a :class:`MultiIndex` is always returned from all of the pd.MultiIndex.from_tuples([('a',), ('b',)]) +.. _whatsnew_0210.api.utc_localization_with_series: + +UTC Localization with Series +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Previously, :func:`to_datetime` did not localize datetime ``Series`` data when ``utc=True`` was passed. Now, :func:`to_datetime` will correctly localize ``Series`` with a ``datetime64[ns, UTC]`` dtype to be consistent with how list-like and ``Index`` data are handled. (:issue:`6415`). + + Previous Behavior + + .. ipython:: python + + s = Series(['20130101 00:00:00'] * 3) + + .. code-block:: ipython + + In [12]: pd.to_datetime(s, utc=True) + Out[12]: + 0 2013-01-01 + 1 2013-01-01 + 2 2013-01-01 + dtype: datetime64[ns] + + New Behavior + + .. ipython:: python + + pd.to_datetime(s, utc=True) + +Additionally, DataFrames with datetime columns that were parsed by :func:`read_sql_table` and :func:`read_sql_query` will also be localized to UTC only if the original SQL columns were timezone aware datetime columns. + .. _whatsnew_0210.api: Other API Changes diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c0f234a36803d..9ff0275a7c370 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -516,7 +516,7 @@ def _convert_listlike(arg, box, format, name=None, tz=tz): result = arg elif isinstance(arg, ABCSeries): from pandas import Series - values = _convert_listlike(arg._values, False, format) + values = _convert_listlike(arg._values, True, format) result = Series(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, MutableMapping)): result = _assemble_from_unit_mappings(arg, errors=errors) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 9aa47e5c69850..9c6d01d236c57 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -99,24 +99,24 @@ def _convert_params(sql, params): return args -def _handle_date_column(col, format=None): +def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors='ignore', **format) else: if format in ['D', 's', 'ms', 'us', 'ns']: - return to_datetime(col, errors='coerce', unit=format, utc=True) + return to_datetime(col, errors='coerce', unit=format, utc=utc) elif (issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer)): # parse dates as timestamp format = 's' if format is None else format - return to_datetime(col, errors='coerce', unit=format, utc=True) + return to_datetime(col, errors='coerce', unit=format, utc=utc) elif is_datetime64tz_dtype(col): # coerce to UTC timezone # GH11216 return (to_datetime(col, errors='coerce') .astype('datetime64[ns, UTC]')) else: - return to_datetime(col, errors='coerce', format=format, utc=True) + return to_datetime(col, errors='coerce', format=format, utc=utc) def _parse_date_columns(data_frame, parse_dates): @@ -821,8 +821,9 @@ def _harmonize_columns(self, parse_dates=None): if (col_type is datetime or col_type is date or col_type is DatetimeTZDtype): - self.frame[col_name] = _handle_date_column(df_col) - + # Convert tz-aware Datetime SQL columns to UTC + utc = col_type is DatetimeTZDtype + self.frame[col_name] = _handle_date_column(df_col, utc=utc) elif col_type is float: # floats support NA, can always convert! 
self.frame[col_name] = df_col.astype(col_type, copy=False) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 50669ee357bbd..089d74a1d69b8 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -260,15 +260,53 @@ def test_to_datetime_tz_pytz(self): dtype='datetime64[ns, UTC]', freq=None) tm.assert_index_equal(result, expected) - def test_to_datetime_utc_is_true(self): - # See gh-11934 - start = pd.Timestamp('2014-01-01', tz='utc') - end = pd.Timestamp('2014-01-03', tz='utc') - date_range = pd.bdate_range(start, end) - - result = pd.to_datetime(date_range, utc=True) - expected = pd.DatetimeIndex(data=date_range) - tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("init_constructor, end_constructor, test_method", + [(Index, DatetimeIndex, tm.assert_index_equal), + (list, DatetimeIndex, tm.assert_index_equal), + (np.array, DatetimeIndex, tm.assert_index_equal), + (Series, Series, tm.assert_series_equal)]) + def test_to_datetime_utc_true(self, + init_constructor, + end_constructor, + test_method): + # See gh-11934 & gh-6415 + data = ['20100102 121314', '20100102 121315'] + expected_data = [pd.Timestamp('2010-01-02 12:13:14', tz='utc'), + pd.Timestamp('2010-01-02 12:13:15', tz='utc')] + + result = pd.to_datetime(init_constructor(data), + format='%Y%m%d %H%M%S', + utc=True) + expected = end_constructor(expected_data) + test_method(result, expected) + + # Test scalar case as well + for scalar, expected in zip(data, expected_data): + result = pd.to_datetime(scalar, format='%Y%m%d %H%M%S', utc=True) + assert result == expected + + def test_to_datetime_utc_true_with_series_single_value(self): + # GH 15760 UTC=True with Series + ts = 1.5e18 + result = pd.to_datetime(pd.Series([ts]), utc=True) + expected = pd.Series([pd.Timestamp(ts, tz='utc')]) + tm.assert_series_equal(result, expected) + + def test_to_datetime_utc_true_with_series_tzaware_string(self): + ts = '2013-01-01 00:00:00-01:00' + expected_ts = '2013-01-01 01:00:00' + data = pd.Series([ts] * 3) + result = pd.to_datetime(data, utc=True) + expected = pd.Series([pd.Timestamp(expected_ts, tz='utc')] * 3) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('date, dtype', + [('2013-01-01 01:00:00', 'datetime64[ns]'), + ('2013-01-01 01:00:00', 'datetime64[ns, UTC]')]) + def test_to_datetime_utc_true_with_series_datetime_ns(self, date, dtype): + expected = pd.Series([pd.Timestamp('2013-01-01 01:00:00', tz='UTC')]) + result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True) + tm.assert_series_equal(result, expected) def test_to_datetime_tz_psycopg2(self): diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a7c42391effe6..93eb0ff0ac1f2 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -602,7 +602,7 @@ def test_execute_sql(self): tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']) def test_date_parsing(self): - # Test date parsing in read_sq + # Test date parsing in read_sql # No Parsing df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn) assert not issubclass(df.DateCol.dtype.type, np.datetime64) @@ -1271,11 +1271,13 @@ def check(col): # "2000-01-01 00:00:00-08:00" should convert to # "2000-01-01 08:00:00" - assert col[0] == Timestamp('2000-01-01 08:00:00', tz='UTC') - # "2000-06-01 00:00:00-07:00" should convert to # "2000-06-01 07:00:00" - assert col[1] == Timestamp('2000-06-01 07:00:00', tz='UTC') + # GH 6415 + 
expected_data = [Timestamp('2000-01-01 08:00:00', tz='UTC'), + Timestamp('2000-06-01 07:00:00', tz='UTC')] + expected = Series(expected_data, name=col.name) + tm.assert_series_equal(col, expected) else: raise AssertionError("DateCol loaded with incorrect type " @@ -1298,6 +1300,9 @@ def check(col): self.conn, parse_dates=['DateColWithTz']) if not hasattr(df, 'DateColWithTz'): pytest.skip("no column with datetime with time zone") + col = df.DateColWithTz + assert is_datetime64tz_dtype(col.dtype) + assert str(col.dt.tz) == 'UTC' check(df.DateColWithTz) df = pd.concat(list(pd.read_sql_query("select * from types_test_data", @@ -1307,9 +1312,9 @@ def check(col): assert is_datetime64tz_dtype(col.dtype) assert str(col.dt.tz) == 'UTC' expected = sql.read_sql_table("types_test_data", self.conn) - tm.assert_series_equal(df.DateColWithTz, - expected.DateColWithTz - .astype('datetime64[ns, UTC]')) + col = expected.DateColWithTz + assert is_datetime64tz_dtype(col.dtype) + tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz) # xref #7139 # this might or might not be converted depending on the postgres driver @@ -1388,8 +1393,10 @@ def test_datetime_date(self): df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"]) df.to_sql('test_date', self.conn, index=False) res = read_sql_table('test_date', self.conn) + result = res['a'] + expected = to_datetime(df['a']) # comes back as datetime64 - tm.assert_series_equal(res['a'], to_datetime(df['a'])) + tm.assert_series_equal(result, expected) def test_datetime_time(self): # test support for datetime.time diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index a765e2c4ca1bf..6976fe162c5d5 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2137,7 +2137,7 @@ def test_set_index_datetime(self): '2011-07-19 08:00:00', '2011-07-19 09:00:00'], 'value': range(6)}) df.index = pd.to_datetime(df.pop('datetime'), utc=True) - df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific') + df.index = df.index.tz_convert('US/Pacific') expected = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
- [x] closes #6415 - [x] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [x] whatsnew entry When `_convert_listlike` passes an `np.array` into a `Series`, the `Series` initially appears to have a naive datetime dtype, so the `Series` result is now localized when `utc=True`. I made one minor change to an existing test that called `tz_localize` on an index it had set with `pd.to_datetime(...utc=True)`; the explicit localization is now redundant.
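In short, the new behaviour for ``Series`` input (a minimal example based on the whatsnew entry above):

```python
import pandas as pd

s = pd.Series(['20130101 00:00:00'] * 3)

# previously this returned naive datetime64[ns]; Series input is now
# localized to UTC, consistent with list-like and Index input
result = pd.to_datetime(s, utc=True)
assert str(result.dtype) == 'datetime64[ns, UTC]'
```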
https://api.github.com/repos/pandas-dev/pandas/pulls/17109
2017-07-29T06:19:28Z
2017-09-01T12:19:31Z
2017-09-01T12:19:31Z
2017-12-20T02:04:38Z
BUG: Allow pd.unique to accept tuple of strings
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 0025f8d098d81..2d55144848e42 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -327,3 +327,4 @@ Other ^^^^^ - Bug in :func:`eval` where the ``inplace`` parameter was being incorrectly handled (:issue:`16732`) - Bug in ``.isin()`` in which checking membership in empty ``Series`` objects raised an error (:issue:`16991`) +- Bug in :func:`unique` where checking a tuple of strings raised a ``TypeError`` (:issue:`17108`) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 4ca658b35a276..f2359f3ff1a9d 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -170,6 +170,8 @@ def _ensure_arraylike(values): ABCIndexClass, ABCSeries)): inferred = lib.infer_dtype(values) if inferred in ['mixed', 'string', 'unicode']: + if isinstance(values, tuple): + values = list(values) values = lib.list_to_object_array(values) else: values = np.asarray(values) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 0e86ec123efea..b26089ea7a822 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -415,6 +415,15 @@ def test_order_of_appearance(self): expected = pd.Categorical(list('abc')) tm.assert_categorical_equal(result, expected) + @pytest.mark.parametrize("arg ,expected", [ + (('1', '1', '2'), np.array(['1', '2'], dtype=object)), + (('foo',), np.array(['foo'], dtype=object)) + ]) + def test_tuple_with_strings(self, arg, expected): + # see GH 17108 + result = pd.unique(arg) + tm.assert_numpy_array_equal(result, expected) + class TestIsin(object):
- [x] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [x] whatsnew entry xref #17077 `pd.unique()` calls `_ensure_arraylike`, which did not accept tuples containing strings. The fix converts the tuple to a list before dispatching to `list_to_object_array`.
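A minimal reproduction of the fix, taken from the parametrized test above:

```python
import numpy as np
import pandas as pd

# previously raised TypeError: the tuple was handed straight to
# lib.list_to_object_array, which expects a list
result = pd.unique(('1', '1', '2'))
expected = np.array(['1', '2'], dtype=object)
np.testing.assert_array_equal(result, expected)
```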
https://api.github.com/repos/pandas-dev/pandas/pulls/17108
2017-07-29T05:44:39Z
2017-07-30T09:43:25Z
2017-07-30T09:43:25Z
2017-12-20T02:04:12Z
Bugfix for multilevel columns with empty strings in Python 2
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index c2eb371059955..f601c4e8a321b 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -323,6 +323,7 @@ Indexing - Bug in reindexing on an empty ``CategoricalIndex`` (:issue:`16770`) - Fixes ``DataFrame.loc`` for setting with alignment and tz-aware ``DatetimeIndex`` (:issue:`16889`) - Avoids ``IndexError`` when passing an Index or Series to ``.iloc`` with older numpy (:issue:`17193`) +- Allow unicode empty strings as placeholders in multilevel columns in Python 2 (:issue:`17099`) I/O ^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 027a427555253..94cce1b4d05b5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2134,10 +2134,18 @@ def _getitem_multilevel(self, key): result = self._constructor(new_values, index=self.index, columns=result_columns) result = result.__finalize__(self) + + # If there is only one column being returned, and its name is + # either an empty string, or a tuple with an empty string as its + # first element, then treat the empty string as a placeholder + # and return the column as if the user had provided that empty + # string in the key. If the result is a Series, exclude the + # implied empty string from its name. if len(result.columns) == 1: top = result.columns[0] - if ((type(top) == str and top == '') or - (type(top) == tuple and top[0] == '')): + if isinstance(top, tuple): + top = top[0] + if top == '': result = result[''] if isinstance(result, Series): result = self._constructor_sliced(result, diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 0b2dc9ba70f03..a765e2c4ca1bf 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1675,24 +1675,31 @@ def test_int_series_slicing(self): expected = self.ymd.reindex(s.index[5:]) tm.assert_frame_equal(result, expected) - def test_mixed_depth_get(self): + @pytest.mark.parametrize('unicode_strings', [True, False]) + def test_mixed_depth_get(self, unicode_strings): + # If unicode_strings is True, the column labels in dataframe + # construction will use unicode strings in Python 2 (pull request + # #17099). + arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'], ['', 'OD', 'OD', 'result1', 'result2', 'result1'], ['', 'wx', 'wy', '', '', '']] + if unicode_strings: + arrays = [[u(s) for s in arr] for arr in arrays] + tuples = sorted(zip(*arrays)) index = MultiIndex.from_tuples(tuples) - df = DataFrame(randn(4, 6), columns=index) + df = DataFrame(np.random.randn(4, 6), columns=index) result = df['a'] - expected = df['a', '', ''] - tm.assert_series_equal(result, expected, check_names=False) - assert result.name == 'a' + expected = df['a', '', ''].rename('a') + tm.assert_series_equal(result, expected) result = df['routine1', 'result1'] expected = df['routine1', 'result1', ''] - tm.assert_series_equal(result, expected, check_names=False) - assert result.name == ('routine1', 'result1') + expected = expected.rename(('routine1', 'result1')) + tm.assert_series_equal(result, expected) def test_mixed_depth_insert(self): arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
Allow unicode empty strings to be used as placeholders in multilevel column names in Python 2. The code in `Frame._getitem_multilevel()` only treats empty strings as placeholders in multilevel columns if they are of type `str`. Since in Python 2 this means bytestrings, this means a unicode empty string is not treated as a placeholder. This is a problem for Python 2 code that is trying to be forward-compatible with Python 3 by importing `unicode_literals` from `__future__` and using empty strings in DataFrame construction - those empty strings will be unicode strings and not treated as placeholders by pandas. This patch changes the logic to allow anything that passes an equality check with `''` to be used as a placeholder, rather than checking its type, which means that after this patch either bytestring or unicode empty strings can be used as placeholders in Python 2, and the situation is unchanged in Python 3 (unicode strings, but not bytestrings, will be treated as placeholders). Without this patch, the following code: ```python from __future__ import print_function import pandas # Unicode string placeholder in multilevel column: df = pandas.DataFrame({(u'test', u''): [1.0, 2.0, 3.0]}) print(df['test']) print(type(df['test'])) print() # native string type placeholder in multilevel column: df = pandas.DataFrame({(u'test', ''): [1.0, 2.0, 3.0]}) print(df['test']) print(type(df['test'])) ``` Results in the following in Python 2: ``` 0 1.0 1 2.0 2 3.0 <class 'pandas.core.frame.DataFrame'> 0 1.0 1 2.0 2 3.0 Name: test, dtype: float64 <class 'pandas.core.series.Series'> ``` Note the first blank line in the output when a unicode empty string placeholder was used. It's an empty string column label for the dataframe. Whereas in Python 3 it results in: ``` 0 1.0 1 2.0 2 3.0 Name: test, dtype: float64 <class 'pandas.core.series.Series'> 0 1.0 1 2.0 2 3.0 Name: test, dtype: float64 <class 'pandas.core.series.Series'> ``` After the patch, the Python 2 result is the same as the Python 3 result.
https://api.github.com/repos/pandas-dev/pandas/pulls/17099
2017-07-27T18:10:13Z
2017-08-10T10:36:51Z
2017-08-10T10:36:51Z
2017-08-10T13:38:45Z
BUG: Thoroughly dedup column names in read_csv
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 0025f8d098d81..db710e73a1286 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -278,6 +278,7 @@ I/O ^^^ - Bug in :func:`read_csv` in which columns were not being thoroughly de-duplicated (:issue:`17060`) +- Bug in :func:`read_csv` in which specified column names were not being thoroughly de-duplicated (:issue:`17095`) - Bug in :func:`read_csv` in which non integer values for the header argument generated an unhelpful / unrelated error message (:issue:`16338`) - Bug in :func:`read_csv` in which memory management issues in exception handling, under certain conditions, would cause the interpreter to segfault (:issue:`14696`, :issue:`16798`). - Bug in :func:`read_csv` when called with ``low_memory=False`` in which a CSV with at least one column > 2GB in size would incorrectly raise a ``MemoryError`` (:issue:`16798`). diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index ea0bb104338b6..41b0cdd6dd250 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1318,14 +1318,18 @@ def _maybe_dedup_names(self, names): # would be nice! if self.mangle_dupe_cols: names = list(names) # so we can index - counts = {} + counts = defaultdict(int) for i, col in enumerate(names): - cur_count = counts.get(col, 0) + cur_count = counts[col] - if cur_count > 0: - names[i] = '%s.%d' % (col, cur_count) + while cur_count > 0: + counts[col] = cur_count + 1 + col = '%s.%d' % (col, cur_count) + cur_count = counts[col] + + names[i] = col counts[col] = cur_count + 1 return names @@ -2330,15 +2334,15 @@ def _infer_columns(self): this_columns.append(c) if not have_mi_columns and self.mangle_dupe_cols: - counts = {} + counts = defaultdict(int) for i, col in enumerate(this_columns): - cur_count = counts.get(col, 0) + cur_count = counts[col] while cur_count > 0: counts[col] = cur_count + 1 col = "%s.%d" % (col, cur_count) - cur_count = counts.get(col, 0) + cur_count = counts[col] this_columns[i] = col counts[col] = cur_count + 1 diff --git a/pandas/tests/io/parser/mangle_dupes.py b/pandas/tests/io/parser/mangle_dupes.py index 70ecfe51c0f09..e2efb1377f8b0 100644 --- a/pandas/tests/io/parser/mangle_dupes.py +++ b/pandas/tests/io/parser/mangle_dupes.py @@ -25,7 +25,7 @@ def test_basic(self): mangle_dupe_cols=True) assert list(df.columns) == expected - def test_thorough_mangle(self): + def test_thorough_mangle_columns(self): # see gh-17060 data = "a,a,a.1\n1,2,3" df = self.read_csv(StringIO(data), sep=",", mangle_dupe_cols=True) @@ -40,3 +40,25 @@ def test_thorough_mangle(self): df = self.read_csv(StringIO(data), sep=",", mangle_dupe_cols=True) assert list(df.columns) == ["a", "a.1", "a.3", "a.1.1", "a.2", "a.2.1", "a.3.1"] + + def test_thorough_mangle_names(self): + # see gh-17095 + data = "a,b,b\n1,2,3" + names = ["a.1", "a.1", "a.1.1"] + df = self.read_csv(StringIO(data), sep=",", names=names, + mangle_dupe_cols=True) + assert list(df.columns) == ["a.1", "a.1.1", "a.1.1.1"] + + data = "a,b,c,d,e,f\n1,2,3,4,5,6" + names = ["a", "a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"] + df = self.read_csv(StringIO(data), sep=",", names=names, + mangle_dupe_cols=True) + assert list(df.columns) == ["a", "a.1", "a.1.1", "a.1.1.1", + "a.1.1.1.1", "a.1.1.1.1.1"] + + data = "a,b,c,d,e,f,g\n1,2,3,4,5,6,7" + names = ["a", "a", "a.3", "a.1", "a.2", "a", "a"] + df = self.read_csv(StringIO(data), sep=",", names=names, + mangle_dupe_cols=True) + assert list(df.columns) == ["a", "a.1", "a.3", "a.1.1", + "a.2", "a.2.1", 
"a.3.1"]
Caught after merging #17060. This extends the thorough de-duplication to the case where column names are specified via the `names=(...)` parameter; see the sketch below.
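A minimal sketch of the fixed behavior (mirroring the new ``test_thorough_mangle_names`` test above; data and names are taken from it, and this reflects pandas of that era, before ``mangle_dupe_cols`` was deprecated): duplicate entries passed via ``names`` are now mangled thoroughly, so a generated name can no longer collide with a name appearing later in the list.

```python
# Sketch mirroring test_thorough_mangle_names (gh-17095): with `names`
# supplied, the first CSV row is treated as data, and duplicate
# specified names are mangled until they no longer collide.
from io import StringIO

import pandas as pd

data = "a,b,b\n1,2,3"
names = ["a.1", "a.1", "a.1.1"]
df = pd.read_csv(StringIO(data), names=names, mangle_dupe_cols=True)

# The second "a.1" becomes "a.1.1", which itself collides with the
# specified "a.1.1", so that one is mangled again to "a.1.1.1".
assert list(df.columns) == ["a.1", "a.1.1", "a.1.1.1"]
```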
https://api.github.com/repos/pandas-dev/pandas/pulls/17095
2017-07-27T09:58:26Z
2017-08-01T22:44:10Z
2017-08-01T22:44:10Z
2017-08-02T15:16:29Z
BUG: #7757 Fix CSV parsing of singleton list header
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index aed00ca578984..424b496c93f31 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -283,6 +283,7 @@ I/O - Bug in :func:`read_csv` in which non integer values for the header argument generated an unhelpful / unrelated error message (:issue:`16338`) - Bug in :func:`read_csv` in which memory management issues in exception handling, under certain conditions, would cause the interpreter to segfault (:issue:`14696, :issue:`16798`). - Bug in :func:`read_csv` when called with ``low_memory=False`` in which a CSV with at least one column > 2GB in size would incorrectly raise a ``MemoryError`` (:issue:`16798`). +- Bug in :func:`read_csv` when called with a single-element list ``header`` would return a ``DataFrame`` of all NaN values (:issue:`7757`) - Bug in :func:`read_stata` where value labels could not be read when using an iterator (:issue:`16923`) - Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 543a943aea311..928b2cdd57b2c 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -535,23 +535,26 @@ cdef class TextReader: self.parser_start = 0 self.header = [] else: - if isinstance(header, list) and len(header): - # need to artifically skip the final line - # which is still a header line - header = list(header) - header.append(header[-1] + 1) + if isinstance(header, list): + if len(header) > 1: + # need to artifically skip the final line + # which is still a header line + header = list(header) + header.append(header[-1] + 1) + self.parser.header_end = header[-1] + self.has_mi_columns = 1 + else: + self.parser.header_end = header[0] + self.parser_start = header[-1] + 1 self.parser.header_start = header[0] - self.parser.header_end = header[-1] self.parser.header = header[0] - self.parser_start = header[-1] + 1 - self.has_mi_columns = 1 self.header = header else: self.parser.header_start = header self.parser.header_end = header - self.parser.header = header self.parser_start = header + 1 + self.parser.header = header self.header = [ header ] self.names = names diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index ea0bb104338b6..3a78866b7b53f 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2279,10 +2279,11 @@ def _infer_columns(self): if self.header is not None: header = self.header - # we have a mi columns, so read an extra line if isinstance(header, (list, tuple, np.ndarray)): - have_mi_columns = True - header = list(header) + [header[-1] + 1] + have_mi_columns = len(header) > 1 + # we have a mi columns, so read an extra line + if have_mi_columns: + header = list(header) + [header[-1] + 1] else: have_mi_columns = False header = [header] diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py index 4935fd2cd910a..50ae4dae541ac 100644 --- a/pandas/tests/io/parser/header.py +++ b/pandas/tests/io/parser/header.py @@ -286,3 +286,10 @@ def test_non_int_header(self): self.read_csv(StringIO(data), sep=',', header=['a', 'b']) with tm.assert_raises_regex(ValueError, msg): self.read_csv(StringIO(data), sep=',', header='string_header') + + def test_singleton_header(self): + # See GH #7757 + data = """a,b,c\n0,1,2\n1,2,3""" + df = self.read_csv(StringIO(data), header=[0]) + expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]}) + tm.assert_frame_equal(df, expected)
- [ ] closes #7757 - [ ] added ```test_singleton_header``` in ```pandas/tests/io/parser/header.py``` / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry This fixes a CSV-parsing bug where passing a single-row header as a list (e.g. ``header=[0]``) to ```pd.read_csv``` returned a ```pd.DataFrame``` of all ```NaN``` values. It also adds a parser test verifying that the ```pd.DataFrame``` returned by ```pd.read_csv``` contains no ```NaN``` values when the header list has length one; see the sketch below.
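A minimal sketch of the fixed behavior (mirroring the new ``test_singleton_header`` test above; data taken from it): a one-element header list is now treated like a scalar header rather than a MultiIndex header, so no extra row is skipped.

```python
# Sketch mirroring test_singleton_header (GH 7757): header=[0] now
# behaves like header=0 instead of being treated as a MultiIndex
# header (which skipped an extra line and produced all-NaN values).
from io import StringIO

import pandas as pd

data = "a,b,c\n0,1,2\n1,2,3"
df = pd.read_csv(StringIO(data), header=[0])

expected = pd.DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
pd.testing.assert_frame_equal(df, expected)
```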
https://api.github.com/repos/pandas-dev/pandas/pulls/17090
2017-07-27T04:43:13Z
2017-08-03T16:37:27Z
2017-08-03T16:37:27Z
2017-08-18T15:28:46Z
MAINT: Remove non-standard and inconsistently-used imports
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9d63bd2e120aa..027a427555253 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -20,7 +20,6 @@ import warnings from textwrap import dedent -from numpy import nan as NA import numpy as np import numpy.ma as ma @@ -436,7 +435,7 @@ def _init_dict(self, data, index, columns, dtype=None): else: v = np.empty(len(index), dtype=dtype) - v.fill(NA) + v.fill(np.nan) else: v = data[k] data_names.append(k) @@ -1437,8 +1436,8 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns : sequence, optional Columns to write header : boolean or list of string, default True - Write out column names. If a list of string is given it is assumed - to be aliases for the column names + Write out the column names. If a list of strings is given it is + assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, or False, default None @@ -1622,8 +1621,9 @@ def to_parquet(self, fname, engine='auto', compression='snappy', to_parquet(self, fname, engine, compression=compression, **kwargs) - @Substitution(header='Write out column names. If a list of string is given, \ -it is assumed to be aliases for the column names') + @Substitution(header='Write out the column names. If a list of strings ' + 'is given, it is assumed to be aliases for the ' + 'column names') @Appender(fmt.docstring_to_string, indents=1) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, @@ -2805,7 +2805,7 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, return frame - def _reindex_index(self, new_index, method, copy, level, fill_value=NA, + def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan, limit=None, tolerance=None): new_index, indexer = self.index.reindex(new_index, method=method, level=level, limit=limit, @@ -2814,8 +2814,8 @@ def _reindex_index(self, new_index, method, copy, level, fill_value=NA, copy=copy, fill_value=fill_value, allow_dups=False) - def _reindex_columns(self, new_columns, method, copy, level, fill_value=NA, - limit=None, tolerance=None): + def _reindex_columns(self, new_columns, method, copy, level, + fill_value=np.nan, limit=None, tolerance=None): new_columns, indexer = self.columns.reindex(new_columns, method=method, level=level, limit=limit, tolerance=tolerance) @@ -3794,7 +3794,7 @@ def _combine_series(self, other, func, fill_value=None, axis=None, def _combine_series_infer(self, other, func, level=None, fill_value=None, try_cast=True): if len(other) == 0: - return self * NA + return self * np.nan if len(self) == 0: # Ambiguous case, use _series so works with DataFrame @@ -3948,7 +3948,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): if do_fill: arr = _ensure_float(arr) - arr[this_mask & other_mask] = NA + arr[this_mask & other_mask] = np.nan # try to downcast back to the original dtype if needs_i8_conversion_i: @@ -4567,7 +4567,7 @@ def _apply_empty_result(self, func, axis, reduce, *args, **kwds): pass if reduce: - return Series(NA, index=self._get_agg_axis(axis)) + return Series(np.nan, index=self._get_agg_axis(axis)) else: return self.copy() @@ -5185,7 +5185,7 @@ def corr(self, method='pearson', min_periods=1): valid = mask[i] & mask[j] if valid.sum() < min_periods: - c = NA + c = np.nan elif i == j: c = 1. 
elif not valid.all(): @@ -5509,7 +5509,7 @@ def idxmin(self, axis=0, skipna=True): axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) - result = [index[i] if i >= 0 else NA for i in indices] + result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): @@ -5540,7 +5540,7 @@ def idxmax(self, axis=0, skipna=True): axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) - result = [index[i] if i >= 0 else NA for i in indices] + result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): @@ -5778,9 +5778,8 @@ def isin(self, values): 2 True True """ if isinstance(values, dict): - from collections import defaultdict from pandas.core.reshape.concat import concat - values = defaultdict(list, values) + values = collections.defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): @@ -6143,7 +6142,7 @@ def _homogenize(data, index, dtype=None): v = _dict_compat(v) else: v = dict(v) - v = lib.fast_multiget(v, oindex.values, default=NA) + v = lib.fast_multiget(v, oindex.values, default=np.nan) v = _sanitize_array(v, index, dtype=dtype, copy=False, raise_cast_failure=False) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ec44dce0da9bc..442ec93d94023 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1207,7 +1207,7 @@ def _repr_latex_(self): columns : sequence, optional Columns to write header : boolean or list of string, default True - Write out column names. If a list of string is given it is + Write out the column names. If a list of strings is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) @@ -1702,8 +1702,9 @@ def to_xarray(self): .. versionadded:: 0.20.0 """ - @Substitution(header='Write out column names. If a list of string is given, \ -it is assumed to be aliases for the column names.') + @Substitution(header='Write out the column names. If a list of strings ' + 'is given, it is assumed to be aliases for the ' + 'column names.') @Appender(_shared_docs['to_latex'] % _shared_doc_kwargs) def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 8f6b00fd204cc..109183827de4e 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,5 +1,5 @@ # pylint: disable=W0223 - +import textwrap import warnings import numpy as np from pandas.compat import range, zip @@ -1288,13 +1288,13 @@ class _IXIndexer(_NDFrameIndexer): def __init__(self, obj, name): - _ix_deprecation_warning = """ -.ix is deprecated. Please use -.loc for label based indexing or -.iloc for positional indexing + _ix_deprecation_warning = textwrap.dedent(""" + .ix is deprecated. 
Please use + .loc for label based indexing or + .iloc for positional indexing -See the documentation here: -http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""" # noqa + See the documentation here: + http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""") # noqa warnings.warn(_ix_deprecation_warning, DeprecationWarning, stacklevel=3) diff --git a/pandas/core/series.py b/pandas/core/series.py index 60d268c89a9d7..996b483ff6092 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -10,7 +10,6 @@ import warnings from textwrap import dedent -from numpy import nan, ndarray import numpy as np import numpy.ma as ma @@ -210,13 +209,13 @@ def __init__(self, data=None, index=None, dtype=None, name=None, data = np.nan # GH #12169 elif isinstance(index, (PeriodIndex, TimedeltaIndex)): - data = ([data.get(i, nan) for i in index] + data = ([data.get(i, np.nan) for i in index] if data else np.nan) else: data = lib.fast_multiget(data, index.values, default=np.nan) except TypeError: - data = ([data.get(i, nan) for i in index] + data = ([data.get(i, np.nan) for i in index] if data else np.nan) elif isinstance(data, SingleBlockManager): @@ -1686,7 +1685,7 @@ def _binop(self, other, func, level=None, fill_value=None): result.name = None return result - def combine(self, other, func, fill_value=nan): + def combine(self, other, func, fill_value=np.nan): """ Perform elementwise binary operation on two Series using given function with optional fill value when an index is missing from one Series or @@ -2952,7 +2951,6 @@ def _dir_additions(self): Series._add_numeric_operations() Series._add_series_only_operations() Series._add_series_or_dataframe_operations() -_INDEX_TYPES = ndarray, Index, list, tuple # ----------------------------------------------------------------------------- # Supplementary functions
Removes non-standard, inconsistently-used imports of ``nan`` and ``ndarray``. Wraps some docstring lines (see the sketch below). Split off from #17083 - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
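A tiny before/after sketch (illustrative only, not part of the diff) of the import convention this PR standardizes on: reference ``np.nan`` through the single ``numpy`` import instead of aliasing ``nan`` at module level.

```python
# Illustrative only: the convention this maintenance PR enforces.
import numpy as np

# Before (removed by the PR):
#     from numpy import nan as NA
#     v.fill(NA)

# After: one numpy import, referenced explicitly.
v = np.empty(3)
v.fill(np.nan)
assert np.isnan(v).all()
```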
https://api.github.com/repos/pandas-dev/pandas/pulls/17085
2017-07-26T21:40:20Z
2017-08-03T01:03:08Z
2017-08-03T01:03:08Z
2017-08-03T01:08:39Z
PERF: Add cache keyword to to_datetime (#11665)
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 779fc0bd20964..9614a63332609 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -346,17 +346,22 @@ class ToDatetime(object): def setup(self): self.rng = date_range(start='1/1/2000', periods=10000, freq='D') - self.stringsD = Series((((self.rng.year * 10000) + (self.rng.month * 100)) + self.rng.day), dtype=np.int64).apply(str) + self.stringsD = Series(self.rng.strftime('%Y%m%d')) self.rng = date_range(start='1/1/2000', periods=20000, freq='H') - self.strings = [x.strftime('%Y-%m-%d %H:%M:%S') for x in self.rng] - self.strings_nosep = [x.strftime('%Y%m%d %H:%M:%S') for x in self.rng] + self.strings = self.rng.strftime('%Y-%m-%d %H:%M:%S').tolist() + self.strings_nosep = self.rng.strftime('%Y%m%d %H:%M:%S').tolist() self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800' for x in self.rng] self.s = Series((['19MAY11', '19MAY11:00:00:00'] * 100000)) self.s2 = self.s.str.replace(':\\S+$', '') + self.unique_numeric_seconds = range(10000) + self.dup_numeric_seconds = [1000] * 10000 + self.dup_string_dates = ['2000-02-11'] * 10000 + self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * 10000 + def time_format_YYYYMMDD(self): to_datetime(self.stringsD, format='%Y%m%d') @@ -381,6 +386,36 @@ def time_format_exact(self): def time_format_no_exact(self): to_datetime(self.s, format='%d%b%y', exact=False) + def time_cache_true_with_unique_seconds_and_unit(self): + to_datetime(self.unique_numeric_seconds, unit='s', cache=True) + + def time_cache_false_with_unique_seconds_and_unit(self): + to_datetime(self.unique_numeric_seconds, unit='s', cache=False) + + def time_cache_true_with_dup_seconds_and_unit(self): + to_datetime(self.dup_numeric_seconds, unit='s', cache=True) + + def time_cache_false_with_dup_seconds_and_unit(self): + to_datetime(self.dup_numeric_seconds, unit='s', cache=False) + + def time_cache_true_with_dup_string_dates(self): + to_datetime(self.dup_string_dates, cache=True) + + def time_cache_false_with_dup_string_dates(self): + to_datetime(self.dup_string_dates, cache=False) + + def time_cache_true_with_dup_string_dates_and_format(self): + to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=True) + + def time_cache_false_with_dup_string_dates_and_format(self): + to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=False) + + def time_cache_true_with_dup_string_tzoffset_dates(self): + to_datetime(self.dup_string_with_tz, cache=True) + + def time_cache_false_with_dup_string_tzoffset_dates(self): + to_datetime(self.dup_string_with_tz, cache=False) + class Offsets(object): goal_time = 0.2 diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 61679b14a8592..712119caae6f2 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -70,7 +70,7 @@ Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Indexers on ``Series`` or ``DataFrame`` no longer create a reference cycle (:issue:`17956`) -- +- Added a keyword argument, ``cache``, to :func:`to_datetime` that improved the performance of converting duplicate datetime arguments (:issue:`11665`) - .. 
_whatsnew_0220.docs: diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index ae8aa275b2bae..19f7e459d0725 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -36,9 +36,77 @@ def _guess_datetime_format_for_array(arr, **kwargs): return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs) +def _maybe_cache(arg, format, cache, tz, convert_listlike): + """ + Create a cache of unique dates from an array of dates + + Parameters + ---------- + arg : integer, float, string, datetime, list, tuple, 1-d array, Series + format : string + Strftime format to parse time + cache : boolean + True attempts to create a cache of converted values + tz : string + Timezone of the dates + convert_listlike : function + Conversion function to apply on dates + + Returns + ------- + cache_array : Series + Cache of converted, unique dates. Can be empty + """ + from pandas import Series + cache_array = Series() + if cache: + # Perform a quicker unique check + from pandas import Index + if not Index(arg).is_unique: + unique_dates = algorithms.unique(arg) + cache_dates = convert_listlike(unique_dates, True, format, tz=tz) + cache_array = Series(cache_dates, index=unique_dates) + return cache_array + + +def _convert_and_box_cache(arg, cache_array, box, errors, name=None): + """ + Convert array of dates with a cache and box the result + + Parameters + ---------- + arg : integer, float, string, datetime, list, tuple, 1-d array, Series + cache_array : Series + Cache of converted, unique dates + box : boolean + True boxes result as an Index-like, False returns an ndarray + errors : string + 'ignore' plus box=True will convert result to Index + name : string, default None + Name for a DatetimeIndex + + Returns + ------- + result : datetime of converted dates + Returns: + + - Index-like if box=True + - ndarray if box=False + """ + from pandas import Series, DatetimeIndex, Index + result = Series(arg).map(cache_array) + if box: + if errors == 'ignore': + return Index(result) + else: + return DatetimeIndex(result, name=name) + return result.values + + def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, - unit=None, infer_datetime_format=False, origin='unix'): + unit=None, infer_datetime_format=False, origin='unix', + cache=False): """ Convert argument to datetime. @@ -111,7 +179,12 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, origin. .. versionadded: 0.20.0 + cache : boolean, default False + If True, use a cache of unique, converted dates to apply the datetime + conversion. May produce sigificant speed-up when parsing duplicate date + strings, especially ones with timezone offsets. + .. versionadded: 0.22.0 Returns ------- ret : datetime if parsing succeeded. 
@@ -369,15 +442,28 @@ def _convert_listlike(arg, box, format, name=None, tz=tz): if isinstance(arg, tslib.Timestamp): result = arg elif isinstance(arg, ABCSeries): - from pandas import Series - values = _convert_listlike(arg._values, True, format) - result = Series(values, index=arg.index, name=arg.name) + cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike) + if not cache_array.empty: + result = arg.map(cache_array) + else: + from pandas import Series + values = _convert_listlike(arg._values, True, format) + result = Series(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, MutableMapping)): result = _assemble_from_unit_mappings(arg, errors=errors) elif isinstance(arg, ABCIndexClass): - result = _convert_listlike(arg, box, format, name=arg.name) + cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike) + if not cache_array.empty: + result = _convert_and_box_cache(arg, cache_array, box, errors, + name=arg.name) + else: + result = _convert_listlike(arg, box, format, name=arg.name) elif is_list_like(arg): - result = _convert_listlike(arg, box, format) + cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike) + if not cache_array.empty: + result = _convert_and_box_cache(arg, cache_array, box, errors) + else: + result = _convert_listlike(arg, box, format) else: result = _convert_listlike(np.array([arg]), box, format)[0] diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 8205b4fde217b..307184cb34e27 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -28,7 +28,8 @@ class TestTimeConversionFormats(object): - def test_to_datetime_format(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_format(self, cache): values = ['1/1/2000', '1/2/2000', '1/3/2000'] results1 = [Timestamp('20000101'), Timestamp('20000201'), @@ -43,7 +44,7 @@ def test_to_datetime_format(self): (values[2], (results1[2], results2[2]))]: for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']): - result = to_datetime(vals, format=fmt) + result = to_datetime(vals, format=fmt, cache=cache) expected = expecteds[i] if isinstance(expected, Series): @@ -53,14 +54,15 @@ def test_to_datetime_format(self): else: tm.assert_index_equal(result, expected) - def test_to_datetime_format_YYYYMMDD(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_format_YYYYMMDD(self, cache): s = Series([19801222, 19801222] + [19810105] * 5) expected = Series([Timestamp(x) for x in s.apply(str)]) - result = to_datetime(s, format='%Y%m%d') + result = to_datetime(s, format='%Y%m%d', cache=cache) assert_series_equal(result, expected) - result = to_datetime(s.apply(str), format='%Y%m%d') + result = to_datetime(s.apply(str), format='%Y%m%d', cache=cache) assert_series_equal(result, expected) # with NaT @@ -69,44 +71,48 @@ def test_to_datetime_format_YYYYMMDD(self): expected[2] = np.nan s[2] = np.nan - result = to_datetime(s, format='%Y%m%d') + result = to_datetime(s, format='%Y%m%d', cache=cache) assert_series_equal(result, expected) # string with NaT s = s.apply(str) s[2] = 'nat' - result = to_datetime(s, format='%Y%m%d') + result = to_datetime(s, format='%Y%m%d', cache=cache) assert_series_equal(result, expected) # coercion # GH 7930 s = Series([20121231, 20141231, 99991231]) - result = pd.to_datetime(s, format='%Y%m%d', errors='ignore') + result = pd.to_datetime(s, format='%Y%m%d', errors='ignore', + cache=cache) expected = 
Series([datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)], dtype=object) tm.assert_series_equal(result, expected) - result = pd.to_datetime(s, format='%Y%m%d', errors='coerce') + result = pd.to_datetime(s, format='%Y%m%d', errors='coerce', + cache=cache) expected = Series(['20121231', '20141231', 'NaT'], dtype='M8[ns]') assert_series_equal(result, expected) - # GH 10178 - def test_to_datetime_format_integer(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_format_integer(self, cache): + # GH 10178 s = Series([2000, 2001, 2002]) expected = Series([Timestamp(x) for x in s.apply(str)]) - result = to_datetime(s, format='%Y') + result = to_datetime(s, format='%Y', cache=cache) assert_series_equal(result, expected) s = Series([200001, 200105, 200206]) expected = Series([Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str) ]) - result = to_datetime(s, format='%Y%m') + result = to_datetime(s, format='%Y%m', cache=cache) assert_series_equal(result, expected) - def test_to_datetime_format_microsecond(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_format_microsecond(self, cache): # these are locale dependent lang, _ = locale.getlocale() @@ -114,11 +120,12 @@ def test_to_datetime_format_microsecond(self): val = '01-{}-2011 00:00:01.978'.format(month_abbr) format = '%d-%b-%Y %H:%M:%S.%f' - result = to_datetime(val, format=format) + result = to_datetime(val, format=format, cache=cache) exp = datetime.strptime(val, format) assert result == exp - def test_to_datetime_format_time(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_format_time(self, cache): data = [ ['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')], @@ -134,9 +141,10 @@ def test_to_datetime_format_time(self): # Timestamp('2010-01-10 09:12:56')] ] for s, format, dt in data: - assert to_datetime(s, format=format) == dt + assert to_datetime(s, format=format, cache=cache) == dt - def test_to_datetime_with_non_exact(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_with_non_exact(self, cache): # GH 10834 tm._skip_if_has_locale() @@ -147,12 +155,13 @@ def test_to_datetime_with_non_exact(self): s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00', '19MAY11 00:00:00Z']) - result = to_datetime(s, format='%d%b%y', exact=False) + result = to_datetime(s, format='%d%b%y', exact=False, cache=cache) expected = to_datetime(s.str.extract(r'(\d+\w+\d+)', expand=False), - format='%d%b%y') + format='%d%b%y', cache=cache) assert_series_equal(result, expected) - def test_parse_nanoseconds_with_formula(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_parse_nanoseconds_with_formula(self, cache): # GH8989 # trunctaing the nanoseconds when a format was provided @@ -161,44 +170,48 @@ def test_parse_nanoseconds_with_formula(self): "2012-01-01 09:00:00.001", "2012-01-01 09:00:00.001000", "2012-01-01 09:00:00.001000000", ]: - expected = pd.to_datetime(v) - result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f") + expected = pd.to_datetime(v, cache=cache) + result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", + cache=cache) assert result == expected - def test_to_datetime_format_weeks(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_format_weeks(self, cache): data = [ ['2009324', '%Y%W%w', Timestamp('2009-08-13')], ['2013020', '%Y%U%w', Timestamp('2013-01-13')] ] for s, format, dt in data: - assert to_datetime(s, format=format) == dt + assert 
to_datetime(s, format=format, cache=cache) == dt class TestToDatetime(object): - def test_to_datetime_dt64s(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_dt64s(self, cache): in_bound_dts = [ np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ] for dt in in_bound_dts: - assert pd.to_datetime(dt) == Timestamp(dt) + assert pd.to_datetime(dt, cache=cache) == Timestamp(dt) oob_dts = [np.datetime64('1000-01-01'), np.datetime64('5000-01-02'), ] for dt in oob_dts: pytest.raises(ValueError, pd.to_datetime, dt, errors='raise') pytest.raises(ValueError, Timestamp, dt) - assert pd.to_datetime(dt, errors='coerce') is NaT + assert pd.to_datetime(dt, errors='coerce', cache=cache) is NaT - def test_to_datetime_array_of_dt64s(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_array_of_dt64s(self, cache): dts = [np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ] # Assuming all datetimes are in bounds, to_datetime() returns # an array that is equal to Timestamp() parsing tm.assert_numpy_array_equal( - pd.to_datetime(dts, box=False), + pd.to_datetime(dts, box=False, cache=cache), np.array([Timestamp(x).asm8 for x in dts]) ) @@ -209,7 +222,8 @@ def test_to_datetime_array_of_dt64s(self): errors='raise') tm.assert_numpy_array_equal( - pd.to_datetime(dts_with_oob, box=False, errors='coerce'), + pd.to_datetime(dts_with_oob, box=False, errors='coerce', + cache=cache), np.array( [ Timestamp(dts_with_oob[0]).asm8, @@ -224,20 +238,22 @@ def test_to_datetime_array_of_dt64s(self): # are converted to their .item(), which depending on the version of # numpy is either a python datetime.datetime or datetime.date tm.assert_numpy_array_equal( - pd.to_datetime(dts_with_oob, box=False, errors='ignore'), + pd.to_datetime(dts_with_oob, box=False, errors='ignore', + cache=cache), np.array( [dt.item() for dt in dts_with_oob], dtype='O' ) ) - def test_to_datetime_tz(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_tz(self, cache): # xref 8260 # uniform returns a DatetimeIndex arr = [pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')] - result = pd.to_datetime(arr) + result = pd.to_datetime(arr, cache=cache) expected = DatetimeIndex( ['2013-01-01 13:00:00', '2013-01-02 14:00:00'], tz='US/Pacific') tm.assert_index_equal(result, expected) @@ -245,9 +261,10 @@ def test_to_datetime_tz(self): # mixed tzs will raise arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')] - pytest.raises(ValueError, lambda: pd.to_datetime(arr)) + pytest.raises(ValueError, lambda: pd.to_datetime(arr, cache=cache)) - def test_to_datetime_tz_pytz(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_tz_pytz(self, cache): # see gh-8260 us_eastern = pytz.timezone('US/Eastern') arr = np.array([us_eastern.localize(datetime(year=2000, month=1, day=1, @@ -255,18 +272,20 @@ def test_to_datetime_tz_pytz(self): us_eastern.localize(datetime(year=2000, month=6, day=1, hour=3, minute=0))], dtype=object) - result = pd.to_datetime(arr, utc=True) + result = pd.to_datetime(arr, utc=True, cache=cache) expected = DatetimeIndex(['2000-01-01 08:00:00+00:00', '2000-06-01 07:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('cache', [True, False]) @pytest.mark.parametrize("init_constructor, end_constructor, test_method", [(Index, DatetimeIndex, 
tm.assert_index_equal), (list, DatetimeIndex, tm.assert_index_equal), (np.array, DatetimeIndex, tm.assert_index_equal), (Series, Series, tm.assert_series_equal)]) def test_to_datetime_utc_true(self, + cache, init_constructor, end_constructor, test_method): @@ -277,39 +296,47 @@ def test_to_datetime_utc_true(self, result = pd.to_datetime(init_constructor(data), format='%Y%m%d %H%M%S', - utc=True) + utc=True, + cache=cache) expected = end_constructor(expected_data) test_method(result, expected) # Test scalar case as well for scalar, expected in zip(data, expected_data): - result = pd.to_datetime(scalar, format='%Y%m%d %H%M%S', utc=True) + result = pd.to_datetime(scalar, format='%Y%m%d %H%M%S', utc=True, + cache=cache) assert result == expected - def test_to_datetime_utc_true_with_series_single_value(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_utc_true_with_series_single_value(self, cache): # GH 15760 UTC=True with Series ts = 1.5e18 - result = pd.to_datetime(pd.Series([ts]), utc=True) + result = pd.to_datetime(pd.Series([ts]), utc=True, cache=cache) expected = pd.Series([pd.Timestamp(ts, tz='utc')]) tm.assert_series_equal(result, expected) - def test_to_datetime_utc_true_with_series_tzaware_string(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): ts = '2013-01-01 00:00:00-01:00' expected_ts = '2013-01-01 01:00:00' data = pd.Series([ts] * 3) - result = pd.to_datetime(data, utc=True) + result = pd.to_datetime(data, utc=True, cache=cache) expected = pd.Series([pd.Timestamp(expected_ts, tz='utc')] * 3) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('cache', [True, False]) @pytest.mark.parametrize('date, dtype', [('2013-01-01 01:00:00', 'datetime64[ns]'), ('2013-01-01 01:00:00', 'datetime64[ns, UTC]')]) - def test_to_datetime_utc_true_with_series_datetime_ns(self, date, dtype): + def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, + dtype): expected = pd.Series([pd.Timestamp('2013-01-01 01:00:00', tz='UTC')]) - result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True) + result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True, + cache=cache) tm.assert_series_equal(result, expected) - def test_to_datetime_tz_psycopg2(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_tz_psycopg2(self, cache): # xref 8260 try: @@ -324,7 +351,7 @@ def test_to_datetime_tz_psycopg2(self): datetime(2000, 6, 1, 3, 0, tzinfo=tz2)], dtype=object) - result = pd.to_datetime(arr, errors='coerce', utc=True) + result = pd.to_datetime(arr, errors='coerce', utc=True, cache=cache) expected = DatetimeIndex(['2000-01-01 08:00:00+00:00', '2000-06-01 07:00:00+00:00'], dtype='datetime64[ns, UTC]', freq=None) @@ -337,32 +364,39 @@ def test_to_datetime_tz_psycopg2(self): assert is_datetime64_ns_dtype(i) # tz coerceion - result = pd.to_datetime(i, errors='coerce') + result = pd.to_datetime(i, errors='coerce', cache=cache) tm.assert_index_equal(result, i) - result = pd.to_datetime(i, errors='coerce', utc=True) + result = pd.to_datetime(i, errors='coerce', utc=True, cache=cache) expected = pd.DatetimeIndex(['2000-01-01 13:00:00'], dtype='datetime64[ns, UTC]') tm.assert_index_equal(result, expected) - def test_datetime_bool(self): + @pytest.mark.parametrize( + 'cache', + [pytest.param(True, + marks=pytest.mark.skipif(True, reason="GH 18111")), + False]) + def test_datetime_bool(self, cache): # GH13176 with pytest.raises(TypeError): 
to_datetime(False) - assert to_datetime(False, errors="coerce") is NaT - assert to_datetime(False, errors="ignore") is False + assert to_datetime(False, errors="coerce", cache=cache) is NaT + assert to_datetime(False, errors="ignore", cache=cache) is False with pytest.raises(TypeError): to_datetime(True) - assert to_datetime(True, errors="coerce") is NaT - assert to_datetime(True, errors="ignore") is True + assert to_datetime(True, errors="coerce", cache=cache) is NaT + assert to_datetime(True, errors="ignore", cache=cache) is True with pytest.raises(TypeError): - to_datetime([False, datetime.today()]) + to_datetime([False, datetime.today()], cache=cache) with pytest.raises(TypeError): - to_datetime(['20130101', True]) + to_datetime(['20130101', True], cache=cache) tm.assert_index_equal(to_datetime([0, False, NaT, 0.0], - errors="coerce"), - DatetimeIndex([to_datetime(0), NaT, - NaT, to_datetime(0)])) + errors="coerce", cache=cache), + DatetimeIndex([to_datetime(0, cache=cache), + NaT, + NaT, + to_datetime(0, cache=cache)])) def test_datetime_invalid_datatype(self): # GH13176 @@ -372,6 +406,39 @@ def test_datetime_invalid_datatype(self): with pytest.raises(TypeError): pd.to_datetime(pd.to_datetime) + @pytest.mark.parametrize("utc", [True, None]) + @pytest.mark.parametrize("format", ['%Y%m%d %H:%M:%S', None]) + @pytest.mark.parametrize("box", [True, False]) + @pytest.mark.parametrize("constructor", [list, tuple, np.array, pd.Index]) + def test_to_datetime_cache(self, utc, format, box, constructor): + date = '20130101 00:00:00' + test_dates = [date] * 10**5 + data = constructor(test_dates) + result = pd.to_datetime(data, utc=utc, format=format, box=box, + cache=True) + expected = pd.to_datetime(data, utc=utc, format=format, box=box, + cache=False) + if box: + tm.assert_index_equal(result, expected) + else: + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("utc", [True, None]) + @pytest.mark.parametrize("format", ['%Y%m%d %H:%M:%S', None]) + def test_to_datetime_cache_series(self, utc, format): + date = '20130101 00:00:00' + test_dates = [date] * 10**5 + data = pd.Series(test_dates) + result = pd.to_datetime(data, utc=utc, format=format, cache=True) + expected = pd.to_datetime(data, utc=utc, format=format, cache=False) + tm.assert_series_equal(result, expected) + + def test_to_datetime_cache_scalar(self): + date = '20130101 00:00:00' + result = pd.to_datetime(date, cache=True) + expected = pd.Timestamp('20130101 00:00:00') + assert result == expected + @pytest.mark.parametrize('date, format', [('2017-20', '%Y-%W'), ('20 Sunday', '%W %A'), @@ -388,72 +455,77 @@ def test_week_without_day_and_calendar_year(self, date, format): class TestToDatetimeUnit(object): - - def test_unit(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_unit(self, cache): # GH 11758 # test proper behavior with erros with pytest.raises(ValueError): - to_datetime([1], unit='D', format='%Y%m%d') + to_datetime([1], unit='D', format='%Y%m%d', cache=cache) values = [11111111, 1, 1.0, tslib.iNaT, NaT, np.nan, 'NaT', ''] - result = to_datetime(values, unit='D', errors='ignore') + result = to_datetime(values, unit='D', errors='ignore', cache=cache) expected = Index([11111111, Timestamp('1970-01-02'), Timestamp('1970-01-02'), NaT, NaT, NaT, NaT, NaT], dtype=object) tm.assert_index_equal(result, expected) - result = to_datetime(values, unit='D', errors='coerce') + result = to_datetime(values, unit='D', errors='coerce', cache=cache) expected = DatetimeIndex(['NaT', '1970-01-02', 
'1970-01-02', 'NaT', 'NaT', 'NaT', 'NaT', 'NaT']) tm.assert_index_equal(result, expected) with pytest.raises(tslib.OutOfBoundsDatetime): - to_datetime(values, unit='D', errors='raise') + to_datetime(values, unit='D', errors='raise', cache=cache) values = [1420043460000, tslib.iNaT, NaT, np.nan, 'NaT'] - result = to_datetime(values, errors='ignore', unit='s') + result = to_datetime(values, errors='ignore', unit='s', cache=cache) expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object) tm.assert_index_equal(result, expected) - result = to_datetime(values, errors='coerce', unit='s') + result = to_datetime(values, errors='coerce', unit='s', cache=cache) expected = DatetimeIndex(['NaT', 'NaT', 'NaT', 'NaT', 'NaT']) tm.assert_index_equal(result, expected) with pytest.raises(tslib.OutOfBoundsDatetime): - to_datetime(values, errors='raise', unit='s') + to_datetime(values, errors='raise', unit='s', cache=cache) # if we have a string, then we raise a ValueError # and NOT an OutOfBoundsDatetime for val in ['foo', Timestamp('20130101')]: try: - to_datetime(val, errors='raise', unit='s') + to_datetime(val, errors='raise', unit='s', cache=cache) except tslib.OutOfBoundsDatetime: raise AssertionError("incorrect exception raised") except ValueError: pass - def test_unit_consistency(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_unit_consistency(self, cache): # consistency of conversions expected = Timestamp('1970-05-09 14:25:11') - result = pd.to_datetime(11111111, unit='s', errors='raise') + result = pd.to_datetime(11111111, unit='s', errors='raise', + cache=cache) assert result == expected assert isinstance(result, Timestamp) - result = pd.to_datetime(11111111, unit='s', errors='coerce') + result = pd.to_datetime(11111111, unit='s', errors='coerce', + cache=cache) assert result == expected assert isinstance(result, Timestamp) - result = pd.to_datetime(11111111, unit='s', errors='ignore') + result = pd.to_datetime(11111111, unit='s', errors='ignore', + cache=cache) assert result == expected assert isinstance(result, Timestamp) - def test_unit_with_numeric(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_unit_with_numeric(self, cache): # GH 13180 # coercions from floats/ints are ok @@ -462,10 +534,10 @@ def test_unit_with_numeric(self): arr1 = [1.434692e+18, 1.432766e+18] arr2 = np.array(arr1).astype('int64') for errors in ['ignore', 'raise', 'coerce']: - result = pd.to_datetime(arr1, errors=errors) + result = pd.to_datetime(arr1, errors=errors, cache=cache) tm.assert_index_equal(result, expected) - result = pd.to_datetime(arr2, errors=errors) + result = pd.to_datetime(arr2, errors=errors, cache=cache) tm.assert_index_equal(result, expected) # but we want to make sure that we are coercing @@ -474,7 +546,7 @@ def test_unit_with_numeric(self): '2015-06-19 05:33:20', '2015-05-27 22:33:20']) arr = ['foo', 1.434692e+18, 1.432766e+18] - result = pd.to_datetime(arr, errors='coerce') + result = pd.to_datetime(arr, errors='coerce', cache=cache) tm.assert_index_equal(result, expected) expected = DatetimeIndex(['2015-06-19 05:33:20', @@ -482,31 +554,33 @@ def test_unit_with_numeric(self): 'NaT', 'NaT']) arr = [1.434692e+18, 1.432766e+18, 'foo', 'NaT'] - result = pd.to_datetime(arr, errors='coerce') + result = pd.to_datetime(arr, errors='coerce', cache=cache) tm.assert_index_equal(result, expected) - def test_unit_mixed(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_unit_mixed(self, cache): # mixed integers/datetimes expected = 
DatetimeIndex(['2013-01-01', 'NaT', 'NaT']) arr = [pd.Timestamp('20130101'), 1.434692e+18, 1.432766e+18] - result = pd.to_datetime(arr, errors='coerce') + result = pd.to_datetime(arr, errors='coerce', cache=cache) tm.assert_index_equal(result, expected) with pytest.raises(ValueError): - pd.to_datetime(arr, errors='raise') + pd.to_datetime(arr, errors='raise', cache=cache) expected = DatetimeIndex(['NaT', 'NaT', '2013-01-01']) arr = [1.434692e+18, 1.432766e+18, pd.Timestamp('20130101')] - result = pd.to_datetime(arr, errors='coerce') + result = pd.to_datetime(arr, errors='coerce', cache=cache) tm.assert_index_equal(result, expected) with pytest.raises(ValueError): - pd.to_datetime(arr, errors='raise') + pd.to_datetime(arr, errors='raise', cache=cache) - def test_dataframe(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_dataframe(self, cache): df = DataFrame({'year': [2015, 2016], 'month': [2, 3], @@ -520,19 +594,20 @@ def test_dataframe(self): result = to_datetime({'year': df['year'], 'month': df['month'], - 'day': df['day']}) + 'day': df['day']}, cache=cache) expected = Series([Timestamp('20150204 00:00:00'), Timestamp('20160305 00:0:00')]) assert_series_equal(result, expected) # dict-like - result = to_datetime(df[['year', 'month', 'day']].to_dict()) + result = to_datetime(df[['year', 'month', 'day']].to_dict(), + cache=cache) assert_series_equal(result, expected) # dict but with constructable df2 = df[['year', 'month', 'day']].to_dict() df2['month'] = 2 - result = to_datetime(df2) + result = to_datetime(df2, cache=cache) expected2 = Series([Timestamp('20150204 00:00:00'), Timestamp('20160205 00:0:00')]) assert_series_equal(result, expected2) @@ -553,7 +628,8 @@ def test_dataframe(self): ] for d in units: - result = to_datetime(df[list(d.keys())].rename(columns=d)) + result = to_datetime(df[list(d.keys())].rename(columns=d), + cache=cache) expected = Series([Timestamp('20150204 06:58:10'), Timestamp('20160305 07:59:11')]) assert_series_equal(result, expected) @@ -568,13 +644,13 @@ def test_dataframe(self): 'us': 'us', 'ns': 'ns'} - result = to_datetime(df.rename(columns=d)) + result = to_datetime(df.rename(columns=d), cache=cache) expected = Series([Timestamp('20150204 06:58:10.001002003'), Timestamp('20160305 07:59:11.001002003')]) assert_series_equal(result, expected) # coerce back to int - result = to_datetime(df.astype(str)) + result = to_datetime(df.astype(str), cache=cache) assert_series_equal(result, expected) # passing coerce @@ -585,8 +661,8 @@ def test_dataframe(self): msg = ("cannot assemble the datetimes: time data .+ does not " "match format '%Y%m%d' \(match\)") with tm.assert_raises_regex(ValueError, msg): - to_datetime(df2) - result = to_datetime(df2, errors='coerce') + to_datetime(df2, cache=cache) + result = to_datetime(df2, errors='coerce', cache=cache) expected = Series([Timestamp('20150204 00:00:00'), NaT]) assert_series_equal(result, expected) @@ -597,7 +673,7 @@ def test_dataframe(self): with tm.assert_raises_regex(ValueError, msg): df2 = df.copy() df2['foo'] = 1 - to_datetime(df2) + to_datetime(df2, cache=cache) # not enough msg = ('to assemble mappings requires at least that \[year, month, ' @@ -608,7 +684,7 @@ def test_dataframe(self): ['month', 'day'], ['year', 'day', 'second']]: with tm.assert_raises_regex(ValueError, msg): - to_datetime(df[c]) + to_datetime(df[c], cache=cache) # duplicates msg = 'cannot assemble with duplicate keys' @@ -617,7 +693,7 @@ def test_dataframe(self): 'day': [4, 5]}) df2.columns = ['year', 'year', 'day'] with 
tm.assert_raises_regex(ValueError, msg): - to_datetime(df2) + to_datetime(df2, cache=cache) df2 = DataFrame({'year': [2015, 2016], 'month': [2, 20], @@ -625,16 +701,17 @@ def test_dataframe(self): 'hour': [4, 5]}) df2.columns = ['year', 'month', 'day', 'day'] with tm.assert_raises_regex(ValueError, msg): - to_datetime(df2) + to_datetime(df2, cache=cache) - def test_dataframe_dtypes(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_dataframe_dtypes(self, cache): # #13451 df = DataFrame({'year': [2015, 2016], 'month': [2, 3], 'day': [4, 5]}) # int16 - result = to_datetime(df.astype('int16')) + result = to_datetime(df.astype('int16'), cache=cache) expected = Series([Timestamp('20150204 00:00:00'), Timestamp('20160305 00:00:00')]) assert_series_equal(result, expected) @@ -642,7 +719,7 @@ def test_dataframe_dtypes(self): # mixed dtypes df['month'] = df['month'].astype('int8') df['day'] = df['day'].astype('int8') - result = to_datetime(df) + result = to_datetime(df, cache=cache) expected = Series([Timestamp('20150204 00:00:00'), Timestamp('20160305 00:00:00')]) assert_series_equal(result, expected) @@ -652,18 +729,19 @@ def test_dataframe_dtypes(self): 'month': [1.5, 1], 'day': [1, 1]}) with pytest.raises(ValueError): - to_datetime(df) + to_datetime(df, cache=cache) class TestToDatetimeMisc(object): - def test_index_to_datetime(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_index_to_datetime(self, cache): idx = Index(['1/1/2000', '1/2/2000', '1/3/2000']) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = idx.to_datetime() - expected = DatetimeIndex(pd.to_datetime(idx.values)) + expected = DatetimeIndex(pd.to_datetime(idx.values, cache=cache)) tm.assert_index_equal(result, expected) with tm.assert_produces_warning(FutureWarning, @@ -674,17 +752,19 @@ def test_index_to_datetime(self): expected = DatetimeIndex([today]) tm.assert_index_equal(result, expected) - def test_to_datetime_iso8601(self): - result = to_datetime(["2012-01-01 00:00:00"]) + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_iso8601(self, cache): + result = to_datetime(["2012-01-01 00:00:00"], cache=cache) exp = Timestamp("2012-01-01 00:00:00") assert result[0] == exp - result = to_datetime(['20121001']) # bad iso 8601 + result = to_datetime(['20121001'], cache=cache) # bad iso 8601 exp = Timestamp('2012-10-01') assert result[0] == exp - def test_to_datetime_default(self): - rs = to_datetime('2001') + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_default(self, cache): + rs = to_datetime('2001', cache=cache) xp = datetime(2001, 1, 1) assert rs == xp @@ -694,71 +774,80 @@ def test_to_datetime_default(self): # pytest.raises(ValueError, to_datetime('01-13-2012', # dayfirst=True)) - def test_to_datetime_on_datetime64_series(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_on_datetime64_series(self, cache): # #2699 s = Series(date_range('1/1/2000', periods=10)) - result = to_datetime(s) + result = to_datetime(s, cache=cache) assert result[0] == s[0] - def test_to_datetime_with_space_in_series(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_with_space_in_series(self, cache): # GH 6428 s = Series(['10/18/2006', '10/18/2008', ' ']) - pytest.raises(ValueError, lambda: to_datetime(s, errors='raise')) - result_coerce = to_datetime(s, errors='coerce') + pytest.raises(ValueError, lambda: to_datetime(s, + errors='raise', + cache=cache)) + result_coerce = 
to_datetime(s, errors='coerce', cache=cache) expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT]) tm.assert_series_equal(result_coerce, expected_coerce) - result_ignore = to_datetime(s, errors='ignore') + result_ignore = to_datetime(s, errors='ignore', cache=cache) tm.assert_series_equal(result_ignore, s) - def test_to_datetime_with_apply(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_with_apply(self, cache): # this is only locale tested with US/None locales tm._skip_if_has_locale() # GH 5195 # with a format and coerce a single item to_datetime fails td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3]) - expected = pd.to_datetime(td, format='%b %y') - result = td.apply(pd.to_datetime, format='%b %y') + expected = pd.to_datetime(td, format='%b %y', cache=cache) + result = td.apply(pd.to_datetime, format='%b %y', cache=cache) assert_series_equal(result, expected) td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3]) pytest.raises(ValueError, lambda: pd.to_datetime(td, format='%b %y', - errors='raise')) + errors='raise', + cache=cache)) pytest.raises(ValueError, lambda: td.apply(pd.to_datetime, format='%b %y', - errors='raise')) - expected = pd.to_datetime(td, format='%b %y', errors='coerce') + errors='raise', cache=cache)) + expected = pd.to_datetime(td, format='%b %y', errors='coerce', + cache=cache) result = td.apply( - lambda x: pd.to_datetime(x, format='%b %y', errors='coerce')) + lambda x: pd.to_datetime(x, format='%b %y', errors='coerce', + cache=cache)) assert_series_equal(result, expected) - def test_to_datetime_types(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_types(self, cache): # empty string - result = to_datetime('') + result = to_datetime('', cache=cache) assert result is NaT - result = to_datetime(['', '']) + result = to_datetime(['', ''], cache=cache) assert isna(result).all() # ints result = Timestamp(0) - expected = to_datetime(0) + expected = to_datetime(0, cache=cache) assert result == expected # GH 3888 (strings) - expected = to_datetime(['2012'])[0] - result = to_datetime('2012') + expected = to_datetime(['2012'], cache=cache)[0] + result = to_datetime('2012', cache=cache) assert result == expected # array = ['2012','20120101','20120101 12:01:01'] array = ['20120101', '20120101 12:01:01'] - expected = list(to_datetime(array)) + expected = list(to_datetime(array, cache=cache)) result = lmap(Timestamp, array) tm.assert_almost_equal(result, expected) @@ -767,13 +856,15 @@ def test_to_datetime_types(self): # expected = to_datetime('2012') # assert result == expected - def test_to_datetime_unprocessable_input(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_unprocessable_input(self, cache): # GH 4928 tm.assert_numpy_array_equal( - to_datetime([1, '1'], errors='ignore'), + to_datetime([1, '1'], errors='ignore', cache=cache), np.array([1, '1'], dtype='O') ) - pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise') + pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise', + cache=cache) def test_to_datetime_other_datetime64_units(self): # 5/25/2012 @@ -809,7 +900,8 @@ def test_to_datetime_overflow(self): with pytest.raises(OverflowError): date_range(start='1/1/1700', freq='B', periods=100000) - def test_string_na_nat_conversion(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_string_na_nat_conversion(self, cache): # GH #999, #858 from pandas.compat import parse_date @@ -827,7 +919,7 @@ def 
test_string_na_nat_conversion(self): result = tslib.array_to_datetime(strings) tm.assert_almost_equal(result, expected) - result2 = to_datetime(strings) + result2 = to_datetime(strings, cache=cache) assert isinstance(result2, DatetimeIndex) tm.assert_numpy_array_equal(result, result2.values) @@ -835,22 +927,25 @@ def test_string_na_nat_conversion(self): # GH 10636, default is now 'raise' pytest.raises(ValueError, - lambda: to_datetime(malformed, errors='raise')) + lambda: to_datetime(malformed, errors='raise', + cache=cache)) - result = to_datetime(malformed, errors='ignore') + result = to_datetime(malformed, errors='ignore', cache=cache) tm.assert_numpy_array_equal(result, malformed) - pytest.raises(ValueError, to_datetime, malformed, errors='raise') + pytest.raises(ValueError, to_datetime, malformed, errors='raise', + cache=cache) idx = ['a', 'b', 'c', 'd', 'e'] series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan, '1/5/2000'], index=idx, name='foo') - dseries = Series([to_datetime('1/1/2000'), np.nan, - to_datetime('1/3/2000'), np.nan, - to_datetime('1/5/2000')], index=idx, name='foo') + dseries = Series([to_datetime('1/1/2000', cache=cache), np.nan, + to_datetime('1/3/2000', cache=cache), np.nan, + to_datetime('1/5/2000', cache=cache)], + index=idx, name='foo') - result = to_datetime(series) - dresult = to_datetime(dseries) + result = to_datetime(series, cache=cache) + dresult = to_datetime(dseries, cache=cache) expected = Series(np.empty(5, dtype='M8[ns]'), index=idx) for i in range(5): @@ -858,7 +953,7 @@ def test_string_na_nat_conversion(self): if isna(x): expected[i] = tslib.iNaT else: - expected[i] = to_datetime(x) + expected[i] = to_datetime(x, cache=cache) assert_series_equal(result, expected, check_names=False) assert result.name == 'foo' @@ -866,26 +961,29 @@ def test_string_na_nat_conversion(self): assert_series_equal(dresult, expected, check_names=False) assert dresult.name == 'foo' - def test_dti_constructor_numpy_timeunits(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_dti_constructor_numpy_timeunits(self, cache): # GH 9114 - base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT']) + base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT'], + cache=cache) for dtype in ['datetime64[h]', 'datetime64[m]', 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]']: values = base.values.astype(dtype) tm.assert_index_equal(DatetimeIndex(values), base) - tm.assert_index_equal(to_datetime(values), base) + tm.assert_index_equal(to_datetime(values, cache=cache), base) - def test_dayfirst(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_dayfirst(self, cache): # GH 5917 arr = ['10/02/2014', '11/02/2014', '12/02/2014'] expected = DatetimeIndex([datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]) idx1 = DatetimeIndex(arr, dayfirst=True) idx2 = DatetimeIndex(np.array(arr), dayfirst=True) - idx3 = to_datetime(arr, dayfirst=True) - idx4 = to_datetime(np.array(arr), dayfirst=True) + idx3 = to_datetime(arr, dayfirst=True, cache=cache) + idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache) idx5 = DatetimeIndex(Index(arr), dayfirst=True) idx6 = DatetimeIndex(Series(arr), dayfirst=True) tm.assert_index_equal(expected, idx1) @@ -920,7 +1018,8 @@ def test_guess_datetime_format_for_array(self): class TestToDatetimeInferFormat(object): - def test_to_datetime_infer_datetime_format_consistent_format(self): + @pytest.mark.parametrize('cache', [True, False]) + def 
test_to_datetime_infer_datetime_format_consistent_format(self, cache): s = pd.Series(pd.date_range('20000101', periods=50, freq='H')) test_formats = ['%m-%d-%Y', '%m/%d/%Y %H:%M:%S.%f', @@ -929,90 +1028,113 @@ def test_to_datetime_infer_datetime_format_consistent_format(self): for test_format in test_formats: s_as_dt_strings = s.apply(lambda x: x.strftime(test_format)) - with_format = pd.to_datetime(s_as_dt_strings, format=test_format) + with_format = pd.to_datetime(s_as_dt_strings, format=test_format, + cache=cache) no_infer = pd.to_datetime(s_as_dt_strings, - infer_datetime_format=False) + infer_datetime_format=False, + cache=cache) yes_infer = pd.to_datetime(s_as_dt_strings, - infer_datetime_format=True) + infer_datetime_format=True, + cache=cache) # Whether the format is explicitly passed, it is inferred, or # it is not inferred, the results should all be the same tm.assert_series_equal(with_format, no_infer) tm.assert_series_equal(no_infer, yes_infer) - def test_to_datetime_infer_datetime_format_inconsistent_format(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_infer_datetime_format_inconsistent_format(self, + cache): s = pd.Series(np.array(['01/01/2011 00:00:00', '01-02-2011 00:00:00', '2011-01-03T00:00:00'])) # When the format is inconsistent, infer_datetime_format should just # fallback to the default parsing - tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False), - pd.to_datetime(s, infer_datetime_format=True)) + tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False, + cache=cache), + pd.to_datetime(s, infer_datetime_format=True, + cache=cache)) s = pd.Series(np.array(['Jan/01/2011', 'Feb/01/2011', 'Mar/01/2011'])) - tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False), - pd.to_datetime(s, infer_datetime_format=True)) + tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False, + cache=cache), + pd.to_datetime(s, infer_datetime_format=True, + cache=cache)) - def test_to_datetime_infer_datetime_format_series_with_nans(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_infer_datetime_format_series_with_nans(self, cache): s = pd.Series(np.array(['01/01/2011 00:00:00', np.nan, '01/03/2011 00:00:00', np.nan])) - tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False), - pd.to_datetime(s, infer_datetime_format=True)) - - def test_to_datetime_infer_datetime_format_series_starting_with_nans(self): + tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False, + cache=cache), + pd.to_datetime(s, infer_datetime_format=True, + cache=cache)) + + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_infer_datetime_format_series_start_with_nans(self, + cache): s = pd.Series(np.array([np.nan, np.nan, '01/01/2011 00:00:00', '01/02/2011 00:00:00', '01/03/2011 00:00:00'])) - tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False), - pd.to_datetime(s, infer_datetime_format=True)) + tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False, + cache=cache), + pd.to_datetime(s, infer_datetime_format=True, + cache=cache)) - def test_to_datetime_iso8601_noleading_0s(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_iso8601_noleading_0s(self, cache): # GH 11871 s = pd.Series(['2014-1-1', '2014-2-2', '2015-3-3']) expected = pd.Series([pd.Timestamp('2014-01-01'), pd.Timestamp('2014-02-02'), pd.Timestamp('2015-03-03')]) - tm.assert_series_equal(pd.to_datetime(s), expected) - 
tm.assert_series_equal(pd.to_datetime(s, format='%Y-%m-%d'), expected) + tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected) + tm.assert_series_equal(pd.to_datetime(s, format='%Y-%m-%d', + cache=cache), expected) class TestDaysInMonth(object): # tests for issue #10154 - def test_day_not_in_month_coerce(self): - assert isna(to_datetime('2015-02-29', errors='coerce')) + @pytest.mark.parametrize('cache', [True, False]) + def test_day_not_in_month_coerce(self, cache): + assert isna(to_datetime('2015-02-29', errors='coerce', cache=cache)) assert isna(to_datetime('2015-02-29', format="%Y-%m-%d", - errors='coerce')) + errors='coerce', cache=cache)) assert isna(to_datetime('2015-02-32', format="%Y-%m-%d", - errors='coerce')) + errors='coerce', cache=cache)) assert isna(to_datetime('2015-04-31', format="%Y-%m-%d", - errors='coerce')) + errors='coerce', cache=cache)) - def test_day_not_in_month_raise(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_day_not_in_month_raise(self, cache): pytest.raises(ValueError, to_datetime, '2015-02-29', - errors='raise') + errors='raise', cache=cache) pytest.raises(ValueError, to_datetime, '2015-02-29', - errors='raise', format="%Y-%m-%d") + errors='raise', format="%Y-%m-%d", cache=cache) pytest.raises(ValueError, to_datetime, '2015-02-32', - errors='raise', format="%Y-%m-%d") + errors='raise', format="%Y-%m-%d", cache=cache) pytest.raises(ValueError, to_datetime, '2015-04-31', - errors='raise', format="%Y-%m-%d") + errors='raise', format="%Y-%m-%d", cache=cache) - def test_day_not_in_month_ignore(self): - assert to_datetime('2015-02-29', errors='ignore') == '2015-02-29' + @pytest.mark.parametrize('cache', [True, False]) + def test_day_not_in_month_ignore(self, cache): + assert to_datetime('2015-02-29', errors='ignore', + cache=cache) == '2015-02-29' assert to_datetime('2015-02-29', errors='ignore', - format="%Y-%m-%d") == '2015-02-29' + format="%Y-%m-%d", cache=cache) == '2015-02-29' assert to_datetime('2015-02-32', errors='ignore', - format="%Y-%m-%d") == '2015-02-32' + format="%Y-%m-%d", cache=cache) == '2015-02-32' assert to_datetime('2015-04-31', errors='ignore', - format="%Y-%m-%d") == '2015-04-31' + format="%Y-%m-%d", cache=cache) == '2015-04-31' class TestDatetimeParsingWrappers(object): - def test_parsers(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_parsers(self, cache): # https://github.com/dateutil/dateutil/issues/217 import dateutil @@ -1076,7 +1198,7 @@ def test_parsers(self): result3 = to_datetime([date_str], yearfirst=yearfirst) # result5 is used below result4 = to_datetime(np.array([date_str], dtype=object), - yearfirst=yearfirst) + yearfirst=yearfirst, cache=cache) result6 = DatetimeIndex([date_str], yearfirst=yearfirst) # result7 is used below result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst) @@ -1106,7 +1228,8 @@ def test_parsers(self): assert result3 is tslib.NaT assert result4 is tslib.NaT - def test_parsers_dayfirst_yearfirst(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_parsers_dayfirst_yearfirst(self, cache): # OK # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00 @@ -1190,7 +1313,7 @@ def test_parsers_dayfirst_yearfirst(self): assert result2 == expected result3 = to_datetime(date_str, dayfirst=dayfirst, - yearfirst=yearfirst) + yearfirst=yearfirst, cache=cache) result4 = DatetimeIndex([date_str], dayfirst=dayfirst, yearfirst=yearfirst)[0] @@ -1199,7 +1322,8 @@ def 
test_parsers_dayfirst_yearfirst(self): assert result3 == expected assert result4 == expected - def test_parsers_timestring(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_parsers_timestring(self, cache): # must be the same as dateutil result cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)), '9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))} @@ -1254,9 +1378,10 @@ def test_parsers_time(self): assert isinstance(res, list) assert res == expected_arr - def test_parsers_timezone_minute_offsets_roundtrip(self): + @pytest.mark.parametrize('cache', [True, False]) + def test_parsers_timezone_minute_offsets_roundtrip(self, cache): # GH11708 - base = to_datetime("2013-01-01 00:00:00") + base = to_datetime("2013-01-01 00:00:00", cache=cache) dt_strings = [ ('2013-01-01 05:45+0545', "Asia/Katmandu", @@ -1267,7 +1392,7 @@ def test_parsers_timezone_minute_offsets_roundtrip(self): ] for dt_string, tz, dt_string_repr in dt_strings: - dt_time = to_datetime(dt_string) + dt_time = to_datetime(dt_string, cache=cache) assert base == dt_time converted_time = dt_time.tz_localize('UTC').tz_convert(tz) assert dt_string_repr == repr(converted_time)
- [x] closes #11665
- [x] tests added / passed
- [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
- [x] whatsnew entry

Added a `cache` keyword to `to_datetime` to speed up parsing of duplicate dates.

Some notes:

1) I defaulted `cache=False`, i.e. don't use a cache to parse the dates. Should the default be `True`?
2) I used `pd.unique()` to identify unique dates, and the current implementation did not accept a tuple of strings (objects). I added `tuple_to_object_array` and patched `_ensure_arraylike` to fix this.
3) There is currently an included test that fails in a case using `to_datetime(..., utc=True)` with a `Series`. I am inclined to believe the behavior shown in `In [5]` below should have been the existing behavior. Thoughts?

```
In [2]: test_dates = ['20130101 00:00:00'] * 10

In [3]: s = pd.Series(test_dates)

# Same as existing behavior
In [4]: pd.to_datetime(s, utc=True, cache=False)
Out[4]:
0   2013-01-01
1   2013-01-01
2   2013-01-01
3   2013-01-01
4   2013-01-01
5   2013-01-01
6   2013-01-01
7   2013-01-01
8   2013-01-01
9   2013-01-01
dtype: datetime64[ns]

In [5]: pd.to_datetime(s, utc=True, cache=True)
Out[5]:
0   2013-01-01 00:00:00+00:00
1   2013-01-01 00:00:00+00:00
2   2013-01-01 00:00:00+00:00
3   2013-01-01 00:00:00+00:00
4   2013-01-01 00:00:00+00:00
5   2013-01-01 00:00:00+00:00
6   2013-01-01 00:00:00+00:00
7   2013-01-01 00:00:00+00:00
8   2013-01-01 00:00:00+00:00
9   2013-01-01 00:00:00+00:00
dtype: datetime64[ns, UTC]
```
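As a rough illustration of the approach (not the actual implementation — internally the cache is built from `pd.unique()` on the raw values), the sketch below parses each unique string once and maps the results back onto the full input; it assumes a pandas build that includes the new `cache` keyword:

```python
import pandas as pd

strings = pd.Series(['20130101 00:00:00'] * 10)  # many duplicate values

# Public API: opt in to the cache when inputs repeat heavily.
parsed = pd.to_datetime(strings, cache=True)

# Roughly what the cache does under the hood: parse each unique
# value once, then map the parsed results back onto the input.
uniques = strings.unique()
cache = pd.Series(pd.to_datetime(uniques), index=uniques)
parsed_via_cache = strings.map(cache)
```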
https://api.github.com/repos/pandas-dev/pandas/pulls/17077
2017-07-26T05:45:13Z
2017-11-11T21:00:37Z
2017-11-11T21:00:37Z
2017-12-20T02:04:17Z
TST: Add tests/indexing/ and reshape/ to setup.py
diff --git a/setup.py b/setup.py index 31a3cddc3f9fd..d5791862cfb19 100755 --- a/setup.py +++ b/setup.py @@ -665,6 +665,7 @@ def pxd(name): 'pandas.tests.computation', 'pandas.tests.sparse', 'pandas.tests.frame', + 'pandas.tests.indexing', 'pandas.tests.indexes', 'pandas.tests.indexes.datetimes', 'pandas.tests.indexes.timedeltas', @@ -676,6 +677,7 @@ def pxd(name): 'pandas.tests.io.msgpack', 'pandas.tests.io.formats', 'pandas.tests.groupby', + 'pandas.tests.reshape', 'pandas.tests.series', 'pandas.tests.scalar', 'pandas.tests.tseries',
Oops.
https://api.github.com/repos/pandas-dev/pandas/pulls/17076
2017-07-26T04:24:11Z
2017-07-26T05:54:36Z
2017-07-26T05:54:36Z
2017-07-26T05:55:05Z
TST: Check more error messages in tests
diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py index d6065e6042908..2de0e866f6e70 100644 --- a/pandas/tests/frame/test_validate.py +++ b/pandas/tests/frame/test_validate.py @@ -1,34 +1,33 @@ from pandas.core.frame import DataFrame import pytest +import pandas.util.testing as tm -class TestDataFrameValidate(object): - """Tests for error handling related to data types of method arguments.""" - df = DataFrame({'a': [1, 2], 'b': [3, 4]}) - - def test_validate_bool_args(self): - # Tests for error handling related to boolean arguments. - invalid_values = [1, "True", [1, 2, 3], 5.0] - - for value in invalid_values: - with pytest.raises(ValueError): - self.df.query('a > b', inplace=value) - - with pytest.raises(ValueError): - self.df.eval('a + b', inplace=value) +@pytest.fixture +def dataframe(): + return DataFrame({'a': [1, 2], 'b': [3, 4]}) - with pytest.raises(ValueError): - self.df.set_index(keys=['a'], inplace=value) - with pytest.raises(ValueError): - self.df.reset_index(inplace=value) - - with pytest.raises(ValueError): - self.df.dropna(inplace=value) - - with pytest.raises(ValueError): - self.df.drop_duplicates(inplace=value) +class TestDataFrameValidate(object): + """Tests for error handling related to data types of method arguments.""" - with pytest.raises(ValueError): - self.df.sort_values(by=['a'], inplace=value) + @pytest.mark.parametrize("func", ["query", "eval", "set_index", + "reset_index", "dropna", + "drop_duplicates", "sort_values"]) + @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, dataframe, func, inplace): + msg = "For argument \"inplace\" expected type bool" + kwargs = dict(inplace=inplace) + + if func == "query": + kwargs["expr"] = "a > b" + elif func == "eval": + kwargs["expr"] = "a + b" + elif func == "set_index": + kwargs["keys"] = ["a"] + elif func == "sort_values": + kwargs["by"] = ["a"] + + with tm.assert_raises_regex(ValueError, msg): + getattr(dataframe, func)(**kwargs) diff --git a/pandas/tests/indexing/test_interval.py b/pandas/tests/indexing/test_interval.py index 2552fc066cc87..be6e5e1cffb2e 100644 --- a/pandas/tests/indexing/test_interval.py +++ b/pandas/tests/indexing/test_interval.py @@ -109,10 +109,10 @@ def test_with_slices(self): # slice of interval with pytest.raises(NotImplementedError): - result = s.loc[Interval(3, 6):] + s.loc[Interval(3, 6):] with pytest.raises(NotImplementedError): - result = s[Interval(3, 6):] + s[Interval(3, 6):] expected = s.iloc[3:5] result = s[[Interval(3, 6)]] diff --git a/pandas/tests/io/msgpack/test_except.py b/pandas/tests/io/msgpack/test_except.py index 6246e0777daee..5a803c5eba34b 100644 --- a/pandas/tests/io/msgpack/test_except.py +++ b/pandas/tests/io/msgpack/test_except.py @@ -1,9 +1,11 @@ # coding: utf-8 -import pytest - +from datetime import datetime from pandas.io.msgpack import packb, unpackb +import pytest +import pandas.util.testing as tm + class DummyException(Exception): pass @@ -12,12 +14,13 @@ class DummyException(Exception): class TestExceptions(object): def test_raise_on_find_unsupported_value(self): - import datetime - pytest.raises(TypeError, packb, datetime.datetime.now()) + msg = "can\'t serialize datetime" + with tm.assert_raises_regex(TypeError, msg): + packb(datetime.now()) def test_raise_from_object_hook(self): - def hook(obj): - raise DummyException + def hook(_): + raise DummyException() pytest.raises(DummyException, unpackb, packb({}), object_hook=hook) pytest.raises(DummyException, unpackb, packb({'fizz': 
'buzz'}), @@ -30,5 +33,7 @@ def hook(obj): packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook) - def test_invalidvalue(self): - pytest.raises(ValueError, unpackb, b'\xd9\x97#DL_') + def test_invalid_value(self): + msg = "Unpack failed: error" + with tm.assert_raises_regex(ValueError, msg): + unpackb(b"\xd9\x97#DL_") diff --git a/pandas/tests/io/msgpack/test_limits.py b/pandas/tests/io/msgpack/test_limits.py index 07044dbb7e5de..e4abd4ddb8d13 100644 --- a/pandas/tests/io/msgpack/test_limits.py +++ b/pandas/tests/io/msgpack/test_limits.py @@ -1,10 +1,10 @@ # coding: utf-8 from __future__ import (absolute_import, division, print_function, unicode_literals) +from pandas.io.msgpack import packb, unpackb, Packer, Unpacker, ExtType import pytest - -from pandas.io.msgpack import packb, unpackb, Packer, Unpacker, ExtType +import pandas.util.testing as tm class TestLimits(object): @@ -39,7 +39,10 @@ def test_max_str_len(self): unpacker = Unpacker(max_str_len=2, encoding='utf-8') unpacker.feed(packed) - pytest.raises(ValueError, unpacker.unpack) + + msg = "3 exceeds max_str_len" + with tm.assert_raises_regex(ValueError, msg): + unpacker.unpack() def test_max_bin_len(self): d = b'x' * 3 @@ -51,7 +54,10 @@ def test_max_bin_len(self): unpacker = Unpacker(max_bin_len=2) unpacker.feed(packed) - pytest.raises(ValueError, unpacker.unpack) + + msg = "3 exceeds max_bin_len" + with tm.assert_raises_regex(ValueError, msg): + unpacker.unpack() def test_max_array_len(self): d = [1, 2, 3] @@ -63,7 +69,10 @@ def test_max_array_len(self): unpacker = Unpacker(max_array_len=2) unpacker.feed(packed) - pytest.raises(ValueError, unpacker.unpack) + + msg = "3 exceeds max_array_len" + with tm.assert_raises_regex(ValueError, msg): + unpacker.unpack() def test_max_map_len(self): d = {1: 2, 3: 4, 5: 6} @@ -75,7 +84,10 @@ def test_max_map_len(self): unpacker = Unpacker(max_map_len=2) unpacker.feed(packed) - pytest.raises(ValueError, unpacker.unpack) + + msg = "3 exceeds max_map_len" + with tm.assert_raises_regex(ValueError, msg): + unpacker.unpack() def test_max_ext_len(self): d = ExtType(42, b"abc") @@ -87,4 +99,7 @@ def test_max_ext_len(self): unpacker = Unpacker(max_ext_len=2) unpacker.feed(packed) - pytest.raises(ValueError, unpacker.unpack) + + msg = "4 exceeds max_ext_len" + with tm.assert_raises_regex(ValueError, msg): + unpacker.unpack() diff --git a/pandas/tests/io/msgpack/test_sequnpack.py b/pandas/tests/io/msgpack/test_sequnpack.py index 1178176c2c557..dc6fc5ef916b4 100644 --- a/pandas/tests/io/msgpack/test_sequnpack.py +++ b/pandas/tests/io/msgpack/test_sequnpack.py @@ -1,28 +1,26 @@ # coding: utf-8 -import pytest - from pandas import compat from pandas.io.msgpack import Unpacker, BufferFull from pandas.io.msgpack import OutOfData +import pytest +import pandas.util.testing as tm + class TestPack(object): - def test_partialdata(self): + def test_partial_data(self): unpacker = Unpacker() - unpacker.feed(b'\xa5') - pytest.raises(StopIteration, next, iter(unpacker)) - unpacker.feed(b'h') - pytest.raises(StopIteration, next, iter(unpacker)) - unpacker.feed(b'a') - pytest.raises(StopIteration, next, iter(unpacker)) - unpacker.feed(b'l') - pytest.raises(StopIteration, next, iter(unpacker)) - unpacker.feed(b'l') - pytest.raises(StopIteration, next, iter(unpacker)) - unpacker.feed(b'o') - assert next(iter(unpacker)) == b'hallo' + msg = "No more data to unpack" + + for data in [b"\xa5", b"h", b"a", b"l", b"l"]: + unpacker.feed(data) + with tm.assert_raises_regex(StopIteration, msg): + next(iter(unpacker)) + + 
unpacker.feed(b"o") + assert next(iter(unpacker)) == b"hallo" def test_foobar(self): unpacker = Unpacker(read_size=3, use_list=1) diff --git a/pandas/tests/io/sas/test_sas.py b/pandas/tests/io/sas/test_sas.py index 617df99b99f0b..b85f6b6bbd5ce 100644 --- a/pandas/tests/io/sas/test_sas.py +++ b/pandas/tests/io/sas/test_sas.py @@ -1,14 +1,16 @@ -import pytest - from pandas.compat import StringIO from pandas import read_sas +import pandas.util.testing as tm + class TestSas(object): def test_sas_buffer_format(self): - - # GH14947 + # see gh-14947 b = StringIO("") - with pytest.raises(ValueError): + + msg = ("If this is a buffer object rather than a string " + "name, you must specify a format string") + with tm.assert_raises_regex(ValueError, msg): read_sas(b) diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py index e06f7cb34eb52..d431db0b4ca4f 100644 --- a/pandas/tests/scalar/test_interval.py +++ b/pandas/tests/scalar/test_interval.py @@ -1,42 +1,49 @@ from __future__ import division -import pytest from pandas import Interval + +import pytest import pandas.util.testing as tm +@pytest.fixture +def interval(): + return Interval(0, 1) + + class TestInterval(object): - def setup_method(self, method): - self.interval = Interval(0, 1) - def test_properties(self): - assert self.interval.closed == 'right' - assert self.interval.left == 0 - assert self.interval.right == 1 - assert self.interval.mid == 0.5 + def test_properties(self, interval): + assert interval.closed == 'right' + assert interval.left == 0 + assert interval.right == 1 + assert interval.mid == 0.5 - def test_repr(self): - assert repr(self.interval) == "Interval(0, 1, closed='right')" - assert str(self.interval) == "(0, 1]" + def test_repr(self, interval): + assert repr(interval) == "Interval(0, 1, closed='right')" + assert str(interval) == "(0, 1]" interval_left = Interval(0, 1, closed='left') assert repr(interval_left) == "Interval(0, 1, closed='left')" assert str(interval_left) == "[0, 1)" - def test_contains(self): - assert 0.5 in self.interval - assert 1 in self.interval - assert 0 not in self.interval - pytest.raises(TypeError, lambda: self.interval in self.interval) - - interval = Interval(0, 1, closed='both') - assert 0 in interval + def test_contains(self, interval): + assert 0.5 in interval assert 1 in interval - - interval = Interval(0, 1, closed='neither') assert 0 not in interval - assert 0.5 in interval - assert 1 not in interval + + msg = "__contains__ not defined for two intervals" + with tm.assert_raises_regex(TypeError, msg): + interval in interval + + interval_both = Interval(0, 1, closed='both') + assert 0 in interval_both + assert 1 in interval_both + + interval_neither = Interval(0, 1, closed='neither') + assert 0 not in interval_neither + assert 0.5 in interval_neither + assert 1 not in interval_neither def test_equal(self): assert Interval(0, 1) == Interval(0, 1, closed='right') @@ -54,74 +61,79 @@ def test_comparison(self): assert Interval(0, 1) > Interval(-1, 2) assert Interval(0, 1) >= Interval(0, 1) - def test_hash(self): + def test_hash(self, interval): # should not raise - hash(self.interval) + hash(interval) - def test_math_add(self): + def test_math_add(self, interval): expected = Interval(1, 2) - actual = self.interval + 1 + actual = interval + 1 assert expected == actual expected = Interval(1, 2) - actual = 1 + self.interval + actual = 1 + interval assert expected == actual - actual = self.interval + actual = interval actual += 1 assert expected == actual - with 
pytest.raises(TypeError): - self.interval + Interval(1, 2) + msg = "unsupported operand type\(s\) for \+" + with tm.assert_raises_regex(TypeError, msg): + interval + Interval(1, 2) - with pytest.raises(TypeError): - self.interval + 'foo' + with tm.assert_raises_regex(TypeError, msg): + interval + 'foo' - def test_math_sub(self): + def test_math_sub(self, interval): expected = Interval(-1, 0) - actual = self.interval - 1 + actual = interval - 1 assert expected == actual - actual = self.interval + actual = interval actual -= 1 assert expected == actual - with pytest.raises(TypeError): - self.interval - Interval(1, 2) + msg = "unsupported operand type\(s\) for -" + with tm.assert_raises_regex(TypeError, msg): + interval - Interval(1, 2) - with pytest.raises(TypeError): - self.interval - 'foo' + with tm.assert_raises_regex(TypeError, msg): + interval - 'foo' - def test_math_mult(self): + def test_math_mult(self, interval): expected = Interval(0, 2) - actual = self.interval * 2 + actual = interval * 2 assert expected == actual expected = Interval(0, 2) - actual = 2 * self.interval + actual = 2 * interval assert expected == actual - actual = self.interval + actual = interval actual *= 2 assert expected == actual - with pytest.raises(TypeError): - self.interval * Interval(1, 2) + msg = "unsupported operand type\(s\) for \*" + with tm.assert_raises_regex(TypeError, msg): + interval * Interval(1, 2) - with pytest.raises(TypeError): - self.interval * 'foo' + msg = "can\'t multiply sequence by non-int" + with tm.assert_raises_regex(TypeError, msg): + interval * 'foo' - def test_math_div(self): + def test_math_div(self, interval): expected = Interval(0, 0.5) - actual = self.interval / 2.0 + actual = interval / 2.0 assert expected == actual - actual = self.interval + actual = interval actual /= 2.0 assert expected == actual - with pytest.raises(TypeError): - self.interval / Interval(1, 2) + msg = "unsupported operand type\(s\) for /" + with tm.assert_raises_regex(TypeError, msg): + interval / Interval(1, 2) - with pytest.raises(TypeError): - self.interval / 'foo' + with tm.assert_raises_regex(TypeError, msg): + interval / 'foo' diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py index 134fa0a38f618..a0cde5f81d021 100644 --- a/pandas/tests/series/test_validate.py +++ b/pandas/tests/series/test_validate.py @@ -1,30 +1,27 @@ -import pytest from pandas.core.series import Series +import pytest +import pandas.util.testing as tm -class TestSeriesValidate(object): - """Tests for error handling related to data types of method arguments.""" - s = Series([1, 2, 3, 4, 5]) - - def test_validate_bool_args(self): - # Tests for error handling related to boolean arguments. 
- invalid_values = [1, "True", [1, 2, 3], 5.0] - for value in invalid_values: - with pytest.raises(ValueError): - self.s.reset_index(inplace=value) +@pytest.fixture +def series(): + return Series([1, 2, 3, 4, 5]) - with pytest.raises(ValueError): - self.s._set_name(name='hello', inplace=value) - with pytest.raises(ValueError): - self.s.sort_values(inplace=value) +class TestSeriesValidate(object): + """Tests for error handling related to data types of method arguments.""" - with pytest.raises(ValueError): - self.s.sort_index(inplace=value) + @pytest.mark.parametrize("func", ["reset_index", "_set_name", + "sort_values", "sort_index", + "rename", "dropna"]) + @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, series, func, inplace): + msg = "For argument \"inplace\" expected type bool" + kwargs = dict(inplace=inplace) - with pytest.raises(ValueError): - self.s.rename(inplace=value) + if func == "_set_name": + kwargs["name"] = "hello" - with pytest.raises(ValueError): - self.s.dropna(inplace=value) + with tm.assert_raises_regex(ValueError, msg): + getattr(series, func)(**kwargs)
Just replaces some `pytest.raises` calls with the more useful (I think) `tm.assert_raises_regex`, which also verifies the error message. xref #16521.
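For reference, a minimal sketch of the before/after pattern applied throughout the diff (the function and message here are made up purely for illustration):

```python
import pytest
import pandas.util.testing as tm


def divide(a, b):
    # Toy function used only to demonstrate the two assertion styles.
    if b == 0:
        raise ValueError("denominator must be nonzero")
    return a / b


# Before: only the exception type is checked.
pytest.raises(ValueError, divide, 1, 0)

# After: the message is checked too, so message regressions are caught.
with tm.assert_raises_regex(ValueError, "denominator must be nonzero"):
    divide(1, 0)
```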
https://api.github.com/repos/pandas-dev/pandas/pulls/17075
2017-07-26T04:13:40Z
2017-07-26T23:43:50Z
2017-07-26T23:43:50Z
2017-07-27T04:00:40Z
BUG: Fix parser field type compatibility on 32-bit systems.
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index ab92290f87719..416bf039623d5 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -196,14 +196,14 @@ int parser_init(parser_t *self) { sz = STREAM_INIT_SIZE / 10; sz = sz ? sz : 1; self->words = (char **)malloc(sz * sizeof(char *)); - self->word_starts = (size_t *)malloc(sz * sizeof(size_t)); + self->word_starts = (int64_t *)malloc(sz * sizeof(int64_t)); self->words_cap = sz; self->words_len = 0; // line pointers and metadata - self->line_start = (size_t *)malloc(sz * sizeof(size_t)); + self->line_start = (int64_t *)malloc(sz * sizeof(int64_t)); - self->line_fields = (size_t *)malloc(sz * sizeof(size_t)); + self->line_fields = (int64_t *)malloc(sz * sizeof(int64_t)); self->lines_cap = sz; self->lines = 0; @@ -247,7 +247,7 @@ void parser_del(parser_t *self) { } static int make_stream_space(parser_t *self, size_t nbytes) { - size_t i, cap; + int64_t i, cap; int status; void *orig_ptr, *newptr; @@ -419,7 +419,7 @@ static void append_warning(parser_t *self, const char *msg) { static int end_line(parser_t *self) { char *msg; - int fields; + int64_t fields; int ex_fields = self->expected_fields; size_t bufsize = 100; // for error or warning messages @@ -468,8 +468,8 @@ static int end_line(parser_t *self) { if (self->error_bad_lines) { self->error_msg = (char *)malloc(bufsize); snprintf(self->error_msg, bufsize, - "Expected %d fields in line %d, saw %d\n", - ex_fields, self->file_lines, fields); + "Expected %d fields in line %lld, saw %lld\n", + ex_fields, (long long)self->file_lines, (long long)fields); TRACE(("Error at line %d, %d fields\n", self->file_lines, fields)); @@ -480,8 +480,9 @@ static int end_line(parser_t *self) { // pass up error message msg = (char *)malloc(bufsize); snprintf(msg, bufsize, - "Skipping line %d: expected %d fields, saw %d\n", - self->file_lines, ex_fields, fields); + "Skipping line %lld: expected %d fields, saw %lld\n", + (long long)self->file_lines, ex_fields, + (long long)fields); append_warning(self, msg); free(msg); } @@ -632,7 +633,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) { stream = self->stream + self->stream_len; \ slen = self->stream_len; \ self->state = STATE; \ - if (line_limit > 0 && self->lines == start_lines + (size_t)line_limit) { \ + if (line_limit > 0 && self->lines == start_lines + (int64_t)line_limit) { \ goto linelimit; \ } @@ -647,7 +648,7 @@ static int parser_buffer_bytes(parser_t *self, size_t nbytes) { stream = self->stream + self->stream_len; \ slen = self->stream_len; \ self->state = STATE; \ - if (line_limit > 0 && self->lines == start_lines + (size_t)line_limit) { \ + if (line_limit > 0 && self->lines == start_lines + (int64_t)line_limit) { \ goto linelimit; \ } @@ -1147,7 +1148,8 @@ static int parser_handle_eof(parser_t *self) { case IN_QUOTED_FIELD: self->error_msg = (char *)malloc(bufsize); snprintf(self->error_msg, bufsize, - "EOF inside string starting at line %d", self->file_lines); + "EOF inside string starting at line %lld", + (long long)self->file_lines); return -1; case ESCAPED_CHAR: @@ -1318,7 +1320,7 @@ void debug_print_parser(parser_t *self) { char *token; for (line = 0; line < self->lines; ++line) { - printf("(Parsed) Line %d: ", line); + printf("(Parsed) Line %lld: ", (long long)line); for (j = 0; j < self->line_fields[j]; ++j) { token = self->words[j + self->line_start[line]];
- [ ] closes #17063 - [ ] tests added / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17071
2017-07-24T23:09:12Z
2017-07-25T03:59:02Z
2017-07-25T03:59:02Z
2017-07-25T13:49:43Z
ENH: Add skipna parameter to infer_dtype
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 5a5ea827e74ad..5fd245cee7d88 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -24,6 +24,8 @@ New features <https://www.python.org/dev/peps/pep-0519/>`_ on most readers and writers (:issue:`13823`) - Added ``__fspath__`` method to :class:`~pandas.HDFStore`, :class:`~pandas.ExcelFile`, and :class:`~pandas.ExcelWriter` to work properly with the file system path protocol (:issue:`13823`) +- Added ``skipna`` parameter to :func:`~pandas.api.types.infer_dtype` to + support type inference in the presence of missing values (:issue:`17059`). .. _whatsnew_0210.enhancements.infer_objects: diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 38e95fe6ee652..6b5a8f20f0067 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -1,6 +1,7 @@ import sys from decimal import Decimal cimport util +cimport cython from tslib import NaT, get_timezone from datetime import datetime, timedelta iNaT = util.get_nat() @@ -222,7 +223,7 @@ cdef _try_infer_map(v): return None -def infer_dtype(object value): +def infer_dtype(object value, bint skipna=False): """ Effeciently infer the type of a passed val, or list-like array of values. Return a string describing the type. @@ -230,6 +231,11 @@ def infer_dtype(object value): Parameters ---------- value : scalar, list, ndarray, or pandas type + skipna : bool, default False + Ignore NaN values when inferring the type. The default of ``False`` + will be deprecated in a later version of pandas. + + .. versionadded:: 0.21.0 Returns ------- @@ -272,6 +278,12 @@ def infer_dtype(object value): >>> infer_dtype(['foo', 'bar']) 'string' + >>> infer_dtype(['a', np.nan, 'b'], skipna=True) + 'string' + + >>> infer_dtype(['a', np.nan, 'b'], skipna=False) + 'mixed' + >>> infer_dtype([b'foo', b'bar']) 'bytes' @@ -310,13 +322,13 @@ def infer_dtype(object value): >>> infer_dtype(pd.Series(list('aabc')).astype('category')) 'categorical' - """ cdef: Py_ssize_t i, n object val ndarray values - bint seen_pdnat = False, seen_val = False + bint seen_pdnat = False + bint seen_val = False if isinstance(value, np.ndarray): values = value @@ -356,7 +368,7 @@ def infer_dtype(object value): values = values.ravel() # try to use a valid value - for i from 0 <= i < n: + for i in range(n): val = util.get_value_1d(values, i) # do not use is_nul_datetimelike to keep @@ -403,11 +415,11 @@ def infer_dtype(object value): return 'datetime' elif is_date(val): - if is_date_array(values): + if is_date_array(values, skipna=skipna): return 'date' elif is_time(val): - if is_time_array(values): + if is_time_array(values, skipna=skipna): return 'time' elif is_decimal(val): @@ -420,19 +432,19 @@ def infer_dtype(object value): return 'mixed-integer-float' elif util.is_bool_object(val): - if is_bool_array(values): + if is_bool_array(values, skipna=skipna): return 'boolean' elif PyString_Check(val): - if is_string_array(values): + if is_string_array(values, skipna=skipna): return 'string' elif PyUnicode_Check(val): - if is_unicode_array(values): + if is_unicode_array(values, skipna=skipna): return 'unicode' elif PyBytes_Check(val): - if is_bytes_array(values): + if is_bytes_array(values, skipna=skipna): return 'bytes' elif is_period(val): @@ -593,190 +605,284 @@ cdef inline bint is_timedelta(object o): return PyDelta_Check(o) or util.is_timedelta64_object(o) -cpdef bint is_bool_array(ndarray values): - cdef: - Py_ssize_t i, n = len(values) - 
ndarray[object] objbuf +cdef class Validator: - if issubclass(values.dtype.type, np.bool_): - return True - elif values.dtype == np.object_: - objbuf = values + cdef: + Py_ssize_t n + np.dtype dtype + bint skipna + + def __cinit__( + self, + Py_ssize_t n, + np.dtype dtype=np.dtype(np.object_), + bint skipna=False + ): + self.n = n + self.dtype = dtype + self.skipna = skipna + + cdef bint validate(self, object[:] values) except -1: + if not self.n: + return False - if n == 0: + if self.is_array_typed(): + return True + elif self.dtype.type_num == NPY_OBJECT: + if self.skipna: + return self._validate_skipna(values) + else: + return self._validate(values) + else: return False + @cython.wraparound(False) + @cython.boundscheck(False) + cdef bint _validate(self, object[:] values) except -1: + cdef: + Py_ssize_t i + Py_ssize_t n = self.n + for i in range(n): - if not util.is_bool_object(objbuf[i]): + if not self.is_valid(values[i]): return False - return True - else: + + return self.finalize_validate() + + @cython.wraparound(False) + @cython.boundscheck(False) + cdef bint _validate_skipna(self, object[:] values) except -1: + cdef: + Py_ssize_t i + Py_ssize_t n = self.n + + for i in range(n): + if not self.is_valid_skipna(values[i]): + return False + + return self.finalize_validate_skipna() + + cdef bint is_valid(self, object value) except -1: + return self.is_value_typed(value) + + cdef bint is_valid_skipna(self, object value) except -1: + return self.is_valid(value) or self.is_valid_null(value) + + cdef bint is_value_typed(self, object value) except -1: + raise NotImplementedError( + '{} child class must define is_value_typed'.format( + type(self).__name__ + ) + ) + + cdef bint is_valid_null(self, object value) except -1: + return util._checknull(value) + + cdef bint is_array_typed(self) except -1: return False + cdef inline bint finalize_validate(self): + return True + + cdef bint finalize_validate_skipna(self): + # TODO(phillipc): Remove the existing validate methods and replace them + # with the skipna versions upon full deprecation of skipna=False + return True + + +cdef class BoolValidator(Validator): + + cdef inline bint is_value_typed(self, object value) except -1: + return util.is_bool_object(value) + + cdef inline bint is_array_typed(self) except -1: + return issubclass(self.dtype.type, np.bool_) + + +cpdef bint is_bool_array(ndarray values, bint skipna=False): + cdef: + BoolValidator validator = BoolValidator( + len(values), + values.dtype, + skipna=skipna + ) + return validator.validate(values) + + +cdef class IntegerValidator(Validator): + + cdef inline bint is_value_typed(self, object value) except -1: + return util.is_integer_object(value) + + cdef inline bint is_array_typed(self) except -1: + return issubclass(self.dtype.type, np.integer) + cpdef bint is_integer_array(ndarray values): cdef: - Py_ssize_t i, n = len(values) - ndarray[object] objbuf + IntegerValidator validator = IntegerValidator( + len(values), + values.dtype, + ) + return validator.validate(values) - if issubclass(values.dtype.type, np.integer): - return True - elif values.dtype == np.object_: - objbuf = values - if n == 0: - return False +cdef class IntegerFloatValidator(Validator): - for i in range(n): - if not util.is_integer_object(objbuf[i]): - return False - return True - else: - return False + cdef inline bint is_value_typed(self, object value) except -1: + return util.is_integer_object(value) or util.is_float_object(value) + + cdef inline bint is_array_typed(self) except -1: + return 
issubclass(self.dtype.type, np.integer) cpdef bint is_integer_float_array(ndarray values): cdef: - Py_ssize_t i, n = len(values) - ndarray[object] objbuf + IntegerFloatValidator validator = IntegerFloatValidator( + len(values), + values.dtype, + ) + return validator.validate(values) - if issubclass(values.dtype.type, np.integer): - return True - elif values.dtype == np.object_: - objbuf = values - if n == 0: - return False +cdef class FloatValidator(Validator): - for i in range(n): - if not (util.is_integer_object(objbuf[i]) or - util.is_float_object(objbuf[i])): + cdef inline bint is_value_typed(self, object value) except -1: + return util.is_float_object(value) - return False - return True - else: - return False + cdef inline bint is_array_typed(self) except -1: + return issubclass(self.dtype.type, np.floating) cpdef bint is_float_array(ndarray values): - cdef: - Py_ssize_t i, n = len(values) - ndarray[object] objbuf + cdef FloatValidator validator = FloatValidator(len(values), values.dtype) + return validator.validate(values) - if issubclass(values.dtype.type, np.floating): - return True - elif values.dtype == np.object_: - objbuf = values - if n == 0: - return False +cdef class StringValidator(Validator): - for i in range(n): - if not util.is_float_object(objbuf[i]): - return False - return True - else: - return False + cdef inline bint is_value_typed(self, object value) except -1: + return PyString_Check(value) + + cdef inline bint is_array_typed(self) except -1: + return issubclass(self.dtype.type, np.str_) -cpdef bint is_string_array(ndarray values): +cpdef bint is_string_array(ndarray values, bint skipna=False): cdef: - Py_ssize_t i, n = len(values) - ndarray[object] objbuf + StringValidator validator = StringValidator( + len(values), + values.dtype, + skipna=skipna, + ) + return validator.validate(values) - if ((PY2 and issubclass(values.dtype.type, np.string_)) or - not PY2 and issubclass(values.dtype.type, np.unicode_)): - return True - elif values.dtype == np.object_: - objbuf = values - if n == 0: - return False +cdef class UnicodeValidator(Validator): - for i in range(n): - if not PyString_Check(objbuf[i]): - return False - return True - else: - return False + cdef inline bint is_value_typed(self, object value) except -1: + return PyUnicode_Check(value) + + cdef inline bint is_array_typed(self) except -1: + return issubclass(self.dtype.type, np.unicode_) -cpdef bint is_unicode_array(ndarray values): +cpdef bint is_unicode_array(ndarray values, bint skipna=False): cdef: - Py_ssize_t i, n = len(values) - ndarray[object] objbuf + UnicodeValidator validator = UnicodeValidator( + len(values), + values.dtype, + skipna=skipna, + ) + return validator.validate(values) - if issubclass(values.dtype.type, np.unicode_): - return True - elif values.dtype == np.object_: - objbuf = values - if n == 0: - return False +cdef class BytesValidator(Validator): - for i in range(n): - if not PyUnicode_Check(objbuf[i]): - return False - return True - else: - return False + cdef inline bint is_value_typed(self, object value) except -1: + return PyBytes_Check(value) + + cdef inline bint is_array_typed(self) except -1: + return issubclass(self.dtype.type, np.bytes_) -cpdef bint is_bytes_array(ndarray values): +cpdef bint is_bytes_array(ndarray values, bint skipna=False): cdef: - Py_ssize_t i, n = len(values) - ndarray[object] objbuf + BytesValidator validator = BytesValidator( + len(values), + values.dtype, + skipna=skipna + ) + return validator.validate(values) - if issubclass(values.dtype.type, 
np.bytes_): - return True - elif values.dtype == np.object_: - objbuf = values - if n == 0: - return False +cdef class TemporalValidator(Validator): + + cdef Py_ssize_t generic_null_count + + def __cinit__( + self, + Py_ssize_t n, + np.dtype dtype=np.dtype(np.object_), + bint skipna=False + ): + self.n = n + self.dtype = dtype + self.skipna = skipna + self.generic_null_count = 0 + + cdef inline bint is_valid(self, object value) except -1: + return self.is_value_typed(value) or self.is_valid_null(value) + + cdef bint is_valid_null(self, object value) except -1: + raise NotImplementedError( + '{} child class must define is_valid_null'.format( + type(self).__name__ + ) + ) + + cdef inline bint is_valid_skipna(self, object value) except -1: + cdef: + bint is_typed_null = self.is_valid_null(value) + bint is_generic_null = util._checknull(value) + self.generic_null_count += is_typed_null and is_generic_null + return self.is_value_typed(value) or is_typed_null or is_generic_null + + cdef inline bint finalize_validate_skipna(self): + return self.generic_null_count != self.n - for i in range(n): - if not PyBytes_Check(objbuf[i]): - return False - return True - else: - return False + +cdef class DatetimeValidator(TemporalValidator): + + cdef bint is_value_typed(self, object value) except -1: + return is_datetime(value) + + cdef inline bint is_valid_null(self, object value) except -1: + return is_null_datetime64(value) cpdef bint is_datetime_array(ndarray[object] values): - cdef Py_ssize_t i, null_count = 0, n = len(values) - cdef object v - if n == 0: - return False + cdef: + DatetimeValidator validator = DatetimeValidator( + len(values), + skipna=True, + ) + return validator.validate(values) - # return False for all nulls - for i in range(n): - v = values[i] - if is_null_datetime64(v): - # we are a regular null - if util._checknull(v): - null_count += 1 - elif not is_datetime(v): - return False - return null_count != n +cdef class Datetime64Validator(DatetimeValidator): -cpdef bint is_datetime64_array(ndarray values): - cdef Py_ssize_t i, null_count = 0, n = len(values) - cdef object v - if n == 0: - return False + cdef inline bint is_value_typed(self, object value) except -1: + return util.is_datetime64_object(value) - # return False for all nulls - for i in range(n): - v = values[i] - if is_null_datetime64(v): - # we are a regular null - if util._checknull(v): - null_count += 1 - elif not util.is_datetime64_object(v): - return False - return null_count != n + +cpdef bint is_datetime64_array(ndarray values): + cdef: + Datetime64Validator validator = Datetime64Validator( + len(values), + skipna=True, + ) + return validator.validate(values) cpdef bint is_datetime_with_singletz_array(ndarray[object] values): @@ -807,108 +913,104 @@ cpdef bint is_datetime_with_singletz_array(ndarray[object] values): return True +cdef class TimedeltaValidator(TemporalValidator): + + cdef bint is_value_typed(self, object value) except -1: + return PyDelta_Check(value) + + cdef inline bint is_valid_null(self, object value) except -1: + return is_null_timedelta64(value) + + cpdef bint is_timedelta_array(ndarray values): - cdef Py_ssize_t i, null_count = 0, n = len(values) - cdef object v - if n == 0: - return False - for i in range(n): - v = values[i] - if is_null_timedelta64(v): - # we are a regular null - if util._checknull(v): - null_count += 1 - elif not PyDelta_Check(v): - return False - return null_count != n + cdef: + TimedeltaValidator validator = TimedeltaValidator( + len(values), + skipna=True, + ) + return 
validator.validate(values) + + +cdef class Timedelta64Validator(TimedeltaValidator): + + cdef inline bint is_value_typed(self, object value) except -1: + return util.is_timedelta64_object(value) cpdef bint is_timedelta64_array(ndarray values): - cdef Py_ssize_t i, null_count = 0, n = len(values) - cdef object v - if n == 0: - return False - for i in range(n): - v = values[i] - if is_null_timedelta64(v): - # we are a regular null - if util._checknull(v): - null_count += 1 - elif not util.is_timedelta64_object(v): - return False - return null_count != n + cdef: + Timedelta64Validator validator = Timedelta64Validator( + len(values), + skipna=True, + ) + return validator.validate(values) + + +cdef class AnyTimedeltaValidator(TimedeltaValidator): + + cdef inline bint is_value_typed(self, object value) except -1: + return is_timedelta(value) cpdef bint is_timedelta_or_timedelta64_array(ndarray values): """ infer with timedeltas and/or nat/none """ - cdef Py_ssize_t i, null_count = 0, n = len(values) - cdef object v - if n == 0: - return False - for i in range(n): - v = values[i] - if is_null_timedelta64(v): - # we are a regular null - if util._checknull(v): - null_count += 1 - elif not is_timedelta(v): - return False - return null_count != n + cdef: + AnyTimedeltaValidator validator = AnyTimedeltaValidator( + len(values), + skipna=True, + ) + return validator.validate(values) -cpdef bint is_date_array(ndarray[object] values): - cdef Py_ssize_t i, n = len(values) - if n == 0: - return False - for i in range(n): - if not is_date(values[i]): - return False - return True +cdef class DateValidator(Validator): + cdef inline bint is_value_typed(self, object value) except -1: + return is_date(value) + + +cpdef bint is_date_array(ndarray[object] values, bint skipna=False): + cdef DateValidator validator = DateValidator(len(values), skipna=skipna) + return validator.validate(values) -cpdef bint is_time_array(ndarray[object] values): - cdef Py_ssize_t i, n = len(values) - if n == 0: - return False - for i in range(n): - if not is_time(values[i]): - return False - return True + +cdef class TimeValidator(Validator): + + cdef inline bint is_value_typed(self, object value) except -1: + return is_time(value) + + +cpdef bint is_time_array(ndarray[object] values, bint skipna=False): + cdef TimeValidator validator = TimeValidator(len(values), skipna=skipna) + return validator.validate(values) + + +cdef class PeriodValidator(TemporalValidator): + + cdef inline bint is_value_typed(self, object value) except -1: + return is_period(value) + + cdef inline bint is_valid_null(self, object value) except -1: + return is_null_period(value) cpdef bint is_period_array(ndarray[object] values): - cdef Py_ssize_t i, null_count = 0, n = len(values) - cdef object v - if n == 0: - return False + cdef PeriodValidator validator = PeriodValidator(len(values), skipna=True) + return validator.validate(values) - # return False for all nulls - for i in range(n): - v = values[i] - if is_null_period(v): - # we are a regular null - if util._checknull(v): - null_count += 1 - elif not is_period(v): - return False - return null_count != n + +cdef class IntervalValidator(Validator): + + cdef inline bint is_value_typed(self, object value) except -1: + return is_interval(value) cpdef bint is_interval_array(ndarray[object] values): cdef: - Py_ssize_t i, n = len(values), null_count = 0 - object v - - if n == 0: - return False - for i in range(n): - v = values[i] - if util._checknull(v): - null_count += 1 - continue - if not is_interval(v): - return 
False - return null_count != n + IntervalValidator validator = IntervalValidator( + len(values), + skipna=True, + ) + return validator.validate(values) cdef extern from "parse_helper.h": diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index d26ea047bb41f..dbde7ae5081d4 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -239,6 +239,9 @@ def test_infer_dtype_bytes(self): arr = arr.astype(object) assert lib.infer_dtype(arr) == compare + # object array of bytes with missing values + assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare + def test_isinf_scalar(self): # GH 11352 assert lib.isposinf_scalar(float('inf')) @@ -444,6 +447,10 @@ def test_bools(self): result = lib.infer_dtype(arr) assert result == 'boolean' + arr = np.array([True, np.nan, False], dtype='O') + result = lib.infer_dtype(arr, skipna=True) + assert result == 'boolean' + def test_floats(self): arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O') result = lib.infer_dtype(arr) @@ -472,11 +479,26 @@ def test_decimals(self): result = lib.infer_dtype(arr) assert result == 'mixed' + arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)]) + result = lib.infer_dtype(arr) + assert result == 'decimal' + + arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O') + result = lib.infer_dtype(arr) + assert result == 'decimal' + def test_string(self): pass def test_unicode(self): - pass + arr = [u'a', np.nan, u'c'] + result = lib.infer_dtype(arr) + assert result == 'mixed' + + arr = [u'a', np.nan, u'c'] + result = lib.infer_dtype(arr, skipna=True) + expected = 'unicode' if PY2 else 'string' + assert result == expected def test_datetime(self): @@ -714,10 +736,17 @@ def test_is_datetimelike_array_all_nan_nat_like(self): def test_date(self): - dates = [date(2012, 1, x) for x in range(1, 20)] + dates = [date(2012, 1, day) for day in range(1, 20)] index = Index(dates) assert index.inferred_type == 'date' + dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan] + result = lib.infer_dtype(dates) + assert result == 'mixed' + + result = lib.infer_dtype(dates, skipna=True) + assert result == 'date' + def test_to_object_array_tuples(self): r = (5, 6) values = [r]
closes #17059 - [x] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17066
2017-07-24T16:41:31Z
2017-07-25T17:27:46Z
2017-07-25T17:27:45Z
2017-07-25T18:23:18Z
BLD: add more versions
diff --git a/ci/requirements-2.7.sh b/ci/requirements-2.7.sh index 64d470e5c6e0e..5b20617f55759 100644 --- a/ci/requirements-2.7.sh +++ b/ci/requirements-2.7.sh @@ -4,4 +4,4 @@ source activate pandas echo "install 27" -conda install -n pandas -c conda-forge feather-format +conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 diff --git a/ci/requirements-2.7_BUILD_TEST.sh b/ci/requirements-2.7_BUILD_TEST.sh index 78941fd0944e5..999651624be6b 100755 --- a/ci/requirements-2.7_BUILD_TEST.sh +++ b/ci/requirements-2.7_BUILD_TEST.sh @@ -4,4 +4,4 @@ source activate pandas echo "install 27 BUILD_TEST" -conda install -n pandas -c conda-forge pyarrow dask +conda install -n pandas -c conda-forge pyarrow=0.4.1 dask diff --git a/ci/requirements-3.5.sh b/ci/requirements-3.5.sh index 917439a8765a2..3b8fe793a413d 100644 --- a/ci/requirements-3.5.sh +++ b/ci/requirements-3.5.sh @@ -4,7 +4,7 @@ source activate pandas echo "install 35" -conda install -n pandas -c conda-forge feather-format +conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 # pip install python-dateutil to get latest conda remove -n pandas python-dateutil --force diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index 41c9680ce1b7e..a4f5f073856c8 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -15,6 +15,7 @@ jinja2 sqlalchemy pymysql feather-format +pyarrow=0.4.1 # psycopg2 (not avail on defaults ATM) beautifulsoup4 s3fs diff --git a/ci/requirements-3.6_DOC.sh b/ci/requirements-3.6_DOC.sh index e43e483d77a73..58d968a92ee9d 100644 --- a/ci/requirements-3.6_DOC.sh +++ b/ci/requirements-3.6_DOC.sh @@ -6,6 +6,6 @@ echo "[install DOC_BUILD deps]" pip install pandas-gbq -conda install -n pandas -c conda-forge feather-format nbsphinx pandoc +conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 nbsphinx pandoc conda install -n pandas -c r r rpy2 --yes diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index 899bfbc6b6b23..cc9c07d8a2716 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -8,6 +8,7 @@ xlrd xlwt scipy feather-format +pyarrow=0.4.1 numexpr pytables matplotlib diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py index ca75d4d02e927..f3888c3bffca8 100644 --- a/pandas/util/_print_versions.py +++ b/pandas/util/_print_versions.py @@ -94,7 +94,8 @@ def show_versions(as_json=False): ("jinja2", lambda mod: mod.__version__), ("s3fs", lambda mod: mod.__version__), ("pandas_gbq", lambda mod: mod.__version__), - ("pandas_datareader", lambda mod: mod.__version__) + ("pandas_datareader", lambda mod: mod.__version__), + ("pyarrow", lambda mod: mod.__version__), ] deps_blob = list()
xref #17064.
https://api.github.com/repos/pandas-dev/pandas/pulls/17065
2017-07-24T11:59:04Z
2017-07-24T21:05:15Z
2017-07-24T21:05:15Z
2017-07-24T21:06:47Z
BUG: Thoroughly dedup columns in read_csv
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index e6764178d1f25..1fcf580e6bdc6 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -265,11 +265,11 @@ Indexing I/O ^^^ +- Bug in :func:`read_csv` in which columns were not being thoroughly de-duplicated (:issue:`17060`) - Bug in :func:`read_csv` in which non integer values for the header argument generated an unhelpful / unrelated error message (:issue:`16338`) - Bug in :func:`read_csv` in which memory management issues in exception handling, under certain conditions, would cause the interpreter to segfault (:issue:`14696, :issue:`16798`). - Bug in :func:`read_csv` when called with ``low_memory=False`` in which a CSV with at least one column > 2GB in size would incorrectly raise a ``MemoryError`` (:issue:`16798`). - Bug in :func:`read_stata` where value labels could not be read when using an iterator (:issue:`16923`) - - Bug in :func:`read_html` where import check fails when run in multiple threads (:issue:`16928`) Plotting diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 9866eff3e5f31..543a943aea311 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -788,11 +788,14 @@ cdef class TextReader: unnamed_count += 1 count = counts.get(name, 0) - if (count > 0 and self.mangle_dupe_cols - and not self.has_mi_columns): - this_header.append('%s.%d' % (name, count)) - else: - this_header.append(name) + + if not self.has_mi_columns and self.mangle_dupe_cols: + while count > 0: + counts[name] = count + 1 + name = '%s.%d' % (name, count) + count = counts.get(name, 0) + + this_header.append(name) counts[name] = count + 1 if self.has_mi_columns: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 1e7d9d420b35d..b0a13234782ec 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2331,10 +2331,16 @@ def _infer_columns(self): if not have_mi_columns and self.mangle_dupe_cols: counts = {} + for i, col in enumerate(this_columns): cur_count = counts.get(col, 0) - if cur_count > 0: - this_columns[i] = '%s.%d' % (col, cur_count) + + while cur_count > 0: + counts[col] = cur_count + 1 + col = "%s.%d" % (col, cur_count) + cur_count = counts.get(col, 0) + + this_columns[i] = col counts[col] = cur_count + 1 elif have_mi_columns: diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 4d1f9936af983..91cf238391252 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -224,25 +224,6 @@ def test_unnamed_columns(self): Index(['A', 'B', 'C', 'Unnamed: 3', 'Unnamed: 4'])) - def test_duplicate_columns(self): - # TODO: add test for condition 'mangle_dupe_cols=False' - # once it is actually supported (gh-12935) - data = """A,A,B,B,B -1,2,3,4,5 -6,7,8,9,10 -11,12,13,14,15 -""" - - for method in ('read_csv', 'read_table'): - - # check default behavior - df = getattr(self, method)(StringIO(data), sep=',') - assert list(df.columns) == ['A', 'A.1', 'B', 'B.1', 'B.2'] - - df = getattr(self, method)(StringIO(data), sep=',', - mangle_dupe_cols=True) - assert list(df.columns) == ['A', 'A.1', 'B', 'B.1', 'B.2'] - def test_csv_mixed_type(self): data = """A,B,C a,1,2 diff --git a/pandas/tests/io/parser/mangle_dupes.py b/pandas/tests/io/parser/mangle_dupes.py new file mode 100644 index 0000000000000..70ecfe51c0f09 --- /dev/null +++ b/pandas/tests/io/parser/mangle_dupes.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +""" +Tests that duplicate columns are handled appropriately when parsed by the +CSV 
engine. In general, the expected result is that they are either thoroughly +de-duplicated (if mangling requested) or ignored otherwise. +""" + +from pandas.compat import StringIO + + +class DupeColumnTests(object): + def test_basic(self): + # TODO: add test for condition "mangle_dupe_cols=False" + # once it is actually supported (gh-12935) + data = "a,a,b,b,b\n1,2,3,4,5" + + for method in ("read_csv", "read_table"): + # Check default behavior. + expected = ["a", "a.1", "b", "b.1", "b.2"] + df = getattr(self, method)(StringIO(data), sep=",") + assert list(df.columns) == expected + + df = getattr(self, method)(StringIO(data), sep=",", + mangle_dupe_cols=True) + assert list(df.columns) == expected + + def test_thorough_mangle(self): + # see gh-17060 + data = "a,a,a.1\n1,2,3" + df = self.read_csv(StringIO(data), sep=",", mangle_dupe_cols=True) + assert list(df.columns) == ["a", "a.1", "a.1.1"] + + data = "a,a,a.1,a.1.1,a.1.1.1,a.1.1.1.1\n1,2,3,4,5,6" + df = self.read_csv(StringIO(data), sep=",", mangle_dupe_cols=True) + assert list(df.columns) == ["a", "a.1", "a.1.1", "a.1.1.1", + "a.1.1.1.1", "a.1.1.1.1.1"] + + data = "a,a,a.3,a.1,a.2,a,a\n1,2,3,4,5,6,7" + df = self.read_csv(StringIO(data), sep=",", mangle_dupe_cols=True) + assert list(df.columns) == ["a", "a.1", "a.3", "a.1.1", + "a.2", "a.2.1", "a.3.1"] diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index 9bbc624dff90f..2fee2451c5e36 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -19,6 +19,7 @@ from .c_parser_only import CParserTests from .parse_dates import ParseDatesTests from .compression import CompressionTests +from .mangle_dupes import DupeColumnTests from .multithread import MultithreadTests from .python_parser_only import PythonParserTests from .dtypes import DtypeTests @@ -26,11 +27,12 @@ class BaseParser(CommentTests, CompressionTests, ConverterTests, DialectTests, + DtypeTests, DupeColumnTests, HeaderTests, IndexColTests, MultithreadTests, NAvaluesTests, ParseDatesTests, ParserTests, SkipRowsTests, UsecolsTests, - QuotingTests, DtypeTests): + QuotingTests): def read_csv(self, *args, **kwargs): raise NotImplementedError
When the user specifies `mangle_dupe_cols=True` (the default), the columns should be thoroughly deduped. However, `master` fails on examples like this:

~~~python
data = "a,a,a.1,a.1.1\n1,2,3,4"
read_csv(StringIO(data))

   a  a.1  a.1  a.1.1
0  1    2    3      4
~~~

Note how there are two "a.1" columns: the second "a" is renamed to "a.1", which collides with the existing "a.1" column. With this change, the output of those same commands is:

~~~python
   a  a.1  a.1.1  a.1.1.1
0  1    2      3        4
~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/17060
2017-07-24T03:57:44Z
2017-07-25T15:17:23Z
2017-07-25T15:17:23Z
2017-08-21T15:28:57Z
TST: remove some test warnings in parser tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index bae45743bbcfb..101af46a63db4 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -10,7 +10,7 @@ def pytest_addoption(parser): help="skip slow tests") parser.addoption("--skip-network", action="store_true", help="skip network tests") - parser.addoption("--run-highmemory", action="store_true", + parser.addoption("--run-high-memory", action="store_true", help="run high memory tests") parser.addoption("--only-slow", action="store_true", help="run only slow tests") @@ -27,7 +27,7 @@ def pytest_runtest_setup(item): pytest.skip("skipping due to --skip-network") if 'high_memory' in item.keywords and not item.config.getoption( - "--run-highmemory"): + "--run-high-memory"): pytest.skip( "skipping high memory test since --run-highmemory was not set") diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index 48812c04e3b55..c68b2bf064d97 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -476,9 +476,23 @@ def test_read_tarfile(self, tar_suffix): # iterating through a file-like). tar_path = os.path.join(self.dirpath, "tar_csv" + tar_suffix) - tar = tarfile.open(tar_path, "r") - data_file = tar.extractfile("tar_data.csv") - - out = self.read_csv(data_file) - expected = pd.DataFrame({"a": [1]}) - tm.assert_frame_equal(out, expected) + with tarfile.open(tar_path, "r") as tar: + data_file = tar.extractfile("tar_data.csv") + + out = self.read_csv(data_file) + expected = pd.DataFrame({"a": [1]}) + tm.assert_frame_equal(out, expected) + + @pytest.mark.high_memory + def test_bytes_exceed_2gb(self): + """Read from a "CSV" that has a column larger than 2GB. + + GH 16798 + """ + if self.low_memory: + pytest.skip("not a high_memory test") + + csv = StringIO('strings\n' + '\n'.join( + ['x' * (1 << 20) for _ in range(2100)])) + df = self.read_csv(csv, low_memory=False) + assert not df.empty diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index f23bd24f5cbe3..9bbc624dff90f 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -1,10 +1,6 @@ # -*- coding: utf-8 -*- import os -from io import StringIO - -import pytest - import pandas.util.testing as tm from pandas import read_csv, read_table @@ -28,18 +24,6 @@ from .dtypes import DtypeTests -@pytest.mark.high_memory -def test_bytes_exceed_2gb(): - """Read from a "CSV" that has a column larger than 2GB. - - GH 16798 - """ - csv = StringIO('strings\n' + '\n'.join( - ['x' * (1 << 20) for _ in range(2100)])) - df = read_csv(csv, low_memory=False) - assert not df.empty - - class BaseParser(CommentTests, CompressionTests, ConverterTests, DialectTests, HeaderTests, IndexColTests,
TST: move the high-memory test to its proper location in c_parser_only. xref #16798
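The opt-in plumbing the renamed flag relies on is the standard pytest marker/option pattern; a minimal `conftest.py` sketch matching the diff above:

~~~python
# conftest.py
import pytest

def pytest_addoption(parser):
    # expose the opt-in flag on the pytest command line
    parser.addoption("--run-high-memory", action="store_true",
                     help="run high memory tests")

def pytest_runtest_setup(item):
    # skip any test marked @pytest.mark.high_memory unless opted in
    if 'high_memory' in item.keywords and not item.config.getoption(
            "--run-high-memory"):
        pytest.skip("skipping high memory test since "
                    "--run-high-memory was not set")
~~~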
https://api.github.com/repos/pandas-dev/pandas/pulls/17057
2017-07-23T16:50:17Z
2017-07-23T17:43:59Z
2017-07-23T17:43:59Z
2017-07-23T18:53:26Z
DOC: Add more examples for reset_index
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9514ab8f3b27f..2ceb62dc7a349 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3060,7 +3060,7 @@ class max_speed ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), - ... ('speed', 'type')]) + ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), @@ -3068,49 +3068,59 @@ class max_speed ... index=index, ... columns=columns) >>> df - speed - max type + speed species + max type class name - bird falcon 389.0 fly - parrot 24.0 fly - mammal lion 80.5 run - monkey NaN jump + bird falcon 389.0 fly + parrot 24.0 fly + mammal lion 80.5 run + monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') - class speed - max type + class speed species + max type name - falcon bird 389.0 fly - parrot bird 24.0 fly - lion mammal 80.5 run - monkey mammal NaN jump + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) - speed - class max type + speed species + class max type name - falcon bird 389.0 fly - parrot bird 24.0 fly - lion mammal 80.5 run - monkey mammal NaN jump + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump When the index is inserted under another level, we can specify under - which one with the parameter `col_fill`. If we specify a nonexistent - level, it is created: + which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') - species speed - class max type + species speed species + class max type name - falcon bird 389.0 fly - parrot bird 24.0 fly - lion mammal 80.5 run - monkey mammal NaN jump + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + If we specify a nonexistent level for `col_fill`, it is created: + + >>> df.reset_index(level='class', col_level=1, col_fill='genus') + genus speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, 'inplace') if inplace:
Expands documentation for `col_fill`. Follow-up to #16975.
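A quick way to exercise the behaviour the expanded examples document (a toy frame for illustration, not the docstring's own speed/species data):

~~~python
import pandas as pd

idx = pd.MultiIndex.from_tuples([('bird', 'falcon'), ('mammal', 'lion')],
                                names=['class', 'name'])
cols = pd.MultiIndex.from_tuples([('speed', 'max')])
df = pd.DataFrame([[389.0], [80.5]], index=idx, columns=cols)

# place the reset level under the second column level, filling the first
# level with an existing label or a brand-new one:
df.reset_index(level='class', col_level=1, col_fill='speed')
df.reset_index(level='class', col_level=1, col_fill='genus')
~~~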
https://api.github.com/repos/pandas-dev/pandas/pulls/17055
2017-07-23T10:19:49Z
2017-07-23T17:44:25Z
2017-07-23T17:44:25Z
2017-07-23T18:02:41Z
TST: np.argsort comparison
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 692cdd4957947..842e8fea0df9b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1846,7 +1846,7 @@ def create_index(self): def test_argsort(self): idx = self.create_index() if PY36: - with tm.assert_raises_regex(TypeError, "'>' not supported"): + with tm.assert_raises_regex(TypeError, "'>|<' not supported"): result = idx.argsort() elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): @@ -1859,7 +1859,7 @@ def test_argsort(self): def test_numpy_argsort(self): idx = self.create_index() if PY36: - with tm.assert_raises_regex(TypeError, "'>' not supported"): + with tm.assert_raises_regex(TypeError, "'>|<' not supported"): result = np.argsort(idx) elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"):
- [ ] closes #17046 - [ ] tests added / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
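The looser regex accounts for the fact that the operator named in the Python 3.6+ error message depends on which comparison the sort happens to perform first; a minimal reproduction (assuming CPython 3.6+):

~~~python
import numpy as np

try:
    np.argsort(np.array([1, 'a'], dtype=object))
except TypeError as exc:
    print(exc)  # e.g. "'<' not supported between instances of 'str' and 'int'"
~~~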
https://api.github.com/repos/pandas-dev/pandas/pulls/17051
2017-07-21T23:33:53Z
2017-07-22T04:58:42Z
2017-07-22T04:58:42Z
2017-07-22T07:08:28Z
TST: Move some Series ctor tests to SharedWithSparse
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 91d3e9e7b935b..37b4981c039f5 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -282,7 +282,7 @@ Groupby/Resample/Rolling Sparse ^^^^^^ -- Bug in ``SparseSeries`` raises ``AttributeError`` when a dictionary is passed in as data (:issue:`16777`) +- Bug in ``SparseSeries`` raises ``AttributeError`` when a dictionary is passed in as data (:issue:`16905`) Reshaping diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 1eb2b98a7d7cc..8e22dd38030ee 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -1,5 +1,6 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 +from collections import OrderedDict import pytest @@ -20,6 +21,15 @@ class SharedWithSparse(object): + """ + A collection of tests Series and SparseSeries can share. + + In generic tests on this class, use ``self._assert_series_equal()`` + which is implemented in sub-classes. + """ + def _assert_series_equal(self, left, right): + """Dispatch to series class dependent assertion""" + raise NotImplementedError def test_scalarop_preserve_name(self): result = self.ts * 2 @@ -117,9 +127,81 @@ def test_to_sparse_pass_name(self): result = self.ts.to_sparse() assert result.name == self.ts.name + def test_constructor_dict(self): + d = {'a': 0., 'b': 1., 'c': 2.} + result = self.series_klass(d) + expected = self.series_klass(d, index=sorted(d.keys())) + self._assert_series_equal(result, expected) + + result = self.series_klass(d, index=['b', 'c', 'd', 'a']) + expected = self.series_klass([1, 2, np.nan, 0], + index=['b', 'c', 'd', 'a']) + self._assert_series_equal(result, expected) + + def test_constructor_subclass_dict(self): + data = tm.TestSubDict((x, 10.0 * x) for x in range(10)) + series = self.series_klass(data) + expected = self.series_klass(dict(compat.iteritems(data))) + self._assert_series_equal(series, expected) + + def test_constructor_ordereddict(self): + # GH3283 + data = OrderedDict( + ('col%s' % i, np.random.random()) for i in range(12)) + + series = self.series_klass(data) + expected = self.series_klass(list(data.values()), list(data.keys())) + self._assert_series_equal(series, expected) + + # Test with subclass + class A(OrderedDict): + pass + + series = self.series_klass(A(data)) + self._assert_series_equal(series, expected) + + def test_constructor_dict_multiindex(self): + d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.} + _d = sorted(d.items()) + result = self.series_klass(d) + expected = self.series_klass( + [x[1] for x in _d], + index=pd.MultiIndex.from_tuples([x[0] for x in _d])) + self._assert_series_equal(result, expected) + + d['z'] = 111. 
+ _d.insert(0, ('z', d['z'])) + result = self.series_klass(d) + expected = self.series_klass([x[1] for x in _d], + index=pd.Index([x[0] for x in _d], + tupleize_cols=False)) + result = result.reindex(index=expected.index) + self._assert_series_equal(result, expected) + + def test_constructor_dict_timedelta_index(self): + # GH #12169 : Resample category data with timedelta index + # construct Series from dict as data and TimedeltaIndex as index + # will result NaN in result Series data + expected = self.series_klass( + data=['A', 'B', 'C'], + index=pd.to_timedelta([0, 10, 20], unit='s') + ) + + result = self.series_klass( + data={pd.to_timedelta(0, unit='s'): 'A', + pd.to_timedelta(10, unit='s'): 'B', + pd.to_timedelta(20, unit='s'): 'C'}, + index=pd.to_timedelta([0, 10, 20], unit='s') + ) + self._assert_series_equal(result, expected) + class TestSeriesMisc(TestData, SharedWithSparse): + series_klass = Series + # SharedWithSparse tests use generic, series_klass-agnostic assertion + _assert_series_equal = staticmethod(tm.assert_series_equal) + def test_tab_completion(self): # GH 9910 s = Series(list('abcd')) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index d591aa4f567a9..a916c42c007f9 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -20,8 +20,7 @@ from pandas._libs import lib from pandas._libs.tslib import iNaT -from pandas.compat import lrange, range, zip, OrderedDict, long -from pandas import compat +from pandas.compat import lrange, range, zip, long from pandas.util.testing import assert_series_equal import pandas.util.testing as tm @@ -605,48 +604,6 @@ def test_constructor_dict(self): expected.iloc[1] = 1 assert_series_equal(result, expected) - def test_constructor_dict_multiindex(self): - check = lambda result, expected: tm.assert_series_equal( - result, expected, check_dtype=True, check_series_type=True) - d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.} - _d = sorted(d.items()) - ser = Series(d) - expected = Series([x[1] for x in _d], - index=MultiIndex.from_tuples([x[0] for x in _d])) - check(ser, expected) - - d['z'] = 111. 
- _d.insert(0, ('z', d['z'])) - ser = Series(d) - expected = Series([x[1] for x in _d], index=Index( - [x[0] for x in _d], tupleize_cols=False)) - ser = ser.reindex(index=expected.index) - check(ser, expected) - - def test_constructor_dict_timedelta_index(self): - # GH #12169 : Resample category data with timedelta index - # construct Series from dict as data and TimedeltaIndex as index - # will result NaN in result Series data - expected = Series( - data=['A', 'B', 'C'], - index=pd.to_timedelta([0, 10, 20], unit='s') - ) - - result = Series( - data={pd.to_timedelta(0, unit='s'): 'A', - pd.to_timedelta(10, unit='s'): 'B', - pd.to_timedelta(20, unit='s'): 'C'}, - index=pd.to_timedelta([0, 10, 20], unit='s') - ) - # this should work - assert_series_equal(result, expected) - - def test_constructor_subclass_dict(self): - data = tm.TestSubDict((x, 10.0 * x) for x in range(10)) - series = Series(data) - refseries = Series(dict(compat.iteritems(data))) - assert_series_equal(refseries, series) - def test_constructor_dict_datetime64_index(self): # GH 9456 @@ -670,26 +627,6 @@ def create_data(constructor): assert_series_equal(result_datetime, expected) assert_series_equal(result_Timestamp, expected) - def test_orderedDict_ctor(self): - # GH3283 - import pandas - import random - data = OrderedDict([('col%s' % i, random.random()) for i in range(12)]) - s = pandas.Series(data) - assert all(s.values == list(data.values())) - - def test_orderedDict_subclass_ctor(self): - # GH3283 - import pandas - import random - - class A(OrderedDict): - pass - - data = A([('col%s' % i, random.random()) for i in range(12)]) - s = pandas.Series(data) - assert all(s.values == list(data.values())) - def test_constructor_list_of_tuples(self): data = [(1, 1), (2, 2), (2, 3)] s = Series(data) diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index a5d514644a8f1..336b8f30716cd 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -1002,12 +1002,14 @@ def _check(frame, orig): shifted = frame.shift(2, freq='B') exp = orig.shift(2, freq='B') - exp = exp.to_sparse(frame.default_fill_value) + exp = exp.to_sparse(frame.default_fill_value, + kind=frame.default_kind) tm.assert_frame_equal(shifted, exp) shifted = frame.shift(2, freq=BDay()) exp = orig.shift(2, freq=BDay()) - exp = exp.to_sparse(frame.default_fill_value) + exp = exp.to_sparse(frame.default_fill_value, + kind=frame.default_kind) tm.assert_frame_equal(shifted, exp) self._check_all(_check) diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index bb56f8a51897a..a7685abd5ba4d 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -1,6 +1,8 @@ # pylint: disable-msg=E1101,W0612 import operator +from datetime import datetime + import pytest from numpy import nan @@ -58,6 +60,10 @@ def _test_data2_zero(): class TestSparseSeries(SharedWithSparse): + series_klass = SparseSeries + # SharedWithSparse tests use generic, series_klass-agnostic assertion + _assert_series_equal = staticmethod(tm.assert_sp_series_equal) + def setup_method(self, method): arr, index = _test_data1() @@ -1379,3 +1385,18 @@ def test_numpy_func_call(self): for func in funcs: for series in ('bseries', 'zbseries'): getattr(np, func)(getattr(self, series)) + + +@pytest.mark.parametrize( + 'datetime_type', (np.datetime64, + pd.Timestamp, + lambda x: datetime.strptime(x, '%Y-%m-%d'))) +def test_constructor_dict_datetime64_index(datetime_type): + # GH 9456 + dates = 
['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15'] + values = [42544017.198965244, 1234565, 40512335.181958228, -1] + + result = SparseSeries(dict(zip(map(datetime_type, dates), values))) + expected = SparseSeries(values, map(pd.Timestamp, dates)) + + tm.assert_sp_series_equal(result, expected)
- [x] closes https://github.com/pandas-dev/pandas/pull/16906 - [x] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [x] whatsnew entry Continues https://github.com/pandas-dev/pandas/pull/16906.
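The pattern the moved tests rely on looks roughly like this (a minimal sketch with a hypothetical test name, not the full `SharedWithSparse` class):

~~~python
import pandas.util.testing as tm
from pandas import Series

class SharedTests(object):
    series_klass = None  # set by each concrete subclass

    def _assert_series_equal(self, left, right):
        raise NotImplementedError  # replaced by a klass-aware assertion

    def test_constructor_list(self):
        result = self.series_klass([1.0, 2.0])
        self._assert_series_equal(result, self.series_klass([1.0, 2.0]))

class TestDenseSeries(SharedTests):
    series_klass = Series
    _assert_series_equal = staticmethod(tm.assert_series_equal)
~~~

The sparse subclass would instead set `series_klass = SparseSeries` and `tm.assert_sp_series_equal`, so one test body covers both classes.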
https://api.github.com/repos/pandas-dev/pandas/pulls/17050
2017-07-21T23:07:44Z
2017-07-22T18:56:28Z
2017-07-22T18:56:27Z
2017-07-22T18:56:47Z
Fixed 'add_methods' when the 'select' argument is specified.
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 55473ec8d7cad..b3d30f58c528e 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -149,13 +149,15 @@ def names(x): def add_methods(cls, new_methods, force, select, exclude): if select and exclude: raise TypeError("May only pass either select or exclude") - methods = new_methods + if select: select = set(select) methods = {} for key, method in new_methods.items(): if key in select: methods[key] = method + new_methods = methods + if exclude: for k in exclude: new_methods.pop(k, None)
The `add_methods` function did not behave correctly when the `select` argument was specified. This case was not covered by tests (https://codecov.io/gh/pandas-dev/pandas/src/master/pandas/core/ops.py#L149); the function is not fully tested, it is unclear how to write a proper test for it, and `add_methods` is currently never called with `select` anywhere in pandas. Still, the intent of the code is clear and the bug is obvious on a plain reading.
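The control-flow slip is easiest to see in a distilled sketch (a hypothetical standalone function mirroring the patched logic, not the real `pandas.core.ops` internals):

~~~python
def add_methods(new_methods, select=None, exclude=None):
    if select and exclude:
        raise TypeError("May only pass either select or exclude")
    if select:
        # before the fix, this filtered dict was built into a separate
        # local `methods` and never assigned back, so `select` had no effect
        new_methods = {k: v for k, v in new_methods.items()
                       if k in set(select)}
    if exclude:
        for k in exclude:
            new_methods.pop(k, None)
    return new_methods

print(add_methods({'add': 1, 'sub': 2}, select=['add']))  # {'add': 1}
~~~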
https://api.github.com/repos/pandas-dev/pandas/pulls/17045
2017-07-21T12:00:21Z
2017-07-22T01:21:51Z
2017-07-22T01:21:51Z
2017-07-22T01:21:57Z
Fixed Minor Typo
diff --git a/doc/source/io.rst b/doc/source/io.rst index 495d4e9c3a5a3..149c86aead135 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4892,7 +4892,7 @@ pandas integrates with this external package. if ``pandas-gbq`` is installed, yo use the pandas methods ``pd.read_gbq`` and ``DataFrame.to_gbq``, which will call the respective functions from ``pandas-gbq``. -Full cocumentation can be found `here <https://pandas-gbq.readthedocs.io/>`__ +Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__ .. _io.stata:
Fixes typo: "cocumentation" to "documentation". - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17043
2017-07-20T22:00:36Z
2017-07-20T22:04:07Z
2017-07-20T22:04:07Z
2017-07-22T01:01:48Z
CLN: move safe_sort from core.algorithms to core.sorting
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 79beb95d93ea1..3ccd7216fa81a 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -30,7 +30,6 @@ from pandas.core.dtypes.missing import isnull from pandas.core import common as com -from pandas.compat import string_types from pandas._libs import algos, lib, hashtable as htable from pandas._libs.tslib import iNaT @@ -431,104 +430,6 @@ def isin(comps, values): return f(comps, values) -def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False): - """ - Sort ``values`` and reorder corresponding ``labels``. - ``values`` should be unique if ``labels`` is not None. - Safe for use with mixed types (int, str), orders ints before strs. - - .. versionadded:: 0.19.0 - - Parameters - ---------- - values : list-like - Sequence; must be unique if ``labels`` is not None. - labels : list_like - Indices to ``values``. All out of bound indices are treated as - "not found" and will be masked with ``na_sentinel``. - na_sentinel : int, default -1 - Value in ``labels`` to mark "not found". - Ignored when ``labels`` is None. - assume_unique : bool, default False - When True, ``values`` are assumed to be unique, which can speed up - the calculation. Ignored when ``labels`` is None. - - Returns - ------- - ordered : ndarray - Sorted ``values`` - new_labels : ndarray - Reordered ``labels``; returned when ``labels`` is not None. - - Raises - ------ - TypeError - * If ``values`` is not list-like or if ``labels`` is neither None - nor list-like - * If ``values`` cannot be sorted - ValueError - * If ``labels`` is not None and ``values`` contain duplicates. - """ - if not is_list_like(values): - raise TypeError("Only list-like objects are allowed to be passed to" - "safe_sort as values") - values = np.asarray(values) - - def sort_mixed(values): - # order ints before strings, safe in py3 - str_pos = np.array([isinstance(x, string_types) for x in values], - dtype=bool) - nums = np.sort(values[~str_pos]) - strs = np.sort(values[str_pos]) - return _ensure_object(np.concatenate([nums, strs])) - - sorter = None - if compat.PY3 and lib.infer_dtype(values) == 'mixed-integer': - # unorderable in py3 if mixed str/int - ordered = sort_mixed(values) - else: - try: - sorter = values.argsort() - ordered = values.take(sorter) - except TypeError: - # try this anyway - ordered = sort_mixed(values) - - # labels: - - if labels is None: - return ordered - - if not is_list_like(labels): - raise TypeError("Only list-like objects or None are allowed to be" - "passed to safe_sort as labels") - labels = _ensure_platform_int(np.asarray(labels)) - - from pandas import Index - if not assume_unique and not Index(values).is_unique: - raise ValueError("values should be unique if labels is not None") - - if sorter is None: - # mixed types - (hash_klass, _), values = _get_data_algo(values, _hashtables) - t = hash_klass(len(values)) - t.map_locations(values) - sorter = _ensure_platform_int(t.lookup(ordered)) - - reverse_indexer = np.empty(len(sorter), dtype=np.int_) - reverse_indexer.put(sorter, np.arange(len(sorter))) - - mask = (labels < -len(values)) | (labels >= len(values)) | \ - (labels == na_sentinel) - - # (Out of bound indices will be masked with `na_sentinel` next, so we may - # deal with them here without performance loss using `mode='wrap'`.) 
- new_labels = reverse_indexer.take(labels, mode='wrap') - np.putmask(new_labels, mask, na_sentinel) - - return ordered, _ensure_platform_int(new_labels) - - def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): """ Encode input values as an enumerated type or categorical variable @@ -568,6 +469,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): uniques = uniques.to_array() if sort and len(uniques) > 0: + from pandas.core.sorting import safe_sort uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel, assume_unique=True) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5d50f961927c7..c95a9598604ee 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -48,6 +48,7 @@ import pandas.core.dtypes.concat as _concat import pandas.core.missing as missing import pandas.core.algorithms as algos +import pandas.core.sorting as sorting from pandas.io.formats.printing import pprint_thing from pandas.core.ops import _comp_method_OBJECT_ARRAY from pandas.core.strings import StringAccessorMixin @@ -2306,7 +2307,7 @@ def difference(self, other): assume_unique=True) the_diff = this.values.take(label_diff) try: - the_diff = algos.safe_sort(the_diff) + the_diff = sorting.safe_sort(the_diff) except TypeError: pass @@ -2366,7 +2367,7 @@ def symmetric_difference(self, other, result_name=None): the_diff = _concat._concat_compat([left_diff, right_diff]) try: - the_diff = algos.safe_sort(the_diff) + the_diff = sorting.safe_sort(the_diff) except TypeError: pass diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index beebe06e7477e..8e4367a6784da 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -38,6 +38,7 @@ from pandas.core.sorting import is_int64_overflow_possible import pandas.core.algorithms as algos +import pandas.core.sorting as sorting import pandas.core.common as com from pandas._libs import hashtable as libhashtable, join as libjoin, lib from pandas.errors import MergeError @@ -1491,7 +1492,7 @@ def _sort_labels(uniques, left, right): l = len(left) labels = np.concatenate([left, right]) - _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1) + _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) new_labels = _ensure_int64(new_labels) new_left, new_right = new_labels[:l], new_labels[l:] diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 10b80cbc3483d..44a27bb5cbae1 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -1,12 +1,14 @@ """ miscellaneous sorting / groupby utilities """ import numpy as np -from pandas.compat import long +from pandas.compat import long, string_types, PY3 from pandas.core.categorical import Categorical from pandas.core.dtypes.common import ( _ensure_platform_int, _ensure_int64, + is_list_like, is_categorical_dtype) +from pandas.core.dtypes.cast import infer_dtype_from_array from pandas.core.dtypes.missing import isnull import pandas.core.algorithms as algorithms from pandas._libs import lib, algos, hashtable @@ -376,3 +378,107 @@ def _reorder_by_uniques(uniques, labels): uniques = algorithms.take_nd(uniques, sorter, allow_fill=False) return uniques, labels + + +def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False): + """ + Sort ``values`` and reorder corresponding ``labels``. + ``values`` should be unique if ``labels`` is not None. + Safe for use with mixed types (int, str), orders ints before strs. + + .. 
versionadded:: 0.19.0 + + Parameters + ---------- + values : list-like + Sequence; must be unique if ``labels`` is not None. + labels : list_like + Indices to ``values``. All out of bound indices are treated as + "not found" and will be masked with ``na_sentinel``. + na_sentinel : int, default -1 + Value in ``labels`` to mark "not found". + Ignored when ``labels`` is None. + assume_unique : bool, default False + When True, ``values`` are assumed to be unique, which can speed up + the calculation. Ignored when ``labels`` is None. + + Returns + ------- + ordered : ndarray + Sorted ``values`` + new_labels : ndarray + Reordered ``labels``; returned when ``labels`` is not None. + + Raises + ------ + TypeError + * If ``values`` is not list-like or if ``labels`` is neither None + nor list-like + * If ``values`` cannot be sorted + ValueError + * If ``labels`` is not None and ``values`` contain duplicates. + """ + if not is_list_like(values): + raise TypeError("Only list-like objects are allowed to be passed to" + "safe_sort as values") + + if not isinstance(values, np.ndarray): + + # don't convert to string types + dtype, _ = infer_dtype_from_array(values) + values = np.asarray(values, dtype=dtype) + + def sort_mixed(values): + # order ints before strings, safe in py3 + str_pos = np.array([isinstance(x, string_types) for x in values], + dtype=bool) + nums = np.sort(values[~str_pos]) + strs = np.sort(values[str_pos]) + return np.concatenate([nums, np.asarray(strs, dtype=object)]) + + sorter = None + if PY3 and lib.infer_dtype(values) == 'mixed-integer': + # unorderable in py3 if mixed str/int + ordered = sort_mixed(values) + else: + try: + sorter = values.argsort() + ordered = values.take(sorter) + except TypeError: + # try this anyway + ordered = sort_mixed(values) + + # labels: + + if labels is None: + return ordered + + if not is_list_like(labels): + raise TypeError("Only list-like objects or None are allowed to be" + "passed to safe_sort as labels") + labels = _ensure_platform_int(np.asarray(labels)) + + from pandas import Index + if not assume_unique and not Index(values).is_unique: + raise ValueError("values should be unique if labels is not None") + + if sorter is None: + # mixed types + (hash_klass, _), values = algorithms._get_data_algo( + values, algorithms._hashtables) + t = hash_klass(len(values)) + t.map_locations(values) + sorter = _ensure_platform_int(t.lookup(ordered)) + + reverse_indexer = np.empty(len(sorter), dtype=np.int_) + reverse_indexer.put(sorter, np.arange(len(sorter))) + + mask = (labels < -len(values)) | (labels >= len(values)) | \ + (labels == na_sentinel) + + # (Out of bound indices will be masked with `na_sentinel` next, so we may + # deal with them here without performance loss using `mode='wrap'`.) 
+ new_labels = reverse_indexer.take(labels, mode='wrap') + np.putmask(new_labels, mask, na_sentinel) + + return ordered, _ensure_platform_int(new_labels) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 4588bf17fdbeb..9e7b97f19e0c3 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -2,7 +2,6 @@ import numpy as np import pytest -import warnings from numpy.random import RandomState from numpy import nan @@ -60,93 +59,6 @@ def test_strings(self): tm.assert_series_equal(result, expected) -class TestSafeSort(object): - - def test_basic_sort(self): - values = [3, 1, 2, 0, 4] - result = algos.safe_sort(values) - expected = np.array([0, 1, 2, 3, 4]) - tm.assert_numpy_array_equal(result, expected) - - values = list("baaacb") - result = algos.safe_sort(values) - expected = np.array(list("aaabbc")) - tm.assert_numpy_array_equal(result, expected) - - values = [] - result = algos.safe_sort(values) - expected = np.array([]) - tm.assert_numpy_array_equal(result, expected) - - def test_labels(self): - values = [3, 1, 2, 0, 4] - expected = np.array([0, 1, 2, 3, 4]) - - labels = [0, 1, 1, 2, 3, 0, -1, 4] - result, result_labels = algos.safe_sort(values, labels) - expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - tm.assert_numpy_array_equal(result_labels, expected_labels) - - # na_sentinel - labels = [0, 1, 1, 2, 3, 0, 99, 4] - result, result_labels = algos.safe_sort(values, labels, - na_sentinel=99) - expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - tm.assert_numpy_array_equal(result_labels, expected_labels) - - # out of bound indices - labels = [0, 101, 102, 2, 3, 0, 99, 4] - result, result_labels = algos.safe_sort(values, labels) - expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - tm.assert_numpy_array_equal(result_labels, expected_labels) - - labels = [] - result, result_labels = algos.safe_sort(values, labels) - expected_labels = np.array([], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - tm.assert_numpy_array_equal(result_labels, expected_labels) - - def test_mixed_integer(self): - values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object) - result = algos.safe_sort(values) - expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - values = np.array(['b', 1, 0, 'a'], dtype=object) - labels = [0, 1, 2, 3, 0, -1, 1] - result, result_labels = algos.safe_sort(values, labels) - expected = np.array([0, 1, 'a', 'b'], dtype=object) - expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - tm.assert_numpy_array_equal(result_labels, expected_labels) - - def test_unsortable(self): - # GH 13714 - arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) - if compat.PY2 and not pd._np_version_under1p10: - # RuntimeWarning: tp_compare didn't return -1 or -2 for exception - with warnings.catch_warnings(): - pytest.raises(TypeError, algos.safe_sort, arr) - else: - pytest.raises(TypeError, algos.safe_sort, arr) - - def test_exceptions(self): - with tm.assert_raises_regex(TypeError, - "Only list-like objects are allowed"): - algos.safe_sort(values=1) - - with tm.assert_raises_regex(TypeError, - "Only list-like objects or None"): - algos.safe_sort(values=[0, 1, 2], labels=1) - - with tm.assert_raises_regex(ValueError, - "values should 
be unique"): - algos.safe_sort(values=[0, 1, 2, 1], labels=[0, 1]) - - class TestFactorize(object): def test_basic(self): diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index e09270bcadf27..f6973cccb82b0 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -1,6 +1,8 @@ import pytest from itertools import product from collections import defaultdict +import warnings +from datetime import datetime import numpy as np from numpy import nan @@ -13,7 +15,8 @@ decons_group_index, get_group_index, nargsort, - lexsort_indexer) + lexsort_indexer, + safe_sort) class TestSorting(object): @@ -340,3 +343,96 @@ def testit(label_list, shape): shape = (10000, 10000) label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)] testit(label_list, shape) + + +class TestSafeSort(object): + + def test_basic_sort(self): + values = [3, 1, 2, 0, 4] + result = safe_sort(values) + expected = np.array([0, 1, 2, 3, 4]) + tm.assert_numpy_array_equal(result, expected) + + values = list("baaacb") + result = safe_sort(values) + expected = np.array(list("aaabbc"), dtype='object') + tm.assert_numpy_array_equal(result, expected) + + values = [] + result = safe_sort(values) + expected = np.array([]) + tm.assert_numpy_array_equal(result, expected) + + def test_labels(self): + values = [3, 1, 2, 0, 4] + expected = np.array([0, 1, 2, 3, 4]) + + labels = [0, 1, 1, 2, 3, 0, -1, 4] + result, result_labels = safe_sort(values, labels) + expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_labels, expected_labels) + + # na_sentinel + labels = [0, 1, 1, 2, 3, 0, 99, 4] + result, result_labels = safe_sort(values, labels, + na_sentinel=99) + expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_labels, expected_labels) + + # out of bound indices + labels = [0, 101, 102, 2, 3, 0, 99, 4] + result, result_labels = safe_sort(values, labels) + expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_labels, expected_labels) + + labels = [] + result, result_labels = safe_sort(values, labels) + expected_labels = np.array([], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_labels, expected_labels) + + def test_mixed_integer(self): + values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object) + result = safe_sort(values) + expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + values = np.array(['b', 1, 0, 'a'], dtype=object) + labels = [0, 1, 2, 3, 0, -1, 1] + result, result_labels = safe_sort(values, labels) + expected = np.array([0, 1, 'a', 'b'], dtype=object) + expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_labels, expected_labels) + + def test_mixed_interger_from_list(self): + values = ['b', 1, 0, 'a', 0, 'b'] + result = safe_sort(values) + expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_unsortable(self): + # GH 13714 + arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) + if compat.PY2 and not pd._np_version_under1p10: + # RuntimeWarning: tp_compare didn't return -1 or -2 for exception + with 
warnings.catch_warnings(): + pytest.raises(TypeError, safe_sort, arr) + else: + pytest.raises(TypeError, safe_sort, arr) + + def test_exceptions(self): + with tm.assert_raises_regex(TypeError, + "Only list-like objects are allowed"): + safe_sort(values=1) + + with tm.assert_raises_regex(TypeError, + "Only list-like objects or None"): + safe_sort(values=[0, 1, 2], labels=1) + + with tm.assert_raises_regex(ValueError, + "values should be unique"): + safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
COMPAT: safe_sort will only coerce list-likes to object, not to a numpy string type. xref: https://github.com/pandas-dev/pandas/pull/17003#discussion_r128332208
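The motivation for routing the conversion through `infer_dtype_from_array` is NumPy's default coercion of mixed lists, which a quick check illustrates (the exact string width may vary by platform):

~~~python
import numpy as np

np.asarray(['b', 1, 0])                 # e.g. dtype('<U21'): ints become strings
np.asarray(['b', 1, 0], dtype=object)   # object dtype, values preserved
~~~

With the silent string coercion, the int-before-str ordering that `safe_sort` promises for mixed values would be lost.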
https://api.github.com/repos/pandas-dev/pandas/pulls/17034
2017-07-20T11:04:00Z
2017-07-20T14:23:40Z
2017-07-20T14:23:40Z
2017-07-20T14:25:42Z
Fix double ` in 'Reshaping by Melt' section
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 5f125e329f6f1..3dce73b302c7c 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -265,7 +265,7 @@ the right thing: Reshaping by Melt ----------------- -The top-level :func:``melt` and :func:`~DataFrame.melt` functions are useful to +The top-level :func:`melt` and :func:`~DataFrame.melt` functions are useful to massage a DataFrame into a format where one or more columns are identifier variables, while all other columns, considered measured variables, are "unpivoted" to the row axis, leaving just two non-identifier columns, "variable" and "value". The
See current stable docs for the issue: https://pandas.pydata.org/pandas-docs/stable/reshaping.html#reshaping-by-melt The double ` is causing the entire paragraph to be fixed width until the next double `. This commit removes the extra ` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/17030
2017-07-20T05:30:04Z
2017-07-20T06:48:23Z
2017-07-20T06:48:23Z
2017-07-20T06:57:57Z
DOC: Document business frequency aliases
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 8f02a86adbd48..ce4a920ad77b5 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1093,9 +1093,9 @@ frequencies. We will refer to these aliases as *offset aliases* "QS", "quarter start frequency" "BQS", "business quarter start frequency" "A, Y", "year end frequency" - "BA", "business year end frequency" + "BA, BY", "business year end frequency" "AS, YS", "year start frequency" - "BAS", "business year start frequency" + "BAS, BYS", "business year start frequency" "BH", "business hour frequency" "H", "hourly frequency" "T, min", "minutely frequency"
Follow-up to #16978.
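A quick sanity check that the newly paired aliases resolve to the same offset (using a pandas version contemporary with this PR; these business-year aliases were deprecated much later):

~~~python
import pandas as pd

pd.date_range('2017-01-01', periods=2, freq='BA')
# e.g. DatetimeIndex(['2017-12-29', '2018-12-31'], freq='BA-DEC')
pd.date_range('2017-01-01', periods=2, freq='BY')  # same business year ends
~~~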
https://api.github.com/repos/pandas-dev/pandas/pulls/17028
2017-07-19T15:33:19Z
2017-07-19T16:33:44Z
2017-07-19T16:33:44Z
2017-07-19T16:33:51Z
DOC remove redundant backtick.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 48006b11993c7..b2083a4454f84 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3161,7 +3161,7 @@ def pipe(self, func, *args, **kwargs): _shared_docs['transform'] = (""" Call function producing a like-indexed %(klass)s - and return a %(klass)s with the transformed values` + and return a %(klass)s with the transformed values .. versionadded:: 0.20.0
https://api.github.com/repos/pandas-dev/pandas/pulls/17025
2017-07-19T14:05:33Z
2017-07-19T16:00:22Z
2017-07-19T16:00:22Z
2017-07-19T16:00:29Z
Set pd.options.display.max_columns=0 by default
diff --git a/doc/source/_static/print_df_new.png b/doc/source/_static/print_df_new.png new file mode 100644 index 0000000000000..767d7d3f0ef06 Binary files /dev/null and b/doc/source/_static/print_df_new.png differ diff --git a/doc/source/_static/print_df_old.png b/doc/source/_static/print_df_old.png new file mode 100644 index 0000000000000..5f458722f1269 Binary files /dev/null and b/doc/source/_static/print_df_old.png differ diff --git a/doc/source/options.rst b/doc/source/options.rst index a82be4d84bf3f..48247eb48baaf 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -78,8 +78,8 @@ with no argument ``describe_option`` will print out the descriptions for all ava Getting and Setting Options --------------------------- -As described above, :func:`~pandas.get_option` and :func:`~pandas.set_option` -are available from the pandas namespace. To change an option, call +As described above, :func:`~pandas.get_option` and :func:`~pandas.set_option` +are available from the pandas namespace. To change an option, call ``set_option('option regex', new_value)``. .. ipython:: python @@ -230,7 +230,7 @@ can specify the option ``df.info(null_counts=True)`` to override on showing a pa df.info() pd.reset_option('max_info_rows') -``display.precision`` sets the output display precision in terms of decimal places. +``display.precision`` sets the output display precision in terms of decimal places. This is only a suggestion. .. ipython:: python @@ -323,21 +323,21 @@ display.latex.multicolumn_format 'l' Alignment of multicolumn la display.latex.multirow False Combines rows when using a MultiIndex. Centered instead of top-aligned, separated by clines. -display.max_columns 20 max_rows and max_columns are used +display.max_columns 0 or 20 max_rows and max_columns are used in __repr__() methods to decide if to_string() or info() is used to render an object to a string. In - case python/IPython is running in - a terminal this can be set to 0 and + case Python/IPython is running in + a terminal this is set to 0 by default and pandas will correctly auto-detect - the width the terminal and swap to + the width of the terminal and switch to a smaller format in case all columns would not fit vertically. The IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to do correct - auto-detection. 'None' value means - unlimited. + auto-detection, in which case the default + is set to 20. 'None' value means unlimited. display.max_colwidth 50 The maximum width in characters of a column in the repr of a pandas data structure. When the column overflows, @@ -402,9 +402,9 @@ display.html.table_schema False Whether to publish a Table display.html.border 1 A ``border=value`` attribute is inserted in the ``<table>`` tag for the DataFrame HTML repr. -display.html.use_mathjax True When True, Jupyter notebook will process - table contents using MathJax, rendering - mathematical expressions enclosed by the +display.html.use_mathjax True When True, Jupyter notebook will process + table contents using MathJax, rendering + mathematical expressions enclosed by the dollar symbol. io.excel.xls.writer xlwt The default Excel writer engine for 'xls' files. @@ -422,7 +422,7 @@ io.hdf.dropna_table True drop ALL nan rows when appe io.parquet.engine None The engine to use as a default for parquet reading and writing. 
If None then try 'pyarrow' and 'fastparquet' -mode.chained_assignment warn Controls ``SettingWithCopyWarning``: +mode.chained_assignment warn Controls ``SettingWithCopyWarning``: 'raise', 'warn', or None. Raise an exception, warn, or no action if trying to use :ref:`chained assignment <indexing.evaluation_order>`. diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 107ce7855a00d..ced7bddcaa5b3 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -658,6 +658,35 @@ Notice in the example above that the converted ``Categorical`` has retained ``or Note that the unintenional conversion of ``ordered`` discussed above did not arise in previous versions due to separate bugs that prevented ``astype`` from doing any type of category to category conversion (:issue:`10696`, :issue:`18593`). These bugs have been fixed in this release, and motivated changing the default value of ``ordered``. +.. _whatsnew_0230.api_breaking.pretty_printing: + +Better pretty-printing of DataFrames in a terminal +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Previously, the default value for the maximum number of columns was +``pd.options.display.max_columns=20``. This meant that relatively wide data +frames would not fit within the terminal width, and pandas would introduce line +breaks to display these 20 columns. This resulted in an output that was +relatively difficult to read: + +.. image:: _static/print_df_old.png + +If Python runs in a terminal, the maximum number of columns is now determined +automatically so that the printed data frame fits within the current terminal +width (``pd.options.display.max_columns=0``) (:issue:`17023`). If Python runs +as a Jupyter kernel (such as the Jupyter QtConsole or a Jupyter notebook, as +well as in many IDEs), this value cannot be inferred automatically and is thus +set to `20` as in previous versions. In a terminal, this results in a much +nicer output: + +.. image:: _static/print_df_new.png + +Note that if you don't like the new default, you can always set this option +yourself. To revert to the old setting, you can run this line: + +.. code-block:: python + + pd.options.display.max_columns = 20 + .. 
_whatsnew_0230.api.datetimelike: Datetimelike API Changes diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 0edbf892172a9..b836a35b8cf29 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -13,6 +13,7 @@ from pandas.core.config import (is_int, is_bool, is_text, is_instance_factory, is_one_of_factory, is_callable) from pandas.io.formats.console import detect_console_encoding +from pandas.io.formats.terminal import is_terminal # compute @@ -314,7 +315,11 @@ def table_schema_cb(key): cf.register_option('max_categories', 8, pc_max_categories_doc, validator=is_int) cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int) - cf.register_option('max_columns', 20, pc_max_cols_doc, + if is_terminal(): + max_cols = 0 # automatically determine optimal number of columns + else: + max_cols = 20 # cannot determine optimal number of columns + cf.register_option('max_columns', max_cols, pc_max_cols_doc, validator=is_instance_factory([type(None), int])) cf.register_option('large_repr', 'truncate', pc_large_repr_doc, validator=is_one_of_factory(['truncate', 'info'])) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 1731dbb3ac68d..12201f62946ac 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -625,7 +625,8 @@ def to_string(self): max_len += size_tr_col # Need to make space for largest row # plus truncate dot col dif = max_len - self.w - adj_dif = dif + # '+ 1' to avoid too wide repr (GH PR #17023) + adj_dif = dif + 1 col_lens = Series([Series(ele).apply(len).max() for ele in strcols]) n_cols = len(col_lens) diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index 4bcb28fa59b86..07ab445182680 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -17,7 +17,7 @@ import sys import shutil -__all__ = ['get_terminal_size'] +__all__ = ['get_terminal_size', 'is_terminal'] def get_terminal_size(): @@ -48,6 +48,23 @@ def get_terminal_size(): return tuple_xy +def is_terminal(): + """ + Detect if Python is running in a terminal. + + Returns True if Python is running in a terminal or False if not. 
+ """ + try: + ip = get_ipython() + except NameError: # assume standard Python interpreter in a terminal + return True + else: + if hasattr(ip, 'kernel'): # IPython as a Jupyter kernel + return False + else: # IPython in a terminal + return True + + def _get_terminal_size_windows(): res = None try: diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 90daa9aa882c8..152159965036d 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -875,10 +875,11 @@ def test_astype_str(self): columns=self.tzframe.columns) tm.assert_frame_equal(result, expected) - result = str(self.tzframe) - assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 ' - '2013-01-01 00:00:00+01:00') in result - assert ('1 2013-01-02 ' - 'NaT NaT') in result - assert ('2 2013-01-03 2013-01-03 00:00:00-05:00 ' - '2013-01-03 00:00:00+01:00') in result + with option_context('display.max_columns', 20): + result = str(self.tzframe) + assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 ' + '2013-01-01 00:00:00+01:00') in result + assert ('1 2013-01-02 ' + 'NaT NaT') in result + assert ('2 2013-01-03 2013-01-03 00:00:00-05:00 ' + '2013-01-03 00:00:00+01:00') in result diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 3e5aae10618e9..8fc6fef11798a 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -172,8 +172,8 @@ def test_repr_column_name_unicode_truncation_bug(self): 'the CSV file externally. I want to Call' ' the File through the code..')}) - result = repr(df) - assert 'StringCol' in result + with option_context('display.max_columns', 20): + assert 'StringCol' in repr(df) def test_latex_repr(self): result = r"""\begin{tabular}{llll} diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 6c3b75cdfa6df..ab9f61cffc16b 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -961,7 +961,8 @@ def test_pprint_thing(self): def test_wide_repr(self): with option_context('mode.sim_interactive', True, - 'display.show_dimensions', True): + 'display.show_dimensions', True, + 'display.max_columns', 20): max_cols = get_option('display.max_columns') df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1))) set_option('display.expand_frame_repr', False) @@ -979,7 +980,8 @@ def test_wide_repr(self): reset_option('display.expand_frame_repr') def test_wide_repr_wide_columns(self): - with option_context('mode.sim_interactive', True): + with option_context('mode.sim_interactive', True, + 'display.max_columns', 20): df = DataFrame(np.random.randn(5, 3), columns=['a' * 90, 'b' * 90, 'c' * 90]) rep_str = repr(df) @@ -987,7 +989,8 @@ def test_wide_repr_wide_columns(self): assert len(rep_str.splitlines()) == 20 def test_wide_repr_named(self): - with option_context('mode.sim_interactive', True): + with option_context('mode.sim_interactive', True, + 'display.max_columns', 20): max_cols = get_option('display.max_columns') df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1))) df.index.name = 'DataFrame Index' @@ -1008,7 +1011,8 @@ def test_wide_repr_named(self): reset_option('display.expand_frame_repr') def test_wide_repr_multiindex(self): - with option_context('mode.sim_interactive', True): + with option_context('mode.sim_interactive', True, + 'display.max_columns', 20): midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10))) max_cols = get_option('display.max_columns') df = DataFrame(tm.rands_array(25, size=(10, 
max_cols - 1)), @@ -1030,7 +1034,8 @@ def test_wide_repr_multiindex(self): reset_option('display.expand_frame_repr') def test_wide_repr_multiindex_cols(self): - with option_context('mode.sim_interactive', True): + with option_context('mode.sim_interactive', True, + 'display.max_columns', 20): max_cols = get_option('display.max_columns') midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10))) mcols = MultiIndex.from_arrays( @@ -1044,15 +1049,16 @@ def test_wide_repr_multiindex_cols(self): wide_repr = repr(df) assert rep_str != wide_repr - with option_context('display.width', 150): + with option_context('display.width', 150, 'display.max_columns', 20): wider_repr = repr(df) assert len(wider_repr) < len(wide_repr) reset_option('display.expand_frame_repr') def test_wide_repr_unicode(self): - with option_context('mode.sim_interactive', True): - max_cols = get_option('display.max_columns') + with option_context('mode.sim_interactive', True, + 'display.max_columns', 20): + max_cols = 20 df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1))) set_option('display.expand_frame_repr', False) rep_str = repr(df) @@ -1442,17 +1448,17 @@ def test_repr_html_mathjax(self): assert 'tex2jax_ignore' in df._repr_html_() def test_repr_html_wide(self): - max_cols = get_option('display.max_columns') + max_cols = 20 df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1))) - reg_repr = df._repr_html_() - assert "..." not in reg_repr + with option_context('display.max_rows', 60, 'display.max_columns', 20): + assert "..." not in df._repr_html_() wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1))) - wide_repr = wide_df._repr_html_() - assert "..." in wide_repr + with option_context('display.max_rows', 60, 'display.max_columns', 20): + assert "..." in wide_df._repr_html_() def test_repr_html_wide_multiindex_cols(self): - max_cols = get_option('display.max_columns') + max_cols = 20 mcols = MultiIndex.from_product([np.arange(max_cols // 2), ['foo', 'bar']], @@ -1467,8 +1473,8 @@ def test_repr_html_wide_multiindex_cols(self): names=['first', 'second']) df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols) - wide_repr = df._repr_html_() - assert '...' in wide_repr + with option_context('display.max_rows', 60, 'display.max_columns', 20): + assert '...' in df._repr_html_() def test_repr_html_long(self): with option_context('display.max_rows', 60): @@ -1512,14 +1518,15 @@ def test_repr_html_float(self): assert u('2 columns') in long_repr def test_repr_html_long_multiindex(self): - max_rows = get_option('display.max_rows') + max_rows = 60 max_L1 = max_rows // 2 tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar'])) idx = MultiIndex.from_tuples(tuples, names=['first', 'second']) df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=['A', 'B']) - reg_repr = df._repr_html_() + with option_context('display.max_rows', 60, 'display.max_columns', 20): + reg_repr = df._repr_html_() assert '...' not in reg_repr tuples = list(itertools.product(np.arange(max_L1 + 1), ['foo', 'bar'])) @@ -1530,20 +1537,22 @@ def test_repr_html_long_multiindex(self): assert '...' in long_repr def test_repr_html_long_and_wide(self): - max_cols = get_option('display.max_columns') - max_rows = get_option('display.max_rows') + max_cols = 20 + max_rows = 60 h, w = max_rows - 1, max_cols - 1 df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - assert '...' not in df._repr_html_() + with option_context('display.max_rows', 60, 'display.max_columns', 20): + assert '...' 
not in df._repr_html_() h, w = max_rows + 1, max_cols + 1 df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) - assert '...' in df._repr_html_() + with option_context('display.max_rows', 60, 'display.max_columns', 20): + assert '...' in df._repr_html_() def test_info_repr(self): - max_rows = get_option('display.max_rows') - max_cols = get_option('display.max_columns') + max_rows = 60 + max_cols = 20 # Long h, w = max_rows + 1, max_cols - 1 df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) @@ -1555,7 +1564,8 @@ def test_info_repr(self): h, w = max_rows - 1, max_cols + 1 df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) assert has_horizontally_truncated_repr(df) - with option_context('display.large_repr', 'info'): + with option_context('display.large_repr', 'info', + 'display.max_columns', max_cols): assert has_info_repr(df) def test_info_repr_max_cols(self): @@ -1575,8 +1585,8 @@ def test_info_repr_max_cols(self): # fmt.set_option('display.max_info_columns', 4) # exceeded def test_info_repr_html(self): - max_rows = get_option('display.max_rows') - max_cols = get_option('display.max_columns') + max_rows = 60 + max_cols = 20 # Long h, w = max_rows + 1, max_cols - 1 df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) @@ -1588,7 +1598,8 @@ def test_info_repr_html(self): h, w = max_rows - 1, max_cols + 1 df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)}) assert '<class' not in df._repr_html_() - with option_context('display.large_repr', 'info'): + with option_context('display.large_repr', 'info', + 'display.max_columns', max_cols): assert '&lt;class' in df._repr_html_() def test_fake_qtconsole_repr_html(self):
Update: Remove everything related to `max_rows` and only deal with `max_columns` in this PR. Changed `max_columns` to `0` (automatically adapt the number of displayed columns to the actual terminal width) when run in a terminal ~~and `max_rows` to `20` (because I'd like to see the "whole" data frame at a glance like in R's tibble)~~. - [x] closes #16579 - [x] tests added / passed - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff`` - [x] whatsnew entry
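For reference, checking and overriding the new default from user code (a usage sketch; the auto-detected value `0` means "fit the current terminal width"):

~~~python
import pandas as pd

pd.get_option('display.max_columns')      # 0 in a plain terminal, 20 under Jupyter
pd.set_option('display.max_columns', 20)  # opt back into the old fixed default
~~~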
https://api.github.com/repos/pandas-dev/pandas/pulls/17023
2017-07-19T11:04:00Z
2018-03-28T07:51:23Z
2018-03-28T07:51:23Z
2018-03-28T08:30:45Z
Define DataFrame plot methods in DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9a79ca1d4eab1..0cd1c0e05ce21 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5651,12 +5651,21 @@ def isin(self, values): values).reshape(self.shape), self.index, self.columns) + # ---------------------------------------------------------------------- + # Add plotting methods to DataFrame + plot = base.AccessorProperty(gfx.FramePlotMethods, gfx.FramePlotMethods) + hist = gfx.hist_frame + boxplot = gfx.boxplot_frame + DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}) DataFrame._add_numeric_operations() DataFrame._add_series_or_dataframe_operations() +ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) +ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) + _EMPTY_SERIES = Series([]) @@ -6002,28 +6011,3 @@ def _from_nested_dict(data): def _put_str(s, space): return ('%s' % s)[:space].ljust(space) - - -# ---------------------------------------------------------------------- -# Add plotting methods to DataFrame -DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods, - gfx.FramePlotMethods) -DataFrame.hist = gfx.hist_frame - - -@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) -def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0, - grid=True, figsize=None, layout=None, return_type=None, **kwds): - from pandas.plotting._core import boxplot - import matplotlib.pyplot as plt - ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, - grid=grid, rot=rot, figsize=figsize, layout=layout, - return_type=return_type, **kwds) - plt.draw_if_interactive() - return ax - - -DataFrame.boxplot = boxplot - -ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) -ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index a623288efc1ae..de96d17da2a9f 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2034,6 +2034,18 @@ def plot_group(keys, values, ax): return result +@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) +def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0, + grid=True, figsize=None, layout=None, + return_type=None, **kwds): + import matplotlib.pyplot as plt + ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, + grid=grid, rot=rot, figsize=figsize, layout=layout, + return_type=return_type, **kwds) + plt.draw_if_interactive() + return ax + + def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, **kwargs): """
instead of tacking them on at the bottom of the module. This is a follow-up to <s>#16391</s>#16931 with a smaller scope, avoiding the as-yet-unresolved circular import problem. - [x] passes ``git diff upstream/master -u -- "*.py" | flake8 --diff``
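The descriptor being moved into the class body works roughly like this (a simplified sketch of `pandas.core.base.AccessorProperty`, omitting details):

~~~python
class AccessorProperty(object):
    """Descriptor that builds an accessor (e.g. FramePlotMethods) lazily."""

    def __init__(self, accessor_cls, construct_accessor):
        self.accessor_cls = accessor_cls
        self.construct_accessor = construct_accessor
        self.__doc__ = accessor_cls.__doc__

    def __get__(self, instance, owner=None):
        if instance is None:
            # class-level access (e.g. DataFrame.plot) for docs/introspection
            return self.accessor_cls
        return self.construct_accessor(instance)
~~~

Because the class is passed for both arguments here (`gfx.FramePlotMethods` twice), accessing `df.plot` simply constructs `FramePlotMethods(df)`.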
https://api.github.com/repos/pandas-dev/pandas/pulls/17020
2017-07-19T07:24:39Z
2017-07-20T10:36:36Z
2017-07-20T10:36:35Z
2017-07-20T18:16:17Z
DOC: Fixing EX01 - Added examples
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 4915997277006..4962469dbf429 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -82,14 +82,8 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX01 --ignore_functions \ pandas.Series.backfill \ pandas.Series.pad \ - pandas.Series.str.normalize \ - pandas.Series.str.rfind \ - pandas.Series.str.rindex \ - pandas.Series.str.translate \ pandas.Series.sparse \ pandas.DataFrame.sparse \ - pandas.Series.cat.categories \ - pandas.Series.cat.ordered \ pandas.Series.cat.codes \ pandas.Series.cat.reorder_categories \ pandas.Series.cat.set_categories \ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index e9f8eb9c3f23f..4cc7236ff1083 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -748,6 +748,17 @@ def categories(self) -> Index: remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category") + >>> ser.cat.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> raw_cat = pd.Categorical(["a", "b", "c", "a"], categories=["b", "c", "d"],) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.categories + Index(['b', 'c', 'd'], dtype='object') """ return self.dtype.categories @@ -755,6 +766,17 @@ def categories(self) -> Index: def ordered(self) -> Ordered: """ Whether the categories have an ordered relationship. + + Examples + -------- + >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category") + >>> ser.cat.ordered + False + + >>> raw_cat = pd.Categorical(["a", "b", "c", "a"], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.ordered + True """ return self.dtype.ordered diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index e8769370ca88d..08c2736bf9816 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -2261,6 +2261,15 @@ def translate(self, table): Returns ------- Series or Index + + Examples + -------- + >>> ser = pd.Series(["El niño", "Françoise"]) + >>> mytable = str.maketrans({'ñ': 'n', 'ç': 'c'}) + >>> ser.str.translate(mytable) + 0 El nino + 1 Francoise + dtype: object """ result = self._data.array._str_translate(table) return self._wrap_result(result) @@ -2800,12 +2809,23 @@ def extractall(self, pat, flags: int = 0) -> DataFrame: Examples -------- + For Series.str.find: + >>> ser = pd.Series(["cow_", "duck_", "do_ve"]) >>> ser.str.find("_") 0 3 1 4 2 2 dtype: int64 + + For Series.str.rfind: + + >>> ser = pd.Series(["_cow_", "duck_", "do_v_e"]) + >>> ser.str.rfind("_") + 0 4 + 1 4 + 2 4 + dtype: int64 """ @Appender( @@ -2858,6 +2878,13 @@ def normalize(self, form): Returns ------- Series/Index of objects + + Examples + -------- + >>> ser = pd.Series(['ñ']) + >>> ser.str.normalize('NFC') == ser.str.normalize('NFD') + 0 False + dtype: bool """ result = self._data.array._str_normalize(form) return self._wrap_result(result) @@ -2892,12 +2919,23 @@ def normalize(self, form): Examples -------- + For Series.str.index: + >>> ser = pd.Series(["horse", "eagle", "donkey"]) >>> ser.str.index("e") 0 4 1 0 2 4 dtype: int64 + + For Series.str.rindex: + + >>> ser = pd.Series(["Deer", "eagle", "Sheep"]) + >>> ser.str.rindex("e") + 0 2 + 1 4 + 2 3 + dtype: int64 """ @Appender(
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Towards https://github.com/pandas-dev/pandas/issues/37875
https://api.github.com/repos/pandas-dev/pandas/pulls/53502
2023-06-02T16:58:48Z
2023-06-02T18:48:23Z
2023-06-02T18:48:23Z
2023-06-03T15:40:41Z